hexsha
stringlengths 40
40
| size
int64 4
1.02M
| ext
stringclasses 8
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
209
| max_stars_repo_name
stringlengths 5
121
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
209
| max_issues_repo_name
stringlengths 5
121
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
209
| max_forks_repo_name
stringlengths 5
121
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 4
1.02M
| avg_line_length
float64 1.07
66.1k
| max_line_length
int64 4
266k
| alphanum_fraction
float64 0.01
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5971d820cd9591fb3ba07ca6dc3d006b8c920e17
| 1,906
|
py
|
Python
|
pavdhutils/tokenize.py
|
vierth/pavut
|
0c9c5032e80c7a7184fea72d9a4f4e46fb47c83d
|
[
"Apache-2.0"
] | null | null | null |
pavdhutils/tokenize.py
|
vierth/pavut
|
0c9c5032e80c7a7184fea72d9a4f4e46fb47c83d
|
[
"Apache-2.0"
] | null | null | null |
pavdhutils/tokenize.py
|
vierth/pavut
|
0c9c5032e80c7a7184fea72d9a4f4e46fb47c83d
|
[
"Apache-2.0"
] | null | null | null |
"""Classes in this file tokenize input strings"""
from pavdhutils.errors import CustomException
import sys
class Tokenize:
    """Tokenize an input string.

    Parameters
    ----------
    text : str
        The text to tokenize.
    method : str
        Tokenization method: "char", "word" or "regex". Only "char" is
        implemented; any value outside the supported set raises
        CustomException.
    lang : str
        Language hint ("zh" or "en"); currently unused by the "char" method.
    regex : str
        Pattern reserved for the (unimplemented) "regex" method.
    removewhitespace : bool
        If True (default), spaces are removed before character tokenization.
    """

    def __init__(self, text, method="char", lang="zh", regex=r"\w+",
                 removewhitespace=True):
        self.methods = ["char", "word", "regex"]
        self.mstring = ", ".join(self.methods[:-1]) + f" or {self.methods[-1]}"
        self.languages = ["zh", "en"]
        # Check to see if specified method is available
        if method not in self.methods:
            raise CustomException(f"{method} is not a valid option. Try {self.mstring}")
        # if character tokenization, turn into list
        if method == "char":
            # remove whitespace if desired
            if removewhitespace:
                self.tokens_ = list(text.replace(" ", ""))
            else:
                self.tokens_ = list(text)
        # NOTE: "word" tokenization (zh/en) is not implemented yet.

    def ngrams(self, n=1, gram_div=""):
        """Build n-grams over the token list, joining each gram with gram_div.

        Stores the result in self.ngrams_ (no return value).
        """
        self.ngrams_ = [gram_div.join(self.tokens_[i:i + n])
                        for i in range(len(self.tokens_) - (n - 1))]

    def get_tokens(self):
        """Return the token list."""
        return self.tokens_

    def get_ngrams(self):
        """Return the most recently computed n-gram list.

        Raises AttributeError if ngrams() has never been called.
        """
        return self.ngrams_

    def get_ngrams_string(self, div=" "):
        """Return the n-grams joined into one string separated by div."""
        return div.join(self.ngrams_)

    def get_tokenized(self, version="tokens"):
        """Return a space-joined string of "tokens" or "ngrams".

        If n-grams were requested but never computed, fall back to
        1-grams, as the printed message states.
        """
        if version == "tokens":
            return " ".join(self.tokens_)
        elif version == "ngrams":
            try:
                return " ".join(self.ngrams_)
            except AttributeError:
                print("No ngrams found yet, returning one grams by default")
                # BUG FIX: this previously called self.ngrams(2), producing
                # bigrams despite announcing "one grams by default".
                self.ngrams(1)
                return " ".join(self.ngrams_)
def chineseTokenize(text):
    """Placeholder for Chinese word tokenization; not implemented yet."""
    pass
def englishTokenize(text):
    """Placeholder for English word tokenization; not implemented yet."""
    pass
| 32.305085
| 88
| 0.544071
|
5fbae803f2bec7cca0068b90ba74118206bfda1e
| 10,997
|
py
|
Python
|
proyecto/flask/Lib/site-packages/whoosh/lang/snowball/spanish.py
|
grupoprog3/proyecto_final
|
56fa4d33852a347476e721bf02bb3bc53a7b7a70
|
[
"Apache-2.0"
] | 3
|
2017-04-27T09:37:25.000Z
|
2017-08-12T16:25:22.000Z
|
proyecto/flask/Lib/site-packages/whoosh/lang/snowball/spanish.py
|
grupoprog3/proyecto_final
|
56fa4d33852a347476e721bf02bb3bc53a7b7a70
|
[
"Apache-2.0"
] | 27
|
2017-04-01T15:06:36.000Z
|
2021-02-08T20:19:58.000Z
|
proyecto/flask/Lib/site-packages/whoosh/lang/snowball/spanish.py
|
grupoprog3/proyecto_final
|
56fa4d33852a347476e721bf02bb3bc53a7b7a70
|
[
"Apache-2.0"
] | 1
|
2021-05-10T08:41:12.000Z
|
2021-05-10T08:41:12.000Z
|
from .bases import _StandardStemmer
from whoosh.compat import u
class SpanishStemmer(_StandardStemmer):
    """
    The Spanish Snowball stemmer.

    :cvar __vowels: The Spanish vowels.
    :type __vowels: unicode
    :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
    :type __step0_suffixes: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm.
    :type __step2a_suffixes: tuple
    :cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm.
    :type __step2b_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Spanish
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/spanish/stemmer.html
    """

    # Plain vowels plus the acute-accented vowels and u-diaeresis
    # (\xE1 \xE9 \xED \xF3 \xFA \xFC).
    __vowels = u("aeiou\xE1\xE9\xED\xF3\xFA\xFC")
    # Attached-pronoun endings removed in step 0 (ordered longest first so
    # the first endswith() match is the longest suffix).
    __step0_suffixes = ("selas", "selos", "sela", "selo", "las",
                        "les", "los", "nos", "me", "se", "la", "le",
                        "lo")
    __step1_suffixes = ('amientos', 'imientos', 'amiento', 'imiento',
                        'aciones', 'uciones', 'adoras', 'adores',
                        'ancias', u('log\xEDas'), 'encias', 'amente',
                        'idades', 'anzas', 'ismos', 'ables', 'ibles',
                        'istas', 'adora', u('aci\xF3n'), 'antes',
                        'ancia', u('log\xEDa'), u('uci\xf3n'), 'encia',
                        'mente', 'anza', 'icos', 'icas', 'ismo',
                        'able', 'ible', 'ista', 'osos', 'osas',
                        'ador', 'ante', 'idad', 'ivas', 'ivos',
                        'ico',
                        'ica', 'oso', 'osa', 'iva', 'ivo')
    __step2a_suffixes = ('yeron', 'yendo', 'yamos', 'yais', 'yan',
                         'yen', 'yas', 'yes', 'ya', 'ye', 'yo',
                         u('y\xF3'))
    __step2b_suffixes = (u('ar\xEDamos'), u('er\xEDamos'), u('ir\xEDamos'),
                         u('i\xE9ramos'), u('i\xE9semos'), u('ar\xEDais'),
                         'aremos', u('er\xEDais'), 'eremos',
                         u('ir\xEDais'), 'iremos', 'ierais', 'ieseis',
                         'asteis', 'isteis', u('\xE1bamos'),
                         u('\xE1ramos'), u('\xE1semos'), u('ar\xEDan'),
                         u('ar\xEDas'), u('ar\xE9is'), u('er\xEDan'),
                         u('er\xEDas'), u('er\xE9is'), u('ir\xEDan'),
                         u('ir\xEDas'), u('ir\xE9is'),
                         'ieran', 'iesen', 'ieron', 'iendo', 'ieras',
                         'ieses', 'abais', 'arais', 'aseis',
                         u('\xE9amos'), u('ar\xE1n'), u('ar\xE1s'),
                         u('ar\xEDa'), u('er\xE1n'), u('er\xE1s'),
                         u('er\xEDa'), u('ir\xE1n'), u('ir\xE1s'),
                         u('ir\xEDa'), 'iera', 'iese', 'aste', 'iste',
                         'aban', 'aran', 'asen', 'aron', 'ando',
                         'abas', 'adas', 'idas', 'aras', 'ases',
                         u('\xEDais'), 'ados', 'idos', 'amos', 'imos',
                         'emos', u('ar\xE1'), u('ar\xE9'), u('er\xE1'),
                         u('er\xE9'), u('ir\xE1'), u('ir\xE9'), 'aba',
                         'ada', 'ida', 'ara', 'ase', u('\xEDan'),
                         'ado', 'ido', u('\xEDas'), u('\xE1is'),
                         u('\xE9is'), u('\xEDa'), 'ad', 'ed', 'id',
                         'an', u('i\xF3'), 'ar', 'er', 'ir', 'as',
                         u('\xEDs'), 'en', 'es')
    __step3_suffixes = ("os", "a", "e", "o", u("\xE1"),
                        u("\xE9"), u("\xED"), u("\xF3"))

    def stem(self, word):
        """
        Stem a Spanish word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        step1_success = False
        # R1/R2 and RV are the standard Snowball word regions, computed by
        # the _StandardStemmer helpers (defined in .bases). All region
        # strings are truncated in lockstep with `word` below so suffix
        # tests against r1/r2/rv stay aligned with the word.
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)

        # STEP 0: Attached pronoun
        for suffix in self.__step0_suffixes:
            if word.endswith(suffix):
                if rv.endswith(suffix):
                    # Pronoun may only be dropped after a gerund or
                    # infinitive; the accented forms also lose their accent.
                    if rv[:-len(suffix)].endswith((u("i\xE9ndo"),
                                                   u("\xE1ndo"),
                                                   u("\xE1r"), u("\xE9r"),
                                                   u("\xEDr"))):
                        word = (word[:-len(suffix)].replace(u("\xE1"), "a")
                                                   .replace(u("\xE9"), "e")
                                                   .replace(u("\xED"), "i"))
                        r1 = (r1[:-len(suffix)].replace(u("\xE1"), "a")
                                               .replace(u("\xE9"), "e")
                                               .replace(u("\xED"), "i"))
                        r2 = (r2[:-len(suffix)].replace(u("\xE1"), "a")
                                               .replace(u("\xE9"), "e")
                                               .replace(u("\xED"), "i"))
                        rv = (rv[:-len(suffix)].replace(u("\xE1"), "a")
                                               .replace(u("\xE9"), "e")
                                               .replace(u("\xED"), "i"))
                    elif rv[:-len(suffix)].endswith(("ando", "iendo",
                                                     "ar", "er", "ir")):
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                    elif (rv[:-len(suffix)].endswith("yendo") and
                          word[:-len(suffix)].endswith("uyendo")):
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                # Only the longest matching suffix is considered.
                break

        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if suffix == "amente" and r1.endswith(suffix):
                    step1_success = True
                    word = word[:-6]
                    r2 = r2[:-6]
                    rv = rv[:-6]
                    if r2.endswith("iv"):
                        word = word[:-2]
                        r2 = r2[:-2]
                        rv = rv[:-2]
                        if r2.endswith("at"):
                            word = word[:-2]
                            rv = rv[:-2]
                    elif r2.endswith(("os", "ic", "ad")):
                        word = word[:-2]
                        rv = rv[:-2]
                elif r2.endswith(suffix):
                    step1_success = True
                    if suffix in ("adora", "ador", u("aci\xF3n"), "adoras",
                                  "adores", "aciones", "ante", "antes",
                                  "ancia", "ancias"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        if r2.endswith("ic"):
                            word = word[:-2]
                            rv = rv[:-2]
                    elif suffix in (u("log\xEDa"), u("log\xEDas")):
                        word = word.replace(suffix, "log")
                        rv = rv.replace(suffix, "log")
                    elif suffix in (u("uci\xF3n"), "uciones"):
                        word = word.replace(suffix, "u")
                        rv = rv.replace(suffix, "u")
                    elif suffix in ("encia", "encias"):
                        word = word.replace(suffix, "ente")
                        rv = rv.replace(suffix, "ente")
                    elif suffix == "mente":
                        word = word[:-5]
                        r2 = r2[:-5]
                        rv = rv[:-5]
                        if r2.endswith(("ante", "able", "ible")):
                            word = word[:-4]
                            rv = rv[:-4]
                    elif suffix in ("idad", "idades"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        for pre_suff in ("abil", "ic", "iv"):
                            if r2.endswith(pre_suff):
                                word = word[:-len(pre_suff)]
                                rv = rv[:-len(pre_suff)]
                    elif suffix in ("ivo", "iva", "ivos", "ivas"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        if r2.endswith("at"):
                            word = word[:-2]
                            rv = rv[:-2]
                    else:
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                break

        # STEP 2a: Verb suffixes beginning 'y'
        # (Steps 2a/2b only run when step 1 removed nothing.)
        if not step1_success:
            for suffix in self.__step2a_suffixes:
                # The 'y' suffix is only removed when preceded by 'u'
                # (e.g. "construyendo").
                if (rv.endswith(suffix) and
                    word[-len(suffix) - 1:-len(suffix)] == "u"):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break

            # STEP 2b: Other verb suffixes
            for suffix in self.__step2b_suffixes:
                if rv.endswith(suffix):
                    if suffix in ("en", "es", u("\xE9is"), "emos"):
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        # Drop a trailing 'u' after 'g' ("gu" -> "g").
                        if word.endswith("gu"):
                            word = word[:-1]
                        if rv.endswith("gu"):
                            rv = rv[:-1]
                    else:
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                    break

        # STEP 3: Residual suffix
        for suffix in self.__step3_suffixes:
            if rv.endswith(suffix):
                if suffix in ("e", u("\xE9")):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    if len(word) >= 2 and word[-2:] == "gu" and rv[-1] == "u":
                        word = word[:-1]
                else:
                    word = word[:-len(suffix)]
                break

        # Finally, remove all remaining acute accents.
        word = (word.replace(u("\xE1"), "a").replace(u("\xE9"), "e")
                    .replace(u("\xED"), "i").replace(u("\xF3"), "o")
                    .replace(u("\xFA"), "u"))
        return word
| 44.164659
| 80
| 0.371192
|
66e2360d4f2f97d76b48c75a61bac1ea7a76c976
| 9,706
|
py
|
Python
|
models/swae_plus.py
|
Meso272/PyTorch-VAE
|
b1f80082a92c706969a63162ae083b9f7d15d9aa
|
[
"Apache-2.0"
] | null | null | null |
models/swae_plus.py
|
Meso272/PyTorch-VAE
|
b1f80082a92c706969a63162ae083b9f7d15d9aa
|
[
"Apache-2.0"
] | null | null | null |
models/swae_plus.py
|
Meso272/PyTorch-VAE
|
b1f80082a92c706969a63162ae083b9f7d15d9aa
|
[
"Apache-2.0"
] | 1
|
2022-02-11T23:22:41.000Z
|
2022-02-11T23:22:41.000Z
|
import torch
from models import BaseVAE
from torch import nn
from torch.nn import functional as F
from torch import distributions as dist
from .types_ import *
class SWAE_PLUS(BaseVAE):
    """Sliced-Wasserstein Autoencoder variant with extra conv layers.

    Compared to a plain SWAE, every encoder/decoder stage carries an extra
    stride-1 convolution ("added layer" comments below) and the decoder
    output is refined by a small conv stack (final_layer_3).

    NOTE(review): decode() reshapes the bottleneck to (-1, 512, 2, 2), so
    the model assumes hidden_dims[-1] == 512 and an input resolution that
    yields a 2x2 encoder feature map (presumably 64x64) -- confirm before
    changing hidden_dims or the input size.
    """
    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 hidden_dims: List = None,
                 reg_weight: int = 100,
                 wasserstein_deg: float= 2.,
                 num_projections: int = 50,
                 projection_dist: str = 'normal',
                 **kwargs) -> None:
        # in_channels: channels of the input image; latent_dim: size of z;
        # reg_weight: weight of the SWD regularizer; wasserstein_deg: p of
        # the p-Wasserstein distance; projection_dist: 'normal' or 'cauchy'.
        super(SWAE_PLUS, self).__init__()
        # Original input channel count is saved here; the local
        # `in_channels` is reused below as a running channel counter.
        self.in_channels=in_channels
        self.latent_dim = latent_dim
        self.reg_weight = reg_weight
        self.p = wasserstein_deg
        self.num_projections = num_projections
        self.proj_dist = projection_dist
        modules = []
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]
        # Build Encoder: each stage is a stride-1 conv followed by a
        # stride-2 downsampling conv.
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=in_channels,
                              kernel_size= 3, stride= 1, padding = 1),###added layer
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size= 3, stride= 2, padding = 1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim
        self.encoder = nn.Sequential(*modules)
        # The *4 assumes a 2x2 spatial map after the encoder (see class note).
        self.fc_z = nn.Linear(hidden_dims[-1]*4, latent_dim)
        # Build Decoder
        modules = []
        self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)
        # NOTE(review): reverse() mutates the caller's hidden_dims list in
        # place -- callers passing a shared list should pass a copy.
        hidden_dims.reverse()
        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i],
                                       kernel_size=3,
                                       stride = 1,
                                       padding=1,
                                       output_padding=0),##added layer
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i + 1],
                                       kernel_size=3,
                                       stride = 2,
                                       padding=1,
                                       output_padding=1),
                    nn.BatchNorm2d(hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )
        self.decoder = nn.Sequential(*modules)
        # Last upsampling stage (keeps channel count).
        self.final_layer_1 = nn.Sequential(
                            nn.ConvTranspose2d(hidden_dims[-1],
                                               hidden_dims[-1],
                                               kernel_size=3,
                                               stride = 1,
                                               padding=1,
                                               output_padding=0),##added layer
                            nn.ConvTranspose2d(hidden_dims[-1],
                                               hidden_dims[-1],
                                               kernel_size=3,
                                               stride=2,
                                               padding=1,
                                               output_padding=1),
                            nn.BatchNorm2d(hidden_dims[-1]),
                            nn.LeakyReLU(),
                            )
        # Projection back to image channels with tanh output range [-1, 1].
        self.final_layer_2=nn.Sequential(nn.Conv2d(hidden_dims[-1], out_channels= self.in_channels,
                                      kernel_size= 3, padding= 1),
                            nn.Tanh())
        modules=[]
        # Refinement head ("doubleout" variant): operates on the image-space
        # output of final_layer_2.
        modules.append(
            nn.Sequential(
                nn.Conv2d(self.in_channels, out_channels= 64,
                          kernel_size= 3, padding= 1),
                nn.LeakyReLU())
        )
        #singleout
        '''
        modules.append(
            nn.Sequential(
                nn.Conv2d(hidden_dims[-1], out_channels= 64,
                          kernel_size= 3, padding= 1),
                nn.LeakyReLU())
        )
        '''
        # Five intermediate 64-channel conv blocks.
        for i in range(5):
            modules.append(
                nn.Sequential(
                    nn.Conv2d(64, out_channels= 64,
                              kernel_size= 3, padding= 1),
                    nn.BatchNorm2d(64),
                    nn.LeakyReLU())
            )
        modules.append(
            nn.Sequential(
                nn.Conv2d(64, out_channels= self.in_channels,
                          kernel_size= 3, padding= 1),
                nn.Tanh())
        )
        self.final_layer_3=nn.Sequential(*modules)
    def encode(self, input: Tensor) -> Tensor:
        """
        Encodes the input by passing through the encoder network
        and returns the latent codes.
        :param input: (Tensor) Input tensor to encoder [N x C x H x W]
        :return: (Tensor) List of latent codes
        """
        result = self.encoder(input)
        result = torch.flatten(result, start_dim=1)
        # Split the result into mu and var components
        # of the latent Gaussian distribution
        z = self.fc_z(result)
        return z
    def decode(self, z: Tensor) -> Tensor:
        """Map latent codes z [N x D] back to image space [N x C x H x W]."""
        result = self.decoder_input(z)
        # Hard-coded bottleneck shape: 512 channels on a 2x2 map.
        result = result.view(-1, 512, 2, 2)
        result = self.decoder(result)
        result = self.final_layer_1(result)
        result= self.final_layer_2(result) ##doubleout
        result= self.final_layer_3(result)
        return result
    def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
        """Return [reconstruction, input, z] for loss_function()."""
        z = self.encode(input)
        return [self.decode(z), input, z]
    def get_features(self, input: Tensor, **kwargs)-> Tensor:
        """Return the pre-final_layer_2 feature map for the given input."""
        z=self.encode(input)
        result = self.decoder_input(z)
        result = result.view(-1, 512, 2, 2)
        result = self.decoder(result)
        result = self.final_layer_1(result)
        return result
    def loss_function(self,
                      *args,
                      **kwargs) -> dict:
        """Compute L2 + L1 reconstruction loss plus the SWD regularizer.

        args is expected to be the output of forward():
        (reconstruction, input, z).
        """
        recons = args[0]
        input = args[1]
        z = args[2]
        batch_size = input.size(0)
        # Unbiased estimator scaling for the pairwise regularizer weight.
        bias_corr = batch_size *  (batch_size - 1)
        reg_weight = self.reg_weight / bias_corr
        recons_loss_l2 = F.mse_loss(recons, input)
        recons_loss_l1 = F.l1_loss(recons, input)
        swd_loss = self.compute_swd(z, self.p, reg_weight)
        loss = recons_loss_l2 + recons_loss_l1 + swd_loss
        return {'loss': loss, 'Reconstruction_Loss':(recons_loss_l2 + recons_loss_l1), 'SWD': swd_loss}
    def get_random_projections(self, latent_dim: int, num_samples: int) -> Tensor:
        """
        Returns random samples from latent distribution's (Gaussian)
        unit sphere for projecting the encoded samples and the
        distribution samples.

        :param latent_dim: (Int) Dimensionality of the latent space (D)
        :param num_samples: (Int) Number of samples required (S)
        :return: Random projections from the latent unit sphere
        """
        if self.proj_dist == 'normal':
            rand_samples = torch.randn(num_samples, latent_dim)
        elif self.proj_dist == 'cauchy':
            rand_samples = dist.Cauchy(torch.tensor([0.0]),
                                       torch.tensor([1.0])).sample((num_samples, latent_dim)).squeeze()
        else:
            raise ValueError('Unknown projection distribution.')
        # Normalize each row so projections lie on the unit sphere.
        rand_proj = rand_samples / rand_samples.norm(dim=1).view(-1,1)
        return rand_proj # [S x D]
    def compute_swd(self,
                    z: Tensor,
                    p: float,
                    reg_weight: float) -> Tensor:
        """
        Computes the Sliced Wasserstein Distance (SWD) - which consists of
        randomly projecting the encoded and prior vectors and computing
        their Wasserstein distance along those projections.

        :param z: Latent samples # [N  x D]
        :param p: Value for the p^th Wasserstein distance
        :param reg_weight: Scalar weight applied to the mean distance
        :return: Weighted SWD estimate (scalar tensor)
        """
        prior_z = torch.randn_like(z) # [N x D]
        device = z.device
        proj_matrix = self.get_random_projections(self.latent_dim,
                                                  num_samples=self.num_projections).transpose(0,1).to(device)
        latent_projections = z.matmul(proj_matrix) # [N x S]
        prior_projections = prior_z.matmul(proj_matrix) # [N x S]
        # The Wasserstein distance is computed by sorting the two projections
        # across the batches and computing their element-wise l2 distance
        w_dist = torch.sort(latent_projections.t(), dim=1)[0] - \
                 torch.sort(prior_projections.t(), dim=1)[0]
        w_dist = w_dist.pow(p)
        return reg_weight * w_dist.mean()
    def sample(self,
               num_samples:int,
               current_device: int, **kwargs) -> Tensor:
        """
        Samples from the latent space and return the corresponding
        image space map.
        :param num_samples: (Int) Number of samples
        :param current_device: (Int) Device to run the model
        :return: (Tensor)
        """
        z = torch.randn(num_samples,
                        self.latent_dim)
        z = z.to(current_device)
        samples = self.decode(z)
        return samples
    def generate(self, x: Tensor, **kwargs) -> Tensor:
        """
        Given an input image x, returns the reconstructed image
        :param x: (Tensor) [B x C x H x W]
        :return: (Tensor) [B x C x H x W]
        """
        return self.forward(x)[0]
| 36.216418
| 109
| 0.505358
|
b5fc8c660b494b6db3d100eaf760c0b9f50588d9
| 853
|
py
|
Python
|
hasher-matcher-actioner/hmalib/lambdas/actions/reactioner.py
|
king40or1/ThreatExchange
|
95680d1568241bf63249f91480bbf1c7bbe9b699
|
[
"BSD-3-Clause"
] | null | null | null |
hasher-matcher-actioner/hmalib/lambdas/actions/reactioner.py
|
king40or1/ThreatExchange
|
95680d1568241bf63249f91480bbf1c7bbe9b699
|
[
"BSD-3-Clause"
] | null | null | null |
hasher-matcher-actioner/hmalib/lambdas/actions/reactioner.py
|
king40or1/ThreatExchange
|
95680d1568241bf63249f91480bbf1c7bbe9b699
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
from hmalib.common.logging import get_logger
from hmalib.common.actioner_models import ReactionMessage
logger = get_logger(__name__)
def lambda_handler(event, context):
    """
    This is the main entry point for reacting to ThreatExchange. The action evaluator
    sends a reaction message by way of the reactions queue and here's where they're
    popped off and dealt with.
    """
    # TODO research max # sqs records / lambda_handler invocation
    for record in event["Records"]:
        body = json.loads(record["body"])
        reaction_message = ReactionMessage.from_aws_message(body)
        logger.info("Reacting: reaction_message = %s", reaction_message)

    return {"reaction_completed": "true"}
if __name__ == "__main__":
    # No standalone behaviour; this module is invoked as an AWS Lambda handler.
    pass
| 28.433333
| 85
| 0.712778
|
5bd9a927037cf5f79ef0a1422f11d4681aea5e1b
| 19,600
|
py
|
Python
|
Zebra_puzzle_classes.py
|
c-okelly/Data_Algo_zebra_puzzle
|
36820accc761b084ddcbe55b5feaad7e150b6920
|
[
"MIT"
] | null | null | null |
Zebra_puzzle_classes.py
|
c-okelly/Data_Algo_zebra_puzzle
|
36820accc761b084ddcbe55b5feaad7e150b6920
|
[
"MIT"
] | null | null | null |
Zebra_puzzle_classes.py
|
c-okelly/Data_Algo_zebra_puzzle
|
36820accc761b084ddcbe55b5feaad7e150b6920
|
[
"MIT"
] | null | null | null |
# Author Conor O'Kelly
# This file will contain all of the classes required for the puzzle
import copy
class Problem:
    """A constraint-satisfaction puzzle: variables, constraints and solvers.

    Solving happens in two phases: apply_reduction() propagates the
    constraint set until no domain shrinks further, then domain_splitting()
    searches by case-splitting any still-ambiguous variable.
    """

    def __init__(self, variables):
        """Build the variable set.

        `variables` is a list of lists: the last element names the variable
        types, and each other element lists the variable names of one type.
        """
        # Per-instance state. BUG FIX: these four collections used to be
        # class-level attributes, so every Problem instance shared (and
        # polluted) the same variable/constraint/solution lists.
        self.__problem_solved = False
        self.__varialbes_list = []
        self.__constraint_set = []
        self.__multi_solution_list = []
        # Generate type and domain no from data input
        self.variables = variables
        self.varialbe_types = variables[-1]
        self.no_domains = len(variables[0])
        # Call function to create varialbes from list
        self.__create_variables()

    def __repr__(self):
        # BUG FIX: previously read self.problem_solved, an attribute that
        # never exists (the real one is the name-mangled __problem_solved),
        # so repr() raised AttributeError.
        return "Main problems class. The problem is currently sloved => " + str(self.__problem_solved)

    def print_current_results(self):
        """Print every variable, blank-line separated into groups of one type."""
        count = 0
        for item in self.__varialbes_list:
            print(item)
            # Break line after each group of no_domains variables
            count += 1
            if count % (self.no_domains) == 0:
                print("")

    def print_mulit_results(self):
        """Print every solution collected by domain_splitting()."""
        print("There are a total of ", len(self.__multi_solution_list), "solutions based on the current constraint set.")
        count = 0
        for list_item in self.__multi_solution_list:
            print("Solution number ", count)
            count += 1
            self.__varialbes_list = list_item
            self.print_current_results()

    def print_final_resutls_single(self):
        """Print the current variable set if it solves the problem."""
        if self.test_if_problem_sloved() == False:
            print("No solution for the problem was found")
        else:
            print("The solution to the problem is")
            self.print_current_results()

    def __create_variables(self):
        # Create a Variable for each name in each category.
        for no_1 in range(0, len(self.varialbe_types)):
            for no_2 in range(0, len(self.variables[no_1])):
                var = Variable(self.variables[no_1][no_2], self.varialbe_types[no_1], self.no_domains)
                self.__varialbes_list.append(var)

    def get_varialbe_by_name(self, search_name):
        """Return the Variable with the given name (None if not found)."""
        for variable in self.__varialbes_list:
            if variable.name == search_name:
                return variable

    def create_constraint(self, constriant):
        """Add a constraint, validating any constant against the domain size."""
        valid_con = True
        if type(constriant) == Constraint_equality_var_cons:
            if constriant.ob_constant_1 in list(range(1, self.no_domains + 1)):
                valid_con = True
            else:
                valid_con = False
                print("The constraint ", constriant, "is not valid")
        if valid_con:
            self.__constraint_set.append(constriant)

    # Function to apply reduction
    def apply_reduction(self):
        """Repeatedly apply every constraint until no domain value is removed."""
        no_domains_reduced = True
        no_domains_left = self.count_no_domains_left()
        while no_domains_reduced == True:
            # Cycle through all constraints and execute each one
            for constraint in self.__constraint_set:
                # var equal to var constraint
                if type(constraint) == Constraint_equality_var_var:
                    constraint.is_satisfied(self.get_varialbe_by_name(constraint.ob_variable_1), self.get_varialbe_by_name(constraint.ob_variable_2))
                # var equal to constant constraint
                elif type(constraint) == Constraint_equality_var_cons:
                    constraint.is_satisfied(self.get_varialbe_by_name(constraint.ob_variable_1), constraint.ob_constant_1)
                # var plus constant constraint
                elif type(constraint) == Constraint_equality_var_plus_cons:
                    constraint.is_satisfied(self.get_varialbe_by_name(constraint.ob_variable_1), self.get_varialbe_by_name(constraint.ob_variable_2), constraint.ob_constant_1, self.no_domains)
                # all-different constraint, applied per variable type
                elif type(constraint) == Constraint_difference_var_var:
                    for var_type in self.varialbe_types:
                        # Get all variables of current type
                        list_variable_of_same_type = self.return_varialbes_by_type(var_type)
                        # Apply constraint
                        constraint.is_satisfied(list_variable_of_same_type)
            # Stop once a full pass removes no domain values.
            if (no_domains_left == self.count_no_domains_left()):
                no_domains_reduced = False
            else:
                no_domains_left = self.count_no_domains_left()
        return

    # Function to apply domain splitting - Now working for multi solution
    def domain_splitting(self, results_single=0):
        """Search for solutions by case-splitting multi-valued domains.

        results_single == 0 stops at the first solution; any other value
        keeps collecting solutions. Solutions accumulate in the private
        multi-solution list; the current variable set is left pointing at
        the first solution found (if any).
        """
        # Work list of candidate variable assignments; the loop below
        # appends new candidates while iterating, giving a breadth-first
        # exploration.
        list_of_possible_variable_combinations = [self.__varialbes_list]
        # Returned list (currently always empty; kept for interface
        # compatibility -- solutions are stored on the instance).
        multi_solution_variables = []
        # Check starting domain combination is not already invalid
        if self.test_is_any_domain_empty() == False:
            for current_var_list in list_of_possible_variable_combinations:
                # Find first variable that still has multiple domain values
                list_item = 0
                count = 0
                for var in current_var_list:
                    if var.domain.is_reduced_to_one_value() == False:
                        list_item = count
                        break
                    else:
                        count += 1
                # Split the chosen variable: try each remaining domain value
                for domain_value in current_var_list[list_item].domain.domain_values:
                    copy_of_variable_list = copy.deepcopy(current_var_list)
                    copy_of_variable_list[list_item].domain.domain_values = [domain_value]
                    # Set copy list as instance current list and propagate
                    self.__varialbes_list = copy_of_variable_list
                    self.apply_reduction()
                    # If still valid and not already queued, enqueue for
                    # further splitting
                    if self.test_is_any_domain_empty() == False and copy_of_variable_list not in list_of_possible_variable_combinations:
                        list_of_possible_variable_combinations.append(copy_of_variable_list)
                        print("Currenlt there are ", self.count_no_domains_left(), " variables")
                    # Record solution if solved and not already recorded
                    if self.test_if_problem_sloved() == True and copy_of_variable_list not in self.__multi_solution_list:
                        self.__multi_solution_list.append(copy_of_variable_list)
                    # If only single results are wanted break when first one found
                    if results_single == 0 and len(self.__multi_solution_list) >= 1:
                        break
        else:
            print("Problem in invalid at start of splitting. No solution possible")
        # Point the current variable set at the first solution, if one exists.
        # BUG FIX: narrowed a bare `except:` to the IndexError raised when no
        # solution was found.
        try:
            self.__varialbes_list = self.__multi_solution_list[0]
        except IndexError:
            pass
        return multi_solution_variables

    def set_variable_by_name(self, name, domain_value_to_set):
        """Pin the named variable's domain to a single value."""
        var = self.get_varialbe_by_name(name)
        var.domain.domain_values = [domain_value_to_set]

    def return_varialbes_by_type(self, search_type):
        """Return all variables of the given type."""
        variable_list = []
        for var in self.__varialbes_list:
            if var.type == search_type:
                variable_list.append(var)
        return variable_list

    def test_if_problem_sloved(self):
        """Return True when every variable's domain has exactly one value."""
        problem_solved = True
        for var in self.__varialbes_list:
            current_var_solved = var.domain.is_reduced_to_one_value()
            if current_var_solved == False:
                problem_solved = False
        return problem_solved

    def test_is_any_domain_empty(self):
        """Return True if any variable's domain is empty (dead end)."""
        any_domain_empty = False
        for var in self.__varialbes_list:
            if var.domain.is_empty() == True:
                any_domain_empty = True
        return any_domain_empty

    def count_no_domains_left(self):
        """Return the total number of domain values across all variables."""
        count = 0
        for var in self.__varialbes_list:
            no_values = var.domain.count()
            count += no_values
        return count
class Variable:
    """A named puzzle variable of a given type whose possible house
    positions are tracked by a Domain."""
    def __init__(self,name,type,domain_choice = 5):
        self.name = name
        self.type = type
        self.domain = Domain(domain_choice)
    def __repr__(self):
        return "Variable " +self.name+" with type "+self.type+" with current domains of " + str(self.domain.domain_values)
    def __eq__(self, other):
        # NOTE(review): this compares self.domain against `other` itself, not
        # against other.domain; when `other` is another Variable the chained
        # Domain.__eq__ then compares a list of ints to a Variable, which is
        # always False. Presumably `other.domain` was intended -- confirm
        # before changing, since Constraint_equality_var_var relies on the
        # current (always-False) behaviour to always take its intersection
        # branch. Also: defining __eq__ without __hash__ makes instances
        # unhashable.
        return self.domain == other
class Domain:
    """The set of candidate values (house positions) for one variable.

    A freshly created Domain holds the integers 1..no_values.
    """

    def __init__(self, no_values=5):
        self.domain_values = []
        self.__create(no_values)

    def __repr__(self):
        return str(self.domain_values)

    def __eq__(self, other):
        # Domains compare equal to anything that equals their value list.
        return self.domain_values == other

    def __create(self, no_values):
        # Populate with 1..no_values inclusive.
        self.domain_values = list(range(1, no_values + 1))

    def delete(self, target_value):
        """Remove target_value; raises ValueError if it is absent."""
        self.domain_values.remove(target_value)

    # Function not finished
    def split_in_half(self):
        return

    def count(self):
        """Number of candidate values remaining."""
        return len(self.domain_values)

    def is_empty(self):
        """True when no candidate values remain (dead end)."""
        return len(self.domain_values) == 0

    def is_reduced_to_one_value(self):
        """True when exactly one candidate value remains (solved)."""
        return len(self.domain_values) == 1
class Constraints:
    """Abstract base class for all puzzle constraints.

    Subclasses override is_satisfied() to prune variable domains; both
    methods here are intentionally no-op placeholders.
    """
    def __init__(self):
        return
    # Abstract method - check that the constraint is satisfied
    def is_satisfied(self):
        return
    # Abstract method - keep common domain values
    def reduction(self):
        return
class Constraint_equality_var_var(Constraints):
    """Constraint: two variables (by name) must occupy the same position."""

    def __init__(self, variable_1, variable_2):
        self.ob_variable_1 = variable_1
        self.ob_variable_2 = variable_2
        return

    # Do they have at least 1 value in common.
    def is_satisfied(self, variable_1, variable_2):
        """Intersect the two variables' domains in place; always returns True."""
        if variable_1 == variable_2:
            return True
        common = list(set(variable_1.domain.domain_values).intersection(variable_2.domain.domain_values))
        # Keep only the values present in both domains.
        variable_1.domain.domain_values = common
        variable_2.domain.domain_values = common
        return True

    def __repr__(self):
        return "Equality constraint for " + str(self.ob_variable_1) + " equal to "+ str(self.ob_variable_2)
class Constraint_equality_var_cons(Constraints):
    """Constraint: a variable must sit at one fixed position (a constant)."""

    def __init__(self, variable_1, constant_1):
        self.ob_variable_1 = variable_1
        self.ob_constant_1 = constant_1

    def is_satisfied(self, variable_1, constant_1):
        """Collapse variable_1's domain to [constant_1] when possible."""
        if constant_1 not in variable_1.domain.domain_values:
            # Constants are validated when the constraint is created, so
            # this branch should not normally be reachable; kept as a
            # safeguard.
            print(constant_1, "is not in the list")
            return
        variable_1.domain.domain_values = [constant_1]

    def __repr__(self):
        return "Equality constraint for varialbe and constant " + str(self.ob_variable_1) + " equal to "+ str(self.ob_constant_1)
class Constraint_equality_var_plus_cons(Constraints):
    """Positional constraint: variable_1 == variable_2 + constant.

    If either_side is 0 only the "+constant" direction is considered
    (variable_1 strictly to the right of variable_2); if truthy, both
    sides are allowed (next-door neighbour).

    NOTE(review): the constant is effectively hard-coded to 1 -- the
    arithmetic below uses `+ 1` / `- 1` and never reads constant_1.
    Confirm before using with any other constant value.
    """
    # Extra variable either side. If 0 only look for plus constant. If one can but plus or minus constant
    def __init__(self,variable_1,varialbe_2,constant_1,either_side):
        self.ob_variable_1 = variable_1
        self.ob_variable_2 = varialbe_2
        self.ob_constant_1 = constant_1
        self.either_side = either_side
    def is_satisfied(self,variable_1,variable_2,constant_1,no_domains):
        """Prune both variables' domains to mutually consistent positions.

        NOTE(review): results pass through list(set(...)), so the ordering
        of the surviving domain values is unspecified.
        """
        domain_range = list(range(1,no_domains+1))
        possilbe_locations = []
        if self.either_side == 0:
            # V1 has house V2 to the right
            for value in variable_2.domain.domain_values:
                possilbe_domain = value + 1
                possilbe_locations.append(possilbe_domain)
            # Set variable 1 to new possible domains
            # Only allow legal domains
            legal_domains = list(set(domain_range).intersection(possilbe_locations))
            # Only take domains already in variable domain list
            select_domains = list(set(legal_domains).intersection(variable_1.domain.domain_values))
            variable_1.domain.domain_values = select_domains
            location_for_v2 = []
            for value in variable_1.domain.domain_values:
                location_for_v2.append(value - 1)
            # Set variable 2 to new possible domains
            # Only allow legal domains
            legal_domains = list(set(domain_range).intersection(location_for_v2))
            # Only take domains already in variable domain list
            select_domains = list(set(legal_domains).intersection(variable_2.domain.domain_values))
            variable_2.domain.domain_values = select_domains
        elif self.either_side:
            right_of_house = []
            left_of_house = []
            for value in variable_2.domain.domain_values:
                # Add domain value for house to right
                possible_domain = value + 1
                right_of_house.append(possible_domain)
                # Add domain value for house to left
                possible_domain = value - 1
                left_of_house.append(possible_domain)
            # Get list of unique elements from set
            possilbe_locations = list(set(left_of_house+right_of_house))
            # Set variable 1 to new possilbe domains
            # Only allow legal domains
            legal_domains = list(set(domain_range).intersection(possilbe_locations))
            # Only take domains already in variable domain list
            select_domains = list(set(legal_domains).intersection(variable_1.domain.domain_values))
            variable_1.domain.domain_values = select_domains
            # Mirror pass: prune variable 2 against variable 1's new domain
            right_of_house_2 = []
            left_of_house_2 = []
            for value in variable_1.domain.domain_values:
                # Add domain value for house to right
                possible_domain = value + 1
                right_of_house_2.append(possible_domain)
                # Add domain value for house to left
                possible_domain = value - 1
                left_of_house_2.append(possible_domain)
            # Get list of unique elements from set
            possilbe_locations = list(set(left_of_house_2+right_of_house_2))
            # Set variable 2 to new possilbe domains
            # Only allow legal domains
            legal_domains = list(set(domain_range).intersection(possilbe_locations))
            # Only take domains already in variable domain list
            select_domains = list(set(legal_domains).intersection(variable_2.domain.domain_values))
            variable_2.domain.domain_values = select_domains
    def __repr__(self):
        return "Equality constraint for variable_1 equally to to varialbe_2 + constant => " + self.ob_variable_1 + " equal to " + self.ob_variable_2 +" plus " + str(self.ob_constant_1)
class Constraint_difference_var_var(Constraints):
    """All-different constraint over every variable of a single type.

    Once any variable's domain collapses to a single house number, that
    number is removed from the domains of all other variables of the same
    type (standard all-different propagation).
    """
    def __init__(self):
        pass

    def is_satisfied(self, list_1_variable_type):
        """Propagate the all-different rule over *list_1_variable_type*.

        For each variable whose domain is reduced to one value, delete that
        value from every other variable's domain in the list.  Mutates the
        domains in place and returns None.
        """
        for variable in list_1_variable_type:
            # Only variables already pinned to a single value can prune others.
            if variable.domain.is_reduced_to_one_value():
                solved_value = variable.domain.domain_values[0]
                for other in list_1_variable_type:
                    if variable.name != other.name:
                        try:
                            other.domain.delete(solved_value)
                        except Exception:
                            # Value was already absent from the other domain;
                            # nothing to remove.  (Was a bare `except:`, which
                            # would also have swallowed KeyboardInterrupt.)
                            pass

    def __repr__(self):
        # Typo fixes: "varialbe" -> "variable".
        return "Constraint for variable_1 not being equal to variable_2 in same type"
if __name__ == '__main__':
    # Slightly altered from standard format so the last item in the list is
    # the variable types; this keeps the format flexible for future use.
    variables = [["English", "Spaniard", "Ukrainian", "Norwegian", "Japanese"],
                 ["Red", "Green", "Ivory", "Yellow", "Blue"],
                 ["Dog", "Snails", "Fox", "Zebra", "Horse"],
                 ["Snakes and Ladders", "Cluedo", "Pictionary", "Travel The World", "Backgammon"],
                 ["Coffee", "Milk", "Orange Juice", "Tea", "Water"],
                 ["Nationality", "Color", "Pet", "Board Game", "Drink"]]
    # Create the problem
    zebra_problem = Problem(variables)
    # Puzzle clues, kept in the original registration order; each entry is
    # (constraint class, constructor arguments).
    clue_specs = [
        (Constraint_equality_var_var, ("Red", "English")),
        (Constraint_equality_var_var, ("Spaniard", "Dog")),
        (Constraint_equality_var_var, ("Coffee", "Green")),
        (Constraint_equality_var_var, ("Ukrainian", "Tea")),
        (Constraint_equality_var_plus_cons, ("Green", "Ivory", 1, 0)),
        (Constraint_equality_var_var, ("Snakes and Ladders", "Snails")),
        (Constraint_equality_var_var, ("Cluedo", "Yellow")),
        (Constraint_equality_var_cons, ("Milk", 3)),
        (Constraint_equality_var_cons, ("Norwegian", 1)),
        (Constraint_equality_var_plus_cons, ("Pictionary", "Fox", 1, 1)),
        (Constraint_equality_var_plus_cons, ("Cluedo", "Horse", 1, 1)),
        (Constraint_equality_var_var, ("Travel The World", "Orange Juice")),
        (Constraint_equality_var_var, ("Japanese", "Backgammon")),
        (Constraint_equality_var_plus_cons, ("Norwegian", "Blue", 1, 1)),
    ]
    for constraint_cls, args in clue_specs:
        zebra_problem.create_constraint(constraint_cls(*args))
    # General constraint: variables of one type cannot share a house.
    zebra_problem.create_constraint(Constraint_difference_var_var())
    # Run reduction until no more domains can be reduced.
    zebra_problem.apply_reduction()
    # Run domain splitting.  Set results_single=1 for a single result.
    zebra_problem.domain_splitting(results_single=0)
    # Print results - multiple results.
    zebra_problem.print_mulit_results()
| 38.28125
| 189
| 0.648214
|
072e6ba52f86a5cc9e5c0f75ed2a541d1b4e4e6e
| 396
|
py
|
Python
|
gram/migrations/0004_image_comments_number.py
|
marknesh/instagram-app
|
514ec6e59ad127857234245b05130431fa3262cc
|
[
"MIT"
] | null | null | null |
gram/migrations/0004_image_comments_number.py
|
marknesh/instagram-app
|
514ec6e59ad127857234245b05130431fa3262cc
|
[
"MIT"
] | 10
|
2020-03-08T21:13:29.000Z
|
2021-04-08T19:41:14.000Z
|
gram/migrations/0004_image_comments_number.py
|
marknesh/instagram-app
|
514ec6e59ad127857234245b05130431fa3262cc
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.3 on 2020-03-07 15:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration for the ``gram`` app: adds an integer
    ``comments_number`` field (PositiveIntegerField, default 0) to the
    ``image`` model."""

    dependencies = [
        ('gram', '0003_auto_20200307_1503'),
    ]

    operations = [
        migrations.AddField(
            model_name='image',
            name='comments_number',
            field=models.PositiveIntegerField(default=0),
        ),
    ]
| 20.842105
| 57
| 0.608586
|
ff84fdc248ecf3e8f27dccdd140865dd21c5390f
| 5,053
|
py
|
Python
|
src/Products/ZODBMountPoint/__init__.py
|
zms-publishing/Products.TemporaryFolder
|
56d520b9a6a26f6e230c908c6a00c5f1f871c495
|
[
"ZPL-2.1"
] | null | null | null |
src/Products/ZODBMountPoint/__init__.py
|
zms-publishing/Products.TemporaryFolder
|
56d520b9a6a26f6e230c908c6a00c5f1f871c495
|
[
"ZPL-2.1"
] | null | null | null |
src/Products/ZODBMountPoint/__init__.py
|
zms-publishing/Products.TemporaryFolder
|
56d520b9a6a26f6e230c908c6a00c5f1f871c495
|
[
"ZPL-2.1"
] | null | null | null |
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""ZODBMountPoint product.
"""
from logging import getLogger
import sys
# Module-level logger.
# NOTE(review): the logger name refers to Products.TemporaryFolder although
# this module lives in Products.ZODBMountPoint — presumably intentional
# (same distribution); confirm before changing.
LOG = getLogger('Products.TemporaryFolder')
def commit(note):
    """Commit the current transaction, attaching *note* as its description."""
    import transaction
    txn = transaction.get()
    txn.note(note)
    transaction.commit()
def install_tempfolder_and_sdc(app):
    """Mount ``/temp_folder`` on *app* (when configured) and ensure it holds
    a ``session_data`` transient object container.

    Returns the temp folder object (used by tests), or None when mounting is
    not possible or a previous mount attempt failed.
    """
    from App.config import getConfiguration
    from Acquisition import aq_base
    from .MountedObject import manage_addMounts, MountedObject
    from .MountedObject import getConfiguration as getDBTabConfiguration
    dbtab_config = getDBTabConfiguration()
    tf = getattr(app, 'temp_folder', None)
    if getattr(tf, 'meta_type', None) == MountedObject.meta_type:
        # tf is a MountPoint object. This means that the temp_folder
        # couldn't be mounted properly (the meta_type would have been
        # the meta type of the container class otherwise). The
        # MountPoint object writes a message to zLOG so we don't
        # need to.
        return
    if tf is None:
        if dbtab_config is None:
            # DefaultConfiguration, do nothing
            return
        mount_paths = [ x[0] for x in dbtab_config.listMountPaths() ]
        if not '/temp_folder' in mount_paths:
            # we won't be able to create the mount point properly
            LOG.error('Could not initialize a Temporary Folder because '
                      'a database was not configured to be mounted at '
                      'the /temp_folder mount point')
            return
        try:
            manage_addMounts(app, ('/temp_folder',))
            commit(u'Added temp_folder')
            tf = app.temp_folder
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt /
            # SystemExit; consider narrowing to Exception if this is touched.
            LOG.error('Could not add a /temp_folder mount point due to an '
                      'error.', exc_info=sys.exc_info())
            return
    # Ensure that there is a transient object container in the temp folder
    config = getConfiguration()
    if not hasattr(aq_base(tf), 'session_data'):
        try:
            from Products.Transience.Transience import TransientObjectContainer
        except ImportError:
            # Transience product unavailable; leave the folder without a SDC.
            return
        addnotify = getattr(config, 'session_add_notify_script_path', None)
        delnotify = getattr(config, 'session_delete_notify_script_path',
                            None)
        # Fallbacks used when the corresponding config settings are absent.
        default_limit = 1000
        default_period_secs = 20
        default_timeout_mins = 20
        limit = getattr(config, 'maximum_number_of_session_objects',
                        default_limit)
        timeout_spec = getattr(config, 'session_timeout_minutes',
                               default_timeout_mins)
        period_spec = getattr(config, 'session_resolution_seconds',
                              default_period_secs)
        # Notification scripts must resolve from the application root;
        # a nonexistent path is dropped with a warning instead of failing.
        if addnotify and app.unrestrictedTraverse(addnotify, None) is None:
            LOG.warn('failed to use nonexistent "%s" script as '
                     'session-add-notify-script-path' % addnotify)
            addnotify=None
        if delnotify and app.unrestrictedTraverse(delnotify, None) is None:
            LOG.warn('failed to use nonexistent "%s" script as '
                     'session-delete-notify-script-path' % delnotify)
            delnotify=None
        toc = TransientObjectContainer('session_data',
                                       'Session Data Container',
                                       timeout_mins = timeout_spec,
                                       addNotification = addnotify,
                                       delNotification = delnotify,
                                       limit=limit,
                                       period_secs = period_spec)
        tf._setObject('session_data', toc)
        # Reserve the id so the container cannot be deleted/overwritten by
        # ordinary folder operations.
        tf_reserved = getattr(tf, '_reserved_names', ())
        if 'session_data' not in tf_reserved:
            tf._reserved_names = tf_reserved + ('session_data',)
        commit(u'Added session_data to temp_folder')
    return tf # return the tempfolder object for test purposes
def initialize(context):
    """Zope product initialization hook: registers the MountedObject class
    and, when an application object is available, installs the temporary
    folder and its session data container."""
    # Configure and load databases if not already done.
    from . import MountedObject
    context.registerClass(
        MountedObject.MountedObject,
        constructors=(MountedObject.manage_addMountsForm,
                      MountedObject.manage_getMountStatus,
                      MountedObject.manage_addMounts,),
    )
    app = context.getApplication()  # new API added after Zope 4.0b4
    if app is not None:
        install_tempfolder_and_sdc(app)
| 40.103175
| 79
| 0.605977
|
3aecb42489f40d779968b2959bda855ad4ff9c39
| 2,518
|
py
|
Python
|
alpha/abstobmc.py
|
heryxpc/prototype
|
45c9c61c43115a97adc9690ad2269be9c07eb47d
|
[
"Apache-2.0"
] | null | null | null |
alpha/abstobmc.py
|
heryxpc/prototype
|
45c9c61c43115a97adc9690ad2269be9c07eb47d
|
[
"Apache-2.0"
] | null | null | null |
alpha/abstobmc.py
|
heryxpc/prototype
|
45c9c61c43115a97adc9690ad2269be9c07eb47d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
Abstract to Bounded Model Checking module that executes sequentially
software verification tools on concurrent programs.
Aim is to prove that conjunction of Abstract Interpreation and Bounded
Model Checking provide enhanced performance on software verification
on concurrent programs.
"""
from argparse import ArgumentParser
from core.toolchain import ToolChain
class AbsToBMC:
    """Orchestrates the execution of software verification tools."""

    # Class-level default; each instance overwrites it in __init__.
    arguments = []

    def __init__(self, arguments):
        """Create a new AbsToBMC object from parsed CLI arguments."""
        self.arguments = arguments

    @staticmethod
    def parseArguments():
        """Parse all the arguments from the CLI and return the namespace."""
        parser = ArgumentParser(
            description="""Executes a set of verification tools on given
            input file""")
        AbsToBMC.__addArgs(parser)
        return parser.parse_args()

    @staticmethod
    def __addArgs(args):
        """Register every CLI option this tool accepts on *args*."""
        args.add_argument("input_file",
            help="Input file on which to run verification tools")
        # Boolean on/off switches for the individual tools, in CLI order.
        toggles = [
            ("-s", "--enable-cseq", "Enables execution of CSeq sequentialization"),
            ("-g", "--enable-pagai", "Enables execution of Pagai"),
            ("-p", "--enable-pips", "Enables execution of PIPS"),
            ("-a", "--enable-annot", "Enables annotations translation to assume statements"),
            ("-b", "--enable-cbmc", "Enables execution of CBMC"),
        ]
        for short_opt, long_opt, help_text in toggles:
            args.add_argument(short_opt, long_opt, action="store_true",
                help=help_text)
        # Pass-through argument strings forwarded to each tool.
        passthrough = [
            ("--cseq-args", "Arguments to be passed to CSeq."),
            ("--pagai-args", "Arguments to be passed to Pagai"),
            ("--pips-args", "Arguments to be passed to PIPS"),
        ]
        for opt, help_text in passthrough:
            args.add_argument(opt, help=help_text)

    def orchestrate(self):
        """Run the chain of verification tools for the stored arguments."""
        ToolChain(vars(self.arguments))
def main():
    """Entry point of the AbsToBMC module: parse CLI args, run the chain."""
    parsed = AbsToBMC.parseArguments()
    if not parsed:
        return
    AbsToBMC(parsed).orchestrate()


if __name__ == "__main__":
    main()
| 27.67033
| 72
| 0.706116
|
06541b395485ff386b27b6c5d5b04cb7e083c771
| 17,388
|
py
|
Python
|
pysnmp-with-texts/HUAWEI-SLB-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/HUAWEI-SLB-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/HUAWEI-SLB-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module HUAWEI-SLB-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-SLB-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:48:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint")
hwDatacomm, = mibBuilder.importSymbols("HUAWEI-MIB", "hwDatacomm")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
MibIdentifier, TimeTicks, ModuleIdentity, Integer32, Gauge32, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, Bits, iso, Counter32, ObjectIdentity, Unsigned32, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "TimeTicks", "ModuleIdentity", "Integer32", "Gauge32", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "Bits", "iso", "Counter32", "ObjectIdentity", "Unsigned32", "Counter64")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# NOTE: machine-generated by pysmi from the HUAWEI-SLB-MIB ASN.1 source.
# Do not hand-edit the definitions; regenerate from the MIB instead.
# The `mibBuilder` name is injected by the pysnmp MIB loader at exec time.

# --- Module identity -------------------------------------------------------
hwSLBMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225))
hwSLBMIB.setRevisions(('2009-11-30 12:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: hwSLBMIB.setRevisionsDescriptions(('Initial revision.',))
if mibBuilder.loadTexts: hwSLBMIB.setLastUpdated('200911301200Z')
if mibBuilder.loadTexts: hwSLBMIB.setOrganization('Huawei Technologies Co., Ltd.')
if mibBuilder.loadTexts: hwSLBMIB.setContactInfo(' NanJing Institute,Huawei Technologies Co.,Ltd. HuiHong Mansion,No.91 BaiXia Rd. NanJing, P.R. of China Zipcode:210001 Http://www.huawei.com E-mail:support@huawei.com ')
if mibBuilder.loadTexts: hwSLBMIB.setDescription('The MIB describes Server Load Balance')
# --- Scalar objects carried as variable bindings in notifications ----------
hwSlbMibObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1))
hwSlbTrapObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 1))
hwIpAddress = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 1, 1), IpAddress()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwIpAddress.setStatus('current')
if mibBuilder.loadTexts: hwIpAddress.setDescription('The object indicates the IP address of a load balance member.')
hwMemberName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(32, 32)).setFixedLength(32)).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwMemberName.setStatus('current')
if mibBuilder.loadTexts: hwMemberName.setDescription("The object indicates the member in a load balance group, each member in the load balance group will provide same services to clients, system will select one member to serve the clients' requests.")
hwGroupName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(32, 32)).setFixedLength(32)).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwGroupName.setStatus('current')
if mibBuilder.loadTexts: hwGroupName.setDescription('The object indicates the group name of members, each group members may contain same services and typically reside in the same physical location in a data center. ')
hwPort = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwPort.setStatus('current')
if mibBuilder.loadTexts: hwPort.setDescription('The TCP or UDP port number, the range can from 1 to 65535.')
hwProbeName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(32, 32)).setFixedLength(32)).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwProbeName.setStatus('current')
if mibBuilder.loadTexts: hwProbeName.setDescription('The object indicates a probe that is used to detect failures for members in a group. When you initially configure a health probe, you should define its type and name.')
hwProbeType = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("icmp", 1), ("tcp", 2), ("udp", 3), ("http", 4)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwProbeType.setStatus('current')
if mibBuilder.loadTexts: hwProbeType.setDescription('The object indicates probe type. S9300 support ICMP, TCP, UDP, HTTP probe types.')
hwConnectionNum = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4000000))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwConnectionNum.setStatus('current')
if mibBuilder.loadTexts: hwConnectionNum.setDescription('The object indicates the connection number of a group member, usually five-tuple is used to describe a connection(source IP, detination IP, source port, destination port, protocal).')
hwMasterGroup = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(32, 32)).setFixedLength(32)).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwMasterGroup.setStatus('current')
if mibBuilder.loadTexts: hwMasterGroup.setDescription('The object indicates the master group. In order to improve reliability, S9300 can bind master group and backup group together to provide load balance service, operators can config two load balance group: one as master, the other as backup')
hwMasterGroupActiveNum = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwMasterGroupActiveNum.setStatus('current')
if mibBuilder.loadTexts: hwMasterGroupActiveNum.setDescription('The object indicates the active member number in the master server group.')
hwMasterGroupTotalNum = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwMasterGroupTotalNum.setStatus('current')
if mibBuilder.loadTexts: hwMasterGroupTotalNum.setDescription('The object indicates the total number of in-service members in the master group.')
hwBackupGroup = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 1, 11), OctetString().subtype(subtypeSpec=ValueSizeConstraint(32, 32)).setFixedLength(32)).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwBackupGroup.setStatus('current')
if mibBuilder.loadTexts: hwBackupGroup.setDescription('The object indicates the backup group. In order to improve reliability, S9300 can bind master group and backup group together to provide load balance service, operators can config two load balance group one as master, the other as backup')
hwBackupGroupActiveNum = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwBackupGroupActiveNum.setStatus('current')
if mibBuilder.loadTexts: hwBackupGroupActiveNum.setDescription('The object indicates the active member number in the backup server group.')
hwBackupGroupTotalNum = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwBackupGroupTotalNum.setStatus('current')
if mibBuilder.loadTexts: hwBackupGroupTotalNum.setDescription('The object indicates the total number of in-service members in the backup group.')
hwActionName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 1, 14), OctetString().subtype(subtypeSpec=ValueSizeConstraint(32, 32)).setFixedLength(32)).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwActionName.setStatus('current')
if mibBuilder.loadTexts: hwActionName.setDescription('The object indicates the policy action. Operators can define different actions for clients request: forward packets, drop packets, load balance, sticky load balance.')
hwCurWorkGroupName = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 1, 15), OctetString().subtype(subtypeSpec=ValueSizeConstraint(32, 32)).setFixedLength(32)).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: hwCurWorkGroupName.setStatus('current')
if mibBuilder.loadTexts: hwCurWorkGroupName.setDescription('The object indicates the current working group, the working group may be master group or backup group.')
# --- Notification (trap) definitions ---------------------------------------
hwSlbNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 2))
hwMemberInstanceStateUp = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 2, 1)).setObjects(("HUAWEI-SLB-MIB", "hwGroupName"), ("HUAWEI-SLB-MIB", "hwMemberName"), ("HUAWEI-SLB-MIB", "hwIpAddress"), ("HUAWEI-SLB-MIB", "hwPort"))
if mibBuilder.loadTexts: hwMemberInstanceStateUp.setStatus('current')
if mibBuilder.loadTexts: hwMemberInstanceStateUp.setDescription("Member state changes to up. S9300 will select the member to accept clients' new requests.")
hwMemberInstanceStateDown = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 2, 2)).setObjects(("HUAWEI-SLB-MIB", "hwGroupName"), ("HUAWEI-SLB-MIB", "hwMemberName"), ("HUAWEI-SLB-MIB", "hwIpAddress"), ("HUAWEI-SLB-MIB", "hwPort"))
if mibBuilder.loadTexts: hwMemberInstanceStateDown.setStatus('current')
if mibBuilder.loadTexts: hwMemberInstanceStateDown.setDescription("Member state changes to down. S9300 will not select the member when clients' new requests come.")
hwGroupStateSwitchover = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 2, 3)).setObjects(("HUAWEI-SLB-MIB", "hwActionName"), ("HUAWEI-SLB-MIB", "hwMasterGroup"), ("HUAWEI-SLB-MIB", "hwMasterGroupActiveNum"), ("HUAWEI-SLB-MIB", "hwMasterGroupTotalNum"), ("HUAWEI-SLB-MIB", "hwBackupGroup"), ("HUAWEI-SLB-MIB", "hwBackupGroupActiveNum"), ("HUAWEI-SLB-MIB", "hwBackupGroupTotalNum"), ("HUAWEI-SLB-MIB", "hwCurWorkGroupName"))
if mibBuilder.loadTexts: hwGroupStateSwitchover.setStatus('current')
if mibBuilder.loadTexts: hwGroupStateSwitchover.setDescription("When there are members' state change, current work group may change from master group to backup group or from backup group to master group.")
hwMemberConnectionFull = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 2, 4)).setObjects(("HUAWEI-SLB-MIB", "hwMemberName"), ("HUAWEI-SLB-MIB", "hwConnectionNum"))
if mibBuilder.loadTexts: hwMemberConnectionFull.setStatus('current')
if mibBuilder.loadTexts: hwMemberConnectionFull.setDescription('The connections for the physical member attains threshold.')
hwMemberConnectionFullRestore = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 2, 5)).setObjects(("HUAWEI-SLB-MIB", "hwMemberName"), ("HUAWEI-SLB-MIB", "hwConnectionNum"))
if mibBuilder.loadTexts: hwMemberConnectionFullRestore.setStatus('current')
if mibBuilder.loadTexts: hwMemberConnectionFullRestore.setDescription('The connections for the physical member restores to normal.')
hwMemberInstanceConnectionFull = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 2, 6)).setObjects(("HUAWEI-SLB-MIB", "hwGroupName"), ("HUAWEI-SLB-MIB", "hwMemberName"), ("HUAWEI-SLB-MIB", "hwConnectionNum"))
if mibBuilder.loadTexts: hwMemberInstanceConnectionFull.setStatus('current')
if mibBuilder.loadTexts: hwMemberInstanceConnectionFull.setDescription('The connections for the group member attains threshold.')
hwMemberInstanceConnectionFullRestore = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 2, 7)).setObjects(("HUAWEI-SLB-MIB", "hwGroupName"), ("HUAWEI-SLB-MIB", "hwMemberName"), ("HUAWEI-SLB-MIB", "hwConnectionNum"))
if mibBuilder.loadTexts: hwMemberInstanceConnectionFullRestore.setStatus('current')
if mibBuilder.loadTexts: hwMemberInstanceConnectionFullRestore.setDescription('The connections for the group member restores nomal.')
hwProbeInstanceStateUp = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 2, 8)).setObjects(("HUAWEI-SLB-MIB", "hwGroupName"), ("HUAWEI-SLB-MIB", "hwMemberName"), ("HUAWEI-SLB-MIB", "hwProbeName"), ("HUAWEI-SLB-MIB", "hwProbeType"), ("HUAWEI-SLB-MIB", "hwIpAddress"), ("HUAWEI-SLB-MIB", "hwPort"))
if mibBuilder.loadTexts: hwProbeInstanceStateUp.setStatus('current')
if mibBuilder.loadTexts: hwProbeInstanceStateUp.setDescription('Probe is used to check load balance member health, when there are expected response from load balance member, probe state will change to up.')
hwProbeInstanceStateDown = NotificationType((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 2, 9)).setObjects(("HUAWEI-SLB-MIB", "hwGroupName"), ("HUAWEI-SLB-MIB", "hwMemberName"), ("HUAWEI-SLB-MIB", "hwProbeName"), ("HUAWEI-SLB-MIB", "hwProbeType"), ("HUAWEI-SLB-MIB", "hwIpAddress"), ("HUAWEI-SLB-MIB", "hwPort"))
if mibBuilder.loadTexts: hwProbeInstanceStateDown.setStatus('current')
if mibBuilder.loadTexts: hwProbeInstanceStateDown.setDescription('Probe is used to check load balance member health, when there are no expected response from load balance member, probe state will change to down.')
# --- Conformance / compliance statements -----------------------------------
hwSlbConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 3))
hwSlbGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 3, 1))
hwSlbTrapObjectsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 3, 1, 1)).setObjects(("HUAWEI-SLB-MIB", "hwIpAddress"), ("HUAWEI-SLB-MIB", "hwMemberName"), ("HUAWEI-SLB-MIB", "hwGroupName"), ("HUAWEI-SLB-MIB", "hwPort"), ("HUAWEI-SLB-MIB", "hwProbeName"), ("HUAWEI-SLB-MIB", "hwProbeType"), ("HUAWEI-SLB-MIB", "hwConnectionNum"), ("HUAWEI-SLB-MIB", "hwActionName"), ("HUAWEI-SLB-MIB", "hwMasterGroup"), ("HUAWEI-SLB-MIB", "hwMasterGroupActiveNum"), ("HUAWEI-SLB-MIB", "hwMasterGroupTotalNum"), ("HUAWEI-SLB-MIB", "hwBackupGroup"), ("HUAWEI-SLB-MIB", "hwBackupGroupActiveNum"), ("HUAWEI-SLB-MIB", "hwBackupGroupTotalNum"), ("HUAWEI-SLB-MIB", "hwCurWorkGroupName"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwSlbTrapObjectsGroup = hwSlbTrapObjectsGroup.setStatus('current')
if mibBuilder.loadTexts: hwSlbTrapObjectsGroup.setDescription('The group of objects that comprise SlbTrapObjects.')
hwSlbNotificationsGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 3, 1, 2)).setObjects(("HUAWEI-SLB-MIB", "hwMemberInstanceStateUp"), ("HUAWEI-SLB-MIB", "hwMemberInstanceStateDown"), ("HUAWEI-SLB-MIB", "hwGroupStateSwitchover"), ("HUAWEI-SLB-MIB", "hwMemberInstanceConnectionFull"), ("HUAWEI-SLB-MIB", "hwMemberInstanceConnectionFullRestore"), ("HUAWEI-SLB-MIB", "hwProbeInstanceStateUp"), ("HUAWEI-SLB-MIB", "hwProbeInstanceStateDown"), ("HUAWEI-SLB-MIB", "hwMemberConnectionFull"), ("HUAWEI-SLB-MIB", "hwMemberConnectionFullRestore"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwSlbNotificationsGroup = hwSlbNotificationsGroup.setStatus('current')
if mibBuilder.loadTexts: hwSlbNotificationsGroup.setDescription('The group of objects that comprise SlbNotifications.')
hwSlbCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 3, 2))
hwSlbCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 2011, 5, 25, 225, 1, 3, 2, 1)).setObjects(("HUAWEI-SLB-MIB", "hwSlbTrapObjectsGroup"), ("HUAWEI-SLB-MIB", "hwSlbNotificationsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwSlbCompliance = hwSlbCompliance.setStatus('current')
if mibBuilder.loadTexts: hwSlbCompliance.setDescription('The compliance statement for entities implementing Huawei server load balance MIB.')
# --- Export all defined symbols to the MIB builder -------------------------
mibBuilder.exportSymbols("HUAWEI-SLB-MIB", hwSlbCompliances=hwSlbCompliances, hwMemberInstanceConnectionFullRestore=hwMemberInstanceConnectionFullRestore, hwMemberName=hwMemberName, hwConnectionNum=hwConnectionNum, hwSlbMibObjects=hwSlbMibObjects, hwSlbNotifications=hwSlbNotifications, hwIpAddress=hwIpAddress, hwBackupGroupTotalNum=hwBackupGroupTotalNum, hwMemberInstanceConnectionFull=hwMemberInstanceConnectionFull, hwProbeInstanceStateDown=hwProbeInstanceStateDown, hwSlbNotificationsGroup=hwSlbNotificationsGroup, PYSNMP_MODULE_ID=hwSLBMIB, hwSlbGroups=hwSlbGroups, hwBackupGroup=hwBackupGroup, hwCurWorkGroupName=hwCurWorkGroupName, hwSlbCompliance=hwSlbCompliance, hwMasterGroupTotalNum=hwMasterGroupTotalNum, hwSlbTrapObjects=hwSlbTrapObjects, hwProbeType=hwProbeType, hwGroupStateSwitchover=hwGroupStateSwitchover, hwMemberConnectionFull=hwMemberConnectionFull, hwSlbConformance=hwSlbConformance, hwMemberInstanceStateUp=hwMemberInstanceStateUp, hwMemberInstanceStateDown=hwMemberInstanceStateDown, hwMemberConnectionFullRestore=hwMemberConnectionFullRestore, hwProbeInstanceStateUp=hwProbeInstanceStateUp, hwSLBMIB=hwSLBMIB, hwBackupGroupActiveNum=hwBackupGroupActiveNum, hwSlbTrapObjectsGroup=hwSlbTrapObjectsGroup, hwMasterGroupActiveNum=hwMasterGroupActiveNum, hwActionName=hwActionName, hwPort=hwPort, hwGroupName=hwGroupName, hwProbeName=hwProbeName, hwMasterGroup=hwMasterGroup)
| 149.896552
| 1,395
| 0.774327
|
28050dc3ca0d272aa33d4ced78c7558b90699a16
| 9,249
|
py
|
Python
|
certipy/template.py
|
wisdark/Certipy
|
5d95b01c46263ede8e04057c5ebdab9b38b083a0
|
[
"MIT"
] | 367
|
2021-10-21T19:23:47.000Z
|
2022-03-30T21:55:05.000Z
|
certipy/template.py
|
wisdark/Certipy
|
5d95b01c46263ede8e04057c5ebdab9b38b083a0
|
[
"MIT"
] | 38
|
2021-10-18T01:31:43.000Z
|
2022-03-14T08:21:22.000Z
|
certipy/template.py
|
wisdark/Certipy
|
5d95b01c46263ede8e04057c5ebdab9b38b083a0
|
[
"MIT"
] | 58
|
2021-11-03T09:40:04.000Z
|
2022-03-26T23:17:35.000Z
|
import argparse
import json
import logging
from typing import Callable, Dict, Tuple
import ldap3
from ldap3.protocol.microsoft import security_descriptor_control
from certipy import target
from certipy.ldap import LDAPConnection, LDAPEntry
from certipy.target import Target
# Module name — presumably used for CLI subcommand dispatch; confirm caller.
NAME = "template"

# Attribute names that are never copied when (de)serializing a template
# configuration — see configuration_to_json / json_to_configuration, which
# skip these keys (identity and schema bookkeeping managed by the directory).
PROTECTED_ATTRIBUTES = [
    "objectClass",
    "cn",
    "distinguishedName",
    "whenCreated",
    "whenChanged",
    "name",
    "objectGUID",
    "objectCategory",
    "dSCorePropagationData",
    "msPKI-Cert-Template-OID",
    "uSNCreated",
    "uSNChanged",
    "displayName",
    "instanceType",
    "revision",
    "msPKI-Template-Schema-Version",
    "msPKI-Template-Minor-Revision",
]

# SubCA template configuration with full control for 'Authenticated Users'
# Used by set_configuration() as the default when no JSON file is supplied.
CONFIGURATION_TEMPLATE = {
    "showInAdvancedViewOnly": [b"TRUE"],
    "nTSecurityDescriptor": [
        b"\x01\x00\x04\x9c0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x14\x00\x00\x00\x02\x00\x1c\x00\x01\x00\x00\x00\x00\x00\x14\x00\xff\x01\x0f\x00\x01\x01\x00\x00\x00\x00\x00\x05\x0b\x00\x00\x00\x01\x05\x00\x00\x00\x00\x00\x05\x15\x00\x00\x00\xc8\xa3\x1f\xdd\xe9\xba\xb8\x90,\xaes\xbb\xf4\x01\x00\x00"  # Authenticated Users - Full Control
    ],
    "flags": [b"131793"],
    "pKIDefaultKeySpec": [b"2"],
    "pKIKeyUsage": [b"\x86\x00"],
    "pKIMaxIssuingDepth": [b"-1"],
    "pKICriticalExtensions": [b"2.5.29.19", b"2.5.29.15"],
    "pKIExpirationPeriod": [b"\x00@\x1e\xa4\xe8e\xfa\xff"],
    "pKIOverlapPeriod": [b"\x00\x80\xa6\n\xff\xde\xff\xff"],
    "pKIDefaultCSPs": [b"1,Microsoft Enhanced Cryptographic Provider v1.0"],
    "msPKI-RA-Signature": [b"0"],
    "msPKI-Enrollment-Flag": [b"0"],
    "msPKI-Private-Key-Flag": [b"16842768"],
    "msPKI-Certificate-Name-Flag": [b"1"],
    "msPKI-Minimal-Key-Size": [b"2048"],
}
class Template:
    def __init__(
        self,
        target: "Target",
        template: str = None,
        configuration: str = None,
        save_old: bool = False,
        scheme: str = "ldaps",
        connection: LDAPConnection = None,
        **kwargs,
    ):
        """Store the options; the LDAP connection is created lazily by the
        ``connection`` property when one is not supplied.

        :param target: target/authentication description (project type)
        :param template: template name matched against cn, then displayName
        :param configuration: path of a JSON configuration file to apply;
            when None, the built-in CONFIGURATION_TEMPLATE is used
        :param save_old: NOTE(review): only stored here — presumably used by
            code outside this view to save the old configuration; confirm
        :param scheme: LDAP URL scheme passed to LDAPConnection
        :param connection: pre-established LDAPConnection to reuse
        """
        self.target = target
        self.template_name = template
        self.configuration = configuration
        self.save_old = save_old
        self.scheme = scheme
        self.kwargs = kwargs
        self._connection = connection
@property
def connection(self) -> LDAPConnection:
if self._connection is not None:
return self._connection
self._connection = LDAPConnection(self.target, self.scheme)
self._connection.connect()
return self._connection
def configuration_to_json(self, configuration: dict) -> str:
output = {}
for key, value in configuration.items():
if key in PROTECTED_ATTRIBUTES:
continue
if type(value) == list:
output[key] = list(map(lambda x: x.hex(), value))
else:
output[key] = value.hex()
return json.dumps(output)
def get_configuration(self, template) -> LDAPEntry:
results = self.connection.search(
"(&(cn=%s)(objectClass=pKICertificateTemplate))" % template,
search_base=self.connection.configuration_path,
query_sd=True,
)
if len(results) == 0:
results = self.connection.search(
"(&(displayName=%s)(objectClass=pKICertificateTemplate))" % template,
search_base=self.connection.configuration_path,
query_sd=True,
)
if len(results) == 0:
logging.error(
"Could not find any certificate template for %s" % repr(template)
)
return None
if len(results) > 1:
# This should never happen, but just in case
logging.error(
"Found multiple certificate templates identified by %s" % repr(template)
)
return None
template = results[0]
return template
def json_to_configuration(self, configuration_json: str) -> Dict:
output = {}
for key, value in configuration_json.items():
if key in PROTECTED_ATTRIBUTES:
continue
if type(value) == list:
output[key] = list(map(lambda x: bytes.fromhex(x), value))
else:
output[key] = bytes.fromhex(value)
return output
def load_configuration(self, configuration: str) -> Dict:
with open(configuration, "r") as f:
configuration_json = json.load(f)
return self.json_to_configuration(configuration_json)
def set_configuration(self) -> bool:
if self.template_name is None:
logging.error("A template (-template) is required")
return False
if self.configuration is not None:
new_configuration = self.load_configuration(self.configuration)
else:
new_configuration = CONFIGURATION_TEMPLATE
old_configuration = self.get_configuration(self.template_name)
if old_configuration is None:
return False
if self.save_old:
old_configuration_json = self.configuration_to_json(
old_configuration["raw_attributes"]
)
out_file = "%s.json" % old_configuration.get("cn")
with open(out_file, "w") as f:
f.write(old_configuration_json)
logging.info(
"Saved old configuration for %s to %s"
% (repr(self.template_name), repr(out_file))
)
changes = {}
for key in old_configuration["raw_attributes"].keys():
if key in PROTECTED_ATTRIBUTES:
continue
if key not in new_configuration:
changes[key] = [
(
ldap3.MODIFY_DELETE,
[],
)
]
pass
if key in new_configuration:
old_values = old_configuration.get_raw(key)
new_values = new_configuration[key]
if all(list(map(lambda x: x in new_values, old_values))):
continue
changes[key] = [
(
ldap3.MODIFY_REPLACE,
new_configuration[key],
)
]
for key, value in new_configuration.items():
if (
key in changes
or key in PROTECTED_ATTRIBUTES
or key in old_configuration["raw_attributes"]
):
continue
changes[key] = [
(
ldap3.MODIFY_ADD,
value,
)
]
if len(changes.keys()) == 0:
logging.warning(
"New configuration is the same as old configuration. Not updating"
)
return False
logging.info(
"Updating certificate template %s" % repr(old_configuration.get("cn"))
)
result = self.connection.modify(
old_configuration.get("distinguishedName"),
changes,
controls=security_descriptor_control(sdflags=0x4),
)
if result["result"] == 0:
logging.info("Successfully updated %s" % repr(old_configuration.get("cn")))
return True
elif result["result"] == ldap3.core.results.RESULT_INSUFFICIENT_ACCESS_RIGHTS:
logging.error(
"User %s doesn't have permission to update these attributes on %s"
% (repr(self.target.username), repr(old_configuration.get("cn")))
)
else:
logging.error("Got error: %s" % result["message"])
def entry(options: argparse.Namespace) -> None:
    """Entry point for the 'template' subcommand: build a Template from the
    parsed CLI options and apply the (possibly default) configuration."""
    resolved_target = Target.from_options(options)
    # Drop the raw option so it is not passed twice to Template below.
    delattr(options, "target")
    Template(target=resolved_target, **vars(options)).set_configuration()
def add_subparser(subparsers: argparse._SubParsersAction) -> Tuple[str, Callable]:
    """Register the 'template' subcommand and return (name, entry callable)."""
    subparser = subparsers.add_parser(NAME, help="Manage certificate templates")
    subparser.add_argument(
        "-template", action="store", metavar="template name", required=True
    )
    subparser.add_argument("-debug", action="store_true", help="Turn debug output on")
    group = subparser.add_argument_group("configuration options")
    group.add_argument(
        "-configuration",
        action="store",
        metavar="configuration file",
        help="Configuration to apply to the certificate template. If omitted, a default vulnerable configuration (ESC1) will be applied. Useful for restoring an old configuration",
    )
    group.add_argument(
        "-save-old",
        action="store_true",
        help="Save the old configuration",
    )
    group = subparser.add_argument_group("connection options")
    group.add_argument(
        "-scheme",
        action="store",
        metavar="ldap scheme",
        choices=["ldap", "ldaps"],
        default="ldaps",
    )
    # Shared target/authentication arguments (username, hashes, dc-ip, ...).
    target.add_argument_group(subparser, connection_options=group)
    return NAME, entry
| 32.114583
| 344
| 0.583523
|
442ecbb0683c73c8f2d5e9b61ef8b65b4e4101ba
| 8,798
|
py
|
Python
|
argoverse/visualization/vis_mask.py
|
gargrohin/argoverse-api
|
6f695673b3ee5783955d0f9c72dedb95a8013fe1
|
[
"MIT"
] | 560
|
2019-06-20T00:15:25.000Z
|
2022-03-29T15:21:31.000Z
|
argoverse/visualization/vis_mask.py
|
gargrohin/argoverse-api
|
6f695673b3ee5783955d0f9c72dedb95a8013fe1
|
[
"MIT"
] | 200
|
2019-06-20T05:21:35.000Z
|
2022-03-27T01:30:00.000Z
|
argoverse/visualization/vis_mask.py
|
gargrohin/argoverse-api
|
6f695673b3ee5783955d0f9c72dedb95a8013fe1
|
[
"MIT"
] | 193
|
2019-06-19T20:51:23.000Z
|
2022-03-24T08:46:24.000Z
|
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
# <Modifications copyright (C) 2019, Argo AI, LLC>
"""
This tool is loosely based off of Facebook's Mask R-CNN visualization tool.
https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/vis.py
"""
import os
from typing import Any, List, Optional, Sequence, Tuple, Union
import cv2
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Polygon
from argoverse.visualization.colormap import colormap
plt.rcParams["pdf.fonttype"] = 42  # For editing in Adobe Illustrator
# Color triples used by the cv2-based drawing helpers below.
# NOTE(review): assumed to match the image's channel order (RGB or BGR) —
# the helpers accept either, so confirm against the caller.
_GRAY = (218, 227, 218)
_GREEN = (18, 127, 15)
_WHITE = (255, 255, 255)
# An axis-aligned box given as (xmin, ymin, xmax, ymax).
Segment = Tuple[float, float, float, float]
def vis_mask(image: np.ndarray, mask: np.ndarray, color: Union[float, np.ndarray], alpha: float = 0.4) -> np.ndarray:
    """Blend `color` into `image` wherever `mask` is non-zero.

    Args:
        image: 3-channel image with values in [0, 255] (RGB or BGR).
        mask: Integer mask; non-zero entries select the region to tint.
        color: Scalar or length-3 color with components in [0, 255].
        alpha: Blend weight of the color (0 keeps the image, 1 shows the color).

    Returns:
        A new uint8 image with the mask region alpha-blended; the input is
        not modified.
    """
    blended = image.astype(np.float32)
    rows, cols = np.nonzero(mask)[:2]
    # out = (1 - alpha) * original + alpha * color, applied only on the mask.
    blended[rows, cols, :] = blended[rows, cols, :] * (1.0 - alpha) + alpha * color
    return blended.astype(np.uint8)
def vis_class(
    image: np.ndarray,
    pos: Tuple[float, float],
    class_str: str,
    # NOTE(review): 50.0 is unusually large for a cv2 font scale (typical
    # values are ~0.5-2.0); confirm this is intentional.
    font_scale: float = 50.0,
) -> np.ndarray:
    """Visualizes a class.
    Args:
        image: The image
        pos: The position for the text
        class_str: The name of the class
        font_scale: Text size
    Returns:
        The modified image
    """
    image = image.astype(np.uint8)
    x0, y0 = int(pos[0]), int(pos[1])
    # Compute text size.
    txt = class_str
    font = cv2.FONT_HERSHEY_SIMPLEX
    ((txt_w, txt_h), _) = cv2.getTextSize(txt, font, font_scale, 1)
    # Show text: origin nudged down by 30% of text height so the label sits
    # just inside the given position.
    txt_tl = x0, y0 + int(0.3 * txt_h)
    cv2.putText(image, txt, txt_tl, font, font_scale, _WHITE, lineType=cv2.LINE_AA)
    return image
def vis_bbox(image: np.ndarray, bbox: Tuple[int, int, int, int], thickness: int = 1) -> np.ndarray:
    """Visualize a bounding box.
    Args:
        image: The input image
        bbox: Bounding box given as (x, y, width, height)
        thickness: Line thickness
    Returns:
        The modified image (drawn in place after the uint8 cast)
    """
    image = image.astype(np.uint8)
    # Convert (x, y, w, h) to opposite-corner coordinates for cv2.rectangle.
    x0, y0, w, h = bbox
    x1, y1 = int(x0 + w), int(y0 + h)
    x0, y0 = int(x0), int(y0)
    cv2.rectangle(image, (x0, y0), (x1, y1), _GREEN, thickness=thickness)
    return image
def decode_segment_to_mask(segm: Segment, image: np.ndarray) -> np.ndarray:
    """Build a binary mask for an axis-aligned box segment.

    Args:
        segm: (xmin, ymin, xmax, ymax) box coordinates.
        image: Image whose height/width determine the mask shape.

    Returns:
        A float mask of shape (H, W) with 1s inside the box, 0s elsewhere.
    """
    x_min, y_min, x_max, y_max = segm
    height, width = image.shape[0], image.shape[1]
    mask = np.zeros((height, width))
    # Rows are indexed by y, columns by x; the upper bounds are exclusive.
    mask[int(y_min):int(y_max), int(x_min):int(x_max)] = 1
    return mask
def vis_one_image_opencv(
    image: np.ndarray,
    boxes: np.ndarray,
    segms: Optional[Sequence[Segment]] = None,
    show_box: bool = False,
    show_class: bool = True,
) -> np.ndarray:
    """Constructs a numpy array with the detections visualized.
    Args:
        image: The image data
        boxes: The box data, rows of (x1, y1, x2, y2, ...) corner coordinates
        segms: Segmentations
        show_box: Whether to show the boxes
        show_class: Whether to show the object classes
    Return:
        The newly constructed image
    """
    if boxes is None or boxes.shape[0] == 0:
        return image
    if segms:
        color_list = colormap()
        mask_color_id = 0
    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)
    for i in sorted_inds:
        bbox = boxes[i, :4]
        # NOTE(review): the next line is a no-op expression — presumably a
        # leftover read of the detection score; confirm before removing.
        boxes[i, -1]
        # show box (off by default)
        if show_box:
            image = vis_bbox(image, (bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]))
        # show class (off by default)
        if show_class:
            # NOTE(review): placeholder label — the real class name is not
            # wired through to this function.
            class_str = "hello"
            image = vis_class(image, (bbox[0], bbox[1] - 2), class_str)
        # show mask, cycling through the colormap per instance
        if segms and len(segms) > i:
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1
            mask = decode_segment_to_mask(segms[i], image)
            image = vis_mask(image, mask, color_mask)
    return image
def vis_one_image(
    image: np.ndarray,
    image_name: str,
    output_dir: str,
    boxes: np.ndarray,
    segms: Optional[Sequence[Segment]] = None,
    dpi: int = 200,
    box_alpha: float = 0.0,
    show_class: bool = True,
    extension: str = "pdf",
) -> None:
    """Visual debugging of detections, saved to `output_dir` as matplotlib output.

    Args:
        image: The image data
        image_name: The name of the image (basename is used for the output file)
        output_dir: Directory to output to (created if missing)
        boxes: Detection boxes, rows of (x1, y1, x2, y2, ...); may be None/empty
        segms: Segmentations, aligned with `boxes` rows
        dpi: DPI of the saved figure
        box_alpha: Alpha channel of the box edges (0 hides them)
        show_class: Whether to draw a (currently placeholder) class label
        extension: Extension of the output file
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    if boxes is None or boxes.shape[0] == 0:
        return
    color_list = colormap(rgb=True) / 255
    fig = plt.figure(frameon=False)
    fig.set_size_inches(image.shape[1] / dpi, image.shape[0] / dpi)
    ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0])
    ax.axis("off")
    fig.add_axes(ax)
    ax.imshow(image)
    # Display in largest to smallest order to reduce occlusion. ('boxes' was
    # already validated as non-None and non-empty above, so the former
    # defensive 'if boxes is None' branch here was unreachable and is removed.)
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)
    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        # show box (hidden by default via box_alpha=0)
        ax.add_patch(
            plt.Rectangle(
                (bbox[0], bbox[1]),
                bbox[2] - bbox[0],
                bbox[3] - bbox[1],
                fill=False,
                edgecolor="g",
                linewidth=0.5,
                alpha=box_alpha,
            )
        )
        if show_class:
            # NOTE(review): placeholder label; real class names are not wired up.
            ax.text(
                bbox[0],
                bbox[1] - 2,
                "WHERE IS THE TEXT car",
                fontsize=30,
                family="serif",
                bbox=dict(facecolor="g", alpha=0.4, pad=0, edgecolor="none"),
                color="white",
            )
        # show mask as a translucent polygon traced around its contour
        if segms is not None and len(segms) > i:
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1
            # Lighten the colormap color towards white for visibility.
            w_ratio = 0.4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            e = decode_segment_to_mask(segms[i], image).astype(np.uint8)
            # cv2.findContours returns (image, contours, hierarchy) in
            # OpenCV 3.x but (contours, hierarchy) in 4.x; taking the
            # second-to-last element is correct for both versions.
            contours = cv2.findContours(
                e.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE
            )[-2]
            for contour in contours:
                polygon = Polygon(
                    contour.reshape((-1, 2)),
                    fill=True,
                    facecolor=color_mask,
                    edgecolor="w",
                    linewidth=1.2,
                    alpha=0.5,
                )
                ax.add_patch(polygon)
    output_name = os.path.basename(image_name) + "." + extension
    fig.savefig(os.path.join(output_dir, "{}".format(output_name)), dpi=dpi)
    plt.close("all")
| 29.92517
| 117
| 0.586042
|
6d1a6cfb97d9d0862ed818d98542441dec556b12
| 433
|
py
|
Python
|
Desafio066.py
|
GabrielSanchesRosa/Python
|
3a129e27e076b2a91af03d68ede50b9c45c50217
|
[
"MIT"
] | null | null | null |
Desafio066.py
|
GabrielSanchesRosa/Python
|
3a129e27e076b2a91af03d68ede50b9c45c50217
|
[
"MIT"
] | null | null | null |
Desafio066.py
|
GabrielSanchesRosa/Python
|
3a129e27e076b2a91af03d68ede50b9c45c50217
|
[
"MIT"
] | null | null | null |
# Read integers from the keyboard until the user types 999 (the stop flag),
# then report how many numbers were entered and their sum (flag excluded).
soma = cont = 0
# iter(callable, sentinel) keeps calling the prompt until it returns 999.
for num in iter(lambda: int(input("Digite um valor (999 para parar): ")), 999):
    cont += 1
    soma += num
print(f"A soma dos {cont} valores foi {soma}.")
| 30.928571
| 242
| 0.704388
|
4e3645c305bedbea6ca72b2b0763cf19cf45067e
| 3,994
|
py
|
Python
|
sentiment_analysis.py
|
Krishna00111/Mining-Social-Media
|
34ebb196d82dbf1722baa2ecdc1790887eeea1bc
|
[
"Apache-2.0"
] | null | null | null |
sentiment_analysis.py
|
Krishna00111/Mining-Social-Media
|
34ebb196d82dbf1722baa2ecdc1790887eeea1bc
|
[
"Apache-2.0"
] | null | null | null |
sentiment_analysis.py
|
Krishna00111/Mining-Social-Media
|
34ebb196d82dbf1722baa2ecdc1790887eeea1bc
|
[
"Apache-2.0"
] | null | null | null |
import re
import tweepy
from tweepy import OAuthHandler
from textblob import TextBlob
import pandas as pd
#
# def clean_tweet(tweet):
# '''
# Utility function to clean tweet text by removing links, special characters
# using simple regex statements.
# '''
# return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])
# | (\w +:\ / \ / \S +)", " ", tweet).split())
def get_tweet_sentiment(tweet):
    '''
    Classify the sentiment of a tweet's text with TextBlob.

    Returns 'positive', 'neutral' or 'negative' based on the sign of the
    polarity score.
    '''
    polarity = TextBlob(tweet).sentiment.polarity
    if polarity > 0:
        return 'positive'
    if polarity == 0:
        return 'neutral'
    return 'negative'
def analyzeTweets(tweetsReceived):
    '''
    Parse received tweets and classify each one's sentiment.

    Parameters
    ----------
    tweetsReceived : iterable of dict
        Each tweet must be a mapping with a 'text' key; a 'retweets' (or
        'retweet_count') key, when present, triggers de-duplication.

    Returns
    -------
    list of dict
        Dicts with 'text' and 'sentiment' keys, or None if an error occurred.
    '''
    tweets = []
    try:
        fetched_tweets = tweetsReceived
        # parsing tweets one by one
        for tweet in fetched_tweets:
            parsed_tweet = {}
            parsed_tweet['text'] = tweet['text']
            parsed_tweet['sentiment'] = get_tweet_sentiment(tweet['text'])
            # BUG FIX: the original mixed dict indexing (tweet['text']) with
            # attribute access (tweet.retweet_count), which crashes for plain
            # dict input. Use key access throughout, accepting either the CSV
            # column name ('retweets') or the Twitter API field name.
            retweets = tweet.get('retweets', tweet.get('retweet_count', 0))
            if retweets > 0:
                # A retweeted tweet may appear multiple times; keep one copy.
                if parsed_tweet not in tweets:
                    tweets.append(parsed_tweet)
            else:
                tweets.append(parsed_tweet)
        return tweets
    except tweepy.TweepError as e:
        # Best-effort: log the Twitter API error and return None.
        print("Error : " + str(e))
def main():
    '''
    Load tweets from CSV, classify their sentiment, and print a summary.

    BUG FIXES vs the original: the undefined name `finaltweets` is replaced
    by the actual result of analyzeTweets(); the invalid list arithmetic
    `tweets - ntweets - ptweets` is replaced by length arithmetic; and rows
    are converted to dicts before analysis (iterating a DataFrame directly
    yields column names, not rows).
    '''
    colnames = ['favorites', 'created_at', 'retweets', 'text']
    df = pd.read_csv('cleaned_mention_tweets.csv', usecols=colnames)
    # Convert rows to dicts so analyzeTweets can use key access uniformly.
    tweets = analyzeTweets(df.to_dict('records'))
    if not tweets:
        # analyzeTweets returns None on error (and [] for empty input).
        print("No tweets could be analyzed")
        return
    # picking positive tweets from tweets
    ptweets = [tweet for tweet in tweets if tweet['sentiment'] == 'positive']
    print("Positive tweets percentage: {} %".format(100 * len(ptweets) / len(tweets)))
    # picking negative tweets from tweets
    ntweets = [tweet for tweet in tweets if tweet['sentiment'] == 'negative']
    print("Negative tweets percentage: {} %".format(100 * len(ntweets) / len(tweets)))
    # remaining tweets are neutral
    print("Neutral tweets percentage: {} %".format(
        100 * (len(tweets) - len(ntweets) - len(ptweets)) / len(tweets)))
    # printing first 10 positive tweets
    print("\n\nPositive tweets:")
    for tweet in ptweets[:10]:
        print(tweet['text'])
    # printing first 10 negative tweets
    print("\n\nNegative tweets:")
    for tweet in ntweets[:10]:
        print(tweet['text'])
if __name__ == "__main__":
    # calling main function
    main()
# NOTE(review): everything below runs unconditionally at import time, AFTER
# the __main__ guard, and references a module-level name `tweets` that is
# never defined at this scope — as written it raises NameError. It also
# hard-codes the row count (20095). It appears to duplicate the logic of
# analyzeTweets() applied directly to the DataFrame columns; confirm intent
# before relying on (or deleting) it.
finaltweets = []
for count in range(0, 20095):
    parsed_tweet = {}
    parsed_tweet['text'] = tweets['text'].values[count]
    parsed_tweet['sentiment'] = get_tweet_sentiment(tweets['text'].values[count])
    if tweets['retweets'].values[count] > 0:
        if parsed_tweet not in finaltweets:
            finaltweets.append(parsed_tweet)
    else:
        finaltweets.append(parsed_tweet)
| 33.563025
| 92
| 0.601903
|
32a6293d4731e30f83f197f03654a86e153f0d36
| 10,074
|
py
|
Python
|
gamestonk_terminal/options/yfinance_view.py
|
jbhurat/GamestonkTerminal
|
06957c27f3bea36028a242a68a4a1aabbb566577
|
[
"MIT"
] | 1
|
2021-07-25T20:34:29.000Z
|
2021-07-25T20:34:29.000Z
|
gamestonk_terminal/options/yfinance_view.py
|
TomiToivio/GamestonkTerminal
|
419c3691db220c467d2979b19ca308b3b800c0bd
|
[
"MIT"
] | 1
|
2022-02-10T06:49:37.000Z
|
2022-02-10T06:49:37.000Z
|
gamestonk_terminal/options/yfinance_view.py
|
TomiToivio/GamestonkTerminal
|
419c3691db220c467d2979b19ca308b3b800c0bd
|
[
"MIT"
] | null | null | null |
"""Yfinance options view"""
__docformat__ = "numpy"
import argparse
from bisect import bisect_left
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import yfinance as yf
import numpy as np
import gamestonk_terminal.feature_flags as gtff
import gamestonk_terminal.config_plot as cfp
from gamestonk_terminal.helper_funcs import (
plot_autoscale,
)
from gamestonk_terminal.options import op_helpers
def plot_oi(
    calls: pd.DataFrame,
    puts: pd.DataFrame,
    ticker: str,
    expiry: str,
    ns_parser: argparse.Namespace,
):
    """Plot open interest for calls and puts, with spot and max-pain lines.
    Parameters
    ----------
    calls: pd.DataFrame
        Dataframe of call options (must contain 'strike' and 'openInterest')
    puts: pd.DataFrame
        Dataframe of put options (must contain 'strike' and 'openInterest')
    ticker: str
        Ticker
    expiry: str
        Expiry date for options
    ns_parser: argparse.Namespace
        Parsed namespace (uses .min, .max, .calls, .puts)
    """
    # Spot price fetched live from yfinance (network call).
    current_price = float(yf.Ticker(ticker).info["regularMarketPrice"])
    # -1 means "not provided": default the strike window to +/-25% of spot.
    if ns_parser.min == -1:
        min_strike = 0.75 * current_price
    else:
        min_strike = ns_parser.min
    if ns_parser.max == -1:
        max_strike = 1.25 * current_price
    else:
        max_strike = ns_parser.max
    # -calls and -puts are "only show this side" flags; both at once is invalid.
    if ns_parser.calls and ns_parser.puts:
        print("Both flags selected, please select one", "\n")
        return
    # Open interest is plotted in thousands of contracts.
    call_oi = calls.set_index("strike")["openInterest"] / 1000
    put_oi = puts.set_index("strike")["openInterest"] / 1000
    df_opt = pd.merge(call_oi, put_oi, left_index=True, right_index=True)
    df_opt = df_opt.rename(
        columns={"openInterest_x": "OI_call", "openInterest_y": "OI_put"}
    )
    max_pain = op_helpers.calculate_max_pain(df_opt)
    plt.style.use("classic")
    fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    if not ns_parser.calls:
        put_oi.plot(
            x="strike",
            y="openInterest",
            label="Puts",
            ax=ax,
            marker="o",
            ls="-",
            c="r",
        )
    if not ns_parser.puts:
        call_oi.plot(
            x="strike",
            y="openInterest",
            label="Calls",
            ax=ax,
            marker="o",
            ls="-",
            c="g",
        )
    # Vertical markers for current spot price and the max-pain strike.
    ax.axvline(
        current_price, lw=2, c="k", ls="--", label="Current Price", alpha=0.7
    )
    ax.axvline(max_pain, lw=3, c="k", label=f"Max Pain: {max_pain}", alpha=0.7)
    ax.grid("on")
    ax.set_xlabel("Strike Price")
    ax.set_ylabel("Open Interest (1k) ")
    ax.set_xlim(min_strike, max_strike)
    if gtff.USE_ION:
        plt.ion()
    ax.set_title(f"Open Interest for {ticker.upper()} expiring {expiry}")
    plt.legend(loc=0)
    fig.tight_layout(pad=1)
    plt.show()
    # Restore the default style so later plots are unaffected.
    plt.style.use("default")
    print("")
def plot_vol(
    calls: pd.DataFrame,
    puts: pd.DataFrame,
    ticker: str,
    expiry: str,
    ns_parser: argparse.Namespace,
):
    """Plot traded volume for calls and puts around the current spot price.
    Parameters
    ----------
    calls: pd.DataFrame
        Dataframe of call options (must contain 'strike' and 'volume')
    puts: pd.DataFrame
        Dataframe of put options (must contain 'strike' and 'volume')
    ticker: str
        Ticker
    expiry: str
        Expiry date for options
    ns_parser: argparse.Namespace
        Parsed namespace (uses .min, .max, .calls, .puts)
    """
    # Spot price fetched live from yfinance (network call).
    current_price = float(yf.Ticker(ticker).info["regularMarketPrice"])
    # -1 means "not provided": default the strike window to +/-25% of spot.
    if ns_parser.min == -1:
        min_strike = 0.75 * current_price
    else:
        min_strike = ns_parser.min
    if ns_parser.max == -1:
        max_strike = 1.25 * current_price
    else:
        max_strike = ns_parser.max
    # -calls and -puts are "only show this side" flags; both at once is invalid.
    if ns_parser.calls and ns_parser.puts:
        print("Both flags selected, please select one", "\n")
        return
    # Volume is plotted in thousands of contracts.
    call_v = calls.set_index("strike")["volume"] / 1000
    put_v = puts.set_index("strike")["volume"] / 1000
    plt.style.use("classic")
    fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    if not ns_parser.calls:
        put_v.plot(
            x="strike",
            y="volume",
            label="Puts",
            ax=ax,
            marker="o",
            ls="-",
            c="r",
        )
    if not ns_parser.puts:
        call_v.plot(
            x="strike",
            y="volume",
            label="Calls",
            ax=ax,
            marker="o",
            ls="-",
            c="g",
        )
    ax.axvline(current_price, lw=2, c="k", ls="--", label="Current Price", alpha=0.7)
    ax.grid("on")
    ax.set_xlabel("Strike Price")
    ax.set_ylabel("Volume (1k) ")
    ax.set_xlim(min_strike, max_strike)
    if gtff.USE_ION:
        plt.ion()
    ax.set_title(f"Volume for {ticker.upper()} expiring {expiry}")
    plt.legend(loc=0)
    fig.tight_layout(pad=1)
    plt.show()
    # Restore the default style so later plots are unaffected.
    plt.style.use("default")
    print("")
def plot_volume_open_interest(
    ticker: str,
    exp_date: str,
    calls: pd.DataFrame,
    puts: pd.DataFrame,
    ns_parser: argparse.Namespace,
):
    """Plot a horizontal volume/open-interest profile (calls right, puts left).
    Parameters
    ----------
    ticker : str
        Main ticker to compare income
    exp_date : str
        Expiry date of the option
    calls: pd.DataFrame
        Option data calls ('strike', 'volume', 'openInterest' columns)
    puts: pd.DataFrame
        Option data puts ('strike', 'volume', 'openInterest' columns)
    ns_parser: argparse.Namespace
        Parsed namespace (uses .min_vol, .min_sp, .max_sp)
    """
    # Spot price fetched live from yfinance (network call).
    current_price = float(yf.Ticker(ticker).info["regularMarketPrice"])
    # Process Calls Data: aggregate per strike and compute combined OI+volume.
    df_calls = calls.pivot_table(
        index="strike", values=["volume", "openInterest"], aggfunc="sum"
    ).reindex()
    df_calls["strike"] = df_calls.index
    df_calls["type"] = "calls"
    df_calls["openInterest"] = df_calls["openInterest"]
    df_calls["volume"] = df_calls["volume"]
    df_calls["oi+v"] = df_calls["openInterest"] + df_calls["volume"]
    df_calls["spot"] = round(current_price, 2)
    # Puts are negated so they extend left of the axis in the barh chart.
    df_puts = puts.pivot_table(
        index="strike", values=["volume", "openInterest"], aggfunc="sum"
    ).reindex()
    df_puts["strike"] = df_puts.index
    df_puts["type"] = "puts"
    df_puts["openInterest"] = df_puts["openInterest"]
    df_puts["volume"] = -df_puts["volume"]
    df_puts["openInterest"] = -df_puts["openInterest"]
    df_puts["oi+v"] = df_puts["openInterest"] + df_puts["volume"]
    df_puts["spot"] = round(current_price, 2)
    # Max pain is computed from open interest (in thousands) per strike.
    call_oi = calls.set_index("strike")["openInterest"] / 1000
    put_oi = puts.set_index("strike")["openInterest"] / 1000
    df_opt = pd.merge(call_oi, put_oi, left_index=True, right_index=True)
    df_opt = df_opt.rename(
        columns={"openInterest_x": "OI_call", "openInterest_y": "OI_put"}
    )
    max_pain = op_helpers.calculate_max_pain(df_opt)
    if ns_parser.min_vol == -1 and ns_parser.min_sp == -1 and ns_parser.max_sp == -1:
        # If no argument provided, we use the percentile 50 to get 50% of upper volume data
        volume_percentile_threshold = 50
        min_vol_calls = np.percentile(df_calls["oi+v"], volume_percentile_threshold)
        min_vol_puts = np.percentile(df_puts["oi+v"], volume_percentile_threshold)
        df_calls = df_calls[df_calls["oi+v"] > min_vol_calls]
        df_puts = df_puts[df_puts["oi+v"] < min_vol_puts]
    else:
        # Explicit filters: minimum combined volume and/or a strike window.
        # Note put thresholds are mirrored because put values are negative.
        if ns_parser.min_vol > -1:
            df_calls = df_calls[df_calls["oi+v"] > ns_parser.min_vol]
            df_puts = df_puts[df_puts["oi+v"] < -ns_parser.min_vol]
        if ns_parser.min_sp > -1:
            df_calls = df_calls[df_calls["strike"] > ns_parser.min_sp]
            df_puts = df_puts[df_puts["strike"] > ns_parser.min_sp]
        if ns_parser.max_sp > -1:
            df_calls = df_calls[df_calls["strike"] < ns_parser.max_sp]
            df_puts = df_puts[df_puts["strike"] < ns_parser.max_sp]
    if df_calls.empty and df_puts.empty:
        print(
            "The filtering applied is too strong, there is no data available for such conditions.\n"
        )
        return
    # Initialize the matplotlib figure
    _, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
    # make x axis symmetric
    axis_origin = max(abs(max(df_puts["oi+v"])), abs(max(df_calls["oi+v"])))
    ax.set_xlim(-axis_origin, +axis_origin)
    sns.set_style(style="darkgrid")
    # Open interest bars are drawn first (lighter), volume bars on top (darker),
    # so the visible light band is OI in excess of volume.
    g = sns.barplot(
        x="oi+v",
        y="strike",
        data=df_calls,
        label="Calls: Open Interest",
        color="lightgreen",
        orient="h",
    )
    g = sns.barplot(
        x="volume",
        y="strike",
        data=df_calls,
        label="Calls: Volume",
        color="green",
        orient="h",
    )
    g = sns.barplot(
        x="oi+v",
        y="strike",
        data=df_puts,
        label="Puts: Open Interest",
        color="pink",
        orient="h",
    )
    g = sns.barplot(
        x="volume",
        y="strike",
        data=df_puts,
        label="Puts: Volume",
        color="red",
        orient="h",
    )
    # draw spot line: horizontal position is the tick index closest to spot.
    s = [float(strike.get_text()) for strike in ax.get_yticklabels()]
    spot_index = bisect_left(s, current_price)  # find where the spot is on the graph
    spot_line = ax.axhline(spot_index, ls="--", color="dodgerblue", alpha=0.3)
    # draw max pain line
    max_pain_index = bisect_left(s, max_pain)
    max_pain_line = ax.axhline(max_pain_index, ls="-", color="black", alpha=0.3)
    max_pain_line.set_linewidth(3)
    # format ticklabels without - for puts
    g.set_xticks(g.get_xticks())
    xlabels = [f"{x:,.0f}".replace("-", "") for x in g.get_xticks()]
    g.set_xticklabels(xlabels)
    plt.title(
        f"{ticker} volumes for {exp_date} (open interest displayed only during market hours)"
    )
    ax.invert_yaxis()
    _ = ax.legend()
    handles, _ = ax.get_legend_handles_labels()
    handles.append(spot_line)
    handles.append(max_pain_line)
    # create legend labels + add to graph
    labels = [
        "Calls open interest",
        "Calls volume ",
        "Puts open interest",
        "Puts volume",
        "Current stock price",
        f"Max pain = {max_pain}",
    ]
    plt.legend(handles=handles[:], labels=labels)
    sns.despine(left=True, bottom=True)
    if gtff.USE_ION:
        plt.ion()
    plt.show()
    # Restore the default style so later plots are unaffected.
    plt.style.use("default")
    print("")
| 27.375
| 100
| 0.59589
|
24c11687e699dc5783df9228c2ba6577df76af2d
| 1,105
|
py
|
Python
|
usr/gre/gamelib.py
|
Bugnon/oc-2018
|
7961de5ba9923512bd50c579c37f1dadf070b692
|
[
"MIT"
] | 3
|
2018-09-20T12:16:48.000Z
|
2019-06-21T08:32:17.000Z
|
games/xxx/gamelib.py
|
Bugnon/oc-2018
|
7961de5ba9923512bd50c579c37f1dadf070b692
|
[
"MIT"
] | null | null | null |
games/xxx/gamelib.py
|
Bugnon/oc-2018
|
7961de5ba9923512bd50c579c37f1dadf070b692
|
[
"MIT"
] | 2
|
2018-09-20T11:55:05.000Z
|
2019-09-01T19:40:13.000Z
|
"""
gamelib is a SenseHAT game library module which provides
- color constants
- a function to display a color matrix
"""
from sense_hat import SenseHat
BLACK = (0, 0, 0)
LEMON = (255, 255, 128)
PINK = (255, 0, 128)
RED = (255, 0, 0)
MINT = (128, 255, 128)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
MAGENTA = (255, 0, 255)
CYAN = (0, 255, 255)
YELLOW = (255, 255, 0)
ORANGE = (255, 128, 0)
GRAY = (90,94,107)
CORAIL = (231, 62, 1)
BORDEAU = (91, 60, 19)
LIME = (220, 255, 0)
WHITE = (255, 255, 255)
def main():
    """Fill the 8x8 SenseHAT LED matrix, cycling through all color constants."""
    hat = SenseHat()
    palette = (BLACK, LEMON, PINK, RED, MINT, BLUE, GREEN, MAGENTA, CYAN,
               YELLOW, ORANGE, GRAY, CORAIL, BORDEAU, LIME, WHITE)
    total = len(palette)
    print(total, 'colors')
    # Pixel (col, row) gets palette entry (row*8 + col) modulo palette size.
    for col in range(8):
        for row in range(8):
            red, green, blue = palette[(row * 8 + col) % total]
            hat.set_pixel(col, row, red, green, blue)
# Execute the main() function when the file is executed,
# but do not execute when the module is imported as a module.
print('module name =', __name__)  # NOTE: this print runs on import too
if __name__ == '__main__':
    main()
| 24.555556
| 80
| 0.59819
|
325e329cfe779d43580df77eadd43e60fc338cfb
| 1,506
|
py
|
Python
|
tests/project/test_project_in_pc.py
|
tuxtof/calm-dsl
|
5af67435d8304b97e170a690068f2d5975e9bfe6
|
[
"Apache-2.0"
] | 37
|
2019-12-23T15:23:20.000Z
|
2022-03-15T11:12:11.000Z
|
tests/project/test_project_in_pc.py
|
gabybeitler/calm-dsl
|
bac453413cfcf800eef95d89d5a7323c83654a93
|
[
"Apache-2.0"
] | 144
|
2020-03-09T11:22:09.000Z
|
2022-03-28T21:34:09.000Z
|
tests/project/test_project_in_pc.py
|
gabybeitler/calm-dsl
|
bac453413cfcf800eef95d89d5a7323c83654a93
|
[
"Apache-2.0"
] | 46
|
2020-01-23T14:28:04.000Z
|
2022-03-09T04:17:10.000Z
|
import json

from calm.dsl.builtins import Project
from calm.dsl.builtins import Provider, Ref, read_local_file

# Test configuration read from a local JSON file; the first entry of each
# account type (and the first user) parameterizes the sample project below.
DSL_CONFIG = json.loads(read_local_file(".tests/config.json"))
ACCOUNTS = DSL_CONFIG["ACCOUNTS"]
NTNX_ACCOUNT = ACCOUNTS["NUTANIX_PC"][0]
NTNX_ACCOUNT_NAME = NTNX_ACCOUNT["NAME"]
NTNX_SUBNET = NTNX_ACCOUNT["SUBNETS"][0]["NAME"]
NTNX_SUBNET_CLUSTER = NTNX_ACCOUNT["SUBNETS"][0]["CLUSTER"]
AWS_ACCOUNT = ACCOUNTS["AWS"][0]
AWS_ACCOUNT_NAME = AWS_ACCOUNT["NAME"]
AZURE_ACCOUNT = ACCOUNTS["AZURE"][0]
AZURE_ACCOUNT_NAME = AZURE_ACCOUNT["NAME"]
GCP_ACCOUNT = ACCOUNTS["GCP"][0]
GCP_ACCOUNT_NAME = GCP_ACCOUNT["NAME"]
VMWARE_ACCOUNT = ACCOUNTS["VMWARE"][0]
VMWARE_ACCOUNT_NAME = VMWARE_ACCOUNT["NAME"]
K8S_ACCOUNT = ACCOUNTS["K8S"][0]
K8S_ACCOUNT_NAME = K8S_ACCOUNT["NAME"]
USER = DSL_CONFIG["USERS"][0]
USER_NAME = USER["NAME"]
class TestDslProject(Project):
    """Sample DSL Project exercising one provider of every supported type."""

    # One Provider entry per cloud account configured in the test config.
    providers = [
        Provider.Ntnx(
            account=Ref.Account(NTNX_ACCOUNT_NAME),
            subnets=[Ref.Subnet(name=NTNX_SUBNET, cluster=NTNX_SUBNET_CLUSTER)],
        ),
        Provider.Aws(account=Ref.Account(AWS_ACCOUNT_NAME)),
        Provider.Azure(account=Ref.Account(AZURE_ACCOUNT_NAME)),
        Provider.Gcp(account=Ref.Account(GCP_ACCOUNT_NAME)),
        Provider.Vmware(account=Ref.Account(VMWARE_ACCOUNT_NAME)),
        Provider.K8s(account=Ref.Account(K8S_ACCOUNT_NAME)),
    ]
    # Project membership and resource quotas applied on creation.
    users = [Ref.User(name=USER_NAME)]
    quotas = {"vcpus": 1, "storage": 2, "memory": 1}
| 29.529412
| 80
| 0.714475
|
2be5148d8e6d50b037c4aff48cf1196b53d8f484
| 3,119
|
py
|
Python
|
tests/test_serve.py
|
freemo/makinage
|
bab1a276b7d3a15f69e8236a0837bc17470afcc5
|
[
"MIT"
] | 22
|
2020-03-16T13:41:59.000Z
|
2022-03-14T10:16:36.000Z
|
tests/test_serve.py
|
freemo/makinage
|
bab1a276b7d3a15f69e8236a0837bc17470afcc5
|
[
"MIT"
] | 9
|
2020-10-22T21:20:22.000Z
|
2022-03-26T20:50:23.000Z
|
tests/test_serve.py
|
maki-nage/makinage
|
4a4a4f12af24a57a58c7ea0ddd95dbfabbcc6896
|
[
"MIT"
] | null | null | null |
import os
import pytest
from collections import namedtuple
from rx.subject import Subject
# Skip this whole module when the optional serve dependencies are missing.
pytest.importorskip("makinage.serve.serve")
from makinage.serve.serve import create_transform_functions, \
    create_model_predict, infer, serve
from makinage.sample.serve import ZeroModel, predict_zero
import numpy as np

# Pre-packaged mlflow pyfunc model archive used by the serve() tests below.
zero_model_dirname = os.path.join('assets', 'zero_mlflow_pyfunc.zip')
def test_create_transform_default():
    """An empty serve config yields pass-through pre/post transforms."""
    config = {'config': {'serve': {}}}
    transforms = create_transform_functions(config)
    assert transforms.pre([1]) == ([1], np.array([1]))
    assert transforms.post([1], [1]) == [(1, 1)]
def test_create_predict_default():
    """Without an explicit 'predict' entry, the model's default is returned."""
    config = {'config': {'serve': {}}}
    predict = create_model_predict(ZeroModel(), config)
    assert predict is predict_zero
def test_create_predict_custom():
    """A custom 'predict' entry is loaded and parameterized from the config."""
    config = {'config': {'serve': {
        'predict': 'makinage.sample.serve:predict',
        'ratio': 2,
    }}}
    predict = create_model_predict(ZeroModel(), config)
    assert predict(np.array([2])) == [(0.0, 4)]
def test_predict():
    """infer() runs predict once per item, wiring pre/post transforms."""
    calls = 0

    def fake_predict(batch):
        nonlocal calls
        calls += len(batch)
        return np.array([batch]) + 1

    config = {'config': {'serve': {
        'pre_transform': 'makinage.sample.serve:pre_transform',
        'post_transform': 'makinage.sample.serve:post_transform',
        'input_field': 'x', 'output_field': 'pred',
    }}}
    transforms = create_transform_functions(config)
    items = [
        {'x': 42},
    ]
    result = infer(items, transforms, fake_predict)
    assert calls == 1
    assert result == [
        {'x': 42, 'pred': [43]}
    ]
def test_serve():
    """End-to-end: serve() must emit predictions and honour config updates."""
    config = Subject()
    model = Subject()
    data = Subject()
    # serve() returns a 1-tuple of observables; unpack the prediction stream.
    prediction, = serve(config, model, data)
    actual_predictions = []
    prediction.subscribe(on_next=actual_predictions.append)
    # Push the initial configuration before any data arrives.
    config.on_next({'config': {'serve': {
        'predict': 'makinage.sample.serve:predict',
        'ratio': 2,
    }}})
    # Load the packaged zero-model archive as raw bytes.
    with open(zero_model_dirname, 'rb') as fd:
        model_archive_data = fd.read()
    model.on_next(model_archive_data)
    data.on_next(1)
    assert actual_predictions == [
        (1, (0.0, 2))
    ]
    # update config
    actual_predictions.clear()
    config.on_next({'config': {'serve': {
        'predict': 'makinage.sample.serve:predict',
        'ratio': 3,
    }}})
    data.on_next(1)
    # The new ratio must apply to predictions made after the config change.
    assert actual_predictions == [
        (1, (0.0, 3))
    ]
def test_serve_batch():
    """Items must be buffered until the configured batch size is reached."""
    config = Subject()
    model = Subject()
    data = Subject()
    prediction, = serve(config, model, data)
    actual_predictions = []
    prediction.subscribe(on_next=actual_predictions.append)
    config.on_next({'config': {'serve': {
        'predict': 'makinage.sample.serve:predict',
        'batch': 3,
        'ratio': 2,
    }}})
    with open(zero_model_dirname, 'rb') as fd:
        model_archive_data = fd.read()
    model.on_next(model_archive_data)
    data.on_next(1)
    data.on_next(1)
    # Only two of three items received: nothing emitted yet.
    assert actual_predictions == []
    data.on_next(1)
    # Third item completes the batch; all three predictions are flushed.
    assert actual_predictions == [
        (1, (0.0, 2)),
        (1, (0.0, 2)),
        (1, (0.0, 2)),
    ]
| 23.628788
| 69
| 0.60436
|
cf7163e6a0a9948d3bc4445f04c0ba8868309291
| 5,185
|
py
|
Python
|
cli/tests/test_polypod/test_custom_resources/test_mx_job_crd.py
|
polyaxon/cli
|
3543c0220a8a7c06fc9573cd2a740f8ae4930641
|
[
"Apache-2.0"
] | null | null | null |
cli/tests/test_polypod/test_custom_resources/test_mx_job_crd.py
|
polyaxon/cli
|
3543c0220a8a7c06fc9573cd2a740f8ae4930641
|
[
"Apache-2.0"
] | 1
|
2022-01-24T11:26:47.000Z
|
2022-03-18T23:17:58.000Z
|
cli/tests/test_polypod/test_custom_resources/test_mx_job_crd.py
|
polyaxon/cli
|
3543c0220a8a7c06fc9573cd2a740f8ae4930641
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from polyaxon.k8s.custom_resources.crd import get_custom_object
from polyaxon.lifecycle import V1Statuses
from polyaxon.polyflow import V1Notification, V1SchedulingPolicy
from polyaxon.polyflow.environment import V1Environment
from polyaxon.polyflow.termination import V1Termination
from polyaxon.polypod.custom_resources import get_mx_job_custom_resource
from tests.test_polypod.test_custom_resources.base_kubeflow import (
BaseKubeflowCRDTestCase,
)
class TestMXJobCRD(BaseKubeflowCRDTestCase):
    """Checks that get_mx_job_custom_resource builds the expected MXJob CRD."""

    def test_get_mx_job_custom_resource_with_no_workers(self):
        """With no replicas, the spec must contain empty replicaSpecs and defaults."""
        termination = V1Termination(max_retries=5, ttl=10, timeout=10)
        environment = V1Environment(
            labels={"foo": "bar"},
            annotations={"foo": "bar"},
            node_selector={"foo": "bar"},
            node_name="foo",
            restart_policy="Never",
        )
        # Expected operator payload: no replicas, default clean-pod policy.
        custom_object = {
            "mxJobSpec": {"cleanPodPolicy": "All", "replicaSpecs": {}},
            "termination": {
                "backoffLimit": termination.max_retries,
                "activeDeadlineSeconds": termination.timeout,
                "ttlSecondsAfterFinished": termination.ttl,
            },
            "collectLogs": False,
            "syncStatuses": False,
            "notifications": [],
        }
        expected_crd = get_custom_object(
            namespace="default",
            resource_name="foo",
            kind="Operation",
            api_version="core.polyaxon.com/v1",
            labels={"foo": "bar"},
            custom_object=custom_object,
            # Oversized annotation value exercises annotation passthrough.
            annotations={"foo": "long-foo-bar" * 300},
        )
        crd = get_mx_job_custom_resource(
            namespace="default",
            resource_name="foo",
            scheduler=None,
            worker=None,
            server=None,
            tuner=None,
            tuner_tracker=None,
            tuner_server=None,
            mode=None,
            clean_pod_policy=None,
            scheduling_policy=None,
            termination=termination,
            collect_logs=False,
            sync_statuses=False,
            notifications=None,
            labels=environment.labels,
            annotations={"foo": "long-foo-bar" * 300},
        )
        assert crd == expected_crd

    def test_get_mx_job_custom_resource(self):
        """Worker/tuner replicas, MXTune mode and notifications must round-trip."""
        termination = V1Termination(max_retries=5, ttl=10, timeout=10)
        environment = V1Environment(
            labels={"foo": "bar"},
            annotations={"foo": "bar"},
            node_selector={"foo": "bar"},
            node_name="foo",
            restart_policy="Never",
        )
        notifications = [V1Notification(connections=["test"], trigger=V1Statuses.DONE)]
        # Replica fixtures come from the Kubeflow base test case helper.
        tuner, tuner_replica_template = self.get_replica(environment)
        worker, worker_replica_template = self.get_replica(environment)
        template_spec = {
            "cleanPodPolicy": "Running",
            "schedulingPolicy": {"minAvailable": 1},
            "jobMode": "MXTune",
            "replicaSpecs": {
                "Worker": worker_replica_template,
                "Tuner": tuner_replica_template,
            },
        }
        custom_object = {
            "mxJobSpec": template_spec,
            "termination": {
                "backoffLimit": termination.max_retries,
                "activeDeadlineSeconds": termination.timeout,
                "ttlSecondsAfterFinished": termination.ttl,
            },
            "collectLogs": True,
            "syncStatuses": True,
            "notifications": [n.to_operator() for n in notifications],
        }
        expected_crd = get_custom_object(
            namespace="default",
            resource_name="foo",
            kind="Operation",
            api_version="core.polyaxon.com/v1",
            labels={"foo": "bar"},
            annotations={"foo": "bar"},
            custom_object=custom_object,
        )
        crd = get_mx_job_custom_resource(
            namespace="default",
            resource_name="foo",
            scheduler=None,
            worker=worker,
            server=None,
            tuner=tuner,
            tuner_tracker=None,
            tuner_server=None,
            mode="MXTune",
            scheduling_policy=V1SchedulingPolicy(min_available=1),
            clean_pod_policy="Running",
            termination=termination,
            labels=environment.labels,
            annotations={"foo": "bar"},
            notifications=notifications,
            collect_logs=True,
            sync_statuses=True,
        )
        assert crd == expected_crd
| 36.006944
| 87
| 0.593635
|
c621415aa32b05b48b210feb4d1a014dba422657
| 143
|
py
|
Python
|
source/tutorial/map/urls.py
|
sinjorjob/GeoDjango_demo_site
|
357e31119932d9a528d05faa9cdc18cf432bdea5
|
[
"MIT"
] | null | null | null |
source/tutorial/map/urls.py
|
sinjorjob/GeoDjango_demo_site
|
357e31119932d9a528d05faa9cdc18cf432bdea5
|
[
"MIT"
] | 1
|
2021-05-11T08:06:18.000Z
|
2021-05-11T08:06:18.000Z
|
source/tutorial/map/urls.py
|
sinjorjob/GeoDjango_demo_site
|
357e31119932d9a528d05faa9cdc18cf432bdea5
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
    # Serve the class-based Home view at /map/.
    path('map/', views.Home.as_view()),
]
| 14.3
| 39
| 0.706294
|
d33bb1f6f4e0a4ae6197804ba9c6b93dfb6a510f
| 787
|
py
|
Python
|
tests/unit/assets/asset.py
|
amaas-fintech/amaas-core-sdk-python
|
bd77884de6e5ab05d864638addeb4bb338a51183
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/assets/asset.py
|
amaas-fintech/amaas-core-sdk-python
|
bd77884de6e5ab05d864638addeb4bb338a51183
|
[
"Apache-2.0"
] | 8
|
2017-06-06T09:42:41.000Z
|
2018-01-16T10:16:16.000Z
|
tests/unit/assets/asset.py
|
amaas-fintech/amaas-core-sdk-python
|
bd77884de6e5ab05d864638addeb4bb338a51183
|
[
"Apache-2.0"
] | 8
|
2017-01-18T04:14:01.000Z
|
2017-12-01T08:03:10.000Z
|
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from amaascore.tools.generate_asset import generate_asset
from amaascore.tools.generate_asset import generate_bond
class AssetTest(unittest.TestCase):
    """Checks display names and pricing method of generated assets and bonds."""

    def setUp(self):
        self.longMessage = True  # Print complete error message on failure

    def tearDown(self):
        # No per-test cleanup required.
        pass

    def test_AssetTypeDisplay(self):
        """Generated fixtures must report their type display and pricing method."""
        asset = generate_asset()
        bond = generate_bond()
        self.assertEqual(asset.asset_type_display, 'Asset')
        self.assertEqual(bond.asset_type_display, 'Bond Government')
        # Both asset kinds report the same derived pricing method.
        self.assertEqual(asset.pricing_method(), 'Derived')
        self.assertEqual(bond.pricing_method(), 'Derived')


if __name__ == '__main__':
    unittest.main()
| 29.148148
| 82
| 0.728081
|
8c27f88fa0b6cfc6a4007e410ec0a994de8ebd3b
| 2,057
|
py
|
Python
|
Cryptography/RSA.py
|
LucasEmmes/pythonScripts
|
0316b355795cd7f99012321a94492d57af60dd8d
|
[
"MIT"
] | null | null | null |
Cryptography/RSA.py
|
LucasEmmes/pythonScripts
|
0316b355795cd7f99012321a94492d57af60dd8d
|
[
"MIT"
] | null | null | null |
Cryptography/RSA.py
|
LucasEmmes/pythonScripts
|
0316b355795cd7f99012321a94492d57af60dd8d
|
[
"MIT"
] | null | null | null |
import math
import random
def RSA_generate_keys(*args):
    """
    Generates a pair of public and private RSA keys.

    PARAMS:
        *args: ((int), (int)) two prime numbers to use as p and q.
            Omitting them is not implemented yet and raises
            NotImplementedError (previously this crashed with a NameError
            because p and q were never assigned).

    RETURNS:
        If two primes were provided (int, int, int): (e, d, N)
        Otherwise (int, int, int, int, int): (p, q, e, d, N)
        (The docstring previously claimed (e, d, N, p, q); the code has
        always returned p and q first.)
    """
    # p, q = prime1, prime2
    if len(args) == 2:
        p, q = args
    else:
        # TODO: get numbers from a prime generator
        raise NotImplementedError(
            "automatic prime generation is not implemented; pass two primes")

    # N = pq
    N = p * q
    # phi(N) = (p-1)(q-1)
    phi_N = (p - 1) * (q - 1)

    # e: 1 < e < phi(N), coprime to phi(N)
    e_candidates = [i for i in range(2, phi_N) if math.gcd(i, phi_N) == 1]

    # Pick a random e (random.choice is the idiomatic way to sample a list)
    e = random.choice(e_candidates)

    # d is the modular inverse of e: d*e % phi(N) == 1
    d = 0
    for i in range(phi_N):
        if i * e % phi_N == 1:
            d = i
            break

    if len(args) == 2:
        return e, d, N
    return p, q, e, d, N
def RSA_encrypt(e, N, m):
    """
    Encrypts the integer message ``m`` using the public key ``(e, N)``.

    Raises:
        ValueError: if ``m`` is not strictly smaller than ``N``.
            (The previous check used ``m > N``, which accepted ``m == N``
            even though that message encrypts to 0 and cannot be recovered.)
    """
    if m >= N:
        print(f"Message is too large!\nPlease make sure it is less than {N}")
        raise ValueError
    # pow(m, e, N) does modular exponentiation in C; m**e % N would first
    # build an astronomically large intermediate integer.
    return pow(m, e, N)
# m = c**d % n
def RSA_decrypt(d, N, c):
    """
    Decrypts ciphertext ``c`` using the private key ``(d, N)``.

    Computes m = c**d % N with pow()'s three-argument form, which performs
    modular exponentiation without building the huge intermediate c**d.
    """
    return pow(c, d, N)
# TESTING FROM HERE. REMOVE IF YOU DONT NEED IT
def RSA_demo():
    """Interactive demo: generate keys, encrypt and decrypt a fixed message."""
    m = 42069  # sample plaintext (must be smaller than N = p*q)
    p, q = 523, 541  # fixed demo primes
    print(f"Provided primes were p={p}, q={q}")
    e, d, N = RSA_generate_keys(p, q)
    print(f"e={e}, d={d}, N={N}")
    ct = RSA_encrypt(e, N, m)
    print(f"Message {m} was encrypted as {ct}")
    pt = RSA_decrypt(d, N, ct)
    print(f"Ciphertext {ct} was decrypted as {pt}")
    # Keep the console window open until the user confirms.
    pause = input("Press enter to exit")


# NOTE(review): runs the demo at import time — consider an
# `if __name__ == "__main__":` guard if this module is ever imported.
RSA_demo()
| 24.783133
| 174
| 0.555664
|
96544f3fc9948d04ff94a6c0f147621c325996da
| 793
|
py
|
Python
|
algorithms/ar-binsrt/python2/bin_sort.py
|
NuclearCactus/FOSSALGO
|
eb66f3bdcd6c42c66e8fc7110a32ac021596ca66
|
[
"MIT"
] | 59
|
2018-09-11T17:40:25.000Z
|
2022-03-03T14:40:39.000Z
|
algorithms/ar-binsrt/python2/bin_sort.py
|
RitvikDayal/FOSSALGO
|
ae225a5fffbd78d0dff83fd7b178ba47bfd7a769
|
[
"MIT"
] | 468
|
2018-08-28T17:04:29.000Z
|
2021-12-03T15:16:34.000Z
|
algorithms/ar-binsrt/python2/bin_sort.py
|
RitvikDayal/FOSSALGO
|
ae225a5fffbd78d0dff83fd7b178ba47bfd7a769
|
[
"MIT"
] | 253
|
2018-08-28T17:08:51.000Z
|
2021-11-01T12:30:39.000Z
|
def insertionSort(b):
    """Sort list ``b`` in place with insertion sort and return it.

    ``range`` replaces the Python-2-only ``xrange`` so the function runs
    unchanged on both Python 2 and Python 3 (range also lazily iterates
    on Python 3; on Python 2 the lists here are tiny).
    """
    for i in range(1, len(b)):
        up = b[i]
        j = i - 1
        # Shift larger elements one slot right to make room for `up`.
        while j >= 0 and b[j] > up:
            b[j+1] = b[j]
            j = j - 1
        b[j+1] = up
    return b
def bucketSort(x):
    """Bucket-sort the floats in ``x`` (values assumed to lie in [0, 1)).

    The input list is overwritten with the sorted values and returned.
    ``range`` replaces the Python-2-only ``xrange`` so the function runs
    on both Python 2 and Python 3.
    """
    arr = []
    slot_num = 10  # one bucket per 0.1 interval
    for i in range(slot_num):
        arr.append([])
    # Scatter: place each value into the bucket for its first decimal digit.
    for j in x:
        index_b = int(slot_num * j)
        arr[index_b].append(j)
    # Sort each bucket individually with insertion sort.
    for i in range(slot_num):
        arr[i] = insertionSort(arr[i])
    # Gather the buckets back into the original list, in order.
    k = 0
    for i in range(slot_num):
        for j in range(len(arr[i])):
            x[k] = arr[i][j]
            k += 1
    return x
def main():
    """Demo entry point: bucket-sort a fixed list of floats and print them."""
    x = [0.1534, 0.9493, 0.4969,
         0.8888, 0.663, 0.525]
    # print() call syntax with a single argument is valid on both
    # Python 2 and Python 3 (the original `print x` statement was py2-only).
    print("Sorted Array is")
    for no in bucketSort(x):
        print(no)


if __name__ == "__main__":
    main()
| 20.333333
| 38
| 0.470366
|
96c137546871aad380aeffab231528fb3f4c1171
| 556
|
py
|
Python
|
ex01/funcoes/__init__.py
|
duartele/exerc-python
|
fc149a5bcd0686ba4cad87e95277658f9bbdc63b
|
[
"MIT"
] | null | null | null |
ex01/funcoes/__init__.py
|
duartele/exerc-python
|
fc149a5bcd0686ba4cad87e95277658f9bbdc63b
|
[
"MIT"
] | null | null | null |
ex01/funcoes/__init__.py
|
duartele/exerc-python
|
fc149a5bcd0686ba4cad87e95277658f9bbdc63b
|
[
"MIT"
] | null | null | null |
def leiaInt(msg):
    """Keep prompting with *msg* until the user types a valid integer."""
    value = None
    while value is None:
        try:
            value = int(input(msg))
        except (ValueError, TypeError):
            # Invalid entry: warn in red and ask again.
            print('\033[31mERRO: por favor, digite um número válido.\033[m')
    return value
def linha(tam=42):
    """Return a horizontal rule made of *tam* dashes (default 42)."""
    return tam * '-'
def cabecalho(txt):
    """Print *txt* centred between two horizontal rules."""
    for piece in (linha(), txt.center(42), linha()):
        print(piece)
def menu(lista):
    """Show a numbered menu built from *lista* and return the chosen option."""
    cabecalho('MENU PRINCIPAL')
    # enumerate replaces the original manual counter variable.
    for numero, item in enumerate(lista, start=1):
        print(f'{numero} - {item}')
    print(linha())
    return leiaInt('Sua Opcao: ')
| 18.533333
| 76
| 0.526978
|
e30b07049440a270ba7d0ccecbc255befa29154e
| 4,249
|
py
|
Python
|
reference/ddtrace/contrib/falcon/middleware.py
|
stschenk/opentelemetry-python-contrib
|
28c1331e571d386baab74f5028e3268e4bfda4cd
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
reference/ddtrace/contrib/falcon/middleware.py
|
stschenk/opentelemetry-python-contrib
|
28c1331e571d386baab74f5028e3268e4bfda4cd
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2020-12-12T17:59:41.000Z
|
2020-12-12T18:54:03.000Z
|
reference/ddtrace/contrib/falcon/middleware.py
|
stschenk/opentelemetry-python-contrib
|
28c1331e571d386baab74f5028e3268e4bfda4cd
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2020-10-22T04:16:33.000Z
|
2020-10-22T04:16:33.000Z
|
import sys
from ddtrace.ext import SpanTypes, http as httpx
from ddtrace.http import store_request_headers, store_response_headers
from ddtrace.propagation.http import HTTPPropagator
from ...compat import iteritems
from ...constants import ANALYTICS_SAMPLE_RATE_KEY
from ...settings import config
class TraceMiddleware(object):
    """Falcon middleware that opens/closes a ddtrace span per request."""

    def __init__(self, tracer, service='falcon', distributed_tracing=True):
        # store tracing references
        self.tracer = tracer
        self.service = service
        self._distributed_tracing = distributed_tracing

    def process_request(self, req, resp):
        """Start the request span, optionally continuing a distributed trace."""
        if self._distributed_tracing:
            # Falcon uppercases all header names.
            headers = dict((k.lower(), v) for k, v in iteritems(req.headers))
            propagator = HTTPPropagator()
            context = propagator.extract(headers)
            # Only activate the new context if there was a trace id extracted
            if context.trace_id:
                self.tracer.context_provider.activate(context)
        span = self.tracer.trace(
            'falcon.request',
            service=self.service,
            span_type=SpanTypes.WEB,
        )
        # set analytics sample rate with global config enabled
        span.set_tag(
            ANALYTICS_SAMPLE_RATE_KEY,
            config.falcon.get_analytics_sample_rate(use_global_config=True)
        )
        span.set_tag(httpx.METHOD, req.method)
        span.set_tag(httpx.URL, req.url)
        if config.falcon.trace_query_string:
            span.set_tag(httpx.QUERY_STRING, req.query_string)
        # Note: any request header set after this line will not be stored in the span
        store_request_headers(req.headers, span, config.falcon)

    def process_resource(self, req, resp, resource, params):
        """Set the span resource name once routing has matched a resource."""
        span = self.tracer.current_span()
        if not span:
            return  # unexpected
        span.resource = '%s %s' % (req.method, _name(resource))

    def process_response(self, req, resp, resource, req_succeeded=None):
        """Finish the span, inferring the status code for errors and 404s."""
        # req_succeded is not a kwarg in the API, but we need that to support
        # Falcon 1.0 that doesn't provide this argument
        span = self.tracer.current_span()
        if not span:
            return  # unexpected
        status = httpx.normalize_status_code(resp.status)
        # Note: any response header set after this line will not be stored in the span
        store_response_headers(resp._headers, span, config.falcon)
        # FIXME[matt] falcon does not map errors or unmatched routes
        # to proper status codes, so we we have to try to infer them
        # here. See https://github.com/falconry/falcon/issues/606
        if resource is None:
            status = '404'
            span.resource = '%s 404' % req.method
            span.set_tag(httpx.STATUS_CODE, status)
            span.finish()
            return
        err_type = sys.exc_info()[0]
        if err_type is not None:
            if req_succeeded is None:
                # backward-compatibility with Falcon 1.0; any version
                # greater than 1.0 has req_succeded in [True, False]
                # TODO[manu]: drop the support at some point
                status = _detect_and_set_status_error(err_type, span)
            elif req_succeeded is False:
                # Falcon 1.1+ provides that argument that is set to False
                # if get an Exception (404 is still an exception)
                status = _detect_and_set_status_error(err_type, span)
        span.set_tag(httpx.STATUS_CODE, status)
        # Emit span hook for this response
        # DEV: Emit before closing so they can overwrite `span.resource` if they want
        config.falcon.hooks._emit('request', span, req, resp)
        # Close the span
        span.finish()
def _is_404(err_type):
    """Return True when the exception class name marks a Falcon 404."""
    type_name = err_type.__name__
    return 'HTTPNotFound' in type_name
def _detect_and_set_status_error(err_type, span):
    """Detect the HTTP status code from the current stacktrace and
    set the traceback to the given Span.

    Returns '404' for Falcon's HTTPNotFound; any other error records the
    traceback on the span and returns '500'.  (The original if/elif pair
    called _is_404 twice and its elif branch was always true — this is the
    equivalent single-check form, which can no longer fall through to an
    implicit None return.)
    """
    if _is_404(err_type):
        return '404'
    span.set_traceback()
    return '500'
def _name(r):
    """Return the dotted ``module.ClassName`` path of resource *r*."""
    return '{0}.{1}'.format(r.__module__, r.__class__.__name__)
| 36.316239
| 86
| 0.645328
|
5dc0d12839d826d5b9cc07a16aa43d65508f75ad
| 27,516
|
py
|
Python
|
pgAdmin4/pgAdmin4/lib/python2.7/site-packages/pgadmin4/regression/python_test_utils/test_utils.py
|
Anillab/One-Minute-Pitch
|
123f7b2010d3ae0f031066db1bcfe6eda7a41e84
|
[
"MIT"
] | 4
|
2019-10-03T21:58:22.000Z
|
2021-02-12T13:33:32.000Z
|
pgAdmin4/pgAdmin4/lib/python2.7/site-packages/pgadmin4/regression/python_test_utils/test_utils.py
|
Anillab/One-Minute-Pitch
|
123f7b2010d3ae0f031066db1bcfe6eda7a41e84
|
[
"MIT"
] | 4
|
2020-01-22T13:45:12.000Z
|
2022-02-10T20:58:09.000Z
|
pgAdmin4/pgAdmin4/lib/python2.7/site-packages/pgadmin4/regression/python_test_utils/test_utils.py
|
Anillab/One-Minute-Pitch
|
123f7b2010d3ae0f031066db1bcfe6eda7a41e84
|
[
"MIT"
] | 1
|
2021-01-13T09:30:29.000Z
|
2021-01-13T09:30:29.000Z
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2018, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
from __future__ import print_function
import traceback
import os
import sys
import uuid
import psycopg2
import sqlite3
from functools import partial
from testtools.testcase import clone_test_with_new_id
import config
import regression
from regression import test_setup
SERVER_GROUP = test_setup.config_data['server_group']
file_name = os.path.realpath(__file__)
def get_db_connection(db, username, password, host, port, sslmode="prefer"):
    """Open and return a psycopg2 connection to the given database."""
    return psycopg2.connect(
        database=db,
        user=username,
        password=password,
        host=host,
        port=port,
        sslmode=sslmode
    )
def login_tester_account(tester):
    """
    This function login the test client using env variables email and password
    :param tester: test client
    :type tester: flask test client object
    :return: None
    """
    # NOTE(review): raises KeyError if either env var is unset — presumably
    # callers guarantee they exist; verify against the test runner setup.
    if os.environ['PGADMIN_SETUP_EMAIL'] and \
            os.environ['PGADMIN_SETUP_PASSWORD']:
        email = os.environ['PGADMIN_SETUP_EMAIL']
        password = os.environ['PGADMIN_SETUP_PASSWORD']
        tester.post('/login', data=dict(email=email, password=password),
                    follow_redirects=True)
    else:
        from regression.runtests import app_starter
        print("Unable to login test client, email and password not found.",
              file=sys.stderr)
        # Clean up created test objects and stop the app before aborting.
        _cleanup(tester, app_starter)
        sys.exit(1)
def logout_tester_account(tester):
    """
    This function logout the test account
    :param tester: test client
    :type tester: flask test client object
    :return: None
    """
    # The response body is not needed; issuing the GET performs the logout.
    # (Removed the unused `response` local.)
    tester.get('/logout')
def get_config_data():
    """Read the enabled server entries from the test configuration.

    :return: list of dicts with the connection parameters of each server
    :rtype: list
    """
    server_data = []
    for srv in test_setup.config_data['server_credentials']:
        # A server entry is used unless it is explicitly disabled.
        # (`'enabled' not in srv` is the idiomatic form of `not 'enabled' in srv`.)
        if 'enabled' not in srv or srv['enabled']:
            data = {"name": srv['name'],
                    "comment": srv['comment'],
                    "host": srv['host'],
                    "port": srv['db_port'],
                    "db": srv['maintenance_db'],
                    "username": srv['db_username'],
                    "db_password": srv['db_password'],
                    "role": "",
                    "sslmode": srv['sslmode'],
                    "tablespace_path": srv.get('tablespace_path', None)}
            server_data.append(data)
    return server_data
def write_node_info(key, node_info=None):
    """
    This function appends the node details to the shared
    regression.node_info_dict under *key*, skipping duplicates.
    :param key: dict key name to store node info
    :type key: str
    :param node_info: node details
    :type node_info: dict
    :return: None (the shared node_info_dict is mutated in place;
        the previous docstring wrongly claimed the dict was returned)
    """
    node_info_dict = regression.node_info_dict
    # Avoid duplicate entries for the same node.
    if node_info not in node_info_dict[key]:
        node_info_dict[key].append(node_info)
def clear_node_info_dict():
    """Empty every node list held in the shared regression.node_info_dict."""
    for entries in regression.node_info_dict.values():
        # del entries[:] keeps the list object (other references stay valid).
        del entries[:]
def create_database(server, db_name):
    """This function used to create database and returns the database id

    :param server: connection parameters of the maintenance database
    :type server: dict
    :param db_name: name of the database to create
    :type db_name: str
    :return: the new database's oid, '' if not found, or None on error
    """
    try:
        connection = get_db_connection(server['db'],
                                       server['username'],
                                       server['db_password'],
                                       server['host'],
                                       server['port'],
                                       server['sslmode'])
        # CREATE DATABASE cannot run inside a transaction: switch to
        # autocommit (isolation level 0), then restore the old level.
        old_isolation_level = connection.isolation_level
        connection.set_isolation_level(0)
        pg_cursor = connection.cursor()
        pg_cursor.execute(
            '''CREATE DATABASE "%s" TEMPLATE template0''' % db_name)
        connection.set_isolation_level(old_isolation_level)
        connection.commit()
        # Get 'oid' from newly created database
        pg_cursor.execute("SELECT db.oid from pg_database db WHERE"
                          " db.datname='%s'" % db_name)
        oid = pg_cursor.fetchone()
        db_id = ''
        if oid:
            db_id = oid[0]
        connection.close()
        return db_id
    except Exception:
        # Best-effort helper for tests: log the failure and fall through.
        traceback.print_exc(file=sys.stderr)
def create_table(server, db_name, table_name):
    """
    This function create the table in given database name and seeds it with
    three fixed sample rows (some_column, value, details).
    :param server: server details
    :type server: dict
    :param db_name: database name
    :type db_name: str
    :param table_name: table name
    :type table_name: str
    :return: None
    """
    try:
        connection = get_db_connection(db_name,
                                       server['username'],
                                       server['db_password'],
                                       server['host'],
                                       server['port'],
                                       server['sslmode'])
        # Run in autocommit mode, then restore the previous isolation level.
        old_isolation_level = connection.isolation_level
        connection.set_isolation_level(0)
        pg_cursor = connection.cursor()
        pg_cursor.execute(
            '''CREATE TABLE "%s" (some_column VARCHAR, value NUMERIC, details VARCHAR)''' %
            table_name)
        pg_cursor.execute(
            '''INSERT INTO "%s" VALUES ('Some-Name', 6, 'some info')''' % table_name)
        pg_cursor.execute(
            '''INSERT INTO "%s" VALUES ('Some-Other-Name', 22, 'some other info')''' % table_name)
        pg_cursor.execute(
            '''INSERT INTO "%s" VALUES ('Yet-Another-Name', 14, 'cool info')''' % table_name)
        connection.set_isolation_level(old_isolation_level)
        connection.commit()
    except Exception:
        traceback.print_exc(file=sys.stderr)
def create_table_with_query(server, db_name, query):
    """
    This function create the table in given database name
    :param server: server details
    :type server: dict
    :param db_name: database name
    :type db_name: str
    :param query: create table query
    :type query: str
    :return: None
    """
    try:
        connection = get_db_connection(db_name,
                                       server['username'],
                                       server['db_password'],
                                       server['host'],
                                       server['port'],
                                       server['sslmode'])
        # Run in autocommit mode, then restore the previous isolation level.
        old_isolation_level = connection.isolation_level
        connection.set_isolation_level(0)
        pg_cursor = connection.cursor()
        pg_cursor.execute(query)
        connection.set_isolation_level(old_isolation_level)
        connection.commit()
    except Exception:
        traceback.print_exc(file=sys.stderr)
def create_constraint(
        server, db_name, table_name,
        constraint_type="unique", constraint_name="test_unique"):
    """Add a constraint of *constraint_type* on some_column of *table_name*.

    The constraint type is upper-cased and spliced into the ALTER TABLE
    statement (e.g. "unique" -> UNIQUE).
    """
    try:
        connection = get_db_connection(db_name,
                                       server['username'],
                                       server['db_password'],
                                       server['host'],
                                       server['port'],
                                       server['sslmode'])
        # Run in autocommit mode, then restore the previous isolation level.
        old_isolation_level = connection.isolation_level
        connection.set_isolation_level(0)
        pg_cursor = connection.cursor()
        pg_cursor.execute('''
            ALTER TABLE "%s"
              ADD CONSTRAINT "%s" %s (some_column)
            ''' % (table_name, constraint_name, constraint_type.upper())
        )
        connection.set_isolation_level(old_isolation_level)
        connection.commit()
    except Exception:
        traceback.print_exc(file=sys.stderr)
def create_debug_function(server, db_name, function_name="test_func"):
    """Create a plpgsql function that RAISEs notices (used by debugger tests).

    The notices deliberately contain HTML/JS snippets to exercise output
    escaping in the client.
    """
    try:
        connection = get_db_connection(db_name,
                                       server['username'],
                                       server['db_password'],
                                       server['host'],
                                       server['port'],
                                       server['sslmode'])
        # Run in autocommit mode, then restore the previous isolation level.
        old_isolation_level = connection.isolation_level
        connection.set_isolation_level(0)
        pg_cursor = connection.cursor()
        pg_cursor.execute('''
            CREATE OR REPLACE FUNCTION public."%s"()
              RETURNS text
                LANGUAGE 'plpgsql'
                COST 100.0
                VOLATILE
            AS $function$
              BEGIN
                RAISE INFO 'This is a test function';
                RAISE NOTICE '<img src="x" onerror="console.log(1)">';
                RAISE NOTICE '<h1 onmouseover="console.log(1);">';
                RETURN 'Hello, pgAdmin4';
              END;
            $function$;
            ''' % (function_name)
        )
        connection.set_isolation_level(old_isolation_level)
        connection.commit()
    except Exception:
        traceback.print_exc(file=sys.stderr)
def drop_debug_function(server, db_name, function_name="test_func"):
    """Drop the plpgsql debug function created by create_debug_function."""
    try:
        connection = get_db_connection(db_name,
                                       server['username'],
                                       server['db_password'],
                                       server['host'],
                                       server['port'],
                                       server['sslmode'])
        # Run in autocommit mode, then restore the previous isolation level.
        old_isolation_level = connection.isolation_level
        connection.set_isolation_level(0)
        pg_cursor = connection.cursor()
        pg_cursor.execute('''
            DROP FUNCTION public."%s"();
            ''' % (function_name)
        )
        connection.set_isolation_level(old_isolation_level)
        connection.commit()
    except Exception:
        traceback.print_exc(file=sys.stderr)
def create_role(server, db_name, role_name="test_role"):
    """Create a login-capable, CREATEDB test role in the given database."""
    try:
        connection = get_db_connection(db_name,
                                       server['username'],
                                       server['db_password'],
                                       server['host'],
                                       server['port'],
                                       server['sslmode'])
        # Run in autocommit mode, then restore the previous isolation level.
        old_isolation_level = connection.isolation_level
        connection.set_isolation_level(0)
        pg_cursor = connection.cursor()
        pg_cursor.execute('''
            CREATE USER "%s" WITH
              LOGIN
              NOSUPERUSER
              INHERIT
              CREATEDB
              NOCREATEROLE
              NOREPLICATION
            ''' % (role_name)
        )
        connection.set_isolation_level(old_isolation_level)
        connection.commit()
    except Exception:
        traceback.print_exc(file=sys.stderr)
def drop_role(server, db_name, role_name="test_role"):
    """Drop the test role created by create_role."""
    try:
        connection = get_db_connection(db_name,
                                       server['username'],
                                       server['db_password'],
                                       server['host'],
                                       server['port'],
                                       server['sslmode'])
        # Run in autocommit mode, then restore the previous isolation level.
        old_isolation_level = connection.isolation_level
        connection.set_isolation_level(0)
        pg_cursor = connection.cursor()
        pg_cursor.execute('''
            DROP USER "%s"
            ''' % (role_name)
        )
        connection.set_isolation_level(old_isolation_level)
        connection.commit()
    except Exception:
        traceback.print_exc(file=sys.stderr)
def drop_database(connection, database_name):
    """This function used to drop the database

    System databases (postgres, template0/1) are never dropped. Active
    backends connected to the target database are terminated first, using
    the pre-9.1 or 9.1+ pg_stat_activity column names as appropriate.
    """
    if database_name not in ["postgres", "template1", "template0"]:
        pg_cursor = connection.cursor()
        if connection.server_version >= 90100:
            # 9.1+: pg_stat_activity exposes `pid`.
            pg_cursor.execute(
                "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity "
                "WHERE pg_stat_activity.datname ='%s' AND pid <> pg_backend_pid();" % database_name
            )
        else:
            # Pre-9.1: the column was named `procpid`.
            pg_cursor.execute(
                "SELECT pg_terminate_backend(procpid) FROM pg_stat_activity " \
                "WHERE pg_stat_activity.datname ='%s' AND current_query='<IDLE>';" % database_name
            )
        pg_cursor.execute("SELECT * FROM pg_database db WHERE"
                          " db.datname='%s'" % database_name)
        if pg_cursor.fetchall():
            # DROP DATABASE requires autocommit mode.
            old_isolation_level = connection.isolation_level
            connection.set_isolation_level(0)
            pg_cursor.execute('''DROP DATABASE "%s"''' % database_name)
            connection.set_isolation_level(old_isolation_level)
            connection.commit()
        connection.close()
def drop_tablespace(connection):
    """This function used to drop the tablespace

    All non-system tablespaces (everything except pg_default/pg_global)
    are dropped; the connection is closed afterwards.
    """
    pg_cursor = connection.cursor()
    pg_cursor.execute("SELECT * FROM pg_tablespace")
    table_spaces = pg_cursor.fetchall()
    if table_spaces:
        for table_space in table_spaces:
            if table_space[0] not in ["pg_default", "pg_global"]:
                # DROP TABLESPACE requires autocommit mode.
                old_isolation_level = connection.isolation_level
                connection.set_isolation_level(0)
                pg_cursor.execute("DROP TABLESPACE %s" % table_space[0])
                connection.set_isolation_level(old_isolation_level)
                connection.commit()
    connection.close()
def create_server(server):
    """This function is used to create server

    Inserts the server row into pgAdmin's SQLite settings database and
    records it in regression.parent_node_dict.

    :return: the SQLite row id of the new server
    :raises Exception: wraps any underlying failure with context
    """
    try:
        conn = sqlite3.connect(config.TEST_SQLITE_PATH)
        # Create the server
        cur = conn.cursor()
        server_details = (1, SERVER_GROUP, server['name'], server['host'],
                          server['port'], server['db'], server['username'],
                          server['role'], server['sslmode'], server['comment'])
        cur.execute('INSERT INTO server (user_id, servergroup_id, name, host, '
                    'port, maintenance_db, username, role, ssl_mode,'
                    ' comment) VALUES (?,?,?,?,?,?,?,?,?,?)', server_details)
        server_id = cur.lastrowid
        conn.commit()
        # Add server info to parent_node_dict
        regression.parent_node_dict["server"].append({"server_id": server_id,
                                                      "server": server})
        return server_id
    except Exception as exception:
        raise Exception("Error while creating server. %s" % exception)
def delete_server_with_api(tester, sid):
    """This function used to delete server from SQLite

    :param tester: flask test client
    :param sid: server id to delete
    :return: None
    """
    try:
        url = '/browser/server/obj/' + str(SERVER_GROUP) + "/"
        # Call API to delete the server; the response is not inspected.
        # (Removed the unused `response` local.)
        tester.delete(url + str(sid))
    except Exception:
        traceback.print_exc(file=sys.stderr)
def add_db_to_parent_node_dict(srv_id, db_id, test_db_name):
    """Record the created database under the shared parent node registry."""
    entry = {"server_id": srv_id,
             "db_id": db_id,
             "db_name": test_db_name}
    regression.parent_node_dict["database"].append(entry)
def add_schema_to_parent_node_dict(srv_id, db_id, schema_id, schema_name):
    """Record the created schema under the shared parent node registry."""
    entry = {"server_id": srv_id,
             "db_id": db_id,
             "schema_id": schema_id,
             "schema_name": schema_name}
    regression.parent_node_dict["schema"].append(entry)
def create_parent_server_node(server_info):
    """
    This function create the test server which will act as parent server,
    the other node will add under this server
    :param server_info: server details
    :type server_info: dict
    :return: None
    """
    srv_id = create_server(server_info)
    # Create database with a random suffix to avoid clashes between runs.
    test_db_name = "test_db_%s" % str(uuid.uuid4())[1:6]
    db_id = create_database(server_info, test_db_name)
    add_db_to_parent_node_dict(srv_id, db_id, test_db_name)
    # Create schema inside the new database, again with a random suffix.
    schema_name = "test_schema_%s" % str(uuid.uuid4())[1:6]
    connection = get_db_connection(test_db_name,
                                   server_info['username'],
                                   server_info['db_password'],
                                   server_info['host'],
                                   server_info['port'],
                                   server_info['sslmode'])
    schema = regression.schema_utils.create_schema(connection, schema_name)
    # schema is presumably (oid, name) — recorded for later cleanup.
    add_schema_to_parent_node_dict(srv_id, db_id, schema[0],
                                   schema[1])
def delete_test_server(tester):
    """ This function use to delete test server

    Drops every database, role and tablespace recorded during the run,
    then deletes each test server through the REST API. Re-raises after
    logging so callers notice cleanup failures.
    """
    try:
        parent_node_dict = regression.parent_node_dict
        test_servers = parent_node_dict["server"]
        test_databases = parent_node_dict["database"]
        test_roles = regression.node_info_dict["lrid"]
        test_table_spaces = regression.node_info_dict["tsid"]
        for test_server in test_servers:
            srv_id = test_server["server_id"]
            servers_dict = test_server["server"]
            for database in test_databases:
                # A fresh connection per object: drop_database closes it.
                connection = get_db_connection(servers_dict['db'],
                                               servers_dict['username'],
                                               servers_dict['db_password'],
                                               servers_dict['host'],
                                               servers_dict['port'],
                                               servers_dict['sslmode'])
                database_name = database["db_name"]
                # Drop database
                drop_database(connection, database_name)
            for role in test_roles:
                connection = get_db_connection(servers_dict['db'],
                                               servers_dict['username'],
                                               servers_dict['db_password'],
                                               servers_dict['host'],
                                               servers_dict['port'],
                                               servers_dict['sslmode'])
                # Delete role
                regression.roles_utils.delete_role(connection,
                                                   role["role_name"])
            for tablespace in test_table_spaces:
                connection = get_db_connection(servers_dict['db'],
                                               servers_dict['username'],
                                               servers_dict['db_password'],
                                               servers_dict['host'],
                                               servers_dict['port'],
                                               servers_dict['sslmode'])
                # Delete tablespace
                regression.tablespace_utils.delete_tablespace(
                    connection, tablespace["tablespace_name"])
            # Delete server
            delete_server_with_api(tester, srv_id)
    except Exception:
        traceback.print_exc(file=sys.stderr)
        raise
def get_db_password(config_servers, name, host, db_port):
    """Return the configured password for the server matching name/host/port.

    Returns '' when no entry matches; when several entries match, the
    password of the last matching entry wins (same as the original scan).
    """
    password = ''
    for srv in config_servers:
        matches = (srv['name'] == name
                   and srv['host'] == host
                   and srv['db_port'] == db_port)
        if matches:
            password = srv['db_password']
    return password
def get_db_server(sid):
    """
    This function returns a live PostgreSQL connection for the server whose
    row id in pgAdmin's SQLite settings database is *sid*.
    :param sid: server id
    :type sid: int
    :return: psycopg2 connection, or '' if the server/password is unknown
    """
    connection = ''
    conn = sqlite3.connect(config.TEST_SQLITE_PATH)
    cur = conn.cursor()
    server = cur.execute('SELECT name, host, port, maintenance_db,'
                         ' username, ssl_mode FROM server where id=%s' % sid)
    server = server.fetchone()
    if server:
        name = server[0]
        host = server[1]
        db_port = server[2]
        db_name = server[3]
        username = server[4]
        ssl_mode = server[5]
        config_servers = test_setup.config_data['server_credentials']
        # Get the db password from config file for appropriate server
        db_password = get_db_password(config_servers, name, host, db_port)
        if db_password:
            # Open a PostgreSQL connection for this server.
            connection = get_db_connection(db_name,
                                           username,
                                           db_password,
                                           host,
                                           db_port,
                                           ssl_mode)
    conn.close()
    return connection
def remove_db_file():
    """Delete the test SQLite database file if it exists on disk."""
    db_path = config.TEST_SQLITE_PATH
    if os.path.isfile(db_path):
        os.remove(db_path)
def _cleanup(tester, app_starter):
    """Drop every object (databases, tablespaces, roles, servers) created
    during the test suite run, then log out the test client, remove the
    SQLite configuration database and stop the app if one was started."""
    try:
        servers = (regression.parent_node_dict["server"] +
                   regression.node_info_dict["sid"])
        databases = (regression.parent_node_dict["database"] +
                     regression.node_info_dict["did"])
        tablespaces = (regression.parent_node_dict["tablespace"] +
                       regression.node_info_dict["tsid"])
        roles = (regression.parent_node_dict["role"] +
                 regression.node_info_dict["lrid"])
        # Drop databases first, then tablespaces, roles and finally the
        # server registrations themselves.
        for database in databases:
            conn = get_db_server(database["server_id"])
            if conn:
                drop_database(conn, database["db_name"])
        for tablespace in tablespaces:
            conn = get_db_server(tablespace["server_id"])
            if conn:
                regression.tablespace_utils.delete_tablespace(
                    conn, tablespace["tablespace_name"])
        for role in roles:
            conn = get_db_server(role["server_id"])
            if conn:
                regression.roles_utils.delete_role(conn, role["role_name"])
        for server in servers:
            delete_server_with_api(tester, server["server_id"])
    except Exception:
        # Best-effort cleanup: report the failure but still run the
        # finally block so the client is logged out and files removed.
        traceback.print_exc(file=sys.stderr)
    finally:
        # Logout the test client
        logout_tester_account(tester)
        # Remove SQLite db file
        remove_db_file()
        if app_starter:
            app_starter.stop_app()
def get_cleanup_handler(tester, app_starter):
    """Return a zero-argument callable that runs :func:`_cleanup` with the
    given test client and app starter already bound to it."""
    return partial(_cleanup, tester, app_starter)
def apply_scenario(scenario, test):
    """Clone *test* and apply a (name, parameters) scenario to the clone.

    The clone's id gets "(name)" appended, its short description is
    replaced by the scenario name, and every scenario parameter is set as
    an attribute on the clone. *test* itself is left unaltered.

    :param scenario: tuple (name, parameters) to apply to the test
    :param test: the test to clone; this test is unaltered
    :return: a new test cloned from *test* with the scenario applied
    """
    name, parameters = scenario
    parameters["scenario_name"] = name
    cloned = clone_test_with_new_id(test,
                                    "{0}({1})".format(test.id(), name))
    # Replace test description with test scenario name.
    description = name
    if description is not None:
        cloned.shortDescription = (lambda desc=description: desc)
    for attr, value in parameters.items():
        setattr(cloned, attr, value)
    return cloned
# Monkey-patched onto TextTestResult so passed test cases are recorded.
def add_success(self, test):
    """
    Record a passing test case on a TextTestResult instance.

    In verbose mode the test is appended to ``self.passed`` and "ok" is
    printed; in dot mode a single '.' is written and flushed.

    :param self: TextTestResult instance this function is bound to
    :type self: TextTestResult object
    :param test: the test case that passed
    :type test: test case object
    :return: None
    """
    if self.showAll:
        self.passed.append((test, "Passed"))
        self.stream.writeln("ok")
        return
    if self.dots:
        self.stream.write('.')
        self.stream.flush()
def get_scenario_name(cases):
    """
    De-duplicate scenario dictionaries per test class.

    A scenario dict is kept only when its first key has not appeared in
    any previously kept scenario of the same class.

    :param cases: mapping of class name -> list of scenario dicts
    :type cases: dict
    :return: (names per class, de-duplicated scenario dicts per class)
    :rtype: tuple of dict
    """
    names_by_class = {}
    unique_dicts_by_class = {}
    for class_name, scenario_dicts in cases.items():
        kept = []
        seen_keys = set()
        for scenario in scenario_dicts:
            first_key = list(scenario)[0]
            if first_key not in seen_keys:
                # Remember every key of the kept scenario, not just the
                # first one, so later scenarios reusing any of them are
                # treated as duplicates.
                seen_keys.update(scenario.keys())
                kept.append(scenario)
        unique_dicts_by_class[class_name] = kept
        # Ordered, de-duplicated list of every key across all scenarios.
        names_by_class[class_name] = list(dict.fromkeys(
            key for scenario in scenario_dicts for key in scenario))
    return names_by_class, unique_dicts_by_class
class Database:
    """
    Context manager that creates a uniquely named test database on
    *server* and drops it again on exit.

    example:
    with Database(server) as (connection, database_name):
        connection.cursor().execute(...)
    """

    def __init__(self, server):
        self.name = None
        self.server = server
        self.maintenance_connection = None
        self.connection = None

    def _connect(self, db_name):
        # Open a connection to db_name using the server's credentials.
        return get_db_connection(
            db_name,
            self.server['username'],
            self.server['db_password'],
            self.server['host'],
            self.server['port'],
            self.server['sslmode']
        )

    def __enter__(self):
        self.name = "test_db_{0}".format(str(uuid.uuid4())[0:7])
        # The maintenance connection targets the server's default database
        # and is used to create/drop the temporary one.
        self.maintenance_connection = self._connect(self.server['db'])
        create_database(self.server, self.name)
        self.connection = self._connect(self.name)
        return self.connection, self.name

    def __exit__(self, type, value, traceback):
        self.connection.close()
        drop_database(self.maintenance_connection, self.name)
def get_timezone_without_dst(connection):
    """
    Return the server's timezone offset while daylight saving is not
    observed.

    DST runs roughly from mid-March to the first week of November, so
    extracting the timezone for 2017-01-01 guarantees a non-DST value.
    """
    query = """SELECT EXTRACT(
    TIMEZONE FROM '2017-01-01 00:00:00'::timestamp with time zone);"""
    cursor = connection.cursor()
    cursor.execute(query)
    row = cursor.fetchone()
    return row[0]
| 37.538881
| 99
| 0.562473
|
a90f1c0070f63992a7ce7980270dc13a3abefee4
| 1,155
|
py
|
Python
|
models/cog.py
|
nmarotte/MACS_VUBot
|
d7dff822c825dc95c96726048b60507ba0e69eea
|
[
"MIT"
] | null | null | null |
models/cog.py
|
nmarotte/MACS_VUBot
|
d7dff822c825dc95c96726048b60507ba0e69eea
|
[
"MIT"
] | null | null | null |
models/cog.py
|
nmarotte/MACS_VUBot
|
d7dff822c825dc95c96726048b60507ba0e69eea
|
[
"MIT"
] | null | null | null |
from discord.ext import commands
from discord.ext.commands import Context
from models.client import MaxVUBot
class CommandCog(commands.Cog):
    """Pin/unpin commands driven by replying to the target message."""

    @staticmethod
    async def _referenced_message(ctx: Context):
        """Resolve the message the invoking command is replying to.

        Returns None when the command was not used as a reply; otherwise
        returns the cached referenced message, fetching it from the
        channel when it is not in the cache.
        """
        message_reference = ctx.message.reference
        if not message_reference:
            return None
        return message_reference.cached_message or \
            await ctx.channel.fetch_message(message_reference.message_id)

    @commands.command()
    async def pls_pin(self, ctx: Context):
        """Pin the message the invoker replied to."""
        message_to_pin = await self._referenced_message(ctx)
        if message_to_pin is None:
            await ctx.reply("Please use this command while replying on the message you wish to pin")
            return
        await message_to_pin.pin(reason=f"Pinned by {ctx.author}")

    @commands.command()
    async def pls_unpin(self, ctx: Context):
        """Unpin the message the invoker replied to."""
        message_to_unpin = await self._referenced_message(ctx)
        if message_to_unpin is None:
            # Fixed copy-paste: this reply previously said "pin".
            await ctx.reply("Please use this command while replying on the message you wish to unpin")
            return
        await message_to_unpin.unpin()
| 39.827586
| 100
| 0.703896
|
86e64b4e5d5dcafc9834d9e4ab967adc6499c3ea
| 1,454
|
py
|
Python
|
lib/extensions/package.py
|
renebentes/JoomlaProjects
|
30c07041b90fc51e4de3fb544b378854f71db2f1
|
[
"MIT"
] | 1
|
2015-04-19T10:36:18.000Z
|
2015-04-19T10:36:18.000Z
|
lib/extensions/package.py
|
renebentes/JoomlaProjects
|
30c07041b90fc51e4de3fb544b378854f71db2f1
|
[
"MIT"
] | null | null | null |
lib/extensions/package.py
|
renebentes/JoomlaProjects
|
30c07041b90fc51e4de3fb544b378854f71db2f1
|
[
"MIT"
] | 1
|
2015-01-14T02:10:55.000Z
|
2015-01-14T02:10:55.000Z
|
# coding: utf-8
import sublime
import os
# Sublime Text 3 (build > 3000) loads plugins as Python packages, so
# absolute imports rooted at the plugin name are required; Sublime Text 2
# uses the plain top-level module layout instead.
st_version = int(sublime.version())
if st_version > 3000:
    from JoomlaPack.lib import *
    from JoomlaPack.lib.extensions.base import Base
    from JoomlaPack.lib.inflector import *
else:
    from lib import *
    from lib.extensions.base import Base
    from lib.inflector import *
class Package(Base):
    '''
    Implements the Joomla's Package of extensions.
    '''

    def __init__(self, content=None, inflector=English):
        # Base wires up the inflector used for all name transformations.
        Base.__init__(self, inflector)
        self.prefix = 'pkg_'
        self.template_path = 'package'
        if content is not None:
            # Explicit name given: derive the underscored full name from it.
            self.name = content
            self.fullname = self.inflector.underscore(self.prefix + content)
        else:
            # No name given: fall back to the current project's name.
            self.fullname = self.inflector.underscore(
                Project().get_project_name())
            self.name = self.inflector.humanize(self.fullname, prefix='pkg_')
        # NOTE(review): self.path is invoked as a setter here but iterated
        # as a plain value in rename() below -- presumably Base.path stores
        # the value and exposes it later; confirm against Base.
        self.path(os.path.join(Project().root(), self.fullname))

    def rename(self):
        # Substitute the '{{package}}' placeholder in template file names
        # with the humanized package name.
        name = self.inflector.humanize(self.name, self.prefix)
        for root, dirs, files in os.walk(self.path):
            for filename in files:
                newname = filename.replace('{{package}}', name)
                if newname != filename:
                    os.rename(os.path.join(root, filename),
                              os.path.join(root, newname))

    def __str__(self):
        return "JoomlaPack: Joomla Package"
| 28.509804
| 77
| 0.611417
|
a2294fa43b8e83f14bb40023000ad94b5e617e29
| 8,548
|
py
|
Python
|
simba/run_RF_model_old.py
|
justinshenk/simba
|
a58ccd0ceeda201c1452d186033ce6b25fbab564
|
[
"MIT"
] | 172
|
2019-12-18T22:19:42.000Z
|
2022-03-29T01:58:25.000Z
|
simba/run_RF_model_old.py
|
justinshenk/simba
|
a58ccd0ceeda201c1452d186033ce6b25fbab564
|
[
"MIT"
] | 165
|
2020-01-10T19:05:16.000Z
|
2022-03-31T16:08:36.000Z
|
simba/run_RF_model_old.py
|
justinshenk/simba
|
a58ccd0ceeda201c1452d186033ce6b25fbab564
|
[
"MIT"
] | 80
|
2019-12-20T00:01:43.000Z
|
2022-03-29T16:20:10.000Z
|
import warnings
# Silence the deprecation chatter emitted by the heavy scientific imports
# below before they are loaded.
warnings.filterwarnings('ignore',category=FutureWarning)
warnings.filterwarnings('ignore',category=DeprecationWarning)
import pandas as pd
import pickle
import numpy as np
import statistics
import os
from configparser import ConfigParser, MissingSectionHeaderError, NoOptionError, NoSectionError
from simba.drop_bp_cords import *
import glob
from simba.rw_dfs import *
# NOTE(review): this repeats the FutureWarning suppression above.
warnings.simplefilter(action='ignore', category=FutureWarning)
def rfmodel(inifile):
    """Run every configured RF classifier over a project's feature files.

    Reads model paths, discrimination thresholds and minimum bout lengths
    from the project_config.ini at *inifile*, applies each classifier to
    the files in project_folder/csv/features_extracted, smooths away
    on/off gaps shorter than the minimum bout length, and writes results
    to project_folder/csv/machine_results.
    """
    config = ConfigParser()
    configFile = str(inifile)
    try:
        config.read(configFile)
    except MissingSectionHeaderError:
        print('ERROR: Not a valid project_config file. Please check the project_config.ini path.')
    projectPath = config.get('General settings', 'project_path')
    csv_dir_in, csv_dir_out = os.path.join(projectPath, 'csv', 'features_extracted'), os.path.join(projectPath, 'csv', 'machine_results')
    model_dir = config.get('SML settings', 'model_dir')
    model_nos = config.getint('SML settings', 'No_targets')
    poseEstimationBps = config.get('create ensemble settings', 'pose_estimation_body_parts')
    try:
        wfileType = config.get('General settings', 'workflow_file_type')
    except NoOptionError:
        # Older project configs predate this option; default to csv.
        wfileType = 'csv'
    vidInfPath = os.path.join(projectPath, 'logs', 'video_info.csv')
    vidinfDf = pd.read_csv(vidInfPath)
    try:
        multiAnimalIDList = config.get('Multi animal IDs', 'id_list')
        multiAnimalIDList = multiAnimalIDList.split(",")
        if (multiAnimalIDList[0] != '') and (poseEstimationBps == 'user_defined'):
            multiAnimalStatus = True
            print('Applying settings for multi-animal tracking...')
        else:
            multiAnimalStatus = False
            print('Applying settings for classical tracking...')
    except NoSectionError:
        multiAnimalIDList = ['']
        multiAnimalStatus = False
        print('Applying settings for classical tracking...')
    # NOTE(review): bpHeaders, multiAnimalIDList and multiAnimalStatus are
    # computed above but never used later in this function.
    bpHeaders = getBpHeaders(inifile)
    model_paths, target_names, DTList, min_bout_list = ([], [], [], [])
    target_names = []
    fileCounter = 0
    ########### GET MODEL PATHS, NAMES, AND DISCRIMIINATION THRESHOLDS ###########
    for i in range(model_nos):
        currentModelPaths = 'model_path_' + str(i+1)
        currentModelNames = 'target_name_' + str(i+1)
        currentDT = 'threshold_' + str(i+1)
        currMinBoutName = 'min_bout_' + str(i+1)
        currentModelPaths = config.get('SML settings', currentModelPaths)
        currentModelNames = config.get('SML settings', currentModelNames)
        currentDT = config.getfloat('threshold_settings', currentDT)
        currMinBout = config.getfloat('Minimum_bout_lengths', currMinBoutName)
        DTList.append(currentDT)
        min_bout_list.append(currMinBout)
        model_paths.append(currentModelPaths)
        target_names.append(currentModelNames)
    filesFound = glob.glob(csv_dir_in + '/*.' + wfileType)
    print('Running ' + str(len(target_names)) + ' model(s) on ' + str(len(filesFound)) + ' video file(s).')
    for currFile in filesFound:
        currentFileName = os.path.basename(currFile)
        fileCounter+=1
        print('Analyzing video ' + str(fileCounter) + '/' + str(len(filesFound)) + '...')
        inputFile = read_df(currFile, wfileType)
        try:
            inputFile = inputFile.set_index('scorer')
        except KeyError:
            pass
        # Drop pandas' auto-generated unnamed index columns.
        inputFile = inputFile.loc[:, ~inputFile.columns.str.contains('^Unnamed')]
        inputFileOrganised = drop_bp_cords(inputFile, inifile)
        currVidInfoDf = vidinfDf.loc[vidinfDf['Video'] == str(currentFileName.replace('.' + wfileType, ''))]
        try:
            currVidFps = int(currVidInfoDf['fps'])
        except TypeError:
            # NOTE(review): execution continues after this message, so
            # currVidFps may be undefined (or stale from a previous file)
            # on the next loop below.
            print('Error: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file')
        outputDf = inputFile.copy(deep=True)
        for b in range(model_nos):
            # Build the gap patterns: each pattern [1, 0*k, 1] marks a run
            # of k "off" frames sandwiched between "on" frames; gaps up to
            # the minimum bout length (in frames) get filled in below.
            shortest_bout = min_bout_list[b]
            framesToPlug = int(currVidFps * (shortest_bout / 1000))
            framesToPlugList = list(range(1, framesToPlug + 1))
            framesToPlugList.reverse()
            patternListofLists = []
            for k in framesToPlugList:
                zerosInList = [0] * k
                currList = [1]
                currList.extend(zerosInList)
                currList.extend([1])
                patternListofLists.append(currList)
            # patternListofLists.append([0, 1, 1, 0])
            # patternListofLists.append([0, 1, 0])
            patterns = np.asarray(patternListofLists)
            currentModelPath = model_paths[b]
            model = os.path.join(model_dir, currentModelPath)
            currModelName = target_names[b]
            discrimination_threshold = DTList[b]
            currProbName = 'Probability_' + currModelName
            # NOTE(review): the pickled classifier file handle is never
            # closed, and unpickling trusts the model directory contents.
            clf = pickle.load(open(model, 'rb'))
            # predictions = clf.predict_proba(inputFileOrganised)
            try:
                predictions = clf.predict_proba(inputFileOrganised)
            except ValueError:
                # NOTE(review): execution continues after this message, so
                # 'predictions' may be undefined or left over from the
                # previous model when the feature count mismatches.
                print('Mismatch in the number of features in input file and what is expected from the model in file ' + str(currentFileName) + ' and model ' + str(currModelName))
            # Column of P(class 1) plus a thresholded 0/1 classification.
            outputDf[currProbName] = predictions[:, 1]
            outputDf[currModelName] = np.where(outputDf[currProbName] > discrimination_threshold, 1, 0)
            ########## FIX 'GAPS' ###########################################
            for l in patterns:
                currPattern = l
                n_obs = len(currPattern)
                # Mark every window that exactly matches the 1,0...0,1 gap
                # pattern, then back-fill the match over the window so the
                # whole gap is flipped to 1.
                outputDf['rolling_match'] = (outputDf[currModelName].rolling(window=n_obs, min_periods=n_obs)
                                             .apply(lambda x: (x == currPattern).all())
                                             .mask(lambda x: x == 0)
                                             .bfill(limit=n_obs - 1)
                                             .fillna(0)
                                             .astype(bool)
                                             )
                # if (currPattern == patterns[-2]) or (currPattern == patterns[-1]):
                #     outputDf.loc[outputDf['rolling_match'] == True, currModelName] = 0
                # else:
                #     outputDf.loc[outputDf['rolling_match'] == True, currModelName] = 1
                outputDf.loc[outputDf['rolling_match'] == True, currModelName] = 1
                outputDf.loc[outputDf['rolling_match'] == False, currModelName] = 0
                outputDf = outputDf.drop(['rolling_match'], axis=1)
        # Derived scaled-movement columns, keyed on body-part configuration.
        # NOTE(review): '7' is tested twice in this condition; one of the
        # two was possibly meant to be a different body-part count.
        if poseEstimationBps == '4' or poseEstimationBps == '7' or poseEstimationBps == '8' or poseEstimationBps == '7':
            #sketchy fix due to version compatability
            try:
                mouse1size = (statistics.mean(outputDf['Mouse_1_nose_to_tail']))
                mouse1Max = mouse1size * 8
                outputDf['Scaled_movement_M1'] = (outputDf['Total_movement_all_bodyparts_M1'] / (mouse1Max))
            except:
                mouse1size = (statistics.mean(outputDf['Mouse_nose_to_tail']))
                mouse1Max = mouse1size * 8
                outputDf['Scaled_movement_M1'] = (outputDf['Total_movement_all_bodyparts_M1'] / (mouse1Max))
        if poseEstimationBps == '16' or poseEstimationBps == '14':
            mouse1size = (statistics.mean(outputDf['Mouse_1_nose_to_tail']))
            mouse2size = (statistics.mean(outputDf['Mouse_2_nose_to_tail']))
            mouse1Max = mouse1size * 8
            mouse2Max = mouse2size * 8
            outputDf['Scaled_movement_M1'] = (outputDf['Total_movement_all_bodyparts_M1'] / (mouse1Max))
            outputDf['Scaled_movement_M2'] = (outputDf['Total_movement_all_bodyparts_M2'] / (mouse2Max))
            outputDf['Scaled_movement_M1_M2'] = (outputDf['Scaled_movement_M1'] + outputDf['Scaled_movement_M2']) / 2
            outputDf['Scaled_movement_M1_M2'] = outputDf['Scaled_movement_M1_M2'].round(decimals=2)
        fileBaseName = os.path.basename(currFile)
        outFname = os.path.join(csv_dir_out, fileBaseName)
        save_df(outputDf, wfileType, outFname)
        print('Predictions generated for ' + str(fileBaseName) + '...')
    print('Predictions complete. Saved @ project_folder/csv/machine_results')
| 52.121951
| 179
| 0.606224
|
6fb54e6f66f0543f886254d3182105f4513d799a
| 19,912
|
py
|
Python
|
gfootball/env/wrappers.py
|
chevin-ken/football
|
cfcdf12c06a2a04af3daef4e135c5e19ddcfa15d
|
[
"Apache-2.0"
] | null | null | null |
gfootball/env/wrappers.py
|
chevin-ken/football
|
cfcdf12c06a2a04af3daef4e135c5e19ddcfa15d
|
[
"Apache-2.0"
] | null | null | null |
gfootball/env/wrappers.py
|
chevin-ken/football
|
cfcdf12c06a2a04af3daef4e135c5e19ddcfa15d
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environment that can be used with OpenAI Baselines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import cv2
from gfootball.env import football_action_set
from gfootball.env import observation_preprocessing
import gym
import numpy as np
import math
class GetStateWrapper(gym.Wrapper):
  """Exposes get_state/set_state, verifying that every wrapper between
  this one and the core FootballEnv supports state snapshots."""

  def __init__(self, env):
    gym.Wrapper.__init__(self, env)
    # Wrapper class names known to implement get_state/set_state.
    self._wrappers_with_support = {
        'CheckpointRewardWrapper', 'FrameStack', 'GetStateWrapper',
        'SingleAgentRewardWrapper', 'SingleAgentObservationWrapper',
        'SMMWrapper', 'PeriodicDumpWriter', 'Simple115StateWrapper',
        'PixelsStateWrapper'
    }

  def _check_state_supported(self):
    # Walk down the wrapper chain until the core environment is reached,
    # failing fast on any wrapper that cannot snapshot its state.
    current = self
    while current.__class__.__name__ != 'FootballEnv':
      name = current.__class__.__name__
      assert name in self._wrappers_with_support, (
          'get/set state not supported by {} wrapper').format(name)
      current = current.env

  def get_state(self):
    self._check_state_supported()
    to_pickle = {}
    return self.env.get_state(to_pickle)

  def set_state(self, state):
    self._check_state_supported()
    self.env.set_state(state)
class PeriodicDumpWriter(gym.Wrapper):
  """Enables trace/video dumping only on every N-th episode."""

  def __init__(self, env, dump_frequency, render=False):
    gym.Wrapper.__init__(self, env)
    self._dump_frequency = dump_frequency
    self._render = render
    # Remember the dump settings the user asked for, so they can be
    # restored on the episodes that should dump.
    self._original_dump_config = {
        'write_video': env._config['write_video'],
        'dump_full_episodes': env._config['dump_full_episodes'],
        'dump_scores': env._config['dump_scores'],
    }
    self._current_episode_number = 0

  def step(self, action):
    return self.env.step(action)

  def reset(self):
    dump_this_episode = (
        self._dump_frequency > 0 and
        self._current_episode_number % self._dump_frequency == 0)
    if dump_this_episode:
      self.env._config.update(self._original_dump_config)
      if self._render:
        self.env.render()
    else:
      # Disable every dump sink for non-dump episodes.
      self.env._config.update({'write_video': False,
                               'dump_full_episodes': False,
                               'dump_scores': False})
      if self._render:
        self.env.disable_render()
    self._current_episode_number += 1
    return self.env.reset()
class Simple115StateWrapper(gym.ObservationWrapper):
  """A wrapper that converts an observation to 115-features state."""

  def __init__(self, env, fixed_positions=False):
    """Initializes the wrapper.

    Args:
      env: an environment to wrap
      fixed_positions: whether to fix observation indexes corresponding to teams

    Note: simple115v2 enables fixed_positions option.
    """
    gym.ObservationWrapper.__init__(self, env)
    action_shape = np.shape(self.env.action_space)
    # One 115-feature row per controlled player; at least one row.
    shape = (action_shape[0] if len(action_shape) else 1, 115)
    self.observation_space = gym.spaces.Box(
        low=-np.inf, high=np.inf, shape=shape, dtype=np.float32)
    self._fixed_positions = fixed_positions

  def observation(self, observation):
    """Converts an observation into simple115 (or simple115v2) format."""
    return Simple115StateWrapper.convert_observation(observation, self._fixed_positions)

  @staticmethod
  def convert_observation(observation, fixed_positions):
    """Converts an observation into simple115 (or simple115v2) format.

    Feature layout per player: 88 position/direction values, 3 ball
    position, 3 ball direction, 3 ball-ownership one-hot, 11 active-player
    one-hot, 7 game-mode one-hot = 115 features.

    Args:
      observation: observation that the environment returns
      fixed_positions: Players and positions are always occupying 88 fields
                       (even if the game is played 1v1).
                       If True, the position of the player will be the same - no
                       matter how many players are on the field:
                       (so first 11 pairs will belong to the first team, even
                       if it has less players).
                       If False, then the position of players from team2
                       will depend on number of players in team1).

    Returns:
      (N, 115) shaped representation, where N stands for the number of players
      being controlled.
    """

    def do_flatten(obj):
      """Run flatten on either python list or numpy array."""
      if type(obj) == list:
        return np.array(obj).flatten()
      return obj.flatten()

    final_obs = []
    for obs in observation:
      o = []
      if fixed_positions:
        for i, name in enumerate(['left_team', 'left_team_direction',
                                  'right_team', 'right_team_direction']):
          o.extend(do_flatten(obs[name]))
          # If there were less than 11vs11 players we backfill missing values
          # with -1.
          if len(o) < (i + 1) * 22:
            o.extend([-1] * ((i + 1) * 22 - len(o)))
      else:
        o.extend(do_flatten(obs['left_team']))
        o.extend(do_flatten(obs['left_team_direction']))
        o.extend(do_flatten(obs['right_team']))
        o.extend(do_flatten(obs['right_team_direction']))

      # If there were less than 11vs11 players we backfill missing values with
      # -1.
      # 88 = 11 (players) * 2 (teams) * 2 (positions & directions) * 2 (x & y)
      if len(o) < 88:
        o.extend([-1] * (88 - len(o)))

      # ball position
      o.extend(obs['ball'])
      # ball direction
      o.extend(obs['ball_direction'])
      # one hot encoding of which team owns the ball
      if obs['ball_owned_team'] == -1:
        o.extend([1, 0, 0])
      if obs['ball_owned_team'] == 0:
        o.extend([0, 1, 0])
      if obs['ball_owned_team'] == 1:
        o.extend([0, 0, 1])

      # One-hot of the active (controlled) player; all zeros when none.
      active = [0] * 11
      if obs['active'] != -1:
        active[obs['active']] = 1
      o.extend(active)

      # One-hot of the current game mode.
      game_mode = [0] * 7
      game_mode[obs['game_mode']] = 1
      o.extend(game_mode)
      final_obs.append(o)
    return np.array(final_obs, dtype=np.float32)
class MoreFeatWrapper(gym.ObservationWrapper):
  """Extends the simple115 representation with 25 extra features.

  Per controlled player the extra features are, in order:
    - closest teammate to the active player: [distance, dx, dy]
    - closest opponent to the active player: [distance, dx, dy]
    - ball relative to the active player: [distance, dx, dy]
    - ball relative to own goal at (-1, 0): [distance, dx, dy]
    - ball relative to opponent goal at (1, 0): [distance, dx, dy]
    - the 10 sticky-action flags
  for a total of 115 + 15 + 10 = 140 features.
  """

  def __init__(self, env, fixed_positions=False):
    gym.ObservationWrapper.__init__(self, env)
    action_shape = np.shape(self.env.action_space)
    shape = (action_shape[0] if len(action_shape) else 1, 140)
    self.observation_space = gym.spaces.Box(
        low=-np.inf, high=np.inf, shape=shape, dtype=np.float32)
    self._fixed_positions = fixed_positions

  def observation(self, observation):
    """Converts an observation into the extended 140-feature format."""
    return MoreFeatWrapper.convert_observation(observation, self._fixed_positions)

  @staticmethod
  def convert_observation(observation, fixed_positions):
    """Adds more features to the simple115 (or simple115v2) format.

    Args:
      observation: raw observation list returned by the environment.
      fixed_positions: same meaning as in Simple115StateWrapper.

    Returns:
      (N, 140) float32 array, one row per controlled player.
    """

    def do_flatten(obj):
      """Run flatten on either python list or numpy array."""
      if type(obj) == list:
        return np.array(obj).flatten()
      return obj.flatten()

    final_obs = []
    for obs in observation:
      o = []
      if fixed_positions:
        for i, name in enumerate(['left_team', 'left_team_direction',
                                  'right_team', 'right_team_direction']):
          o.extend(do_flatten(obs[name]))
          # If there were less than 11vs11 players we backfill missing values
          # with -1.
          if len(o) < (i + 1) * 22:
            o.extend([-1] * ((i + 1) * 22 - len(o)))
      else:
        o.extend(do_flatten(obs['left_team']))
        o.extend(do_flatten(obs['left_team_direction']))
        o.extend(do_flatten(obs['right_team']))
        o.extend(do_flatten(obs['right_team_direction']))

      # 88 = 11 (players) * 2 (teams) * 2 (positions & directions) * 2 (x & y)
      if len(o) < 88:
        o.extend([-1] * (88 - len(o)))

      # Ball position and direction.
      o.extend(obs['ball'])
      o.extend(obs['ball_direction'])
      # One hot encoding of which team owns the ball.
      if obs['ball_owned_team'] == -1:
        o.extend([1, 0, 0])
      if obs['ball_owned_team'] == 0:
        o.extend([0, 1, 0])
      if obs['ball_owned_team'] == 1:
        o.extend([0, 0, 1])

      active = [0] * 11
      if obs['active'] != -1:
        active[obs['active']] = 1
      o.extend(active)

      game_mode = [0] * 7
      game_mode[obs['game_mode']] = 1
      o.extend(game_mode)

      # Position of active player (o[97:108] is the active one-hot).
      left, right = o[:22], o[44:66]
      player = -1
      for i in range(97, 108):
        if o[i] == 1:
          player = i - 97
      x = 0
      y = 0
      if player >= 0:
        x = o[2*player]
        y = o[2*player + 1]

      # Closest teammate to active player: [distance, x-direction, y-direction]
      p_i = 0
      closest_dist = float('inf')
      for i in range(11):
        dist = math.sqrt((left[2*i] - x) ** 2 + (left[2*i + 1] - y) ** 2)
        if dist and dist < closest_dist and i != player:
          p_i = i
          closest_dist = dist
      close_teammate = [closest_dist, (left[2*p_i] - x), (left[2*p_i + 1] - y)]
      o.extend(close_teammate)

      # Closest opponent to active player: [distance, x-direction, y-direction]
      p_i = 0
      closest_dist = float('inf')
      for i in range(11):
        dist = math.sqrt((right[2*i] - x) ** 2 + (right[2*i + 1] - y) ** 2)
        if dist and dist < closest_dist:
          p_i = i
          closest_dist = dist
      close_opponent = [closest_dist, (right[2*p_i] - x), (right[2*p_i + 1] - y)]
      o.extend(close_opponent)

      # Ball to active player: [distance, x-direction, y-direction]
      ball_dist = [0, 0, 0]
      if player >= 0:
        dist = math.sqrt((o[88] - x) ** 2 + (o[89] - y) ** 2)
        ball_dist = [dist, (o[88] - x), (o[89] - y)]
      o.extend(ball_dist)

      # Ball to own goal at (-1, 0): [distance, x-direction, y-direction]
      # Bug fix: the default used to be assigned to a misspelled variable
      # ('ball_2_own_gal'), raising NameError whenever no player was active.
      ball_2_own_goal = [0, 0, 0]
      if player >= 0:
        dist = math.sqrt((o[88] + 1) ** 2 + (o[89]) ** 2)
        # Bug fix: the y component used to be o[88] (the x coordinate).
        ball_2_own_goal = [dist, o[88] + 1, o[89]]
      o.extend(ball_2_own_goal)

      # Ball to opponent goal at (1, 0): [distance, x-direction, y-direction]
      ball_2_opp_goal = [0, 0, 0]
      if player >= 0:
        dist = math.sqrt((o[88] - 1) ** 2 + (o[89]) ** 2)
        # Bug fix: the y component used to be o[88] (the x coordinate).
        ball_2_opp_goal = [dist, o[88] - 1, o[89]]
      o.extend(ball_2_opp_goal)

      o.extend(obs["sticky_actions"])
      final_obs.append(o)
    return np.array(final_obs, dtype=np.float32)
class PixelsStateWrapper(gym.ObservationWrapper):
  """Extracts a (optionally grayscale) resized pixel representation."""

  def __init__(self, env, grayscale=True,
               channel_dimensions=(observation_preprocessing.SMM_WIDTH,
                                   observation_preprocessing.SMM_HEIGHT)):
    gym.ObservationWrapper.__init__(self, env)
    self._grayscale = grayscale
    self._channel_dimensions = channel_dimensions
    action_shape = np.shape(self.env.action_space)
    num_agents = action_shape[0] if len(action_shape) else 1
    channels = 1 if grayscale else 3
    self.observation_space = gym.spaces.Box(
        low=0,
        high=255,
        shape=(num_agents, channel_dimensions[1], channel_dimensions[0],
               channels),
        dtype=np.uint8)

  def observation(self, obs):
    frames = []
    for observation in obs:
      assert 'frame' in observation, ("Missing 'frame' in observations. Pixel "
                                      "representation requires rendering and is"
                                      " supported only for players on the left "
                                      "team.")
      frame = observation['frame']
      if self._grayscale:
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
      # Resize to (width, height) as cv2 expects.
      frame = cv2.resize(frame, (self._channel_dimensions[0],
                                 self._channel_dimensions[1]),
                         interpolation=cv2.INTER_AREA)
      if self._grayscale:
        # Restore the dropped channel axis.
        frame = np.expand_dims(frame, -1)
      frames.append(frame)
    return np.array(frames, dtype=np.uint8)
class SMMWrapper(gym.ObservationWrapper):
  """A wrapper that converts observations into a super-mini-map format."""

  def __init__(self, env,
               channel_dimensions=(observation_preprocessing.SMM_WIDTH,
                                   observation_preprocessing.SMM_HEIGHT)):
    gym.ObservationWrapper.__init__(self, env)
    self._channel_dimensions = channel_dimensions
    action_shape = np.shape(self.env.action_space)
    num_agents = action_shape[0] if len(action_shape) else 1
    num_layers = len(observation_preprocessing.get_smm_layers(
        self.env.unwrapped._config))
    self.observation_space = gym.spaces.Box(
        low=0, high=255,
        shape=(num_agents, channel_dimensions[1], channel_dimensions[0],
               num_layers),
        dtype=np.uint8)

  def observation(self, obs):
    return observation_preprocessing.generate_smm(
        obs, channel_dimensions=self._channel_dimensions,
        config=self.env.unwrapped._config)
class SingleAgentObservationWrapper(gym.ObservationWrapper):
  """A wrapper that returns the observation of the first agent only."""

  def __init__(self, env):
    gym.ObservationWrapper.__init__(self, env)
    # Collapse the leading per-agent dimension of the observation space.
    space = env.observation_space
    self.observation_space = gym.spaces.Box(
        low=space.low[0],
        high=space.high[0],
        dtype=space.dtype)

  def observation(self, obs):
    return obs[0]
class SingleAgentRewardWrapper(gym.RewardWrapper):
  """A wrapper that returns the reward of the first agent only."""

  def __init__(self, env):
    gym.RewardWrapper.__init__(self, env)

  def reward(self, reward):
    # Rewards arrive as a per-agent sequence; keep only agent 0's.
    return reward[0]
class CheckpointRewardWrapper(gym.RewardWrapper):
  """A wrapper that adds a dense checkpoint reward.

  The ball-to-opponent-goal distance is split into _num_checkpoints bands;
  the first time the controlled player carries the ball into a new band,
  _checkpoint_reward is added. Scoring collects all remaining checkpoints
  at once so the total shaped reward is capped.
  """

  def __init__(self, env):
    gym.RewardWrapper.__init__(self, env)
    # Per-agent count of checkpoints already rewarded this episode.
    self._collected_checkpoints = {}
    self._num_checkpoints = 10
    self._checkpoint_reward = 0.1

  def reset(self):
    self._collected_checkpoints = {}
    return self.env.reset()

  def get_state(self, to_pickle):
    to_pickle['CheckpointRewardWrapper'] = self._collected_checkpoints
    return self.env.get_state(to_pickle)

  def set_state(self, state):
    from_pickle = self.env.set_state(state)
    self._collected_checkpoints = from_pickle['CheckpointRewardWrapper']
    return from_pickle

  def reward(self, reward):
    observation = self.env.unwrapped.observation()
    if observation is None:
      return reward
    assert len(reward) == len(observation)
    for rew_index in range(len(reward)):
      o = observation[rew_index]
      # A scored goal grants every checkpoint not collected yet.
      if reward[rew_index] == 1:
        reward[rew_index] += self._checkpoint_reward * (
            self._num_checkpoints -
            self._collected_checkpoints.get(rew_index, 0))
        self._collected_checkpoints[rew_index] = self._num_checkpoints
        continue
      # Check if the active player has the ball.
      if ('ball_owned_team' not in o or
          o['ball_owned_team'] != 0 or
          'ball_owned_player' not in o or
          o['ball_owned_player'] != o['active']):
        continue
      # Euclidean distance from the ball to the opponent goal at (1, 0).
      d = ((o['ball'][0] - 1) ** 2 + o['ball'][1] ** 2) ** 0.5
      # Collect the checkpoints.
      # We give reward for distance 1 to 0.2.
      while (self._collected_checkpoints.get(rew_index, 0) <
             self._num_checkpoints):
        if self._num_checkpoints == 1:
          threshold = 0.99 - 0.8
        else:
          # Thresholds shrink linearly from 0.99 toward 0.19 as more
          # checkpoints get collected.
          threshold = (0.99 - 0.8 / (self._num_checkpoints - 1) *
                       self._collected_checkpoints.get(rew_index, 0))
        if d > threshold:
          break
        reward[rew_index] += self._checkpoint_reward
        self._collected_checkpoints[rew_index] = (
            self._collected_checkpoints.get(rew_index, 0) + 1)
    return reward
class FrameStack(gym.Wrapper):
  """Stack k last observations along the last axis."""

  def __init__(self, env, k):
    gym.Wrapper.__init__(self, env)
    self.obs = collections.deque([], maxlen=k)
    # Widen the observation space k-fold along the final dimension.
    lows = np.concatenate([env.observation_space.low] * k, axis=-1)
    highs = np.concatenate([env.observation_space.high] * k, axis=-1)
    self.observation_space = gym.spaces.Box(
        low=lows, high=highs, dtype=env.observation_space.dtype)

  def reset(self):
    first = self.env.reset()
    # Seed the whole stack with the initial observation.
    self.obs.extend([first] * self.obs.maxlen)
    return self._get_observation()

  def get_state(self, to_pickle):
    to_pickle['FrameStack'] = self.obs
    return self.env.get_state(to_pickle)

  def set_state(self, state):
    from_pickle = self.env.set_state(state)
    self.obs = from_pickle['FrameStack']
    return from_pickle

  def step(self, action):
    observation, reward, done, info = self.env.step(action)
    self.obs.append(observation)
    return self._get_observation(), reward, done, info

  def _get_observation(self):
    return np.concatenate(list(self.obs), axis=-1)
class MultiAgentToSingleAgent(gym.Wrapper):
  """Reduces raw multi-agent observations to the designated player's view.

  Returns observations of the designated player on the team, so that
  using this wrapper in a multi-agent setup is equivalent to controlling
  a single player. It is used for scenarios with control_all_players set
  when the agent controls only one player on the team, and can also be
  used standalone:

    env = gfootball.env.create_environment(env_name='tests/multiagent_wrapper',
                                           number_of_left_players_agent_controls=11)
    observations = env.reset()
    single_observation = MultiAgentToSingleAgent.get_observation(observations)
    single_action = agent.run(single_observation)
    actions = MultiAgentToSingleAgent.get_action(single_action, observations)
    env.step(actions)
  """

  def __init__(self, env, left_players, right_players):
    # At most one player per team may be designated.
    assert left_players < 2
    assert right_players < 2
    gym.Wrapper.__init__(self, env)
    self._observation = None
    total_players = left_players + right_players
    if total_players > 1:
      self.action_space = gym.spaces.MultiDiscrete(
          [env._num_actions] * total_players)
    else:
      self.action_space = gym.spaces.Discrete(env._num_actions)

  def reset(self):
    self._observation = self.env.reset()
    return self._get_observation()

  def step(self, action):
    assert self._observation, 'Reset must be called before step'
    full_action = MultiAgentToSingleAgent.get_action(action, self._observation)
    self._observation, reward, done, info = self.env.step(full_action)
    return self._get_observation(), reward, done, info

  def _get_observation(self):
    return MultiAgentToSingleAgent.get_observation(self._observation)

  @staticmethod
  def get_observation(observation):
    assert 'designated' in observation[
        0], 'Only raw observations can be converted'
    # Keep only the entries where the active player is the designated one.
    return [obs for obs in observation
            if obs['designated'] == obs['active']]

  @staticmethod
  def get_action(actions, orginal_observation):
    assert 'designated' in orginal_observation[
        0], 'Only raw observations can be converted'
    # Everyone defaults to the built-in AI; designated players get the
    # agent-provided actions, consumed in order.
    result = [football_action_set.action_builtin_ai] * len(orginal_observation)
    next_action = 0
    for idx, obs in enumerate(orginal_observation):
      if obs['designated'] == obs['active']:
        assert next_action < len(actions)
        result[idx] = actions[next_action]
        next_action += 1
    return result
| 35.056338
| 88
| 0.641573
|
01c130eedee1a68adb0c1c9c2224f6a2f34232b4
| 1,518
|
py
|
Python
|
knn_and_regression/tests/test_a_environment.py
|
WallabyLester/Machine_Learning_From_Scratch
|
6042cf421f5de2db61fb570b7c4de64dc03453f3
|
[
"MIT"
] | null | null | null |
knn_and_regression/tests/test_a_environment.py
|
WallabyLester/Machine_Learning_From_Scratch
|
6042cf421f5de2db61fb570b7c4de64dc03453f3
|
[
"MIT"
] | null | null | null |
knn_and_regression/tests/test_a_environment.py
|
WallabyLester/Machine_Learning_From_Scratch
|
6042cf421f5de2db61fb570b7c4de64dc03453f3
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import glob
def test_a_imports():
    """
    Please don't import sklearn or scipy.stats to solve any of the problems in this assignment.
    If you fail this test, we will give you a zero for this assignment, regardless of how
    sklearn or scipy.stats was used in your code.
    the 'a' in the file name is so this test is run first on a clean Python interpreter.
    """
    import sys
    import src
    src_root: Path = Path(__file__).parent.parent / 'src'
    candidates: list = [Path(match) for match in glob.glob(str(src_root / "*.py"))]
    for source_file in candidates:
        # Both checks rely on substring matching inside is_imported, so
        # 'scipy' also covers 'scipy.stats'.
        assert not is_imported(source_file, 'sklearn'), "use of sklearn is not permitted in this assignment."
        assert not is_imported(source_file, 'scipy'), "use of scipy is not permitted in this assignment."
def is_imported(filepath: Path, pkg_name: str) -> bool:
    """Check whether ``pkg_name`` is imported anywhere in ``filepath``.

    Detects both ``import pkg`` and ``from pkg import ...`` forms by
    scanning whitespace-separated tokens (substring match, so
    ``scipy`` also matches ``scipy.stats``).

    Bug fix: the original wrapped boundary checks in ``assert`` inside a
    ``try/except ValueError`` block -- but failed asserts raise
    ``AssertionError``, which was NOT caught, so a file whose last token
    contained the package name (e.g. ending in ``import sklearn``)
    crashed instead of being reported.  Plain bounds checks are used
    instead, and ``import pkg`` as the final tokens is now detected.

    :param filepath: path of the Python source file to scan
    :param pkg_name: package name (substring) to look for
    :return: True if the package appears to be imported
    """
    with open(filepath, 'r') as f:
        tokens = f.read().split()
    for idx, token in enumerate(tokens):
        if pkg_name not in token:
            continue
        # ``import pkg`` -- the previous token is 'import'.
        if idx > 0 and tokens[idx - 1] == 'import':
            return True
        # ``from pkg import ...`` -- surrounded by 'from' and 'import'.
        if (0 < idx < len(tokens) - 1
                and tokens[idx - 1] == 'from' and tokens[idx + 1] == 'import'):
            return True
    return False
| 30.979592
| 99
| 0.613966
|
b3f4c4c7797b434f0e41c09e612b3d12eff4fc71
| 1,713
|
py
|
Python
|
scripts/jupyterhub-secret.py
|
biviosoftware/salt-srv
|
61031c5cdbf67d833aa8113d10f49775237ddbb9
|
[
"Apache-2.0"
] | 1
|
2017-04-28T10:54:42.000Z
|
2017-04-28T10:54:42.000Z
|
scripts/jupyterhub-secret.py
|
biviosoftware/salt-srv
|
61031c5cdbf67d833aa8113d10f49775237ddbb9
|
[
"Apache-2.0"
] | 4
|
2016-05-12T00:22:38.000Z
|
2016-05-12T14:21:11.000Z
|
scripts/jupyterhub-secret.py
|
biviosoftware/salt-conf
|
61031c5cdbf67d833aa8113d10f49775237ddbb9
|
[
"Apache-2.0"
] | 1
|
2016-07-05T18:38:11.000Z
|
2016-07-05T18:38:11.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
u"""Generate secrets for test purposes.
You will need to get real values for:
* github_client_id
* github_client_secret
* admin_user
The rest can be generated this way for production::
admin_users=xyz github_client_secret=x github_client_id=x python secret.py
You can override any values in the environment.
Note that admin_users will be split on spaces, because it is a
list.
:copyright: Copyright (c) 2016 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import base64
import os
import random
import string
import yaml
import sys
def main():
    """Build the secrets config, apply environment overrides, and render
    the result as YAML text."""
    defaults = {
        'jupyterhub': {
            'admin_users': ['vagrant'],
            'authenticator_class': 'jupyterhub.auth.PAMAuthenticator',
            'cookie_secret': base64.b64encode(os.urandom(64)),
            'db_pass': _random_password(),
            'github_client_id': 'replace_me',
            'github_client_secret': 'replace_me',
            'proxy_auth_token': base64.b64encode(os.urandom(32)),
        },
        'postgresql_jupyterhub': {
            'admin_pass': _random_password(),
        },
    }
    cfg = _environ_override(defaults)
    return yaml.dump(cfg, default_flow_style=False, indent=2)
def _environ_override(cfg):
for c in cfg.values():
for k in c:
v = os.environ.get(k)
if v:
c[k] = v.split(None) if k == 'admin_users' else v
return cfg
def _random_password():
return ''.join(random.choice(string.letters + string.digits) for _ in range(16))
if __name__ == '__main__':
sys.stdout.write(main())
| 26.765625
| 84
| 0.654991
|
286821e9bbff6810a1ca470421a63165fbc78b00
| 942
|
py
|
Python
|
loadfiles.py
|
FrancoisNadeau/loadutils
|
c51e641b62c588eaf2716c5af2d0468311dcae24
|
[
"MIT"
] | null | null | null |
loadfiles.py
|
FrancoisNadeau/loadutils
|
c51e641b62c588eaf2716c5af2d0468311dcae24
|
[
"MIT"
] | null | null | null |
loadfiles.py
|
FrancoisNadeau/loadutils
|
c51e641b62c588eaf2716c5af2d0468311dcae24
|
[
"MIT"
] | null | null | null |
#!usr/bin/env/python3
import os
from typing import Union
import pandas as pd
from os.path import basename as bname
from os.path import dirname as dname
def loadfiles(pathlist: Union[list, tuple]) -> object:
    """ Returns a pd.DataFrame with columns
        -----------------------------------
        'filename': name of file without extension,
        'ext': file extension,
        'parent': parent directory name,
        'fpaths': path to file """
    rows = []
    for path in pathlist:
        stem = bname(path).split(".", 1)[0]
        extension = os.path.splitext(path)[1]
        parent_dir = bname(dname(path))
        rows.append((stem, extension, parent_dir, path))
    frame = pd.DataFrame(rows, dtype=object,
                         columns=["filename", "ext", "parent", "fpaths"])
    return frame.sort_values("filename").reset_index(drop=True)
def main():
    """Script entry point: build the DataFrame from command-line paths.

    Bug fixes: the original compared ``__name__`` against the *undefined
    identifier* ``__main__`` (a NameError at call time) instead of the
    string ``"__main__"``, and referenced an undefined ``pathlist``;
    paths are now taken from ``sys.argv``.
    """
    import sys
    if __name__ == "__main__":
        loadfiles(sys.argv[1:])
| 33.642857
| 66
| 0.526539
|
49d6b3d7a45638900a94d8d4660aa34e8205fe33
| 10,766
|
py
|
Python
|
chi/plots/_residuals.py
|
DavAug/erlotinib
|
9d113257de52b56359ed6451ba7db455645315d1
|
[
"BSD-3-Clause"
] | null | null | null |
chi/plots/_residuals.py
|
DavAug/erlotinib
|
9d113257de52b56359ed6451ba7db455645315d1
|
[
"BSD-3-Clause"
] | 221
|
2020-11-06T13:03:32.000Z
|
2021-07-30T08:17:58.000Z
|
chi/plots/_residuals.py
|
DavAug/erlotinib
|
9d113257de52b56359ed6451ba7db455645315d1
|
[
"BSD-3-Clause"
] | 1
|
2021-02-10T13:03:58.000Z
|
2021-02-10T13:03:58.000Z
|
#
# This file is part of the chi repository
# (https://github.com/DavAug/chi/) which is released under the
# BSD 3-clause license. See accompanying LICENSE.md for copyright notice and
# full license details.
#
import numpy as np
import pandas as pd
import plotly.colors
import plotly.graph_objects as go
from chi import plots
class ResidualPlot(plots.SingleFigure):
    """
    A figure class that visualises the residual error between the predictions
    of a predictive model and measured observations.
    Expects a :class:`pandas.DataFrame` of measurements with an ID, a time,
    biomarker and a measurement column. This dataset is used as reference to
    compute the residuals.
    Extends :class:`SingleFigure`.
    Parameters
    ----------
    data
        A :class:`pandas.DataFrame` with the time series PD data in form of
        an ID, time, and biomarker column.
    id_key
        Key label of the :class:`DataFrame` which specifies the ID column.
        The ID refers to the identity of an individual. Defaults to
        ``'ID'``.
    time_key
        Key label of the :class:`DataFrame` which specifies the time
        column. Defaults to ``'Time'``.
    biom_key
        Key label of the :class:`DataFrame` which specifies the PD
        biomarker column. Defaults to ``'Biomarker'``.
    meas_key
        Key label of the :class:`DataFrame` which specifies the column of
        the measured PD biomarker. Defaults to ``'Measurement'``.
    updatemenu
        Boolean flag that enables or disables interactive buttons, such as a
        logarithmic scale switch for the y-axis.
    """
    def __init__(
            self, measurements, id_key='ID', time_key='Time',
            biom_key='Biomarker', meas_key='Measurement', updatemenu=True):
        super(ResidualPlot, self).__init__(updatemenu)
        # Check input format
        if not isinstance(measurements, pd.DataFrame):
            raise TypeError(
                'Measurements has to be pandas.DataFrame.')
        for key in [id_key, time_key, biom_key, meas_key]:
            if key not in measurements.keys():
                raise ValueError(
                    'Measurements does not have the key <' + str(key) + '>.')
        # Remember data and keys (order matters: [id, time, biom, meas])
        self._measurements = measurements
        self._keys = [id_key, time_key, biom_key, meas_key]

    def _add_predicted_versus_observed_scatter_plot(
            self, meas, pred, show_residuals, show_relative, biomarker,
            time_key, sample_key):
        """
        Adds a scatter plot of the mean predictions on the x-axis and
        the measured values on the y-axis. Each individual gets a
        different colour.

        NOTE(review): the ``biomarker`` argument is not used in this
        method; both dataframes are expected to be pre-masked.
        """
        # Get a colour scheme
        colors = plotly.colors.qualitative.Plotly
        n_colors = len(colors)
        # Get measurement keys
        id_key, time_key_m, _, meas_key = self._keys
        # Add scatter plot for each individual
        ids = meas[id_key].unique()
        for index, _id in enumerate(ids):
            # Get relevant measurements
            mask = meas[id_key] == _id
            temp = meas[mask]
            times = temp[time_key_m]
            # .to_numpy() copies, so the in-place ops below do not mutate
            # the measurements DataFrame.
            observations = temp[meas_key].to_numpy()
            mean_predictions = self._get_mean_predictions(
                pred, times, time_key, sample_key)
            if show_residuals is True:
                # Compute residuals of observations from mean predictions
                observations -= mean_predictions
            if show_relative is True:
                # Normalise observations by mean predictions
                observations /= mean_predictions
            # Plot mean predictions versus observations
            # (colours cycle when there are more IDs than palette entries)
            color = colors[index % n_colors]
            self._add_residual_trace(
                _id, mean_predictions, observations, color)
        # Add default axes labels
        xlabel = 'Prediction'
        ylabel = 'Residual' if show_residuals is True else 'Biomarker'
        if show_relative is True:
            ylabel += ' in rel. units'
        self._fig.update_layout(
            xaxis_title=xlabel,
            yaxis_title=ylabel)

    def _add_residual_trace(
            self, _id, mean_predictions, measurements, color):
        """
        Adds scatter plot of an indiviudals pharamcodynamics to figure.
        """
        # NOTE(review): the "%d" legend format assumes integer IDs --
        # string IDs would raise; confirm against the dataset convention.
        self._fig.add_trace(
            go.Scatter(
                x=mean_predictions,
                y=measurements,
                name="ID: %d" % _id,
                showlegend=True,
                mode="markers",
                marker=dict(
                    symbol='circle',
                    color=color,
                    opacity=0.7,
                    line=dict(color='black', width=1))))

    def _get_mean_predictions(
            self, pred, times, time_key, sample_key):
        """
        Returns a list of mean prediction estimates for the provided times.
        """
        means = np.empty(shape=len(times))
        for time_id, time in enumerate(times):
            # Compute mean over all samples predicted for this time point
            mask = pred[time_key] == time
            means[time_id] = pred[mask][sample_key].mean()
        return means

    def _get_relevant_measurements(
            self, data, biomarker, individual, time_key):
        """
        Filters the observations for the relevant biomarker and ID. Also makes
        sure that there is a prediction for each measured time point.
        """
        # Get keys of measurement dataframe
        id_key_m, time_key_m, biom_key_m, _ = self._keys
        # Mask measurements for individual (if None keep all individuals)
        measurements = self._measurements
        if individual is not None:
            mask = measurements[id_key_m] == individual
            measurements = measurements[mask]
        # Mask measurements for biomarker
        mask = measurements[biom_key_m] == biomarker
        measurements = measurements[mask]
        # Make sure that there are predictions for each observed time
        measured_times = measurements[time_key_m].dropna().unique()
        predicted_times = data[time_key].to_numpy()
        for time in measured_times:
            # NOTE(review): exact float equality -- measurement and
            # prediction times must match bit-for-bit.
            if time not in predicted_times:
                raise ValueError(
                    'The prediction dataframe is not compatible with the '
                    'measurement dataframe. The prediction dataframe does not '
                    'provide predictions for the measurement time <%.3f>'
                    % time)
        return measurements

    def add_data(
            self, data, biomarker=None, individual=None, show_residuals=True,
            show_relative=False, time_key='Time', biom_key='Biomarker',
            sample_key='Sample'):
        r"""
        Adds the residuals of the predicted biomarker values with respect
        to the measured values to the figure.
        Expects a :class:`pandas.DataFrame` with a time, a biomarker and a
        sample column. The time column determines the time of the biomarker
        measurement and the sample column the corresponding biomarker
        measurement. The biomarker column determines the biomarker type.
        The predictions are matched to the observations based on their ID and
        time label. If multiple predictions are provided for one measured time
        point, the mean prediction is computed as reference.
        Parameters
        ----------
        data
            A :class:`pandas.DataFrame` with the time series PD simulation in
            form of a time and biomarker column.
        biomarker
            The predicted bimoarker. This argument is used to determine the
            relevant rows in the dataframe. If ``None``, the first biomarker
            type in the biomarker column is selected.
        individual
            The ID of the individual whose measurements are used as reference
            for the predictive residuals. Defaults to ``None`` which compares
            the predictions to all individuals.
        show_residuals
            A boolean flag which indicates whether the residuals are plotted
            on the y axis, or the measurements themselves. Defaults to
            ``True``.
        show_relative
            A boolean flag which indicates whether the observations/residuals
            are normalised by the mean predictions. Defaults to ``False``.
        time_key
            Key label of the :class:`pandas.DataFrame` which specifies the time
            column. Defaults to ``'Time'``.
        biom_key
            Key label of the :class:`pandas.DataFrame` which specifies the PD
            biomarker column. Defaults to ``'Biomarker'``.
        sample_key
            Key label of the :class:`pandas.DataFrame` which specifies the
            sample column. Defaults to ``'Sample'``.
        """
        # Check input format
        if not isinstance(data, pd.DataFrame):
            raise TypeError(
                'Data has to be pandas.DataFrame.')
        for key in [time_key, biom_key, sample_key]:
            if key not in data.keys():
                raise ValueError(
                    'Data does not have the key <' + str(key) + '>.')
        # Check that selected individual exists in measurement dataframe
        if individual is not None:
            id_key_m = self._keys[0]
            ids = self._measurements[id_key_m].unique()
            if individual not in ids:
                raise ValueError(
                    'The ID <' + str(individual) + '> does not exist in the '
                    'measurement dataframe.')
        # Default to first bimoarker, if biomarker is not specified
        biom_types = data[biom_key].dropna().unique()
        if biomarker is None:
            biomarker = biom_types[0]
        if biomarker not in biom_types:
            raise ValueError(
                'The biomarker could not be found in the biomarker column.')
        # Check that selected biomarker exists in the measurement dataframe
        biom_key_m = self._keys[2]
        biomarkers = self._measurements[biom_key_m].unique()
        if biomarker not in biomarkers:
            raise ValueError(
                'The biomarker <' + str(biomarker) + '> does not exist in the '
                'measurement dataframe.')
        # Mask predictions for biomarker
        mask = data[biom_key] == biomarker
        data = data[mask]
        # Get the relevant observations
        meas = self._get_relevant_measurements(
            data, biomarker, individual, time_key)
        # Add mean predictions versus observations as scatter points
        self._add_predicted_versus_observed_scatter_plot(
            meas, data, show_residuals, show_relative, biomarker, time_key,
            sample_key)
| 39.149091
| 79
| 0.614063
|
e499102552bd3aee4d085fbe1a241bdf1d971995
| 948
|
py
|
Python
|
Python/mianjing.py
|
JWang169/LintCodeJava
|
b75b06fa1551f5e4d8a559ef64e1ac29db79c083
|
[
"CNRI-Python"
] | 1
|
2020-12-10T05:36:15.000Z
|
2020-12-10T05:36:15.000Z
|
Python/mianjing.py
|
JWang169/LintCodeJava
|
b75b06fa1551f5e4d8a559ef64e1ac29db79c083
|
[
"CNRI-Python"
] | null | null | null |
Python/mianjing.py
|
JWang169/LintCodeJava
|
b75b06fa1551f5e4d8a559ef64e1ac29db79c083
|
[
"CNRI-Python"
] | 3
|
2020-04-06T05:55:08.000Z
|
2021-08-29T14:26:54.000Z
|
"""
第一轮
给一个 m*n 的空矩阵,有一个 API 可以每天随机加一个 tower (矩阵里面的一个点)。一个 tower 代表矩阵里的一个点
相邻(上下左右)的 tower之间可以连通。
问多少天能连通最左最右两个 column (任意 column 上面的点就行)。 相当于就是问什么时候可以确定最左最右是连通的
"""
def tower(grid, x, y):
    """Add a tower at (x, y) and link it to any adjacent existing towers.

    Bug fixes: the original unpacked ``nx, ny`` from a single scalar sum
    (``x + dx[i] + y + dy[i]`` -- a TypeError), and bounded the row index
    by the column count (``m`` was ``len(grid[0])``).
    NOTE(review): this is interview-sketch code -- it references ``self``
    although it is a free function; presumably it belongs to a union-find
    style class holding ``connection`` and ``find``. TODO confirm.
    """
    m, n = len(grid), len(grid[0])  # m rows, n columns
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    for i in range(4):
        nx, ny = x + dx[i], y + dy[i]
        if 0 <= nx < m and 0 <= ny < n:
            if grid[nx][ny] == 1:
                self.connection[(nx, ny)] = (x, y)
    return self.find(x, y)
def find(x, y):
    # NOTE(review): interview-sketch code. Walks parent pointers from
    # (x, y) up to the root, recording whether the path touches the
    # leftmost (y == 0) or rightmost (y == n - 1) column. ``n``, ``self``,
    # ``self.connection`` and ``self.connections`` are all undefined in
    # this scope -- presumably attributes of an enclosing union-find
    # class, and the two spellings look like a typo; confirm before use.
    left, right = False, False
    while self.connection[(x, y)] != (x, y):
        if y == 0:
            left = True
        if y == n - 1:
            right = True
        (x, y) = self.connections[(x, y)]
    if left and right:
        return True
    return False
"""
第二轮
有一个 graph (其实是 tree),每个node 代表一个 castle,edge 是 castle 之间的连接,edge 代表 castle 之间传递信息需要的时间
问题就是给定一个起点,问需要多少时间能传遍所有的 castle
follow up: 怎么按照信息到达 castle 的时间输出 castle 的 id
"""
# NETWORD DELAY
"""
第三轮
给一个 array,求 array 里面最短的 subarry,使得 array 里面 element 的和等于 K
"""
| 18.230769
| 86
| 0.619198
|
4adf7c986a316670c061c397ed68c2efccd4e95e
| 2,179
|
py
|
Python
|
Linkedlist/singlyll_Add_At_index.py
|
mukul20-21/python_datastructure
|
6126722f0742110752fb634a7da1081bdbce9444
|
[
"Apache-2.0"
] | null | null | null |
Linkedlist/singlyll_Add_At_index.py
|
mukul20-21/python_datastructure
|
6126722f0742110752fb634a7da1081bdbce9444
|
[
"Apache-2.0"
] | null | null | null |
Linkedlist/singlyll_Add_At_index.py
|
mukul20-21/python_datastructure
|
6126722f0742110752fb634a7da1081bdbce9444
|
[
"Apache-2.0"
] | null | null | null |
class Node:
    """A single element of a singly linked list."""

    def __init__(self, data):
        self.data = data  # payload stored at this node
        self.next = None  # link to the following node (None = end of list)
class LinkedList:
    """Singly linked list supporting insertion at an arbitrary index."""

    def __init__(self):
        self.head = None  # first node, or None for an empty list

    def __str__(self):
        # Render as 'a-->b-->c' (empty string for an empty list).
        parts = []
        cursor = self.head
        while cursor:
            parts.append(f'{cursor.data}')
            cursor = cursor.next
        return'-->'.join(parts)

    def add_at_index(self, index, data):
        """Insert ``data`` so that it ends up at position ``index``.

        Raises IndexError for a negative index or one past the current
        length; ``index == length`` appends at the tail.
        """
        if index < 0:  # boundary case: negative indices are rejected
            raise IndexError('Negative index is not a valid input..!!!')
        if index == 0:  # insertion at the head
            fresh = Node(data)
            fresh.next = self.head
            self.head = fresh
            return
        position = 0
        previous, cursor = None, self.head
        while cursor:
            if position == index:  # splice between previous and cursor
                fresh = Node(data)
                previous.next = fresh
                fresh.next = cursor
                return
            position += 1
            previous, cursor = cursor, cursor.next
        if position == index:  # index == length: append at the tail
            previous.next = Node(data)
        else:
            raise IndexError("Index can't greater than the length of list")
# Demo: build a list and exercise add_at_index, printing after each
# insertion (expected output: '', '1', '1-->2', '1-->3-->2').
linked_list = LinkedList()
print(linked_list)
linked_list.add_at_index(0,1) # index function take two argument (index,data)...!!!!
print(linked_list)
linked_list.add_at_index(1,2) # index function take two argument (index,data)...!!!!
print(linked_list)
linked_list.add_at_index(1,3) # index function take two argument (index,data)...!!!!
print(linked_list)
| 38.22807
| 119
| 0.479119
|
3f0ac36a51b93c22b506e35f7443161865822f46
| 3,391
|
py
|
Python
|
scraper/scraper/db.py
|
toolen/covid-leningrad-region
|
d50c8e26269c4876c1ff927b97bd25969d7924c4
|
[
"MIT"
] | null | null | null |
scraper/scraper/db.py
|
toolen/covid-leningrad-region
|
d50c8e26269c4876c1ff927b97bd25969d7924c4
|
[
"MIT"
] | null | null | null |
scraper/scraper/db.py
|
toolen/covid-leningrad-region
|
d50c8e26269c4876c1ff927b97bd25969d7924c4
|
[
"MIT"
] | null | null | null |
"""This file contains the code for working with the database."""
import logging
from typing import Dict, List, Optional, Union, cast
from pymongo import MongoClient, UpdateOne
from pymongo.collection import Collection
from pymongo.errors import BulkWriteError
from tenacity import retry, wait_fixed
from scraper.constants import PROPERTY_DISTRICT, PROPERTY_DISTRICT_DATE
from scraper.types import DistrictType
logger = logging.getLogger(__name__)
class DBWrapper:
    """This class provides methods for working with the database."""

    def __init__(
        self,
        uri: str,
        db_name: str,
        collection_name: str,
        tls_cert_key_path: Optional[str] = None,
        tls_ca_path: Optional[str] = None,
    ) -> None:
        """
        Construct the DBWrapper class.
        :param uri: connection string
        :param db_name: database name
        :param collection_name: collection name
        :param tls_cert_key_path: path to TLS key certificate
        :param tls_ca_path: path to CA certificate
        """
        tls: Dict[str, Union[bool, str]] = {}
        if tls_cert_key_path:
            tls["tls"] = True
            tls["tlsCertificateKeyFile"] = tls_cert_key_path
        if tls_ca_path:
            tls["tlsCAFile"] = tls_ca_path
        self.db_name = db_name
        self.collection_name = collection_name
        self.client = self.get_client(uri, tls)

    # Retries forever every 5 seconds until the client can be constructed.
    @retry(wait=wait_fixed(5))
    def get_client(self, uri: str, tls: Dict[str, Union[bool, str]]) -> MongoClient:
        """
        Return MongoClient instance.
        :param uri: connection string.
        :param tls: TLS options.
        :return: MongoClient instance.
        """
        return MongoClient(
            uri,
            connectTimeoutMS=1000,
            retryWrites=True,
            maxPoolSize=50,
            wTimeoutMS=2500,
            **tls
        )

    def get_collection(self) -> Collection:
        """
        Return default collection from database.
        :return: Collection
        """
        return self.client[self.db_name][self.collection_name]

    def drop_collection(self) -> None:
        """
        Drop default collection.
        :return: None
        """
        collection = self.get_collection()
        collection.drop()

    def collection_size(self) -> int:
        """
        Return size of default collection.
        :return: estimated number of documents in the collection
        """
        collection = self.get_collection()
        return cast(int, collection.estimated_document_count())

    def close(self) -> None:
        """
        Close database connection.
        :return: None
        """
        self.client.close()

    def save_data(self, data: List[DistrictType]) -> None:
        """
        Save data into database.

        Performs an idempotent bulk upsert keyed on (date, district).
        :param data: data to save.
        :return: None
        """
        operations = []
        for district in data:
            filter_ = {
                PROPERTY_DISTRICT_DATE: district[PROPERTY_DISTRICT_DATE],
                PROPERTY_DISTRICT: district[PROPERTY_DISTRICT],
            }
            operations.append(UpdateOne(filter_, {"$set": district}, upsert=True))
        if not operations:
            # Bug fix: pymongo's bulk_write raises InvalidOperation when
            # given an empty list -- an empty scrape result is not an
            # error, there is simply nothing to do.
            logger.info("No data to write.")
            return
        try:
            collection = self.get_collection()
            collection.bulk_write(operations)
            logger.info("Write success.")
        except BulkWriteError as bwe:
            logger.error(bwe.details)
| 28.258333
| 84
| 0.598349
|
731518e39ee696af90ef69b8ac939589b5f645ee
| 20,408
|
py
|
Python
|
skimage/segmentation/random_walker_segmentation.py
|
lyuka/scikit-image
|
9fd8bb09a34d41aed078029f583ab2917ca87bbd
|
[
"BSD-3-Clause"
] | 1
|
2016-01-26T06:14:58.000Z
|
2016-01-26T06:14:58.000Z
|
skimage/segmentation/random_walker_segmentation.py
|
lyuka/scikit-image
|
9fd8bb09a34d41aed078029f583ab2917ca87bbd
|
[
"BSD-3-Clause"
] | null | null | null |
skimage/segmentation/random_walker_segmentation.py
|
lyuka/scikit-image
|
9fd8bb09a34d41aed078029f583ab2917ca87bbd
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Random walker segmentation algorithm
from *Random walks for image segmentation*, Leo Grady, IEEE Trans
Pattern Anal Mach Intell. 2006 Nov;28(11):1768-83.
Installing pyamg and using the 'cg_mg' mode of random_walker improves
significantly the performance.
"""
import warnings
import numpy as np
from scipy import sparse, ndimage
# executive summary for next code block: try to import umfpack from
# scipy, but make sure not to raise a fuss if it fails since it's only
# needed to speed up a few cases.
# See discussions at:
# https://groups.google.com/d/msg/scikit-image/FrM5IGP6wh4/1hp-FtVZmfcJ
# http://stackoverflow.com/questions/13977970/ignore-exceptions-printed-to-stderr-in-del/13977992?noredirect=1#comment28386412_13977992
# Optional acceleration: patch scipy's UMFPACK wrapper so that its __del__
# cannot raise during interpreter teardown, and record whether UMFPACK and
# pyamg are available (both are optional speed-ups, not requirements).
try:
    from scipy.sparse.linalg.dsolve import umfpack
    old_del = umfpack.UmfpackContext.__del__

    def new_del(self):
        # Swallow the AttributeError the original __del__ can raise when
        # module globals are torn down before the object is collected.
        try:
            old_del(self)
        except AttributeError:
            pass
    umfpack.UmfpackContext.__del__ = new_del
    UmfpackContext = umfpack.UmfpackContext()
except:  # NOTE(review): bare except -- also hides errors beyond ImportError
    UmfpackContext = None
try:
    from pyamg import ruge_stuben_solver
    amg_loaded = True
except ImportError:
    amg_loaded = False
from scipy.sparse.linalg import cg
from ..util import img_as_float
from ..filter import rank_order
#-----------Laplacian--------------------
def _make_graph_edges_3d(n_x, n_y, n_z):
"""Returns a list of edges for a 3D image.
Parameters
----------
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction
n_z: integer
The size of the grid in the z direction
Returns
-------
edges : (2, N) ndarray
with the total number of edges::
N = n_x * n_y * (nz - 1) +
n_x * (n_y - 1) * nz +
(n_x - 1) * n_y * nz
Graph edges with each column describing a node-id pair.
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_weights_3d(data, spacing, beta=130, eps=1.e-6,
                        multichannel=False):
    """Map image gradients to edge weights via exp(-beta * gradient**2).

    Weight calculation is the main difference in the multispectral
    version: the original single-channel gradient**2 is replaced by the
    sum of the squared per-channel gradients.
    """
    # Accumulate squared gradients over every channel (last axis).
    squared_gradients = 0
    for channel_index in range(0, data.shape[-1]):
        squared_gradients += _compute_gradients_3d(
            data[..., channel_index], spacing) ** 2
    # The standard deviation is taken over all channels together.
    beta /= 10 * data.std()
    if multichannel:
        # Extra sqrt(n_channels) factor so that passing several identical
        # spectra gives the same result as the single-channel case.
        beta /= np.sqrt(data.shape[-1])
    return np.exp(- squared_gradients * beta) + eps
def _compute_gradients_3d(data, spacing):
gr_deep = np.abs(data[:, :, :-1] - data[:, :, 1:]).ravel() / spacing[2]
gr_right = np.abs(data[:, :-1] - data[:, 1:]).ravel() / spacing[1]
gr_down = np.abs(data[:-1] - data[1:]).ravel() / spacing[0]
return np.r_[gr_deep, gr_right, gr_down]
def _make_laplacian_sparse(edges, weights):
"""
Sparse implementation
"""
pixel_nb = edges.max() + 1
diag = np.arange(pixel_nb)
i_indices = np.hstack((edges[0], edges[1]))
j_indices = np.hstack((edges[1], edges[0]))
data = np.hstack((-weights, -weights))
lap = sparse.coo_matrix((data, (i_indices, j_indices)),
shape=(pixel_nb, pixel_nb))
connect = - np.ravel(lap.sum(axis=1))
lap = sparse.coo_matrix(
(np.hstack((data, connect)), (np.hstack((i_indices, diag)),
np.hstack((j_indices, diag)))),
shape=(pixel_nb, pixel_nb))
return lap.tocsr()
def _clean_labels_ar(X, labels, copy=False):
X = X.astype(labels.dtype)
if copy:
labels = np.copy(labels)
labels = np.ravel(labels)
labels[labels == 0] = X
return labels
def _buildAB(lap_sparse, labels):
    """
    Build the matrix A and rhs B of the linear system to solve.
    A and B are two block of the laplacian of the image graph.
    """
    # Drop inactive (negative) pixels, then split the remaining pixel
    # indices into seeded (> 0) and unseeded (== 0) sets.
    labels = labels[labels >= 0]
    indices = np.arange(labels.size)
    unlabeled_indices = indices[labels == 0]
    seeds_indices = indices[labels > 0]
    # The following two lines take most of the time in this function
    B = lap_sparse[unlabeled_indices][:, seeds_indices]
    lap_sparse = lap_sparse[unlabeled_indices][:, unlabeled_indices]
    nlabels = labels.max()
    rhs = []
    for lab in range(1, nlabels + 1):
        # One right-hand side per label: B restricted to the seed columns
        # carrying this label (boolean mask as a sparse column vector).
        mask = (labels[seeds_indices] == lab)
        fs = sparse.csr_matrix(mask)
        fs = fs.transpose()
        rhs.append(B * fs)
    return lap_sparse, rhs
def _mask_edges_weights(edges, weights, mask):
    """
    Remove edges of the graph connected to masked nodes, as well as
    corresponding weights of the edges.
    """
    # mask0/mask1 hold the mask value of the first/second endpoint of every
    # edge, in the same deep/right/down order used to build the edge list.
    mask0 = np.hstack((mask[:, :, :-1].ravel(), mask[:, :-1].ravel(),
                       mask[:-1].ravel()))
    mask1 = np.hstack((mask[:, :, 1:].ravel(), mask[:, 1:].ravel(),
                       mask[1:].ravel()))
    # Keep only edges whose BOTH endpoints are inside the mask.
    ind_mask = np.logical_and(mask0, mask1)
    edges, weights = edges[:, ind_mask], weights[ind_mask]
    max_node_index = edges.max()
    # Reassign edges labels to 0, 1, ... edges_number - 1
    order = np.searchsorted(np.unique(edges.ravel()),
                            np.arange(max_node_index + 1))
    edges = order[edges.astype(np.int64)]
    return edges, weights
def _build_laplacian(data, spacing, mask=None, beta=50,
                     multichannel=False):
    """Assemble the sparse graph Laplacian for ``data``.

    Edges connect 6-neighbours; weights decay with the image gradient
    (steepness controlled by ``beta``). Masked-out nodes are removed
    before assembly when a mask is given.
    """
    shape_3d = tuple(data.shape[i] for i in range(3))
    edges = _make_graph_edges_3d(*shape_3d)
    weights = _compute_weights_3d(data, spacing, beta=beta, eps=1.e-10,
                                  multichannel=multichannel)
    if mask is not None:
        edges, weights = _mask_edges_weights(edges, weights, mask)
    laplacian = _make_laplacian_sparse(edges, weights)
    del edges, weights
    return laplacian
#----------- Random walker algorithm --------------------------------
def random_walker(data, labels, beta=130, mode='bf', tol=1.e-3, copy=True,
                  multichannel=False, return_full_prob=False, spacing=None):
    """Random walker algorithm for segmentation from markers.
    Random walker algorithm is implemented for gray-level or multichannel
    images.
    Parameters
    ----------
    data : array_like
        Image to be segmented in phases. Gray-level `data` can be two- or
        three-dimensional; multichannel data can be three- or four-
        dimensional (multichannel=True) with the highest dimension denoting
        channels. Data spacing is assumed isotropic unless the `spacing`
        keyword argument is used.
    labels : array of ints, of same shape as `data` without channels dimension
        Array of seed markers labeled with different positive integers
        for different phases. Zero-labeled pixels are unlabeled pixels.
        Negative labels correspond to inactive pixels that are not taken
        into account (they are removed from the graph). If labels are not
        consecutive integers, the labels array will be transformed so that
        labels are consecutive. In the multichannel case, `labels` should have
        the same shape as a single channel of `data`, i.e. without the final
        dimension denoting channels.
    beta : float
        Penalization coefficient for the random walker motion
        (the greater `beta`, the more difficult the diffusion).
    mode : string, available options {'cg_mg', 'cg', 'bf'}
        Mode for solving the linear system in the random walker algorithm.
        If no preference given, automatically attempt to use the fastest
        option available ('cg_mg' from pyamg >> 'cg' with UMFPACK > 'bf').
        - 'bf' (brute force): an LU factorization of the Laplacian is
        computed. This is fast for small images (<1024x1024), but very slow
        and memory-intensive for large images (e.g., 3-D volumes).
        - 'cg' (conjugate gradient): the linear system is solved iteratively
        using the Conjugate Gradient method from scipy.sparse.linalg. This is
        less memory-consuming than the brute force method for large images,
        but it is quite slow.
        - 'cg_mg' (conjugate gradient with multigrid preconditioner): a
        preconditioner is computed using a multigrid solver, then the
        solution is computed with the Conjugate Gradient method. This mode
        requires that the pyamg module (http://pyamg.org/) is
        installed. For images of size > 512x512, this is the recommended
        (fastest) mode.
    tol : float
        tolerance to achieve when solving the linear system, in
        'cg' and 'cg_mg' modes.
    copy : bool
        If copy is False, the `labels` array will be overwritten with
        the result of the segmentation. Use copy=False if you want to
        save on memory.
    multichannel : bool, default False
        If True, input data is parsed as multichannel data (see 'data' above
        for proper input format in this case)
    return_full_prob : bool, default False
        If True, the probability that a pixel belongs to each of the labels
        will be returned, instead of only the most likely label.
    spacing : iterable of floats
        Spacing between voxels in each spatial dimension. If `None`, then
        the spacing between pixels/voxels in each dimension is assumed 1.
    Returns
    -------
    output : ndarray
        * If `return_full_prob` is False, array of ints of same shape as
          `data`, in which each pixel has been labeled according to the marker
          that reached the pixel first by anisotropic diffusion.
        * If `return_full_prob` is True, array of floats of shape
          `(nlabels, data.shape)`. `output[label_nb, i, j]` is the probability
          that label `label_nb` reaches the pixel `(i, j)` first.
    See also
    --------
    skimage.morphology.watershed: watershed segmentation
        A segmentation algorithm based on mathematical morphology
        and "flooding" of regions from markers.
    Notes
    -----
    Multichannel inputs are scaled with all channel data combined. Ensure all
    channels are separately normalized prior to running this algorithm.
    The `spacing` argument is specifically for anisotropic datasets, where
    data points are spaced differently in one or more spatial dimensions.
    Anisotropic data is commonly encountered in medical imaging.
    The algorithm was first proposed in *Random walks for image
    segmentation*, Leo Grady, IEEE Trans Pattern Anal Mach Intell.
    2006 Nov;28(11):1768-83.
    The algorithm solves the diffusion equation at infinite times for
    sources placed on markers of each phase in turn. A pixel is labeled with
    the phase that has the greatest probability to diffuse first to the pixel.
    The diffusion equation is solved by minimizing x.T L x for each phase,
    where L is the Laplacian of the weighted graph of the image, and x is
    the probability that a marker of the given phase arrives first at a pixel
    by diffusion (x=1 on markers of the phase, x=0 on the other markers, and
    the other coefficients are looked for). Each pixel is attributed the label
    for which it has a maximal value of x. The Laplacian L of the image
    is defined as:
       - L_ii = d_i, the number of neighbors of pixel i (the degree of i)
       - L_ij = -w_ij if i and j are adjacent pixels
    The weight w_ij is a decreasing function of the norm of the local gradient.
    This ensures that diffusion is easier between pixels of similar values.
    When the Laplacian is decomposed into blocks of marked and unmarked
    pixels::
        L = M B.T
            B A
    with first indices corresponding to marked pixels, and then to unmarked
    pixels, minimizing x.T L x for one phase amount to solving::
        A x = - B x_m
    where x_m = 1 on markers of the given phase, and 0 on other markers.
    This linear system is solved in the algorithm using a direct method for
    small images, and an iterative method for larger images.
    Examples
    --------
    >>> a = np.zeros((10, 10)) + 0.2 * np.random.random((10, 10))
    >>> a[5:8, 5:8] += 1
    >>> b = np.zeros_like(a)
    >>> b[3, 3] = 1  # Marker for first phase
    >>> b[6, 6] = 2  # Marker for second phase
    >>> random_walker(a, b)
    array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1, 2, 2, 2, 1, 1],
           [1, 1, 1, 1, 1, 2, 2, 2, 1, 1],
           [1, 1, 1, 1, 1, 2, 2, 2, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], dtype=int32)
    """
    # Parse input data
    if mode is None:
        # Pick the fastest solver available on this installation.
        if amg_loaded:
            mode = 'cg_mg'
        elif UmfpackContext is not None:
            mode = 'cg'
        else:
            mode = 'bf'
    if UmfpackContext is None and mode == 'cg':
        warnings.warn('"cg" mode will be used, but it may be slower than '
                      '"bf" because SciPy was built without UMFPACK. Consider'
                      ' rebuilding SciPy with UMFPACK; this will greatly '
                      'accelerate the conjugate gradient ("cg") solver. '
                      'You may also install pyamg and run the random_walker '
                      'function in "cg_mg" mode (see docstring).')
    if (labels != 0).all():
        # Nothing to segment: every pixel is already labeled.
        warnings.warn('Random walker only segments unlabeled areas, where '
                      'labels == 0. No zero valued areas in labels were '
                      'found. Returning provided labels.')
        if return_full_prob:
            # Find and iterate over valid labels
            unique_labels = np.unique(labels)
            unique_labels = unique_labels[unique_labels > 0]
            # NOTE: ``np.bool`` was removed in NumPy 1.24; use the builtin.
            out_labels = np.empty(labels.shape + (len(unique_labels),),
                                  dtype=bool)
            for n, i in enumerate(unique_labels):
                out_labels[..., n] = (labels == i)
        else:
            out_labels = labels
        return out_labels
    # This algorithm expects 4-D arrays of floats, where the first three
    # dimensions are spatial and the final denotes channels. 2-D images have
    # a singleton placeholder dimension added for the third spatial dimension,
    # and single channel images likewise have a singleton added for channels.
    # The following block ensures valid input and coerces it to the correct
    # form.
    if not multichannel:
        if data.ndim < 2 or data.ndim > 3:
            raise ValueError('For non-multichannel input, data must be of '
                             'dimension 2 or 3.')
        dims = data.shape  # To reshape final labeled result
        data = np.atleast_3d(img_as_float(data))[..., np.newaxis]
    else:
        if data.ndim < 3:
            raise ValueError('For multichannel input, data must have 3 or 4 '
                             'dimensions.')
        dims = data[..., 0].shape  # To reshape final labeled result
        data = img_as_float(data)
        if data.ndim == 3:  # 2D multispectral, needs singleton in 3rd axis
            data = data[:, :, np.newaxis, :]
    # Spacing kwarg checks
    if spacing is None:
        spacing = np.asarray((1.,) * 3)
    elif len(spacing) == len(dims):
        if len(spacing) == 2:  # Need a dummy spacing for singleton 3rd dim
            spacing = np.r_[spacing, 1.]
        else:                  # Convert to array
            spacing = np.asarray(spacing)
    else:
        raise ValueError('Input argument `spacing` incorrect, should be an '
                         'iterable with one number per spatial dimension.')
    if copy:
        labels = np.copy(labels)
    label_values = np.unique(labels)
    # Reorder label values to have consecutive integers (no gaps)
    if np.any(np.diff(label_values) != 1):
        mask = labels >= 0
        labels[mask] = rank_order(labels[mask])[0].astype(labels.dtype)
    labels = labels.astype(np.int32)
    # If the array has pruned zones, be sure that no isolated pixels
    # exist between pruned zones (they could not be determined)
    if np.any(labels < 0):
        filled = ndimage.binary_propagation(labels > 0, mask=labels >= 0)
        labels[np.logical_and(np.logical_not(filled), labels == 0)] = -1
        del filled
    labels = np.atleast_3d(labels)
    if np.any(labels < 0):
        lap_sparse = _build_laplacian(data, spacing, mask=labels >= 0,
                                      beta=beta, multichannel=multichannel)
    else:
        lap_sparse = _build_laplacian(data, spacing, beta=beta,
                                      multichannel=multichannel)
    lap_sparse, B = _buildAB(lap_sparse, labels)
    # We solve the linear system
    # lap_sparse X = B
    # where X[i, j] is the probability that a marker of label i arrives
    # first at pixel j by anisotropic diffusion.
    if mode == 'cg':
        X = _solve_cg(lap_sparse, B, tol=tol,
                      return_full_prob=return_full_prob)
    if mode == 'cg_mg':
        if not amg_loaded:
            warnings.warn(
                """pyamg (http://pyamg.org/)) is needed to use
                this mode, but is not installed. The 'cg' mode will be used
                instead.""")
            X = _solve_cg(lap_sparse, B, tol=tol,
                          return_full_prob=return_full_prob)
        else:
            X = _solve_cg_mg(lap_sparse, B, tol=tol,
                             return_full_prob=return_full_prob)
    if mode == 'bf':
        X = _solve_bf(lap_sparse, B,
                      return_full_prob=return_full_prob)
    # Clean up results
    if return_full_prob:
        # NOTE: ``np.float`` was removed in NumPy 1.24; use the builtin.
        labels = labels.astype(float)
        X = np.array([_clean_labels_ar(Xline, labels,
                     copy=True).reshape(dims) for Xline in X])
        # Force probability 1 on the seed pixels of each phase.
        for i in range(1, int(labels.max()) + 1):
            mask_i = np.squeeze(labels == i)
            X[:, mask_i] = 0
            X[i - 1, mask_i] = 1
    else:
        X = _clean_labels_ar(X + 1, labels).reshape(dims)
    return X
def _solve_bf(lap_sparse, B, return_full_prob=False):
"""
solves lap_sparse X_i = B_i for each phase i. An LU decomposition
of lap_sparse is computed first. For each pixel, the label i
corresponding to the maximal X_i is returned.
"""
lap_sparse = lap_sparse.tocsc()
solver = sparse.linalg.factorized(lap_sparse.astype(np.double))
X = np.array([solver(np.array((-B[i]).todense()).ravel())
for i in range(len(B))])
if not return_full_prob:
X = np.argmax(X, axis=0)
return X
def _solve_cg(lap_sparse, B, tol, return_full_prob=False):
"""
solves lap_sparse X_i = B_i for each phase i, using the conjugate
gradient method. For each pixel, the label i corresponding to the
maximal X_i is returned.
"""
lap_sparse = lap_sparse.tocsc()
X = []
for i in range(len(B)):
x0 = cg(lap_sparse, -B[i].todense(), tol=tol)[0]
X.append(x0)
if not return_full_prob:
X = np.array(X)
X = np.argmax(X, axis=0)
return X
def _solve_cg_mg(lap_sparse, B, tol, return_full_prob=False):
    """
    Solve ``lap_sparse X_i = B_i`` for every phase ``i`` with conjugate
    gradient, preconditioned by a Ruge-Stuben multigrid hierarchy from
    pyamg. Unless ``return_full_prob`` is True, the label of the maximal
    ``X_i`` per pixel is returned.
    """
    probabilities = []
    # Build the multigrid hierarchy once and reuse it as a V-cycle
    # preconditioner for every right-hand side.
    multilevel = ruge_stuben_solver(lap_sparse)
    precond = multilevel.aspreconditioner(cycle='V')
    for phase in range(len(B)):
        solution = cg(lap_sparse, -B[phase].todense(), tol=tol, M=precond,
                      maxiter=30)[0]
        probabilities.append(solution)
    if return_full_prob:
        return probabilities
    return np.argmax(np.array(probabilities), axis=0)
| 39.550388
| 135
| 0.61814
|
af53fd879153376910929445a6df2a5cbf8e391f
| 4,902
|
py
|
Python
|
env.py
|
crazycs520/py12306
|
f8340c84524636493505154d3b3fecda15f58a42
|
[
"Apache-2.0"
] | null | null | null |
env.py
|
crazycs520/py12306
|
f8340c84524636493505154d3b3fecda15f58a42
|
[
"Apache-2.0"
] | null | null | null |
env.py
|
crazycs520/py12306
|
f8340c84524636493505154d3b3fecda15f58a42
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# 12306 accounts
USER_ACCOUNTS = [
    # Query-only mode (no ordering) is also supported; just comment out the account below
    {
        'key': 0,  # 'key' must be unique when multiple accounts are used
        'user_name': '152110938',
        'password': '152110938'
    },
    # {
    #     'key': 'wangwu',
    #     'user_name': 'wangwu@qq.com',
    #     'password': 'wangwu'
    # }
]
# Query interval (the interval between each date of each job / in seconds).
# The actual delay is a random number between interval/2 and interval;
# e.g. a value of 1 yields a random delay between 0.5 and 1 second.
# A dict form is also accepted, e.g.: {'min': 0.5, 'max': 1}
QUERY_INTERVAL = 1
# User heartbeat-check interval; same format as above
USER_HEARTBEAT_INTERVAL = 120
# Multithreaded querying
QUERY_JOB_THREAD_ENABLED = 0  # when enabled, each job gets its own thread
# Captcha platform account.
# Only the free captcha API and Ruokuai are supported; register at http://www.ruokuai.com/login
AUTO_CODE_PLATFORM = 'free'  # 'free' for the free service, 'ruokuai' for Ruokuai; the free service is best-effort, switch manually if it stops working
AUTO_CODE_ACCOUNT = {  # can be omitted when using 'free'
    'user': 'your user name',
    'pwd': 'your password'
}
# Voice verification call.
# No ideal provider was found; the current one is from the Aliyun API market,
# it basically meets the requirements and is cheap.
# After purchasing, find the APPCODE in the console and put it below.
# Provider (Yiyuan): https://market.aliyun.com/products/57126001/cmapi019902.html
# Updated 2019-01-18:
# added a new provider (Dingxin): https://market.aliyun.com/products/56928004/cmapi026600.html?spm=5176.2020520132.101.2.e27e7218KQttQS
NOTIFICATION_BY_VOICE_CODE = 0  # enable voice notification
NOTIFICATION_VOICE_CODE_TYPE = 'dingxin'  # voice provider; available options: dingxin, yiyuan
NOTIFICATION_API_APP_CODE = 'your app code'
NOTIFICATION_VOICE_CODE_PHONE = 'your phone'  # phone number that receives the call
# DingTalk notification.
# Usage: https://open-doc.dingtalk.com/docs/doc.htm?treeId=257&articleId=105735&docType=1
DINGTALK_ENABLED = 1
DINGTALK_WEBHOOK = 'https://oapi.dingtalk.com/robot/send?access_token=453712d754c47ff387de3d04089d2348a092639f639d6b0382a732dd3f4b04e2'
# Telegram push notification.
# Two bots are currently available:
# 1: https://t.me/notificationme_bot
# 2: https://t.me/RE_Link_Push_bot
# Pick either bot and follow it to obtain your URL; if there is no reply, send the bot: /start
# Put the obtained URL in the field below.
# Note: both bots are provided free of charge by third parties and availability
# is not guaranteed; use another notification channel if neither works.
# Bot1 source: https://github.com/Fndroid/tg_push_bot
# Bot2 source: https://szc.me/post/2.html
TELEGRAM_ENABLED = 0
TELEGRAM_BOT_API_URL = 'https://tgbot.lbyczf.com/sendMessage/:your_token'
# ServerChan and PushBear WeChat push notifications.
# Usage:
# ServerChan http://sc.ftqq.com
# PushBear http://pushbear.ftqq.com
SERVERCHAN_ENABLED = 0
SERVERCHAN_KEY = ''
PUSHBEAR_ENABLED = 0
PUSHBEAR_KEY = ''
# Write logs to a file
OUT_PUT_LOG_TO_FILE_ENABLED = 0
OUT_PUT_LOG_TO_FILE_PATH = 'runtime/12306.log'  # log path
# Distributed cluster configuration
CLUSTER_ENABLED = 1  # cluster switch
NODE_IS_MASTER = 1  # whether this is the master node; only one master may be enabled at a time
NODE_SLAVE_CAN_BE_MASTER = 1  # whether a slave may auto-promote to master when the master goes down (recommended)
NODE_NAME = 'master'  # node name; must be unique
REDIS_HOST = 'localhost'  # Redis host
REDIS_PORT = '6379'  # Redis port
REDIS_PASSWORD = ''  # Redis password; leave empty if none
# Email configuration
EMAIL_ENABLED = 0  # enable email notification
EMAIL_SENDER = 'sender@example.com'  # mail sender
EMAIL_RECEIVER = 'receiver@example.com'  # mail receiver  # multiple allowed: [email1@gmail.com, email2@gmail.com]
EMAIL_SERVER_HOST = 'localhost'  # mail server host
EMAIL_SERVER_USER = ''  # mail server login user
EMAIL_SERVER_PASSWORD = ''  # mail server login password
# Web management
WEB_ENABLE = 1  # enable the web admin interface
WEB_USER = {  # login credentials
    'username': 'admin',
    'password': 'admin'
}
WEB_PORT = 8008  # listening port
# Whether to enable CDN-based querying
CDN_ENABLED = 0
CDN_CHECK_TIME_OUT = 1  # timeout when checking whether a single CDN node is usable
# Query jobs
QUERY_JOBS = [
    {
        # 'job_name': 'bj -> sz',  # job name; defaults to the station names, must be unique
        'account_key': 0,  # orders will be placed with the specified account
        'left_dates': [  # departure dates :Array
            "2019-02-10",
            "2019-02-10",
        ],
        'stations': {  # stations; several stations can be queried at once :Dict or :List
            'left': '常德',
            'arrive': '广州',
        },
        # # Multiple-station example (adding a few extra stops can raise the success rate)
        # 'stations': [{
        #     'left': '北京',
        #     'arrive': '深圳',
        # },{  # multiple-station example
        #     'left': '北京',
        #     'arrive': '广州',
        # }],
        'members': [  # passenger names; the passenger type is detected from the current account. For a child ticket list the same name twice, e.g. ['张三', '张三']
            "陈霜",
            # 7,  # a passenger can also be identified by code: run `python main.py -t`, then after login find the code in runtime/user/<username>_passengers.json
        ],
        'allow_less_member': 0,  # whether to submit only part of the passengers when there are not enough tickets
        'seats': [  # seat filter, in order of preference :Array
            # available values: 特等座, 商务座, 一等座, 二等座, 软卧, 硬卧, 动卧, 软座, 硬座, 无座
            '硬卧',
            '硬座',
            '软卧'
        ],
        'train_numbers': [  # train-number filter; may be empty ([]) to allow every train; case must match exactly
            "K9035",
            "K9075",
        ],
        'except_train_numbers': [  # excluded trains; train_numbers and except_train_numbers cannot both be set
        ],
        'period': {  # departure-time filter
            'from': '00:00',
            'to': '24:00'
        }
    },
    # {
    #     'job_name': 'cd -> gz',  # job name; defaults to the station names, must be unique
    #     'account_key': 0,  # orders will be placed with the specified account
    #     'left_dates': [
    #         "2019-01-27",
    #         "2019-01-28"
    #     ],
    #     'stations': {
    #         'left': '成都',
    #         'arrive': '广州',
    #     },
    #     'members': [
    #         "小王",
    #     ],
    #     'allow_less_member': 0,
    #     'seats': [
    #         '硬卧',
    #     ],
    #     'train_numbers': []
    # }
]
| 28.5
| 135
| 0.598531
|
1f6615fc09af3d6abf56b99972ea68e4ab30c184
| 8,501
|
py
|
Python
|
python/ray/tune/tests/test_experiment_analysis_mem.py
|
daobook/ray
|
af9f1ef4dc160e0671206556b387f8017f3c3930
|
[
"Apache-2.0"
] | 33
|
2020-05-27T14:25:24.000Z
|
2022-03-22T06:11:30.000Z
|
python/ray/tune/tests/test_experiment_analysis_mem.py
|
daobook/ray
|
af9f1ef4dc160e0671206556b387f8017f3c3930
|
[
"Apache-2.0"
] | 115
|
2021-01-19T04:40:50.000Z
|
2022-03-26T07:09:00.000Z
|
python/ray/tune/tests/test_experiment_analysis_mem.py
|
daobook/ray
|
af9f1ef4dc160e0671206556b387f8017f3c3930
|
[
"Apache-2.0"
] | 5
|
2020-08-06T15:53:07.000Z
|
2022-02-09T03:31:31.000Z
|
import json
import unittest
import shutil
import tempfile
import os
import random
import pandas as pd
import pytest
import numpy as np
import ray
from ray.tune import (run, Trainable, sample_from, ExperimentAnalysis,
grid_search)
from ray.tune.result import DEBUG_METRICS
from ray.tune.utils.mock import MyTrainableClass
from ray.tune.utils.serialization import TuneFunctionEncoder
class ExperimentAnalysisInMemorySuite(unittest.TestCase):
    """Tests for ``ExperimentAnalysis`` behavior that only needs in-memory
    trial data produced by a deterministic mock trainable."""

    @classmethod
    def setUpClass(cls):
        ray.init(local_mode=False, num_cpus=1)

    @classmethod
    def tearDownClass(cls):
        ray.shutdown()

    def setUp(self):
        class MockTrainable(Trainable):
            # Fixed per-config score sequences so each metric-aggregation
            # mode ("all"/"last"/"avg"/...) has a known expected winner.
            scores_dict = {
                0: [5, 4, 4, 4, 4, 4, 4, 4, 0],
                1: [4, 3, 3, 3, 3, 3, 3, 3, 1],
                2: [2, 1, 1, 1, 1, 1, 1, 1, 8],
                3: [9, 7, 7, 7, 7, 7, 7, 7, 6],
                4: [7, 5, 5, 5, 5, 5, 5, 5, 3]
            }

            def setup(self, config):
                self.id = config["id"]
                self.idx = 0

            def step(self):
                # Replay the next score for this trial's id.
                val = self.scores_dict[self.id][self.idx]
                self.idx += 1
                return {"score": val}

            def save_checkpoint(self, checkpoint_dir):
                pass

            def load_checkpoint(self, checkpoint_path):
                pass

        self.MockTrainable = MockTrainable
        self.test_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.test_dir, ignore_errors=True)

    def testInitLegacy(self):
        """Should still work if checkpoints are not json strings"""
        experiment_checkpoint_path = os.path.join(self.test_dir,
                                                  "experiment_state.json")
        checkpoint_data = {
            "checkpoints": [{
                "trainable_name": "MockTrainable",
                "logdir": "/mock/test/MockTrainable_0_id=3_2020-07-12"
            }]
        }
        with open(experiment_checkpoint_path, "w") as f:
            f.write(json.dumps(checkpoint_data))
        experiment_analysis = ExperimentAnalysis(experiment_checkpoint_path)
        self.assertEqual(len(experiment_analysis._checkpoints), 1)
        self.assertFalse(experiment_analysis.trials)

    def testInit(self):
        """Checkpoints serialized as JSON strings are parsed correctly."""
        experiment_checkpoint_path = os.path.join(self.test_dir,
                                                  "experiment_state.json")
        checkpoint_data = {
            "checkpoints": [
                json.dumps(
                    {
                        "trainable_name": "MockTrainable",
                        "logdir": "/mock/test/MockTrainable_0_id=3_2020-07-12"
                    },
                    cls=TuneFunctionEncoder)
            ]
        }
        with open(experiment_checkpoint_path, "w") as f:
            f.write(json.dumps(checkpoint_data))
        experiment_analysis = ExperimentAnalysis(experiment_checkpoint_path)
        self.assertEqual(len(experiment_analysis._checkpoints), 1)
        self.assertFalse(experiment_analysis.trials)

    def testInitException(self):
        """A missing checkpoint file raises ValueError."""
        experiment_checkpoint_path = os.path.join(self.test_dir, "mock.json")
        with pytest.raises(ValueError):
            ExperimentAnalysis(experiment_checkpoint_path)

    def testCompareTrials(self):
        """get_best_trial honors every metric-aggregation scope."""
        scores = np.asarray(list(self.MockTrainable.scores_dict.values()))
        scores_all = scores.flatten("F")
        scores_last = scores_all[5:]
        ea = run(
            self.MockTrainable,
            name="analysis_exp",
            local_dir=self.test_dir,
            stop={"training_iteration": len(scores[0])},
            num_samples=1,
            config={"id": grid_search(list(range(5)))})
        max_all = ea.get_best_trial("score", "max",
                                    "all").metric_analysis["score"]["max"]
        min_all = ea.get_best_trial("score", "min",
                                    "all").metric_analysis["score"]["min"]
        max_last = ea.get_best_trial("score", "max",
                                     "last").metric_analysis["score"]["last"]
        max_avg = ea.get_best_trial("score", "max",
                                    "avg").metric_analysis["score"]["avg"]
        min_avg = ea.get_best_trial("score", "min",
                                    "avg").metric_analysis["score"]["avg"]
        max_avg_5 = ea.get_best_trial(
            "score", "max",
            "last-5-avg").metric_analysis["score"]["last-5-avg"]
        min_avg_5 = ea.get_best_trial(
            "score", "min",
            "last-5-avg").metric_analysis["score"]["last-5-avg"]
        max_avg_10 = ea.get_best_trial(
            "score", "max",
            "last-10-avg").metric_analysis["score"]["last-10-avg"]
        min_avg_10 = ea.get_best_trial(
            "score", "min",
            "last-10-avg").metric_analysis["score"]["last-10-avg"]
        self.assertEqual(max_all, max(scores_all))
        self.assertEqual(min_all, min(scores_all))
        self.assertEqual(max_last, max(scores_last))
        self.assertNotEqual(max_last, max(scores_all))
        self.assertAlmostEqual(max_avg, max(np.mean(scores, axis=1)))
        self.assertAlmostEqual(min_avg, min(np.mean(scores, axis=1)))
        self.assertAlmostEqual(max_avg_5, max(np.mean(scores[:, -5:], axis=1)))
        self.assertAlmostEqual(min_avg_5, min(np.mean(scores[:, -5:], axis=1)))
        self.assertAlmostEqual(max_avg_10, max(
            np.mean(scores[:, -10:], axis=1)))
        self.assertAlmostEqual(min_avg_10, min(
            np.mean(scores[:, -10:], axis=1)))

    def testRemoveMagicResults(self):
        """Debug metrics and config keys are stripped from trial analysis."""
        [trial] = run(
            self.MockTrainable,
            name="analysis_remove_exp",
            local_dir=self.test_dir,
            stop={
                "training_iteration": 9
            },
            num_samples=1,
            config={
                "id": 1
            }).trials
        for metric in DEBUG_METRICS:
            self.assertNotIn(metric, trial.metric_analysis)
            self.assertNotIn(metric, trial.metric_n_steps)
        self.assertTrue(not any(
            metric.startswith("config") for metric in trial.metric_analysis))
        self.assertTrue(not any(
            metric.startswith("config") for metric in trial.metric_n_steps))
class AnalysisSuite(unittest.TestCase):
    # End-to-end checks of ExperimentAnalysis over real experiment
    # directories produced by two short tune runs in setUp.

    @classmethod
    def setUpClass(cls):
        ray.init(local_mode=True, include_dashboard=False)

    @classmethod
    def tearDownClass(cls):
        ray.shutdown()

    def setUp(self):
        # Force frequent global checkpointing so both runs leave
        # experiment state on disk for the analysis to load.
        os.environ["TUNE_GLOBAL_CHECKPOINT_S"] = "1"
        self.test_dir = tempfile.mkdtemp()
        self.num_samples = 10
        self.metric = "episode_reward_mean"
        self.run_test_exp(test_name="analysis_exp1")
        self.run_test_exp(test_name="analysis_exp2")

    def run_test_exp(self, test_name=None):
        """Run a one-iteration experiment with randomized width/height."""
        run(MyTrainableClass,
            name=test_name,
            local_dir=self.test_dir,
            stop={"training_iteration": 1},
            num_samples=self.num_samples,
            config={
                "width": sample_from(
                    lambda spec: 10 + int(90 * random.random())),
                "height": sample_from(lambda spec: int(100 * random.random())),
            })

    def tearDown(self):
        shutil.rmtree(self.test_dir, ignore_errors=True)

    def testDataframe(self):
        # One row per trial across both experiments.
        analysis = ExperimentAnalysis(self.test_dir)
        df = analysis.dataframe(self.metric, mode="max")
        self.assertTrue(isinstance(df, pd.DataFrame))
        self.assertEqual(df.shape[0], self.num_samples * 2)

    def testBestLogdir(self):
        # Best logdirs live under the test dir and differ for min vs. max.
        analysis = ExperimentAnalysis(self.test_dir)
        logdir = analysis.get_best_logdir(self.metric, mode="max")
        self.assertTrue(logdir.startswith(self.test_dir))
        logdir2 = analysis.get_best_logdir(self.metric, mode="min")
        self.assertTrue(logdir2.startswith(self.test_dir))
        self.assertNotEqual(logdir, logdir2)

    def testBestConfigIsLogdir(self):
        # get_best_config must agree with the config stored for the
        # best logdir, for both optimization directions.
        analysis = ExperimentAnalysis(self.test_dir)
        for metric, mode in [(self.metric, "min"), (self.metric, "max")]:
            logdir = analysis.get_best_logdir(metric, mode=mode)
            best_config = analysis.get_best_config(metric, mode=mode)
            self.assertEqual(analysis.get_all_configs()[logdir], best_config)
# Run this module's tests under pytest when executed directly.
if __name__ == "__main__":
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| 36.32906
| 79
| 0.581814
|
886e1cd6f912085a63fc294a605ea762da2e1a7a
| 325,997
|
py
|
Python
|
tir/technologies/webapp_internal.py
|
totvs/tir
|
2f096b1b6f103b0b3069ce0171f9d6860712f375
|
[
"MIT"
] | 55
|
2018-09-12T22:01:42.000Z
|
2022-01-27T19:16:56.000Z
|
tir/technologies/webapp_internal.py
|
totvs/tir
|
2f096b1b6f103b0b3069ce0171f9d6860712f375
|
[
"MIT"
] | 44
|
2018-09-17T13:50:42.000Z
|
2021-08-31T17:54:32.000Z
|
tir/technologies/webapp_internal.py
|
totvs/tir
|
2f096b1b6f103b0b3069ce0171f9d6860712f375
|
[
"MIT"
] | 55
|
2018-09-13T01:44:42.000Z
|
2022-03-08T21:37:35.000Z
|
import re
import time
import pandas as pd
import inspect
import os
import random
import uuid
from functools import reduce
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
import tir.technologies.core.enumerations as enum
from tir.technologies.core.log import Log
from tir.technologies.core.config import ConfigLoader
from tir.technologies.core.language import LanguagePack
from tir.technologies.core.third_party.xpath_soup import xpath_soup
from tir.technologies.core.psutil_info import system_info
from tir.technologies.core.base import Base
from tir.technologies.core.numexec import NumExec
from math import sqrt, pow
from selenium.common.exceptions import *
from datetime import datetime
from tir.technologies.core.logging_config import logger
import pathlib
class WebappInternal(Base):
"""
Internal implementation of Protheus Webapp class.
This class contains all the methods defined to run Selenium Interface Tests on Protheus Webapp.
Internal methods should have the **[Internal]** tag and should not be accessible to the user.
:param config_path: The path to the config file. - **Default:** "" (empty string)
:type config_path: str
:param autostart: Sets whether TIR should open browser and execute from the start. - **Default:** True
:type: bool
Usage:
>>> # Inside __init__ method in Webapp class of main.py
>>> def __init__(self, config_path="", autostart=True):
>>> self.__webapp = WebappInternal(config_path, autostart)
"""
    def __init__(self, config_path="", autostart=True):
        """
        Definition of each global variable:
        base_container: A variable to contain the layer element to be used on all methods.
        grid_check: List with fields from a grid that must be checked in the next LoadGrid call.
        grid_counters: A global counter of grids' last row to be filled.
        grid_input: List with fields from a grid that must be filled in the next LoadGrid call.
        used_ids: Dictionary of element ids and container already captured by a label search.

        :param config_path: Path to the config file. - **Default:** "" (empty string)
        :type config_path: str
        :param autostart: Whether the browser should be opened and started immediately. - **Default:** True
        :type autostart: bool
        """
        # A WebDriver startup failure is deferred so it can be reported via
        # the regular log_error flow once the instance state exists.
        webdriver_exception = None
        try:
            super().__init__(config_path, autostart)
        except WebDriverException as e:
            webdriver_exception = e
        # CSS selectors of the container elements the scraping methods search in.
        self.containers_selectors = {
            "SetButton" : ".tmodaldialog,.ui-dialog",
            "GetCurrentContainer": ".tmodaldialog",
            "AllContainers": "body,.tmodaldialog,.ui-dialog",
            "ClickImage": ".tmodaldialog",
            "BlockerContainers": ".tmodaldialog,.ui-dialog",
            "Containers": ".tmodaldialog,.ui-dialog"
        }
        self.base_container = ".tmodaldialog"
        self.grid_check = []
        self.grid_counters = {}
        self.grid_input = []
        self.down_loop_grid = False
        self.num_exec = NumExec()
        self.restart_counter = 0
        self.used_ids = {}
        self.tss = False
        self.restart_coverage = True
        self.parameters = []
        self.backup_parameters = []
        self.tree_base_element = ()
        self.tmenu_screen = None
        self.grid_memo_field = False
        self.range_multiplier = None
        self.routine = None
        # Issue (motivation) validation only applies outside smart-test runs.
        if not self.config.smart_test and self.config.issue:
            self.check_mot_exec()
        # If the WebDriver failed to start, abort with a readable message;
        # restart_counter = 3 prevents further restart retries.
        if webdriver_exception:
            message = f"Wasn't possible execute Start() method: {next(iter(webdriver_exception.msg.split(':')), None)}"
            self.restart_counter = 3
            self.log_error(message)
            self.assertTrue(False, message)
    def SetupTSS( self, initial_program = "", enviroment = ""):
        """
        Prepare the Protheus Webapp TSS for the test case, filling the needed information to access the environment.
        .. note::
            This method use the user and password from config.json.
        :param initial_program: The initial program to load.
        :type initial_program: str
        :param environment: The initial environment to load.
        :type environment: str
        Usage:
        >>> # Calling the method:
        >>> oHelper.SetupTSS("TSSMANAGER", "SPED")
        """
        try:
            logger().info("Starting Setup TSS")
            self.tss = True
            self.service_process_bat_file()
            self.config.initial_program = initial_program
            # The environment from config.json takes precedence over the argument.
            enviroment = self.config.environment if self.config.environment else enviroment
            # TSS screens render directly in <body>, not in a modal dialog.
            self.containers_selectors["SetButton"] = "body"
            self.containers_selectors["GetCurrentContainer"] = ".tmodaldialog, body"
            if not self.config.skip_environment and not self.config.coverage:
                self.program_screen(initial_program, enviroment)
            if not self.log.program:
                self.log.program = self.get_program_name()
            if self.config.coverage:
                self.open_url_coverage(url=self.config.url, initial_program=initial_program, environment=self.config.environment)
            self.user_screen_tss()
            self.set_log_info_tss()
            # Register the start of the execution with the numbering service.
            if self.config.num_exec:
                if not self.num_exec.post_exec(self.config.url_set_start_exec, 'ErrorSetIniExec'):
                    self.restart_counter = 3
                    self.log_error(f"WARNING: Couldn't possible send num_exec to server please check log.")
        except ValueError as e:
            self.log_error(str(e))
        except Exception as e:
            self.log_error(str(e))
    def user_screen_tss(self):
        """
        [Internal]
        Fills the user login screen of Protheus with the user and password located on config.json.
        Usage:
        >>> # Calling the method
        >>> self.user_screen()
        """
        logger().info("Fill user Screen")
        # Wait until the login form's user input is present before typing.
        self.wait_element(term="[name='cUser']", scrap_type=enum.ScrapType.CSS_SELECTOR, main_container="body")
        self.SetValue('cUser', self.config.user, name_attr = True)
        self.SetValue('cPass', self.config.password, name_attr = True)
        self.SetButton("Entrar")
    def Setup(self, initial_program, date='', group='99', branch='01', module='', save_input=True):
        """
        Prepare the Protheus Webapp for the test case, filling the needed information to access the environment.
        :param initial_program: The initial program to load.
        :type initial_program: str
        :param date: The date to fill on the environment screen. - **Default:** "" (empty string)
        :type date: str
        :param group: The group to fill on the environment screen. - **Default:** "99"
        :type group: str
        :param branch: The branch to fill on the environment screen. - **Default:** "01"
        :type branch: str
        :param module: The module to fill on the environment screen. - **Default:** "" (empty string)
        :type module: str
        :param save_input: Boolean if all input info should be saved for later usage. Leave this flag 'True' if you are not sure. **Default:** True
        :type save_input: bool
        Usage:
        >>> # Calling the method:
        >>> oHelper.Setup("SIGAFAT", "18/08/2018", "T1", "D MG 01 ")
        """
        if self.config.smart_test:
            logger().info(f"***System Info*** in Setup():")
            system_info()
        try:
            self.service_process_bat_file()
            if not initial_program:
                self.log_error("Couldn't find The initial program")
            if self.config.smart_erp:
                self.wait_smart_erp_environment()
            if not self.log.program:
                self.log.program = self.get_program_name()
            if save_input:
                # Persist the inputs so Restart()/retries can replay the setup.
                self.config.initial_program = initial_program
                # Normalize the date into DD/MM/YYYY regardless of separator.
                self.config.date = re.sub('([\d]{2}).?([\d]{2}).?([\d]{4})', r'\1/\2/\3', date)
                self.config.group = group
                self.config.branch = branch
                self.config.module = module
            if self.config.coverage:
                self.open_url_coverage(url=self.config.url, initial_program=initial_program, environment=self.config.environment)
            if not self.config.valid_language:
                self.config.language = self.get_language()
                self.language = LanguagePack(self.config.language)
            if not self.config.skip_environment and not self.config.coverage:
                self.program_screen(initial_program=initial_program, coverage=False)
            self.log.webapp_version = self.driver.execute_script("return app.VERSION")
            # SIGACFG uses an admin-specific login screen.
            self.user_screen(True) if initial_program.lower() == "sigacfg" else self.user_screen()
            endtime = time.time() + self.config.time_out
            if not self.config.poui_login:
                # Keep handling password-change prompts until the environment
                # (database) screen appears or the time-out elapses.
                while(time.time() < endtime and (not self.element_exists(term=self.language.database, scrap_type=enum.ScrapType.MIXED, main_container=".twindow", optional_term=".tsay"))):
                    self.update_password()
            self.environment_screen()
            endtime = time.time() + self.config.time_out
            # Dismiss blocking dialogs until the main menu is rendered.
            while(time.time() < endtime and (not self.element_exists(term=".tmenu", scrap_type=enum.ScrapType.CSS_SELECTOR, main_container="body"))):
                self.close_warning_screen()
                self.close_coin_screen()
                self.close_modal()
            if save_input:
                self.set_log_info()
            self.log.country = self.config.country
            self.log.execution_id = self.config.execution_id
            self.log.issue = self.config.issue
        except ValueError as error:
            self.log_error(error)
        except Exception as e:
            self.log_error(str(e))
        # Register the start of the execution with the numbering service.
        if self.config.num_exec:
            if not self.num_exec.post_exec(self.config.url_set_start_exec, 'ErrorSetIniExec'):
                self.restart_counter = 3
                self.log_error(f"WARNING: Couldn't possible send num_exec to server please check log.")
        # In coverage runs, restart once after the first Setup so the
        # coverage instrumentation starts from a clean session.
        if self.config.smart_test and self.config.coverage and self.search_stack("setUpClass") and self.restart_coverage:
            self.restart()
            self.restart_coverage = False
    def service_process_bat_file(self):
        """
        [Internal]
        This method creates a batfile in the root path to kill the process and its children.
        """
        # Only smart-test runs need the cleanup script; it targets the
        # WebDriver service PID and its whole process tree (/T).
        if self.config.smart_test:
            with open("firefox_task_kill.bat", "w", ) as firefox_task_kill:
                firefox_task_kill.write(f"taskkill /f /PID {self.driver.service.process.pid} /T")
def program_screen(self, initial_program="", environment="", coverage=False):
    """
    [Internal]
    Fills the first screen of Protheus with the first program to run and the environment to connect.
    :param initial_program: The initial program to load
    :type initial_program: str
    :param environment: The environment to connect
    :type environment: str
    :param coverage: Whether to open the coverage URL instead of filling the form. - **Default:** False
    :type coverage: bool
    Usage:
    >>> # Calling the method
    >>> self.program_screen("SIGAADV", "MYENVIRONMENT")
    """
    # NOTE(review): the `environment` parameter is never read below;
    # self.config.environment is used instead — confirm whether that is intended.
    if coverage:
        # Coverage mode bypasses the form: program/environment are passed in the URL.
        self.open_url_coverage(url=self.config.url, initial_program=initial_program, environment=self.config.environment)
    else:
        try_counter = 0
        self.wait_element(term='#inputStartProg', scrap_type=enum.ScrapType.CSS_SELECTOR, main_container="body")
        self.wait_element(term='#inputEnv', scrap_type=enum.ScrapType.CSS_SELECTOR, main_container="body")
        soup = self.get_current_DOM()
        logger().info("Filling Initial Program")
        start_prog_element = next(iter(soup.select("#inputStartProg")), None)
        if start_prog_element is None:
            self.restart_counter += 1
            message = "Couldn't find Initial Program input element."
            self.log_error(message)
            raise ValueError(message)
        start_prog = lambda: self.soup_to_selenium(start_prog_element)
        start_prog_value = self.get_web_value(start_prog())
        endtime = time.time() + self.config.time_out
        # Retry typing until the field holds the expected value or the timeout expires.
        while (time.time() < endtime and (start_prog_value.strip() != initial_program.strip())):
            # Alternate between the input itself and its parent element between attempts.
            if try_counter == 0:
                start_prog = lambda: self.soup_to_selenium(start_prog_element)
            else:
                start_prog = lambda: self.soup_to_selenium(start_prog_element.parent)
            self.set_element_focus(start_prog())
            # Ctrl+Home then Ctrl+Shift+End selects the existing content so send_keys overwrites it.
            ActionChains(self.driver).key_down(Keys.CONTROL).send_keys(Keys.HOME).key_up(Keys.CONTROL).perform()
            ActionChains(self.driver).key_down(Keys.CONTROL).key_down(Keys.SHIFT).send_keys(
                Keys.END).key_up(Keys.CONTROL).key_up(Keys.SHIFT).perform()
            self.send_keys(start_prog(), initial_program)
            start_prog_value = self.get_web_value(start_prog())
            # Toggles try_counter between 0 and 1.
            try_counter += 1 if(try_counter < 1) else -1
        if (start_prog_value.strip() != initial_program.strip()):
            self.restart_counter += 1
            message = "Couldn't fill Program input element."
            self.log_error(message)
            raise ValueError(message)
        logger().info("Filling Environment")
        env_element = next(iter(soup.select("#inputEnv")), None)
        if env_element is None:
            self.restart_counter += 1
            message = "Couldn't find Environment input element."
            self.log_error(message)
            raise ValueError(message)
        env = lambda: self.soup_to_selenium(env_element)
        env_value = self.get_web_value(env())
        endtime = time.time() + self.config.time_out
        try_counter = 0
        # Same retry pattern as above, now for the environment field.
        while (time.time() < endtime and (env_value.strip() != self.config.environment.strip())):
            if try_counter == 0:
                env = lambda: self.soup_to_selenium(env_element)
            else:
                env = lambda: self.soup_to_selenium(env_element.parent)
            self.set_element_focus(env())
            ActionChains(self.driver).key_down(Keys.CONTROL).send_keys(Keys.HOME).key_up(Keys.CONTROL).perform()
            ActionChains(self.driver).key_down(Keys.CONTROL).key_down(Keys.SHIFT).send_keys(
                Keys.END).key_up(Keys.CONTROL).key_up(Keys.SHIFT).perform()
            self.send_keys(env(), self.config.environment)
            env_value = self.get_web_value(env())
            try_counter += 1 if(try_counter < 1) else -1
        if (env_value.strip() != self.config.environment.strip()):
            self.restart_counter += 1
            message = "Couldn't fill Environment input element."
            self.log_error(message)
            raise ValueError(message)
        button = self.driver.find_element(By.CSS_SELECTOR, ".button-ok")
        self.click(button)
def user_screen(self, admin_user = False):
    """
    [Internal]
    Fills the user login screen of Protheus with the user and password located on config.json.
    :param admin_user: Whether to log in with the admin/config credentials. - **Default:** False
    :type admin_user: bool
    Usage:
    >>> # Calling the method
    >>> self.user_screen()
    """
    # Admin login uses the *_cfg credentials when they exist, otherwise the regular ones.
    user_text = self.config.user_cfg if admin_user and self.config.user_cfg else self.config.user
    password_text = self.config.password_cfg if admin_user and self.config.password_cfg else self.config.password
    if self.config.smart_test and admin_user and not self.config.user_cfg :
        user_text = "admin"
        password_text = "1234"
    if self.config.poui_login:
        self.twebview_context = True
        if not self.wait_element_timeout(term=".po-page-login-info-field .po-input",
            scrap_type=enum.ScrapType.CSS_SELECTOR, timeout=self.config.time_out * 3,main_container='body'):
            self.reload_user_screen()
    elif not self.wait_element_timeout(term="[name='cGetUser'] > input",
        scrap_type=enum.ScrapType.CSS_SELECTOR, timeout = self.config.time_out * 3 , main_container='body'):
        self.reload_user_screen()
    self.set_multilanguage()
    try_counter = 0
    soup = self.get_current_DOM()
    logger().info("Filling User")
    try:
        if self.config.poui_login:
            user_element = next(iter(soup.select(".po-page-login-info-field .po-input")), None)
        else:
            user_element = next(iter(soup.select("[name='cGetUser'] > input")), None)
        if user_element is None:
            self.restart_counter += 1
            message = "Couldn't find User input element."
            self.log_error(message)
            raise ValueError(message)
        user = lambda: self.soup_to_selenium(user_element)
        user_value = self.get_web_value(user())
    except AttributeError as e:
        self.log_error(str(e))
        raise AttributeError(e)
    endtime = time.time() + self.config.time_out
    # Retry typing the user until the field holds the expected value or the timeout expires.
    while (time.time() < endtime and (user_value.strip() != user_text.strip())):
        # Alternate between the input and its parent element between attempts.
        if try_counter == 0:
            user = lambda: self.soup_to_selenium(user_element)
        else:
            user = lambda: self.soup_to_selenium(user_element.parent)
        self.set_element_focus(user())
        self.wait_until_to(expected_condition="element_to_be_clickable", element = user_element, locator = By.XPATH, timeout=True)
        self.double_click(user())
        self.send_keys(user(), user_text)
        self.send_keys(user(), Keys.ENTER)
        user_value = self.get_web_value(user())
        # Toggles try_counter between 0 and 1.
        try_counter += 1 if(try_counter < 1) else -1
    if (user_value.strip() != user_text.strip()):
        self.restart_counter += 1
        message = "Couldn't fill User input element."
        self.log_error(message)
        raise ValueError(message)
    logger().info("Filling Password")
    if self.config.poui_login:
        password_element = next(iter(soup.select(".po-input-icon-right")), None)
    else:
        password_element = next(iter(soup.select("[name='cGetPsw'] > input")), None)
    if password_element is None:
        self.restart_counter += 1
        # Fixed message: this branch refers to the Password element, not the User one.
        message = "Couldn't find Password input element."
        self.log_error(message)
        raise ValueError(message)
    password = lambda: self.soup_to_selenium(password_element)
    password_value = self.get_web_value(password())
    endtime = time.time() + self.config.time_out
    try_counter = 0
    # NOTE(review): the guard checks self.config.password rather than password_text —
    # confirm this is intended for the admin_user flow.
    while (time.time() < endtime and not password_value.strip() and self.config.password != ''):
        if try_counter == 0:
            password = lambda: self.soup_to_selenium(password_element)
        else:
            password = lambda: self.soup_to_selenium(password_element.parent)
        self.set_element_focus(password())
        self.wait_until_to( expected_condition="element_to_be_clickable", element = password_element, locator = By.XPATH, timeout=True)
        self.click(password())
        self.send_keys(password(), Keys.HOME)
        self.send_keys(password(), password_text)
        # POUI login moves focus with TAB; the classic webapp submits with ENTER.
        if not self.config.poui_login:
            self.send_keys(password(), Keys.ENTER)
        else:
            self.send_keys(password(), Keys.TAB)
        password_value = self.get_web_value(password())
        self.wait_blocker()
        try_counter += 1 if(try_counter < 1) else -1
    if not password_value.strip() and self.config.password != '':
        self.restart_counter += 1
        # Fixed message: this branch refers to the Password element, not the User one.
        message = "Couldn't fill Password input element."
        self.log_error(message)
        raise ValueError(message)
    button_element = next(iter(list(filter(lambda x: self.language.enter in x.text, soup.select("button")))), None)
    if button_element is None:
        self.restart_counter += 1
        message = "Couldn't find Enter button."
        self.log_error(message)
        raise ValueError(message)
    button = lambda: self.driver.find_element_by_xpath(xpath_soup(button_element))
    self.click(button())
def reload_user_screen(self):
    """
    [Internal]
    Refreshes the page and retries loading the user login screen.
    """
    cfg = self.config
    self.driver_refresh()
    if cfg.coverage:
        coverage_url = f"{cfg.url}/?StartProg=CASIGAADV&A={cfg.initial_program}&Env={cfg.environment}"
        self.driver.get(coverage_url)
    elif not cfg.skip_environment:
        self.program_screen(cfg.initial_program)
    self.wait_element_timeout(
        term="[name='cGetUser'] > input",
        scrap_type=enum.ScrapType.CSS_SELECTOR,
        timeout=cfg.time_out,
        main_container='body')
def environment_screen(self, change_env=False):
    """
    [Internal]
    Fills the environment screen of Protheus with the values passed on the Setup method.
    Used to fill the fields triggered by the ChangeEnvironment method as well.
    :param change_env: Boolean if the method is being called by ChangeEnvironment. - **Default:** False
    :type change_env: bool
    Usage:
    >>> # Calling the method
    >>> self.environment_screen()
    """
    # Default the base date to today when none was configured.
    if not self.config.date:
        self.config.date = datetime.today().strftime('%d/%m/%Y')
    # ChangeEnvironment confirms inside the main body; Setup uses the login window.
    if change_env:
        label = self.language.confirm
        container = "body"
    else:
        label = self.language.enter
        container = ".twindow"
    if self.config.poui_login:
        self.wait_element(term=".po-datepicker", main_container='body', scrap_type=enum.ScrapType.CSS_SELECTOR)
    else:
        self.wait_element(self.language.database, main_container=container)
    logger().info("Filling Date")
    if self.config.poui_login:
        base_dates = self.web_scrap(term=".po-datepicker", main_container='body', scrap_type=enum.ScrapType.CSS_SELECTOR)
    else:
        base_dates = self.web_scrap(term="[name='dDataBase'] input, [name='__dInfoData'] input", scrap_type=enum.ScrapType.CSS_SELECTOR, label=True, main_container=container)
    # If more than one candidate matched, take the last one.
    if len(base_dates) > 1:
        base_date = base_dates.pop()
    else:
        base_date = next(iter(base_dates), None)
    if base_date is None:
        self.restart_counter += 1
        message = "Couldn't find Date input element."
        self.log_error(message)
        raise ValueError(message)
    date = lambda: self.soup_to_selenium(base_date)
    base_date_value = ''
    endtime = time.time() + self.config.time_out
    # Retry typing until the field holds the configured date or the timeout expires.
    while (time.time() < endtime and (base_date_value.strip() != self.config.date.strip())):
        self.double_click(date())
        # Ctrl+Home then Ctrl+Shift+End selects the current content so it gets overwritten.
        ActionChains(self.driver).key_down(Keys.CONTROL).send_keys(Keys.HOME).key_up(Keys.CONTROL).perform()
        ActionChains(self.driver).key_down(Keys.CONTROL).key_down(Keys.SHIFT).send_keys(
            Keys.END).key_up(Keys.CONTROL).key_up(Keys.SHIFT).perform()
        self.send_keys(date(), self.config.date)
        base_date_value = self.get_web_value(date())
        # POUI fields only commit the value after focus leaves the input.
        if self.config.poui_login:
            ActionChains(self.driver).send_keys(Keys.TAB).perform()
    logger().info("Filling Group")
    if self.config.poui_login:
        # POUI: find the label text, walk up to the lookup component, then grab its input.
        group_elements = self.web_scrap(term=self.language.group, main_container='body',scrap_type=enum.ScrapType.TEXT)
        group_element = next(iter(group_elements))
        group_element = group_element.find_parent('pro-company-lookup')
        group_element = next(iter(group_element.select('input')), None)
    else:
        group_elements = self.web_scrap(term="[name='cGroup'] input, [name='__cGroup'] input", scrap_type=enum.ScrapType.CSS_SELECTOR, label=True, main_container=container)
        if len(group_elements) > 1:
            group_element = group_elements.pop()
        else:
            group_element = next(iter(group_elements), None)
    if group_element is None:
        self.restart_counter += 1
        message = "Couldn't find Group input element."
        self.log_error(message)
        raise ValueError(message)
    group = lambda: self.soup_to_selenium(group_element)
    group_value = ''
    endtime = time.time() + self.config.time_out
    # Same retry pattern as the date field.
    while (time.time() < endtime and (group_value.strip() != self.config.group.strip())):
        self.double_click(group())
        ActionChains(self.driver).key_down(Keys.CONTROL).send_keys(Keys.HOME).key_up(Keys.CONTROL).perform()
        ActionChains(self.driver).key_down(Keys.CONTROL).key_down(Keys.SHIFT).send_keys(
            Keys.END).key_up(Keys.CONTROL).key_up(Keys.SHIFT).perform()
        self.send_keys(group(), self.config.group)
        group_value = self.get_web_value(group())
        if self.config.poui_login:
            ActionChains(self.driver).send_keys(Keys.TAB).perform()
    logger().info("Filling Branch")
    if self.config.poui_login:
        branch_elements = self.web_scrap(term=self.language.branch, main_container='body',scrap_type=enum.ScrapType.TEXT)
        branch_element = next(iter(branch_elements))
        branch_element = branch_element.find_parent('pro-branch-lookup')
        branch_element = next(iter(branch_element.select('input')), None)
    else:
        branch_elements = self.web_scrap(term="[name='cFil'] input, [name='__cFil'] input", scrap_type=enum.ScrapType.CSS_SELECTOR, label=True, main_container=container)
        if len(branch_elements) > 1:
            branch_element = branch_elements.pop()
        else:
            branch_element = next(iter(branch_elements), None)
    if branch_element is None:
        self.restart_counter += 1
        message = "Couldn't find Branch input element."
        self.log_error(message)
        raise ValueError(message)
    branch = lambda: self.soup_to_selenium(branch_element)
    branch_value = ''
    endtime = time.time() + self.config.time_out
    while (time.time() < endtime and (branch_value.strip() != self.config.branch.strip())):
        self.double_click(branch())
        ActionChains(self.driver).key_down(Keys.CONTROL).send_keys(Keys.HOME).key_up(Keys.CONTROL).perform()
        ActionChains(self.driver).key_down(Keys.CONTROL).key_down(Keys.SHIFT).send_keys(
            Keys.END).key_up(Keys.CONTROL).key_up(Keys.SHIFT).perform()
        self.send_keys(branch(), self.config.branch)
        branch_value = self.get_web_value(branch())
        if self.config.poui_login:
            ActionChains(self.driver).send_keys(Keys.TAB).perform()
    logger().info("Filling Environment")
    if self.config.poui_login:
        environment_elements = self.web_scrap(term=self.language.environment, main_container='body',scrap_type=enum.ScrapType.TEXT)
        environment_element = next(iter(environment_elements))
        environment_element = environment_element.find_parent('pro-system-module-lookup')
        environment_element = next(iter(environment_element.select('input')), None)
    else:
        environment_elements = self.web_scrap(term="[name='cAmb'] input", scrap_type=enum.ScrapType.CSS_SELECTOR, label=True, main_container=container)
        if len(environment_elements) > 1:
            environment_element = environment_elements.pop()
        else:
            environment_element = next(iter(environment_elements), None)
    if environment_element is None:
        self.restart_counter += 1
        message = "Couldn't find Module input element."
        self.log_error(message)
        raise ValueError(message)
    env = lambda: self.soup_to_selenium(environment_element)
    # The module field may be read-only; the classic webapp marks it with a
    # "disabled" CSS class on the parent in addition to the input state.
    if self.config.poui_login:
        enable = env().is_enabled()
    else:
        enable = ("disabled" not in environment_element.parent.attrs["class"] and env().is_enabled())
    if enable:
        env_value = ''
        endtime = time.time() + self.config.time_out
        while (time.time() < endtime and env_value.strip() != self.config.module.strip()):
            self.double_click(env())
            ActionChains(self.driver).key_down(Keys.CONTROL).send_keys(Keys.HOME).key_up(Keys.CONTROL).perform()
            ActionChains(self.driver).key_down(Keys.CONTROL).key_down(Keys.SHIFT).send_keys(
                Keys.END).key_up(Keys.CONTROL).key_up(Keys.SHIFT).perform()
            self.send_keys(env(), self.config.module)
            env_value = self.get_web_value(env())
            if self.config.poui_login:
                ActionChains(self.driver).send_keys(Keys.TAB).perform()
    time.sleep(1)
    self.close_warning_screen()
    buttons = self.filter_displayed_elements(self.web_scrap(label, scrap_type=enum.ScrapType.MIXED, optional_term="button", main_container="body"), True)
    button_element = next(iter(buttons), None) if buttons else None
    if button_element and hasattr(button_element, "name") and hasattr(button_element, "parent"):
        button = lambda: self.driver.find_element_by_xpath(xpath_soup(button_element))
        self.click(button())
    elif not change_env:
        # Missing confirm button is only fatal during Setup, not ChangeEnvironment.
        self.restart_counter += 1
        message = f"Couldn't find {label} button."
        self.log_error(message)
        raise ValueError(message)
    if not self.config.poui_login:
        # Wait for the login window to disappear before proceeding.
        self.wait_element(term=self.language.database, scrap_type=enum.ScrapType.MIXED, presence=False, optional_term="input", main_container=container)
    else:
        self.driver.switch_to.default_content()
        # POUI-specific login flow is done after this screen.
        self.config.poui_login = False
def ChangeEnvironment(self, date="", group="", branch="", module=""):
    """
    Clicks on the change environment area of Protheus Webapp and
    fills the environment screen.
    :param date: The date to fill on the environment screen. - **Default:** "" (empty string)
    :type date: str
    :param group: The group to fill on the environment screen. - **Default:** "" (empty string)
    :type group: str
    :param branch: The branch to fill on the environment screen. - **Default:** "" (empty string)
    :type branch: str
    :param module: The module to fill on the environment screen. - **Default:** "" (empty string)
    :type module: str
    Usage:
    >>> # Calling the method:
    >>> oHelper.ChangeEnvironment(date="13/11/2018", group="T1", branch="D MG 01 ")
    """
    # Only overwrite configured values when an explicit argument was passed.
    if date:
        self.config.date = date
    if group:
        self.config.group = group
    if branch:
        self.config.branch = branch
    if module:
        self.config.module = module
    element = self.change_environment_element_home_screen()
    if element:
        self.click(self.driver.find_element_by_xpath(xpath_soup(element)))
        self.environment_screen(True)
    else:
        # Fixed typo in log message ("Envirioment" -> "Environment").
        self.log_error("Change Environment method did not find the element to perform the click or the element was not visible on the screen.")
    self.wait_blocker()
    self.close_warning_screen()
    self.close_coin_screen()
def change_environment_element_home_screen(self):
    """
    [Internal]
    Waits for the element used to perform ChangeEnvironment and returns it as a soup element.
    :return: The soup element to be clicked, or False when none was found within the timeout.
    Usage:
    >>> # Calling the method:
    >>> self.change_environment_element_home_screen()
    """
    endtime = time.time() + self.config.time_out
    while time.time() < endtime:
        # Preferred path: a button labeled with the localized "change environment" text.
        if self.wait_element_timeout(term=self.language.change_environment, scrap_type=enum.ScrapType.MIXED, timeout = 1, optional_term="button", main_container="body"):
            return next(iter(self.web_scrap(term=self.language.change_environment, scrap_type=enum.ScrapType.MIXED, optional_term="button", main_container="body")), None)
        # Fallback: a displayed panel button whose text contains 'TOTVS'.
        elif self.wait_element_timeout(term=".tpanel > .tpanel > .tbutton", scrap_type=enum.ScrapType.CSS_SELECTOR, timeout = 1, main_container="body"):
            tbuttons = self.filter_displayed_elements(self.web_scrap(term=".tpanel > .tpanel > .tbutton", scrap_type=enum.ScrapType.CSS_SELECTOR, main_container="body"), True)
            element = next(iter(list(filter(lambda x: 'TOTVS' in x.text, tbuttons))), None)
            if element:
                return element
    return False
def ChangeUser(self, user, password, initial_program = "", date='', group='', branch=''):
    """
    Change the user then init protheus on home page.
    :param initial_program: The initial program to load. - **Default:** "" (previous initial_program)
    :type initial_program: str
    :param date: The date to fill on the environment screen. - **Default:** "" (previous date)
    :type date: str
    :param group: The group to fill on the environment screen. - **Default:** "previous date group"
    :type group: str
    :param branch: The branch to fill on the environment screen. - **Default:** "previous branch"
    :type branch: str
    Usage:
    >>> # Calling the method:
    >>> oHelper.ChangeUser("userTest", "a", "SIGAFAT", "18/08/2018", "T1", "D MG 01 ")
    >>> #------------------------------------------------------------------------
    >>> # Calling the method:
    >>> oHelper.ChangeUser(user="user08", password="8" )
    >>> #------------------------------------------------------------------------
    """
    if not user and not password:
        self.log_error("You must enter a user and a password to use ChangeUser!")
        return
    # Bug fix: the previous conditionals were inverted
    # ("x = self.config.x if not self.config.x else x"), which discarded the
    # caller's argument whenever the config value was empty. The documented
    # behavior is: use the argument when given, otherwise keep the previous
    # (config) value.
    initial_program = initial_program if initial_program else self.config.initial_program
    date = date if date else self.config.date
    group = group if group else self.config.group
    branch = branch if branch else self.config.branch
    self.config.user = user
    self.config.password = password
    self.driver.refresh()
    logger().info(f"Change to the user: {user}")
    self.Setup(initial_program, date, group, branch)
def close_modal(self):
    """
    [Internal]
    Closes the modal dialog shown on the opening screen, if one is present.
    Usage:
    >>> # Calling the method:
    >>> self.close_modal()
    """
    dom = self.get_current_DOM()
    open_modals = self.zindex_sort(dom.select(".tmodaldialog"), True)
    # Short-circuit: only probe for the browse button when a modal exists.
    if not (open_modals and self.element_exists(term=".tmodaldialog .tbrowsebutton",
            scrap_type=enum.ScrapType.CSS_SELECTOR, main_container="body", check_error = False)):
        return
    browse_buttons = open_modals[0].select(".tbrowsebutton")
    if not browse_buttons:
        return
    close_button = next((btn for btn in browse_buttons if btn.text == self.language.close), None)
    time.sleep(0.5)
    if close_button:
        try:
            self.wait_until_to(expected_condition="element_to_be_clickable",
                element=close_button, locator=By.XPATH)
            self.click(self.driver.find_element_by_xpath(xpath_soup(close_button)))
        except:
            # Best effort: the modal may already be gone by the time we click.
            pass
def close_coin_screen(self):
    """
    [Internal]
    Closes the coin screen by confirming it, when it is the dialog on top.
    Usage:
    >>> # Calling the method:
    >>> self.close_coin_screen()
    """
    dom = self.get_current_DOM()
    open_modals = self.zindex_sort(dom.select(".tmodaldialog"), True)
    if not open_modals:
        return
    coin_screen_found = self.element_exists(
        term=self.language.coins,
        scrap_type=enum.ScrapType.MIXED,
        optional_term=".tmodaldialog > .tpanel > .tsay",
        main_container="body",
        check_error = False)
    if coin_screen_found:
        self.SetButton(self.language.confirm)
def close_coin_screen_after_routine(self):
    """
    [internal]
    This method is responsible for closing the "coin screen" that opens after searching for the routine
    """
    endtime = time.time() + self.config.time_out
    # Wait for the workspace to render before polling for the dialog.
    self.wait_element_timeout(term=".workspace-container", scrap_type=enum.ScrapType.CSS_SELECTOR,
        timeout = self.config.time_out, main_container="body", check_error = False)
    tmodaldialog_list = []
    # Poll until any modal dialog shows up or the timeout expires.
    while(time.time() < endtime and not tmodaldialog_list):
        try:
            soup = self.get_current_DOM()
            tmodaldialog_list = soup.select('.tmodaldialog')
            self.wait_element_timeout(term=self.language.coins, scrap_type=enum.ScrapType.MIXED,
                optional_term=".tsay", timeout=10, main_container = "body", check_error = False)
            tmodal_coin_screen = next(iter(self.web_scrap(term=self.language.coins, scrap_type=enum.ScrapType.MIXED,
                optional_term=".tmodaldialog > .tpanel > .tsay", main_container="body", check_error = False, check_help = False)), None)
            # NOTE(review): membership is tested on the label element but the removal
            # targets its grandparent — confirm both refer to the same dialog node.
            if tmodal_coin_screen and tmodal_coin_screen in tmodaldialog_list:
                tmodaldialog_list.remove(tmodal_coin_screen.parent.parent)
            self.close_coin_screen()
        except Exception as e:
            logger().exception(str(e))
def close_warning_screen(self):
    """
    [Internal]
    Closes the warning screen via its titlebar X button, when it is on top.
    Usage:
    >>> # Calling the method:
    >>> self.close_warning_screen()
    """
    dom = self.get_current_DOM()
    open_dialogs = self.zindex_sort(dom.select(".ui-dialog"), True)
    if not open_dialogs:
        return
    warning_found = self.element_exists(
        term=self.language.warning,
        scrap_type=enum.ScrapType.MIXED,
        optional_term=".ui-dialog > .ui-dialog-titlebar",
        main_container="body",
        check_error = False)
    if warning_found:
        self.set_button_x()
def close_warning_screen_after_routine(self):
    """
    [internal]
    This method is responsible for closing the "warning screen" that opens after searching for the routine
    """
    endtime = time.time() + self.config.time_out
    # Wait for the workspace to render before polling for the dialog.
    self.wait_element_timeout(term=".workspace-container", scrap_type=enum.ScrapType.CSS_SELECTOR,
        timeout = self.config.time_out, main_container="body", check_error = False)
    uidialog_list = []
    # Poll until any ui-dialog shows up or the timeout expires.
    while(time.time() < endtime and not uidialog_list):
        try:
            soup = self.get_current_DOM()
            uidialog_list = soup.select('.ui-dialog')
            self.wait_element_timeout(term=self.language.warning, scrap_type=enum.ScrapType.MIXED,
                optional_term=".ui-dialog-titlebar", timeout=10, main_container = "body", check_error = False)
            tmodal_warning_screen = next(iter(self.web_scrap(term=self.language.warning, scrap_type=enum.ScrapType.MIXED,
                optional_term=".ui-dialog > .ui-dialog-titlebar", main_container="body", check_error = False, check_help = False)), None)
            # NOTE(review): membership is tested on the titlebar element but the removal
            # targets its grandparent — confirm both refer to the same dialog node.
            if tmodal_warning_screen and tmodal_warning_screen in uidialog_list:
                uidialog_list.remove(tmodal_warning_screen.parent.parent)
            self.close_warning_screen()
        except Exception as e:
            logger().exception(str(e))
def close_resolution_screen(self):
    """
    [Internal]
    Closes the Alert of resolution screen.
    Usage:
    >>> # Calling the method:
    >>> self.close_resolution_screen()
    """
    endtime = time.time() + self.config.time_out
    # NOTE(review): container is captured once and never refreshed inside the loop;
    # the exit condition relies on the alert icon disappearing — confirm intended.
    container = self.get_current_container()
    while (time.time() < endtime and container and self.element_exists(term="img[src*='fwskin_alert_ico.png']", scrap_type=enum.ScrapType.CSS_SELECTOR)):
        self.SetButton(self.language.close)
        time.sleep(1)
    # After dismissing the alert, wait for the login field to reappear.
    self.wait_element_timeout(term="[name='cGetUser']", scrap_type=enum.ScrapType.CSS_SELECTOR, timeout = self.config.time_out, main_container='body')
def set_log_info(self):
    """
    [Internal]
    Fills the log information needed by opening the About page.
    Usage:
    >>> # Calling the method:
    >>> self.set_log_info()
    """
    self.SetLateralMenu(self.language.menu_about, save_input=False)
    self.wait_element(term=".tmodaldialog", scrap_type=enum.ScrapType.CSS_SELECTOR, main_container="body")
    self.wait_until_to(expected_condition = "presence_of_all_elements_located", element = ".tmodaldialog", locator= By.CSS_SELECTOR)
    soup = self.get_current_DOM()
    labels = list(soup.select(".tmodaldialog .tpanel .tsay"))

    def first_label_with_prefix(prefix):
        # Returns the first About-screen label whose text starts with prefix, or None.
        for candidate in labels:
            if candidate.text.startswith(prefix):
                return candidate
        return None

    release_element = first_label_with_prefix("Release")
    database_element = first_label_with_prefix("Top DataBase")
    lib_element = first_label_with_prefix("Versão da lib")
    build_element = first_label_with_prefix("Build")
    if release_element:
        # Values are formatted as "Label: value" on the About screen.
        release = release_element.text.split(":")[1].strip()
        self.log.release = release
        self.log.version = release.split(".")[0]
    if database_element:
        self.log.database = database_element.text.split(":")[1].strip()
    if build_element:
        self.log.build_version = build_element.text.split(":")[1].strip()
    if lib_element:
        self.log.lib_version = lib_element.text.split(":")[1].strip()
    self.SetButton(self.language.close)
def set_log_info_tss(self):
    """
    [Internal]
    Fills the log information (country, execution id, issue and TSS release)
    by reading the TSS About screen.
    :raises ValueError: When the About screen label is not found within the timeout.
    """
    self.log.country = self.config.country
    self.log.execution_id = self.config.execution_id
    self.log.issue = self.config.issue
    label_element = None
    self.SetButton("Sobre")
    soup = self.get_current_DOM()
    endtime = time.time() + self.config.time_out
    # Poll the DOM until the "Versão do TSS:" label appears or the timeout expires.
    while(time.time() < endtime and not label_element):
        soup = self.get_current_DOM()
        label_element = soup.find_all("label", string="Versão do TSS:")
    if not label_element:
        raise ValueError("SetupTss fail about screen not found")
    labels = list(map(lambda x: x.text, soup.select("label")))
    # The version value is the label immediately following "Versão do TSS:".
    label = labels[labels.index("Versão do TSS:")+1]
    # Extract the dotted version number from the label text.
    self.log.release = next(iter(re.findall(r"[\d.]*\d+", label)), None)
    self.SetButton('x')
def get_language(self):
    """
    [Internal]
    Gets the current language of the html.
    :return: The current language of the html.
    :rtype: str
    Usage:
    >>> # Calling the method:
    >>> language = self.get_language()
    """
    html_element = self.driver.find_element(By.CSS_SELECTOR, "html")
    return html_element.get_attribute("lang")
def Program(self, program_name):
    """
    Method that sets the program in the initial menu search field.
    .. note::
        Only used when the Initial Program is the module Ex: SIGAFAT.
    :param program_name: The program name
    :type program_name: str
    Usage:
    >>> # Calling the method:
    >>> oHelper.Program("MATA020")
    """
    self.config.routine = program_name
    self.routine = 'Program'
    # Only the first program of the run is recorded in the log.
    first_program_not_logged = not self.log.program
    if first_program_not_logged:
        self.log.program = program_name
    self.set_program(program_name)
def set_program(self, program):
    """
    [Internal]
    Method that sets the program in the initial menu search field.
    :param program: The program name
    :type program: str
    Usage:
    >>> # Calling the method:
    >>> self.set_program("MATA020")
    """
    try:
        logger().info(f"Setting program: {program}")
        self.wait_element(term="[name=cGet] > input", scrap_type=enum.ScrapType.CSS_SELECTOR, main_container="body")
        # ESC dismisses any overlay that could cover the search field.
        ActionChains(self.driver).key_down(Keys.ESCAPE).perform()
        self.wait_element(term="[name=cGet] > input", scrap_type=enum.ScrapType.CSS_SELECTOR, main_container="body")
        soup = self.get_current_DOM()
        tget = next(iter(soup.select("[name=cGet]")), None)
        tget_input = next(iter(tget.select("input")), None)
        if tget:
            # The magnifier image next to the input triggers the search.
            tget_img = next(iter(tget.select("img")), None)
            if tget_img is None or not self.element_is_displayed(tget_img):
                self.log_error("Couldn't find Program field.")
            s_tget = lambda : self.driver.find_element_by_xpath(xpath_soup(tget_input))
            s_tget_img = lambda : self.driver.find_element_by_xpath(xpath_soup(tget_img))
            self.wait_until_to( expected_condition = "element_to_be_clickable", element = tget_input, locator = By.XPATH )
            self.double_click(s_tget())
            self.set_element_focus(s_tget())
            # Home + Shift+End selects any existing text so typing replaces it.
            self.send_keys(s_tget(), Keys.HOME)
            ActionChains(self.driver).key_down(Keys.SHIFT).send_keys(Keys.END).key_up(Keys.SHIFT).perform()
            self.wait_until_to( expected_condition = "element_to_be_clickable", element = tget_input, locator = By.XPATH )
            self.send_keys(s_tget(), program)
            current_value = self.get_web_value(s_tget()).strip()
            endtime = time.time() + self.config.time_out
            # Retry typing until the field holds the program name or the timeout expires.
            while(time.time() < endtime and current_value != program):
                self.send_keys(s_tget(), Keys.BACK_SPACE)
                self.wait_until_to( expected_condition = "element_to_be_clickable", element = tget_input, locator = By.XPATH, timeout=True)
                self.send_keys(s_tget(), program)
                current_value = self.get_web_value(s_tget()).strip()
            if current_value.strip() != program.strip():
                self.log_error(f"Couldn't fill program input - current value: {current_value} - Program: {program}")
            # Click the magnifier to launch the program.
            self.set_element_focus(s_tget_img())
            self.wait_until_to( expected_condition = "element_to_be_clickable", element = tget_input, locator = By.XPATH )
            self.wait_until_to( expected_condition = "element_to_be_clickable", element = tget_img, locator = By.XPATH )
            self.send_action(self.click, s_tget_img)
            self.wait_element_is_not_displayed(tget_img)
        # SIGAADV shows extra dialogs after opening a routine; dismiss them.
        if self.config.initial_program.lower() == 'sigaadv':
            self.close_warning_screen_after_routine()
            self.close_coin_screen_after_routine()
    except AssertionError as error:
        logger().exception(f"Warning set program raise AssertionError: {str(error)}")
        raise error
    except Exception as e:
        self.log_error(str(e))
def standard_search_field(self, term, name_attr=False,send_key=False):
    """
    [Internal]
    Do the standard query(F3)
    this method
    1.Search the field
    2.Search icon "lookup"
    3.Click()
    :param term: The term that must be searched.
    :type term: str
    :param name_attr: If true searchs element by name.
    :type name_attr: bool
    :param send_key: Try open standard search field send key F3 (no click).
    :type send_key: bool
    Usage:
    >>> # To search using a label name:
    >>> self.standard_search_field(name_label)
    >>> #------------------------------------------------------------------------
    >>> # To search using the name of input:
    >>> self.standard_search_field(field='A1_EST',name_attr=True)
    >>> #------------------------------------------------------------------------
    >>> # To search using the name of input and do action with a key:
    >>> oHelper.F3(field='A1_EST',name_attr=True,send_key=True)
    """
    endtime = self.config.time_out + time.time()
    try:
        # Wait for the target field, either by name attribute or by label text.
        if name_attr:
            self.wait_element(term=f"[name$='{term}']", scrap_type=enum.ScrapType.CSS_SELECTOR)
        else:
            self.wait_element(term)
        # Find the field's parent container, which also holds the lookup icon.
        element = self.get_field(term,name_attr).find_parent()
        if not(element):
            raise Exception("Couldn't find element")
        logger().debug("Field successfully found")
        if(send_key):
            # Open the standard search by sending F3 to the field itself.
            input_field = lambda: self.driver.find_element_by_xpath(xpath_soup(element))
            self.set_element_focus(input_field())
            container = self.get_current_container()
            self.send_keys(input_field(), Keys.F3)
        else:
            # Open the standard search by clicking the lookup icon.
            icon = next(iter(element.select("img[src*=fwskin_icon_lookup], img[src*=btpesq_mdi]")),None)
            icon_s = self.soup_to_selenium(icon)
            container = self.get_current_container()
            self.click(icon_s)
        container_end = self.get_current_container()
        # If no new container opened, fall back to sending F3 directly.
        if (container['id'] == container_end['id']):
            input_field = lambda: self.driver.find_element_by_xpath(xpath_soup(element))
            self.set_element_focus(input_field())
            self.send_keys(input_field(), Keys.F3)
        # Poll until the search dialog (a new container) shows up or the timeout expires.
        while( time.time() < endtime and container['id'] == container_end['id']):
            container_end = self.get_current_container()
            time.sleep(0.01)
        if time.time() > endtime:
            logger().debug("Timeout: new container not found.")
        else:
            logger().debug("Success")
    except Exception as e:
        self.log_error(str(e))
def SearchBrowse(self, term, key=None, identifier=None, index=False, column=None):
"""
Searchs a term on Protheus Webapp.
It will search using the default search key, but if a **key** is provided
it will search using the chosen key.
It will search using the first search box on the screen, but if an **identifier**
is provided, it will search on the chosen search box.
:param term: The term that must be searched.
:type term: str
:param key: The search key to be chosen on the search dropdown. - **Default:** None
:type key: str
:param identifier: The identifier of the search box. If none is provided, it defaults to the first of the screen. - **Default:** None
:type identifier: str
:param index: Whether the key is an index or not. - **Default:** False
:type index: bool
Usage:
>>> # To search using the first search box and default search key:
>>> oHelper.SearchBrowse("D MG 001")
>>> #------------------------------------------------------------------------
>>> # To search using the first search box and a chosen key:
>>> oHelper.SearchBrowse("D MG 001", key="Branch+id")
>>> #------------------------------------------------------------------------
>>> # To search using a chosen search box and the default search key:
>>> oHelper.SearchBrowse("D MG 001", identifier="Products")
>>> #------------------------------------------------------------------------
>>> # To search using a chosen search box and a chosen search key:
>>> oHelper.SearchBrowse("D MG 001", key="Branch+id", identifier="Products")
>>> #------------------------------------------------------------------------
>>> # To search using the first search box and a chosen column:
>>> oHelper.SearchBrowse("D MG 001", column="Branch+id")
>>> #------------------------------------------------------------------------
"""
self.wait_blocker()
logger().info(f"Searching: {term}")
if index and isinstance(key, int):
key -= 1
browse_elements = self.get_search_browse_elements(identifier)
if key:
self.search_browse_key(key, browse_elements, index)
elif column:
self.search_browse_column(column, browse_elements, index)
self.fill_search_browse(term, browse_elements)
    def get_search_browse_elements(self, panel_name=None):
        """
        [Internal]
        Returns a tuple with the search browse elements in this order:
        Key Dropdown, Input, Icon.
        :param panel_name: The identifier of the search box. If none is provided, it defaults to the first of the screen. - **Default:** None
        :type panel_name: str
        :return: Tuple with the Key Dropdown, Input and Icon elements of a search box
        :rtype: Tuple of Beautiful Soup objects.
        Usage:
        >>> # Calling the method:
        >>> search_elements = self.get_search_browse_elements("Products")
        """
        success = False
        container = None
        elements_soup = None
        # Wait for at least one search icon to be present before scraping.
        self.wait_element_timeout(term="[style*='fwskin_seekbar_ico']", scrap_type=enum.ScrapType.CSS_SELECTOR, timeout = self.config.time_out)
        endtime = time.time() + self.config.time_out
        while (time.time() < endtime and not success):
            soup = self.get_current_DOM()
            # When a panel name is given, pick the search box that belongs to it.
            search_index = self.get_panel_name_index(panel_name) if panel_name else 0
            # Topmost modal dialog (highest z-index) owns the active search box.
            containers = self.zindex_sort(soup.select(".tmodaldialog"), reverse=True)
            container = next(iter(containers), None)
            if container:
                elements_soup = container.select("[style*='fwskin_seekbar_ico']")
                if elements_soup:
                    if elements_soup and len(elements_soup) -1 >= search_index:
                        # The browse wrapper div sits two levels above the icon.
                        browse_div = elements_soup[search_index].find_parent().find_parent()
                        success = True
            if not elements_soup:
                self.log_error("Couldn't find search browse.")
            if not container:
                self.log_error("Couldn't find container of element.")
        if not success:
            self.log_error("Get search browse elements couldn't find browser div")
        browse_tget = browse_div.select(".tget")[0]
        browse_key = browse_div.select(".tbutton button")[0]
        browse_input = browse_tget.select("input")[0]
        browse_icon = browse_tget.select("img")[0]
        return (browse_key, browse_input, browse_icon)
    def search_browse_key(self, search_key, search_elements, index=False):
        """
        [Internal]
        Chooses the search key to be used during the search.
        :param search_key: The search key to be chosen on the search dropdown
        :type search_key: str
        :param search_elements: Tuple of Search elements
        :type search_elements: Tuple of Beautiful Soup objects
        :param index: Whether the key is an index or not.
        :type index: bool
        Usage:
        >>> #Preparing the tuple:
        >>> search_elements = self.get_search_browse_elements("Products")
        >>> # Calling the method:
        >>> self.search_browse_key("Branch+Id", search_elements)
        """
        success = False
        if index and not isinstance(search_key, int):
            self.log_error("If index parameter is True, key must be a number!")
        sel_browse_key = lambda: self.driver.find_element_by_xpath(xpath_soup(search_elements[0]))
        self.wait_element(term="[style*='fwskin_seekbar_ico']", scrap_type=enum.ScrapType.CSS_SELECTOR)
        self.wait_until_to( expected_condition = "element_to_be_clickable", element = search_elements[0], locator = By.XPATH)
        self.set_element_focus(sel_browse_key())
        self.click(sel_browse_key())
        # NOTE(review): lexicographic version comparison — "4.10.x" would sort
        # below "4.6.4"; confirm this is the intended behavior.
        if self.driver.execute_script("return app.VERSION").split('-')[0] >= "4.6.4":
            # From webapp 4.6.4 the key popup renders outside the app iframe.
            self.driver.switch_to.default_content()
            content = self.driver.page_source
            soup = BeautifulSoup(content,"html.parser")
        else:
            soup = self.get_current_DOM()
        if not index:
            # Normalize the requested key: strip trailing dots and lowercase.
            search_key = re.sub(r"\.+$", '', search_key.strip()).lower()
            tradiobuttonitens = soup.select(".tradiobuttonitem input")
            # Radio labels ending in ".." are truncated on screen; their full
            # text is only available through a tooltip (or the input value).
            tradiobuttonitens_ends_dots = list(filter(lambda x: re.search(r"\.\.$", x.next.text), tradiobuttonitens))
            tradiobuttonitens_not_ends_dots = list(filter(lambda x: not re.search(r"\.\.$", x.next.text), tradiobuttonitens))
            if tradiobuttonitens_not_ends_dots:
                radio = next(iter(list(filter(lambda x: search_key in re.sub(r"\.+$", '', x.next.text.strip()).lower() , tradiobuttonitens_not_ends_dots))), None)
                if radio:
                    self.wait_until_to( expected_condition = "element_to_be_clickable", element = radio, locator = By.XPATH )
                    self.click(self.soup_to_selenium(radio))
                    success = True
            if tradiobuttonitens_ends_dots and not success and self.config.initial_program.lower() != "sigaadv":
                # Truncated labels: click each radio and match by its tooltip.
                for element in tradiobuttonitens_ends_dots:
                    self.wait_until_to( expected_condition = "element_to_be_clickable", element = element, locator = By.XPATH )
                    selenium_input = lambda : self.soup_to_selenium(element)
                    self.click(selenium_input())
                    time.sleep(1)
                    try_get_tooltip = 0
                    while (not success and try_get_tooltip < 3):
                        success = self.check_element_tooltip(element, search_key, contains=True)
                        try_get_tooltip += 1
                    if success:
                        break
                    elif self.driver.execute_script("return app.VERSION").split('-')[0] >= "4.6.4":
                        self.driver.switch_to.default_content()
                        content = self.driver.page_source
                        soup = BeautifulSoup(content,"html.parser")
                    else:
                        pass
            if tradiobuttonitens_ends_dots and not success and self.config.initial_program.lower() == "sigaadv":
                # SIGAADV: no tooltip available; click each radio and detect the
                # key change through the search input's placeholder value.
                for element in tradiobuttonitens_ends_dots:
                    old_value = self.search_browse_key_input_value(search_elements[1])
                    if tradiobuttonitens.index(element) == 0:
                        self.wait_until_to( expected_condition = "element_to_be_clickable", element = tradiobuttonitens_ends_dots[1], locator = By.XPATH )
                        self.click(self.soup_to_selenium(tradiobuttonitens_ends_dots[1]))
                        while(old_value == self.search_browse_key_input_value(search_elements[1])):
                            time.sleep(0.1)
                        old_value = self.search_browse_key_input_value(search_elements[1])
                        if not self.driver.find_elements_by_css_selector(".tradiobuttonitem input"):
                            self.get_current_DOM()
                            self.set_element_focus(sel_browse_key())
                            self.click(sel_browse_key())
                            self.driver.switch_to.default_content()
                    self.wait_until_to( expected_condition = "element_to_be_clickable", element = element, locator = By.XPATH )
                    self.click(self.soup_to_selenium(element))
                    while(old_value == self.search_browse_key_input_value(search_elements[1])):
                        time.sleep(0.1)
                    success = search_key.lower().strip() in self.search_browse_key_input_value(search_elements[1]).strip().lower()
                    if success:
                        break
                else:
                    pass
            if not success:
                self.log_error(f"Couldn't search the key: {search_key} on screen.")
        else:
            # Index-based selection: click the Nth radio button directly.
            tradiobuttonitens = soup.select(".tradiobuttonitem input")
            if len(tradiobuttonitens) < search_key + 1:
                self.log_error("Key index out of range.")
            trb_input = tradiobuttonitens[search_key]
            sel_input = lambda: self.driver.find_element_by_xpath(xpath_soup(trb_input))
            self.wait_until_to( expected_condition = "element_to_be_clickable", element = trb_input, locator = By.XPATH )
            self.click(sel_input())
    def search_browse_column(self, search_column, search_elements, index=False):
        """
        [Internal]
        Chooses the search key to be used during the search.
        :param search_column: The search Column to be chosen on the search dropdown
        :type search_column: str
        :param search_elements: Tuple of Search elements
        :type search_elements: Tuple of Beautiful Soup objects
        :param index: Whether the key is an index or not.
        :type index: bool
        Usage:
        >>> #Preparing the tuple:
        >>> search_elements = self.get_search_browse_elements("Products")
        >>> # Calling the method:
        >>> self.search_browse_key("Filial*", search_elements)
        """
        if index and not isinstance(search_column, int):
            self.log_error("If index parameter is True, column must be a number!")
        sel_browse_column = lambda: self.driver.find_element_by_xpath(xpath_soup(search_elements[0]))
        self.wait_element(term="[style*='fwskin_seekbar_ico']", scrap_type=enum.ScrapType.CSS_SELECTOR)
        self.wait_until_to( expected_condition = "element_to_be_clickable", element = search_elements[0], locator = By.XPATH)
        self.set_element_focus(sel_browse_column())
        self.click(sel_browse_column())
        # NOTE(review): lexicographic version compare — "4.10.x" would sort
        # below "4.6.4"; confirm intended.
        if self.driver.execute_script("return app.VERSION").split('-')[0] >= "4.6.4":
            # From webapp 4.6.4 the popup lives outside the app iframe.
            self.tmenu_out_iframe = True
        self.wait_element_timeout(".tmenupopup.activationOwner", scrap_type=enum.ScrapType.CSS_SELECTOR, timeout=5.0, step=0.1, presence=True, position=0)
        tmenupopup = next(iter(self.web_scrap(".tmenupopup.activationOwner", scrap_type=enum.ScrapType.CSS_SELECTOR, main_container = "body")), None)
        if not tmenupopup:
            if self.driver.execute_script("return app.VERSION").split('-')[0] >= "4.6.4":
                self.tmenu_out_iframe = False
            self.log_error("SearchBrowse - Column: couldn't find the new menupopup")
        # The second anchor of the popup switches it to the column list.
        self.click(self.soup_to_selenium(tmenupopup.select('a')[1]))
        spans = tmenupopup.select("span")
        if ',' in search_column:
            # A comma-separated value selects several columns in sequence.
            search_column_itens = search_column.split(',')
            filtered_column_itens = list(map(lambda x: x.strip(), search_column_itens))
            for item in filtered_column_itens:
                span = next(iter(list(filter(lambda x: x.text.lower().strip() == item.lower(),spans))), None)
                if not span:
                    # Fallback: compare ignoring all internal whitespace.
                    span = next(iter(list(filter(lambda x: x.text.lower().replace(" ","") == search_column.lower().replace(" ","") ,spans))), None)
                self.click(self.soup_to_selenium(span))
        else:
            span = next(iter(list(filter(lambda x: x.text.lower().strip() == search_column.lower().strip() ,spans))), None)
            if not span:
                # Fallback: compare ignoring all internal whitespace.
                span = next(iter(list(filter(lambda x: x.text.lower().replace(" ","") == search_column.lower().replace(" ","") ,spans))), None)
            self.click(self.soup_to_selenium(span))
        if self.driver.execute_script("return app.VERSION").split('-')[0] >= "4.6.4":
            self.tmenu_out_iframe = False
def fill_search_browse(self, term, search_elements):
"""
[Internal]
Fills search input method and presses the search button.
:param term: The term to be searched
:type term: str
:param search_elements: Tuple of Search elements
:type search_elements: Tuple of Beautiful Soup objects
Usage:
>>> #Preparing the tuple:
>>> search_elements = self.get_search_browse_elements("Products")
>>> # Calling the method:
>>> self.fill_search_browse("D MG 01", search_elements)
"""
self.wait_blocker()
endtime = time.time() + self.config.time_out
sel_browse_input = lambda: self.driver.find_element_by_xpath(xpath_soup(search_elements[1]))
sel_browse_icon = lambda: self.driver.find_element_by_xpath(xpath_soup(search_elements[2]))
current_value = self.get_element_value(sel_browse_input())
while (time.time() < endtime and current_value.rstrip() != term.strip()):
try:
self.wait_until_to( expected_condition = "element_to_be_clickable", element = search_elements[2], locator = By.XPATH, timeout=True)
self.click(sel_browse_input())
self.set_element_focus(sel_browse_input())
self.send_keys(sel_browse_input(), Keys.DELETE)
self.wait_until_to( expected_condition = "element_to_be_clickable", element = search_elements[1], locator = By.XPATH, timeout=True)
sel_browse_input().clear()
self.set_element_focus(sel_browse_input())
self.wait_until_to( expected_condition = "element_to_be_clickable", element = search_elements[1], locator = By.XPATH, timeout=True)
sel_browse_input().send_keys(term.strip())
current_value = self.get_element_value(sel_browse_input())
except StaleElementReferenceException:
self.get_search_browse_elements()
except:
pass
if current_value.rstrip() != term.strip():
self.log_error(f"Couldn't search f{search_elements} current value is {current_value.rstrip()}")
self.send_keys(sel_browse_input(), Keys.ENTER)
self.wait_blocker()
self.double_click(sel_browse_icon())
return True
def search_browse_key_input_value(self, browse_input ):
"""
[Internal]
Get the search browse input value
"""
self.get_current_DOM()
input_value = self.soup_to_selenium(browse_input).get_attribute('value')
self.driver.switch_to.default_content()
return input_value
    def wait_blocker(self):
        """
        [Internal]
        Wait blocker disappear

        Polls the DOM (for up to 300 seconds) until no blocking overlay is
        present.
        :return: False as soon as no blocker is found; True only on timeout
            with the blocker still active.
        :rtype: bool
        """
        logger().debug("Waiting blocker to continue...")
        soup = None
        result = True
        endtime = time.time() + 300
        while(time.time() < endtime and result):
            blocker_container = None
            blocker = None
            soup = self.get_current_DOM()
            blocker_container = self.blocker_containers(soup)
            if blocker_container:
                # A blocker is either an explicit ".ajax-blocker" element, or the
                # topmost container carrying the "blocked" CSS class.
                blocker = soup.select('.ajax-blocker') if len(soup.select('.ajax-blocker')) > 0 else \
                'blocked' in blocker_container.attrs['class'] if blocker_container and hasattr(blocker_container, 'attrs') else None
            if blocker:
                result = True
            else:
                return False
        return result
def blocker_containers(self, soup):
"""
Return The container index by z-index and filter if it is displayed
:param soup: soup object
:return: The container index by z-index and filter if it is displayed
"""
try:
containers = self.zindex_sort(soup.select(self.containers_selectors["BlockerContainers"]), True)
if containers:
containers_filtered = list(filter(lambda x: self.element_is_displayed(x), containers))
if containers_filtered:
return next(iter(containers_filtered), None)
else:
return None
else:
return None
except AttributeError as e:
logger().exception(f"Warning: wait_blocker > blocker_containers Exeception (AttributeError)\n {str(e)}")
except Exception as e:
logger().exception(f"Warning: wait_blocker > blocker_containers Exeception {str(e)}")
def get_panel_name_index(self, panel_name):
"""
[Internal]
Gets the index of search box element based on the panel name associated with it.
:param panel_name:
:type panel_name:
:return: The index of the search box starting with 0
:rtype: int
Usage:
>>> # Calling the method:
>>> index = self.get_panel_name_index("Products")
"""
soup = self.get_current_DOM()
panels = soup.select(".tmodaldialog > .tpanelcss > .tpanelcss")
tsays = list(map(lambda x: x.select(".tsay"), panels))
label = next(iter(list(filter(lambda x: x.text.lower() == panel_name.lower(), tsays)), None))
return tsays.index(label)
    def search_element_position(self, field, position=1, input_field=True, direction=None):
        """
        [Internal]
        Locates the input (or label) element nearest to the on-screen label
        whose text matches *field*, using pixel coordinates.
        :param field: Label text used as the anchor.
        :param position: 1-based occurrence of the label to use.
        :param input_field: If False, searches ".tsay" labels instead of inputs.
        :param direction: Optional search direction ('right' or 'down').
        Usage:
        >>> # Calling the method
        >>> self.search_element_position(field)
        """
        endtime = (time.time() + self.config.time_out)
        label = None
        elem = []
        term=".tget, .tcombobox, .tmultiget"
        position-=1
        if not input_field:
            term=".tsay"
        try:
            while( time.time() < endtime and not label ):
                container = self.get_current_container()
                labels = container.select("label")
                labels_displayed = list(filter(lambda x: self.element_is_displayed(x) ,labels))
                # Label text may carry trailing punctuation (":", "?", "*", ...).
                labels_list = list(filter(lambda x: re.search(r"^{}([^a-zA-Z0-9]+)?$".format(re.escape(field)),x.text) ,labels_displayed))
                # Skip labels living inside table headers ("th" ancestors).
                labels_list_filtered = list(filter(lambda x: 'th' not in self.element_name(x.parent.parent) , labels_list))
                if labels_list_filtered and len(labels_list_filtered) -1 >= position:
                    label = labels_list_filtered[position]
            if not label:
                self.log_error(f"Label: '{field}'' wasn't found.")
            self.wait_until_to( expected_condition = "element_to_be_clickable", element = label, locator = By.XPATH )
            container_size = self.get_element_size(container['id'])
            # The safe values add to postion of element
            width_safe, height_safe = self.width_height(container_size)
            label_s = lambda:self.soup_to_selenium(label)
            xy_label = self.driver.execute_script('return arguments[0].getPosition()', label_s())
            list_in_range = self.web_scrap(term=term, scrap_type=enum.ScrapType.CSS_SELECTOR)
            # Keep only visible, non-readonly candidates (readonly+focus allowed).
            list_in_range = list(filter(lambda x: self.element_is_displayed(x) and 'readonly' not in self.soup_to_selenium(x).get_attribute("class") or 'readonly focus' in self.soup_to_selenium(x).get_attribute("class"), list_in_range))
            if not input_field:
                # When matching labels, exclude the anchor label itself.
                list_in_range = list(filter(lambda x: field.strip().lower() != x.text.strip().lower(), list_in_range))
            position_list = list(map(lambda x:(x[0], self.get_position_from_bs_element(x[1])), enumerate(list_in_range)))
            position_list = self.filter_by_direction(xy_label, width_safe, height_safe, position_list, direction)
            distance = self.get_distance_by_direction(xy_label, position_list, direction)
            if distance:
                # Closest candidate wins.
                elem = min(distance, key = lambda x: x[1])
                elem = list_in_range[elem[0]]
            if not elem:
                self.log_error(f"Label '{field}' wasn't found")
            return elem
        except AssertionError as error:
            raise error
        except Exception as error:
            logger().exception(str(error))
            self.log_error(str(error))
def width_height(self, container_size):
if not self.range_multiplier:
width_safe = (container_size['width'] * 0.015)
height_safe = (container_size['height'] * 0.01)
elif self.range_multiplier == 1:
width_safe = (container_size['width'] * 0.03)
height_safe = (container_size['height'] * 0.02)
else:
width_safe = (container_size['width'] * (0.015 * self.range_multiplier))
height_safe = (container_size['height'] * (0.01 * self.range_multiplier))
return (width_safe, height_safe)
def get_position_from_bs_element(self,element):
"""
[Internal]
"""
selenium_element = self.soup_to_selenium(element)
position = self.driver.execute_script('return arguments[0].getPosition()', selenium_element)
return position
def get_distance(self,label_pos,element_pos):
"""
[internal]
"""
return sqrt((pow(element_pos['x'] - label_pos['x'], 2)) + pow(element_pos['y'] - label_pos['y'],2))
def get_element_size(self, id):
"""
Internal
Return Height/Width
"""
script = f'return document.getElementById("{id}").offsetHeight;'
height = self.driver.execute_script(script)
script = f'return document.getElementById("{id}").offsetWidth;'
width = self.driver.execute_script(script)
return {'height': height, 'width':width}
def get_distance_x(self, x_label, x_element):
"""
[Internal]
"""
return (x_element['x'] - x_label['x'])
def get_distance_y(self, y_label, y_element):
"""
[Internal]
"""
return (y_element['y'] - y_label['y'])
def filter_by_direction(self, xy_label, width_safe, height_safe, position_list, direction):
"""
[Internal]
"""
if not direction:
return list(filter(lambda xy_elem: (
xy_elem[1]['y'] + width_safe >= xy_label['y'] and xy_elem[1]['x'] + height_safe >= xy_label['x']),
position_list))
elif direction.lower() == 'right':
return list(filter(
lambda xy_elem: (xy_elem[1]['x'] > xy_label['x']) and (xy_elem[1]['y'] >= xy_label['y'] - height_safe and xy_elem[1]['y'] <= xy_label[
'y'] + height_safe), position_list))
elif direction.lower() == 'down':
return list(filter(
lambda xy_elem: (xy_elem[1]['y'] > xy_label['y']) and (xy_elem[1]['x'] + width_safe >= xy_label['x'] and
xy_elem[1]['x'] - width_safe <= xy_label['x']), position_list))
def get_distance_by_direction(self, xy_label, position_list, direction):
if not direction:
get_distance = self.get_distance
elif direction.lower() == 'right':
get_distance = self.get_distance_x
elif direction.lower() == 'down':
get_distance = self.get_distance_y
return list(map(lambda x: (x[0], get_distance(xy_label, x[1])), position_list))
def SetValue(self, field, value, grid=False, grid_number=1, ignore_case=True, row=None, name_attr=False, position = 1, check_value=None, grid_memo_field=False, range_multiplier=None, direction=None, duplicate_fields=[]):
"""
Sets value of an input element.
.. note::
Attention on the grid use the field mask.
:param field: The field name or label to receive the value
:type field: str
:param value: The value to be inputted on the element.
:type value: str or bool
:param grid: Boolean if this is a grid field or not. - **Default:** False
:type grid: bool
:param grid_number: Grid number of which grid should be inputted when there are multiple grids on the same screen. - **Default:** 1
:type grid_number: int
:param ignore_case: Boolean if case should be ignored or not. - **Default:** True
:type ignore_case: bool
:param check_value: Boolean ignore input check - **Default:** True
:type name_attr: bool
:param row: Row number that will be filled
:type row: int
:param name_attr: Boolean if search by Name attribute must be forced. - **Default:** False
:type name_attr: bool
:param grid_memo_field: Boolean if this is a memo grid field. - **Default:** False
:type grid_memo_field: bool
:param range_multiplier: Integer value that refers to the distance of the label from the input object. The safe value must be between 1 to 10.
:type range_multiplier: int
:param direction: Desired direction to search for the element from a label, currently accepts right and down.
:type direction: str
Usage:
>>> # Calling method to input value on a field:
>>> oHelper.SetValue("A1_COD", "000001")
>>> #-----------------------------------------
>>> # Calling method to input value on a field from a label text and looking an input field for a specific direction:
>>> oHelper.SetValue("Codigo", "000001", direction='right')
>>> #-----------------------------------------
>>> # Calling method to input value on a field that is a grid:
>>> oHelper.SetValue("Client", "000001", grid=True)
>>> oHelper.LoadGrid()
>>> #-----------------------------------------
>>> # Calling method to checkbox value on a field that is a grid:
>>> oHelper.SetValue('Confirmado?', True, grid=True)
>>> oHelper.LoadGrid()
>>> #-----------------------------------------
>>> # Calling method to checkbox value on a field that isn't a grid:
>>> oHelper.SetValue('', True, name_attr=True, position=1)
>>> #-----------------------------------------
>>> # Calling method to input value on a field that is on the second grid of the screen:
>>> oHelper.SetValue("Order", "000001", grid=True, grid_number=2)
>>> oHelper.LoadGrid()
>>> #-----------------------------------------
>>> # Calling method to input value on a field that is a grid *Will not attempt to verify the entered value. Run only once.* :
>>> oHelper.SetValue("Order", "000001", grid=True, grid_number=2, check_value = False)
>>> oHelper.LoadGrid()
>>> # Calling method to input value in cases that have duplicate fields:
>>> oHelper.SetValue('Tipo Entrada' , '073', grid=True, grid_number=2, name_attr=True)
>>> self.oHelper.SetValue('Tipo Entrada' , '073', grid=True, grid_number=2, name_attr=True, duplicate_fields=['tipo entrada', 10])
>>> oHelper.LoadGrid()
"""
check_value = self.check_value(check_value)
if grid_memo_field:
self.grid_memo_field = True
if range_multiplier:
self.range_multiplier = range_multiplier
if grid:
self.input_grid_appender(field, value, grid_number - 1, row = row, check_value = check_value, duplicate_fields=duplicate_fields)
elif isinstance(value, bool):
self.click_check_radio_button(field, value, name_attr, position)
else:
self.input_value(field, value, ignore_case, name_attr, position, check_value, direction)
def check_value(self, check_value):
if check_value != None:
check_value = check_value
elif self.config.check_value != None:
check_value = self.config.check_value
else:
check_value = True
return check_value
    def input_value(self, field, value, ignore_case=True, name_attr=False, position=1, check_value=True, direction=None):
        """
        [Internal]
        Sets value of an input element.
        Returns True if succeeded, False if it failed.
        :param field: The field name or label to receive the value
        :type field: str
        :param value: The value to be set on the field
        :type value: str
        :param ignore_case: Boolean if case should be ignored or not. - **Default:** True
        :type ignore_case: bool
        :param name_attr: Boolean if search by Name attribute must be forced. - **Default:** False
        :type name_attr: bool
        :param position: 1-based occurrence of the field when names repeat.
        :type position: int
        :param check_value: Boolean ignore input check - **Default:** True
        :type check_value: bool
        :param direction: Optional search direction ('right' or 'down') for label search.
        :type direction: str
        :returns: True if succeeded, False if it failed.
        :rtype: bool
        Usage:
        >>> # Calling the method
        >>> self.input_value("A1_COD", "000001")
        """
        self.wait_blocker()
        # Strip trailing punctuation ("?", ":", "*", ".") from the label text.
        field = re.sub(r"([\s\?:\*\.]+)?$", "", field).strip()
        main_element = None
        if name_attr:
            self.wait_element(term=f"[name$='{field}']", scrap_type=enum.ScrapType.CSS_SELECTOR)
        else:
            self.wait_element(field)
        success = False
        endtime = time.time() + self.config.time_out
        while(time.time() < endtime and not success):
            unmasked_value = self.remove_mask(value)
            logger().info(f"Looking for element: {field}")
            # "From"/"To" filter labels map to fixed internal field names.
            if field.lower() == self.language.From.lower():
                element = self.get_field("cDeCond", name_attr=True, direction=direction)
            elif field.lower() == self.language.To.lower():
                element = self.get_field("cAteCond", name_attr=True, direction=direction)
            else:
                element = self.get_field(field, name_attr, position, direction=direction)
            if element:
                input_field = lambda : self.soup_to_selenium(element)
                self.scroll_to_element(input_field())
            if not element or not self.element_is_displayed(element):
                continue
            main_element = element
            # Multi-line fields (tmultiget) keep the real input in a <textarea>.
            if "tmultiget" in element.attrs['class'] if self.element_name(element) == 'div' else None:
                textarea = element.select("textarea")
                if not textarea:
                    input_field = lambda : self.soup_to_selenium(element)
                else:
                    input_field = lambda : self.soup_to_selenium(next(iter(textarea), None))
            else:
                input_field = lambda : self.soup_to_selenium(element)
            if input_field:
                valtype = "C"
                # Use the unmasked value when the field applies an input mask.
                main_value = unmasked_value if value != unmasked_value and self.check_mask(input_field()) else value
                interface_value = self.get_web_value(input_field())
                current_value = interface_value.strip()
                interface_value_size = len(interface_value)
                user_value_size = len(value)
                if self.element_name(element) == "input":
                    valtype = element.attrs["valuetype"]
                self.scroll_to_element(input_field())
                try:
                    #Action for Combobox elements
                    if ((hasattr(element, "attrs") and "class" in element.attrs and "tcombobox" in element.attrs["class"]) or
                        (hasattr(element.find_parent(), "attrs") and "class" in element.find_parent().attrs and "tcombobox" in element.find_parent().attrs["class"])):
                        self.set_element_focus(input_field())
                        main_element = element.parent
                        self.try_element_to_be_clickable(main_element)
                        self.select_combo(element, main_value)
                        current_value = self.get_web_value(input_field()).strip()
                    #Action for Input elements
                    else:
                        self.wait_until_to( expected_condition = "visibility_of", element = input_field, timeout=True)
                        self.wait_until_to( expected_condition = "element_to_be_clickable", element = element, locator = By.XPATH, timeout=True)
                        self.double_click(input_field())
                        #if Character input
                        if valtype != 'N':
                            self.set_element_focus(input_field())
                            self.wait_until_to( expected_condition = "element_to_be_clickable", element = element, locator = By.XPATH, timeout=True)
                            # Select all existing content (Ctrl+Home then Ctrl+Shift+End).
                            ActionChains(self.driver).key_down(Keys.CONTROL).send_keys(Keys.HOME).key_up(Keys.CONTROL).perform()
                            ActionChains(self.driver).key_down(Keys.CONTROL).key_down(Keys.SHIFT).send_keys(
                                Keys.END).key_up(Keys.CONTROL).key_up(Keys.SHIFT).perform()
                            time.sleep(0.1)
                            if main_value == '':
                                # A single space clears the field content.
                                ActionChains(self.driver).move_to_element(input_field()).send_keys_to_element(input_field(), " ").perform()
                            else:
                                self.wait_blocker()
                                self.wait_until_to( expected_condition = "element_to_be_clickable", element = element, locator = By.XPATH, timeout=True)
                                ActionChains(self.driver).move_to_element(input_field()).send_keys_to_element(input_field(), main_value).perform()
                        #if Number input
                        else:
                            tries = 0
                            try_counter = 1
                            while(tries < 3):
                                self.set_element_focus(input_field())
                                self.wait_until_to( expected_condition = "element_to_be_clickable", element = element, locator = By.XPATH, timeout=True)
                                self.try_send_keys(input_field, main_value, try_counter)
                                current_number_value = self.get_web_value(input_field())
                                if self.remove_mask(current_number_value).strip() == main_value:
                                    break
                                tries+=1
                                try_counter+=1
                        if user_value_size < interface_value_size:
                            # Input shorter than the mask: confirm with ENTER.
                            self.send_keys(input_field(), Keys.ENTER)
                    if not check_value:
                        return
                    if self.check_mask(input_field()):
                        current_value = self.remove_mask(self.get_web_value(input_field()).strip())
                        if re.findall(r"\s", current_value):
                            current_value = re.sub(r"\s", "", current_value)
                    else:
                        current_value = self.get_web_value(input_field()).strip()
                    if current_value != "" and current_value.encode('latin-1', 'ignore'):
                        logger().info(f"Current field value: {current_value}")
                    if ((hasattr(element, "attrs") and "class" in element.attrs and "tcombobox" in element.attrs["class"]) or
                        (hasattr(element.find_parent(), "attrs") and "class" in element.find_parent().attrs and "tcombobox" in element.find_parent().attrs["class"])):
                        current_value = current_value[0:len(str(value))]
                    # Password fields render as "●" bullets; compare lengths only.
                    if re.match(r"^●+$", current_value):
                        success = len(current_value) == len(str(value).strip())
                    elif ignore_case:
                        success = current_value.lower().strip() == main_value.lower().strip()
                    else:
                        success = current_value == main_value
                except:
                    continue
            if "disabled" in element.attrs:
                self.log_error(self.create_message(['', field],enum.MessageType.DISABLED))
        if not success:
            self.log_error(f"Could not input value {value} in field {field}")
        else:
            self.wait_until_to( expected_condition = "element_to_be_clickable", element = main_element, locator = By.XPATH )
    def get_field(self, field, name_attr=False, position=1, input_field=True, direction=None):
        """
        [Internal]
        This method decides if field would be found by either it's name or by it's label.
        Internal method of input_value and CheckResult.
        :param field: Field name or field label to be searched
        :type field: str
        :param name_attr: Boolean if search by Name attribute must be forced. - **Default:** False
        :type name_attr: bool
        :param position: 1-based occurrence to return when several fields share the name.
        :type position: int
        :param input_field: If False, the label search targets non-input elements.
        :type input_field: bool
        :param direction: Optional search direction ('right' or 'down') for label search.
        :type direction: str
        :return: Field element
        :rtype: Beautiful Soup object
        Usage:
        >>> # Calling the method:
        >>> element1 = self.get_field("A1_COD")
        >>> element2 = self.get_field("Product")
        """
        endtime = time.time() + self.config.time_out
        element = None
        # Names like "A1_COD" (table_field) are treated as name-attribute lookups.
        if re.match(r"\w+(_)", field) or name_attr:
            position -= 1
        while(time.time() < endtime and element is None):
            if re.match(r"\w+(_)", field) or name_attr:
                element_list = self.web_scrap(f"[name$='{field}']", scrap_type=enum.ScrapType.CSS_SELECTOR)
                if element_list and len(element_list) -1 >= position:
                    element = element_list[position]
            else:
                element = next(iter(self.web_scrap(field, scrap_type=enum.ScrapType.TEXT, label=True, input_field=input_field, direction=direction, position=position)), None)
        if element:
            # Prefer the inner input/select when the match is a wrapper element.
            element_children = next((x for x in element.contents if self.element_name(x) in ["input", "select"]), None)
            return element_children if element_children is not None else element
        else:
            self.log_error("Element wasn't found.")
def get_web_value(self, element):
"""
[Internal]
Gets the current value or text of element.
:param element: The element to get value or text from
:type element: Selenium object
:return: The value or text of passed element
:rtype: str
Usage:
>>> # Calling the method:
>>> current_value = self.get_web_value(selenium_field_element)
"""
if element.tag_name == "div":
element_children = element.find_element(By.CSS_SELECTOR, "div > * ")
if element_children is not None:
element = element_children
if element.tag_name == "label":
web_value = element.get_attribute("text")
if not web_value:
web_value = element.text.strip()
elif element.tag_name == "select":
current_select = 0 if element.get_attribute('value') == '' else int(element.get_attribute('value'))
selected_element = element.find_elements(By.CSS_SELECTOR, "option")[current_select]
web_value = selected_element.text
else:
web_value = element.get_attribute("value")
return web_value
def CheckResult(self, field, user_value, grid=False, line=1, grid_number=1, name_attr=False, input_field=True, direction=None, grid_memo_field=False):
    """
    Checks if a field has the value the user expects.

    :param field: The field or label of a field that must be checked.
    :type field: str
    :param user_value: The value that the field is expected to contain.
    :type user_value: str
    :param grid: Boolean if this is a grid field or not. - **Default:** False
    :type grid: bool
    :param line: Grid line that contains the column field to be checked.- **Default:** 1
    :type line: int
    :param grid_number: Grid number of which grid should be checked when there are multiple grids on the same screen. - **Default:** 1
    :type grid_number: int
    :param name_attr: Boolean if search by Name attribute must be forced. - **Default:** False
    :type name_attr: bool
    :param input_field: False if the desired field is not an input type
    :type input_field: bool
    :param direction: Desired direction to search for the element, currently accepts right and down
    :type direction: str
    :param grid_memo_field: Boolean if this is a memo grid field. - **Default:** False
    :type grid_memo_field: bool

    Usage:

    >>> # Calling method to check a value of a field:
    >>> oHelper.CheckResult("A1_COD", "000001")
    >>> #-----------------------------------------
    >>> # Calling method to check a field that is on the second line of a grid:
    >>> oHelper.CheckResult("Client", "000001", grid=True, line=2)
    >>> oHelper.LoadGrid()
    """
    self.wait_blocker()
    if grid_memo_field:
        self.grid_memo_field = True
    if grid:
        # Grid checks are only queued here; they are executed later by LoadGrid().
        self.check_grid_appender(line - 1, field, user_value, grid_number - 1)
    elif isinstance(user_value, bool):
        # A boolean expectation means the target is a checkbox.
        current_value = self.result_checkbox(field, user_value)
        self.log_result(field, user_value, current_value)
    else:
        # Strip ':' and '?' decorations that label captions may carry.
        field = re.sub(r"(\:*)(\?*)", "", field).strip()
        if name_attr:
            self.wait_element(term=f"[name$='{field}']", scrap_type=enum.ScrapType.CSS_SELECTOR)
        else:
            self.wait_element(field)
        element = self.get_field(field, name_attr=name_attr, input_field=input_field, direction=direction)
        if not element:
            self.log_error(f"Couldn't find element: {field}")
        # Re-resolve the Selenium element on every use so a DOM refresh does
        # not leave us holding a stale reference.
        field_element = lambda: self.driver.find_element_by_xpath(xpath_soup(element))
        self.set_element_focus(field_element())
        self.scroll_to_element(field_element())
        endtime = time.time() + self.config.time_out
        current_value = ''
        # Poll until the field exposes a non-empty value or the timeout expires.
        while(time.time() < endtime and not current_value):
            current_value = self.get_web_value(field_element()).strip()
        logger().info(f"Value for Field {field} is: {current_value}")
        #Remove mask if present.
        if self.check_mask(field_element()):
            current_value = self.remove_mask(current_value)
            user_value = self.remove_mask(user_value)
        #If user value is string, Slice string to match user_value's length
        if type(current_value) is str:
            current_value = current_value[0:len(str(user_value))]
        self.log_result(field, user_value, current_value)
def log_result(self, field, user_value, captured_value):
    """
    [Internal]
    Logs the result of comparison between user value and captured value.

    :param field: The field whose values would be compared
    :type field: str
    :param user_value: The value the user expects
    :type user_value: str
    :param captured_value: The value that was captured on the screen
    :type captured_value: str

    Usage:

    >>> # Calling the method:
    >>> self.log_result("A1_COD", "000001", "000001")
    """
    txtaux = ""
    # Build an "incorrect" message only on mismatch; otherwise an empty
    # message is handed to the comparison routine.
    if user_value != captured_value:
        message = self.create_message([txtaux, field, user_value, captured_value], enum.MessageType.INCORRECT)
    else:
        message = ""
    self.compare_field_values(field, user_value, captured_value, message)
def GetValue(self, field, grid=False, line=1, grid_number=1, grid_memo_field=False):
    """
    Gets the current value or text of element.

    :param field: The field or label of a field that must be checked.
    :type field: str
    :param grid: Boolean if this is a grid field or not. - **Default:** False
    :type grid: bool
    :param line: Grid line that contains the column field to be checked.- **Default:** 1
    :type line: int
    :param grid_number: Grid number of which grid should be checked when there are multiple grids on the same screen. - **Default:** 1
    :type grid_number: int
    :param grid_memo_field: Boolean if this is a memo grid field. - **Default:** False
    :type grid_memo_field: bool

    Usage:

    >>> # Calling the method:
    >>> current_value = oHelper.GetValue("A1_COD")
    """
    endtime = time.time() + self.config.time_out
    element = None
    if grid_memo_field:
        self.grid_memo_field = True
    if not grid:
        # Poll for the field until the timeout. The hasattr guards check for
        # a BeautifulSoup Tag (which has .name/.parent); note they are
        # redundant with "not element" once a truthy Tag is returned.
        while ( (time.time() < endtime) and (not element) and (not hasattr(element, "name")) and (not hasattr(element, "parent"))):
            element = self.get_field(field)
        if ( hasattr(element, "name") and hasattr(element, "parent") ):
            selenium_element = lambda: self.driver.find_element_by_xpath(xpath_soup(element))
            value = self.get_web_value(selenium_element())
        # NOTE(review): if the loop times out without a Tag, `value` is never
        # bound and the check below raises UnboundLocalError — confirm intent.
    else:
        # Grid path: delegate to check_grid in "get value" mode.
        field_array = [line-1, field, "", grid_number-1]
        x3_dictionaries = self.create_x3_tuple()
        value = self.check_grid(field_array, x3_dictionaries, get_value=True)
    if ( not value ):
        self.log_error("GetValue element is none")
    return value
def restart(self):
    """
    [Internal]
    Restarts the Protheus Webapp and fills the initial screens.

    On the second restart the whole browser session is recycled; otherwise
    only a page refresh is performed. Afterwards the login flow (program,
    user and environment screens) is replayed and the previously saved
    routine is re-opened.

    Usage:

    >>> # Calling the method:
    >>> self.restart()
    """
    webdriver_exception = None
    try:
        if self.restart_counter == 2:
            # Second restart attempt: recycle the whole browser session.
            logger().info("Closing the Browser")
            self.driver.close()
            logger().info("Starting the Browser")
            self.Start()
        else:
            logger().info("Refreshing the Browser")
            self.driver_refresh()
    except WebDriverException as e:
        webdriver_exception = e

    if webdriver_exception:
        message = f"Wasn't possible execute Start() method: {next(iter(webdriver_exception.msg.split(':')), None)}"
        self.assertTrue(False, message)

    if self.config.coverage and self.config.initial_program != '' and self.restart_counter < 3:
        self.open_url_coverage(url=self.config.url, initial_program=self.config.initial_program, environment=self.config.environment)

    try:
        # switch_to_alert() was deprecated and removed in Selenium 4; the
        # switch_to.alert property works on both Selenium 3 and 4. With the
        # old call, Selenium 4 raised AttributeError here and the alert was
        # silently never accepted.
        self.driver.switch_to.alert.accept()
    except Exception:
        # Best effort: usually there is simply no alert present.
        pass

    if self.config.initial_program != '' and self.restart_counter < 3:
        if not self.config.skip_environment and not self.config.coverage:
            self.program_screen(self.config.initial_program)
        self.user_screen()
        self.environment_screen()

        # Wait for the main menu, dismissing warning/modal screens meanwhile.
        endtime = time.time() + self.config.time_out
        while(time.time() < endtime and not self.element_exists(term=".tmenu", scrap_type=enum.ScrapType.CSS_SELECTOR, main_container="body")):
            self.close_warning_screen()
            self.close_modal()

        # Re-open the routine that was active before the restart.
        if self.config.routine:
            if self.routine == 'SetLateralMenu':
                self.SetLateralMenu(self.config.routine, save_input=False)
            elif self.routine == 'Program':
                self.set_program(self.config.routine)
def driver_refresh(self):
    """
    [Internal]
    Refresh the driver.

    Performs a WebDriver-level refresh followed by a hard browser refresh
    (Ctrl+F5) to bypass cached resources.

    Usage:

    >>> # Calling the method:
    >>> self.driver_refresh()
    """
    if self.config.smart_test or self.config.debug_log:
        logger().info("Driver Refresh")

    self.driver.refresh()
    self.wait_blocker()

    hard_refresh = ActionChains(self.driver)
    hard_refresh.key_down(Keys.CONTROL).send_keys(Keys.F5).key_up(Keys.CONTROL)
    hard_refresh.perform()
def Finish(self):
    """
    Exit the protheus Webapp.

    Usage:

    >>> # Calling the method.
    >>> oHelper.Finish()
    """
    element = None
    text_cover = None
    # Banner shown while the server collects code-coverage information.
    string = "Aguarde... Coletando informacoes de cobertura de codigo."
    timeout = 900
    click_counter = 1
    if self.config.coverage:
        endtime = time.time() + timeout
        while((time.time() < endtime) and (not element or not text_cover)):
            # Ctrl+Q opens the exit dialog.
            ActionChains(self.driver).key_down(Keys.CONTROL).perform()
            ActionChains(self.driver).key_down('q').perform()
            ActionChains(self.driver).key_up(Keys.CONTROL).perform()
            element = self.wait_element_timeout(term=self.language.finish, scrap_type=enum.ScrapType.MIXED,
                optional_term=".tsay", timeout=5, step=1, main_container="body", check_error = False)
            if element:
                if self.click_button_finish(click_counter):
                    self.WaitShow(string)
                    text_cover = self.search_text(selector=".tsay", text=string)
                    if text_cover:
                        logger().info(string)
                        # Spend the remaining budget waiting for coverage collection.
                        timeout = endtime - time.time()
                        if timeout > 0:
                            self.wait_element_timeout(term=string, scrap_type=enum.ScrapType.MIXED,
                                optional_term=".tsay", timeout=timeout, step=0.1, main_container="body", check_error = False)
                # Cycle through the three ClickType strategies (1..3).
                click_counter += 1
                if click_counter > 3:
                    click_counter = 1
    else:
        endtime = time.time() + self.config.time_out
        while( time.time() < endtime and not element ):
            ActionChains(self.driver).key_down(Keys.CONTROL).send_keys('q').key_up(Keys.CONTROL).perform()
            soup = self.get_current_DOM()
            element = soup.find_all(text=self.language.finish)
            self.wait_element_timeout(term=self.language.finish, scrap_type=enum.ScrapType.MIXED, optional_term=".tsay", timeout=5, step=0.5, main_container="body")
        if not element:
            logger().warning("Warning method finish use driver.refresh. element not found")
        # Fall back to a refresh when the finish button never appeared.
        self.driver_refresh() if not element else self.SetButton(self.language.finish)
def click_button_finish(self, click_counter=None):
    """
    [internal]
    Locates the 'Finish' button on screen and clicks it.

    :param click_counter: Selects which ClickType strategy to use.
    :type click_counter: int

    :return: True when the click succeeded, False otherwise.
    :rtype: bool
    """
    try:
        dom = self.get_current_DOM()
        all_buttons = dom.select('button')
        finish_button = next((btn for btn in all_buttons if btn.text == self.language.finish), None)
        if finish_button:
            # Re-resolve the Selenium element on each use.
            as_selenium = lambda: self.soup_to_selenium(finish_button)
            self.scroll_to_element(as_selenium())
            self.set_element_focus(as_selenium())
            clicked = self.click(as_selenium(), click_type=enum.ClickType(click_counter))
            return True if clicked else False
    except Exception as e:
        logger().exception(f"Warning Finish method exception - {str(e)}")
        return False
def LogOff(self):
    """
    Logs out of the Protheus Webapp.

    Usage:

    >>> # Calling the method.
    >>> oHelper.LogOff()
    """
    element = None
    text_cover = None
    # Banner shown while the server collects code-coverage information.
    string = "Aguarde... Coletando informacoes de cobertura de codigo."
    timeout = 900
    click_counter = 1
    if self.config.coverage:
        endtime = time.time() + timeout
        while((time.time() < endtime) and (not element or not text_cover)):
            # Ctrl+Q opens the logoff dialog.
            ActionChains(self.driver).key_down(Keys.CONTROL).perform()
            ActionChains(self.driver).key_down('q').perform()
            ActionChains(self.driver).key_up(Keys.CONTROL).perform()
            element = self.wait_element_timeout(term=self.language.logOff, scrap_type=enum.ScrapType.MIXED,
                optional_term=".tsay", timeout=5, step=1, main_container="body", check_error = False)
            if element:
                if self.click_button_logoff(click_counter):
                    text_cover = self.search_text(selector=".tsay", text=string)
                    if text_cover:
                        logger().info(string)
                        # Spend the remaining budget waiting for coverage collection.
                        timeout = endtime - time.time()
                        if timeout > 0:
                            self.wait_element_timeout(term=string, scrap_type=enum.ScrapType.MIXED,
                                optional_term=".tsay", timeout=timeout, step=0.1, main_container="body", check_error = False)
                # Cycle through the three ClickType strategies (1..3).
                click_counter += 1
                if click_counter > 3:
                    click_counter = 1
    else:
        endtime = time.time() + self.config.time_out
        while( time.time() < endtime and not element ):
            ActionChains(self.driver).key_down(Keys.CONTROL).send_keys('q').key_up(Keys.CONTROL).perform()
            soup = self.get_current_DOM()
            element = soup.find_all(text=self.language.logOff)
            self.wait_element_timeout(term=self.language.logOff, scrap_type=enum.ScrapType.MIXED, optional_term=".tsay", timeout=5, step=0.5, main_container="body")
        if not element:
            # NOTE(review): message says "finish" but this is LogOff — text kept as-is.
            logger().warning("Warning method finish use driver.refresh. element not found")
        # Fall back to a refresh when the logoff button never appeared.
        self.driver_refresh() if not element else self.SetButton(self.language.logOff)
def click_button_logoff(self, click_counter=None):
    """
    [internal]
    Locates the 'LogOff' button on screen and clicks it.

    :param click_counter: Selects which ClickType strategy to use.
    :type click_counter: int

    :return: True when the click succeeded, False otherwise.
    :rtype: bool
    """
    try:
        dom = self.get_current_DOM()
        all_buttons = dom.select('button')
        logoff_button = next((btn for btn in all_buttons if btn.text == self.language.logOff), None)
        if logoff_button:
            # Re-resolve the Selenium element on each use.
            as_selenium = lambda: self.soup_to_selenium(logoff_button)
            self.scroll_to_element(as_selenium())
            self.set_element_focus(as_selenium())
            clicked = self.click(as_selenium(), click_type=enum.ClickType(click_counter))
            return True if clicked else False
    except Exception as e:
        logger().exception(f"Warning Finish method exception - {str(e)}")
        return False
def web_scrap(self, term, scrap_type=enum.ScrapType.TEXT, optional_term=None, label=False, main_container=None, check_error=True, check_help=True, input_field=True, direction=None, position=1):
    """
    [Internal]
    Returns a BeautifulSoup object list based on the search parameters.
    Does not support ScrapType.XPATH as scrap_type parameter value.

    :param term: The first search term. A text or a selector
    :type term: str
    :param scrap_type: The type of webscraping. - **Default:** enum.ScrapType.TEXT
    :type scrap_type: enum.ScrapType
    :param optional_term: The second search term. A selector used in MIXED webscraping. - **Default:** None
    :type optional_term: str
    :param label: If the search is based on a label near the element. - **Default:** False
    :type label: bool
    :param main_container: The selector of a container element that has all other elements. - **Default:** None
    :type main_container: str
    :param position: Position which element is located. - **Default:** 1
    :type position: int

    :return: List of BeautifulSoup4 elements based on search parameters.
    :rtype: List of BeautifulSoup4 objects

    Usage:

    >>> #All buttons
    >>> buttons = self.web_scrap(term="button", scrap_type=enum.ScrapType.CSS_SELECTOR)
    >>> #Elements with class "my_class" and text "my_text"
    >>> elements = self.web_scrap(term="my_text", scrap_type=ScrapType.MIXED, optional_term=".my_class")
    """
    try:
        endtime = time.time() + self.config.time_out
        container = None
        # Poll the DOM until a container matching the selector shows up.
        while(time.time() < endtime and container is None):
            soup = self.get_current_DOM()
            if check_error:
                self.search_for_errors(check_help)
            # Debug aid: dump the whole DOM to a uniquely-named file.
            if self.config.log_file:
                with open(f"{term + str(scrap_type) + str(optional_term) + str(label) + str(main_container) + str(random.randint(1, 101)) }.txt", "w") as text_file:
                    text_file.write(f" HTML CONTENT: {str(soup)}")
            container_selector = self.base_container
            if (main_container is not None):
                container_selector = main_container
            containers = self.zindex_sort(soup.select(container_selector), reverse=True)
            if self.base_container in container_selector:
                container = self.containers_filter(containers)
            container = next(iter(containers), None) if isinstance(containers, list) else container

        if container is None:
            raise Exception(f"Web Scrap couldn't find container - term: {term}")

        if (scrap_type == enum.ScrapType.TEXT):
            if label:
                return self.find_label_element(term, container, input_field=input_field, direction=direction, position=position)
            elif not re.match(r"\w+(_)", term):
                # Evaluate the filter once (previously it was called twice,
                # doubling the DOM traversal on this path).
                label_elements = self.filter_label_element(term, container)
                return label_elements if label_elements else []
            else:
                return list(filter(lambda x: term.lower() in x.text.lower(), container.select("div > *")))
        elif (scrap_type == enum.ScrapType.CSS_SELECTOR):
            return container.select(term)
        elif (scrap_type == enum.ScrapType.MIXED and optional_term is not None):
            return list(filter(lambda x: term.lower() in x.text.lower(), container.select(optional_term)))
        elif (scrap_type == enum.ScrapType.SCRIPT):
            script_result = self.driver.execute_script(term)
            return script_result if isinstance(script_result, list) else []
        else:
            return []
    except AssertionError:
        raise
    except Exception as e:
        self.log_error(str(e))
def search_for_errors(self, check_help=True):
    """
    [Internal]
    Searches for errors and alerts in the screen.

    :param check_help: Whether "Help:" alert popups should also be treated
        as errors. - **Default:** True
    :type check_help: bool

    Usage:

    >>> # Calling the method:
    >>> self.search_for_errors()
    """
    endtime = time.time() + self.config.time_out
    soup = None
    top_layer = None
    # Poll until a DOM snapshot is obtained or the timeout expires.
    while(time.time() < endtime and not soup):
        soup = self.get_current_DOM()
        try:
            if not soup:
                self.log_error("Search for erros couldn't find DOM")
            message = ""
            # Highest z-index dialog is the one the user currently sees.
            top_layer = next(iter(self.zindex_sort(soup.select(".tmodaldialog, .ui-dialog"), True)), None)
        except AttributeError as e:
            self.log_error(f"Search for erros couldn't find DOM\n Exception: {str(e)}")

    if not top_layer:
        return None

    # Info icon marks an alert popup; "openclosing" icon marks an error log.
    icon_alert = next(iter(top_layer.select("img[src*='fwskin_info_ico.png']")), None)
    icon_error_log = next(iter(top_layer.select("img[src*='openclosing.png']")), None)
    if (not icon_alert or not check_help) and not icon_error_log:
        return None

    if icon_alert:
        # Concatenate every label in the dialog into one message string.
        label = reduce(lambda x,y: f"{x} {y}", map(lambda x: x.text.strip(), top_layer.select(".tsay label")))
        if self.language.messages.error_msg_required in label:
            message = self.language.messages.error_msg_required
        elif "help:" in label.lower() and self.language.problem in label:
            message = label
        else:
            # Alert is informational only — not an error.
            return None
    elif icon_error_log:
        label = reduce(lambda x,y: f"{x} {y}", map(lambda x: x.text.strip(), top_layer.select(".tsay label")))
        textarea = next(iter(top_layer.select("textarea")), None)
        textarea_value = self.driver.execute_script(f"return arguments[0].value", self.driver.find_element_by_xpath(xpath_soup(textarea)))
        error_paragraphs = textarea_value.split("\n\n")
        error_message = f"Error Log: {error_paragraphs[0]} - {error_paragraphs[1]}" if len(error_paragraphs) > 2 else label
        message = error_message.replace("\n", " ")
        # Expand the "Details" section before failing, for richer logs.
        # NOTE(review): if no Details button exists, `button` is None and
        # xpath_soup(None) below would raise — confirm this can't happen.
        button = next(iter(filter(lambda x: self.language.details.lower() in x.text.lower(),top_layer.select("button"))), None)
        self.click(self.driver.find_element_by_xpath(xpath_soup(button)))
        time.sleep(1)

    # An error was detected: count the restart and abort the test step.
    self.restart_counter += 1
    self.log_error(message)
def get_function_from_stack(self):
    """
    [Internal]
    Gets the function name that called the Webapp class from the call stack.

    Looks for the stack frame whose filename matches the configured routine;
    falls back to the literal "function_name" when none is found.

    Usage:

    >>> # Calling the method:
    >>> self.get_function_from_stack()
    """
    routine_file = self.config.routine
    matching_frame = next(
        iter(frame for frame in inspect.stack() if frame.filename == routine_file),
        None,
    )
    if matching_frame and matching_frame.function:
        return matching_frame.function
    return "function_name"
def create_message(self, args, message_type=enum.MessageType.CORRECT):
    """
    [Internal]
    Returns default messages used all throughout the class based on input parameters.

    Each template consumes a different number of items from *args*:
    CORRECT and DISABLED use 2, ASSERTERROR uses 3, INCORRECT uses 4.

    :param args: A list of strings to be replaced in each message.
    :type args: List of str
    :param message_type: Enum of which message type should be created. - **Default:** enum.MessageType.CORRECT
    :type message_type: enum.MessageType

    Usage:

    >>> # Calling the method:
    >>> message = self.create_message([txtaux, field, user_value, captured_value], enum.MessageType.INCORRECT)
    """
    templates = {
        enum.MessageType.CORRECT: "{} Value of field {} is correct!",
        enum.MessageType.INCORRECT: "{} Value expected for field \"{}\" ({}) is not equal to what was found ({}).",
        enum.MessageType.DISABLED: "{} Field \"{}\" is disabled.",
        enum.MessageType.ASSERTERROR: "Failed: Value expected for field {}: \"{}\" is different from what was found \"{}\".",
    }
    # Unknown message types fall back to the "correct" template, matching
    # the original if/elif chain's else branch.
    template = templates.get(message_type, templates[enum.MessageType.CORRECT])
    # str.format ignores surplus positional arguments, so unpacking the full
    # args list is safe for every template.
    return template.format(*args)
def element_exists(self, term, scrap_type=enum.ScrapType.TEXT, position=0, optional_term="", main_container=".tmodaldialog,.ui-dialog", check_error=True):
    """
    [Internal]
    Returns a boolean if element exists on the screen.

    :param term: The first term to use on a search of element
    :type term: str
    :param scrap_type: Type of element search. - **Default:** enum.ScrapType.TEXT
    :type scrap_type: enum.ScrapType
    :param position: Position which element is located. - **Default:** 0
    :type position: int
    :param optional_term: Second term to use on a search of element. Used in MIXED search. - **Default:** "" (empty string)
    :type optional_term: str

    :return: True if element is present. False if element is not present.
    :rtype: bool

    Usage:

    >>> element_is_present = element_exists(term=".ui-dialog", scrap_type=enum.ScrapType.CSS_SELECTOR)
    >>> element_is_present = element_exists(term=text, scrap_type=enum.ScrapType.MIXED, optional_term=".tsay")
    """
    element_list = []
    containers = None
    if scrap_type == enum.ScrapType.SCRIPT:
        # SCRIPT: truthiness of the JavaScript result decides existence.
        return bool(self.driver.execute_script(term))
    elif (scrap_type != enum.ScrapType.MIXED and not (scrap_type == enum.ScrapType.TEXT and not re.match(r"\w+(_)", term))):
        # Selenium-driven search path (CSS/XPATH, or TEXT that looks like a
        # field name such as "A1_COD").
        selector = term
        if scrap_type == enum.ScrapType.CSS_SELECTOR:
            by = By.CSS_SELECTOR
        elif scrap_type == enum.ScrapType.XPATH:
            by = By.XPATH
        elif scrap_type == enum.ScrapType.TEXT:
            by = By.CSS_SELECTOR
            selector = f"[name*='{term}']"
        if scrap_type != enum.ScrapType.XPATH:
            # Narrow the search to the top-most container first.
            soup = self.get_current_DOM()
            if not soup:
                return False
            if check_error:
                self.search_for_errors()
            container_selector = self.base_container
            if (main_container is not None):
                container_selector = main_container
            try:
                containers_soup = soup.select(container_selector)
                if not containers_soup:
                    return False
                containers = self.zindex_sort(containers_soup, reverse=True)
            except Exception as e:
                logger().exception(f"Warning element_exists containers exception:\n {str(e)}")
                pass
            if self.base_container in container_selector:
                container = self.containers_filter(containers)
            container = next(iter(containers), None) if isinstance(containers, list) else containers
            if not container:
                return False
            try:
                container_element = self.driver.find_element_by_xpath(xpath_soup(container))
            except:
                return False
        else:
            # XPATH searches run against the whole document.
            container_element = self.driver
        try:
            if self.config.poui_login:
                # NOTE(review): this branch returns a WebElement, not a bool,
                # unlike every other return — callers relying on truthiness
                # still work, but confirm this asymmetry is intended.
                self.wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, selector)))
                return self.driver.find_element(By.CSS_SELECTOR, selector)
            element_list = container_element.find_elements(by, selector)
        except StaleElementReferenceException:
            pass
    else:
        # MIXED or label-text search: handled by web_scrap below.
        if scrap_type == enum.ScrapType.MIXED:
            selector = optional_term
        else:
            selector = "div"
    if not element_list:
        element_list = self.web_scrap(term=term, scrap_type=scrap_type, optional_term=optional_term, main_container=main_container, check_error=check_error)
        if not element_list:
            return None
    # position == 0 means "any match"; otherwise require at least `position` matches.
    if position == 0:
        return len(element_list) > 0
    else:
        return len(element_list) >= position
def SetLateralMenu(self, menu_itens, save_input=True):
    """
    Navigates through the lateral menu using provided menu path.
    e.g. "MenuItem1 > MenuItem2 > MenuItem3"

    :param menu_itens: String with the path to the menu.
    :type menu_itens: str
    :param save_input: Boolean if all input info should be saved for later usage. Leave this flag 'True' if you are not sure. **Default:** True
    :type save_input: bool

    Usage:

    >>> # Calling the method:
    >>> oHelper.SetLateralMenu("Updates > Registers > Products > Groups")
    """
    submenu = ""
    endtime = time.time() + self.config.time_out
    # The "About" entry opens no routine screen, so skip post-click waits for it.
    wait_screen = True if menu_itens != self.language.menu_about else False
    if save_input:
        # Remember the routine so restart() can navigate back after a crash.
        self.routine = 'SetLateralMenu'
        self.config.routine = menu_itens
    logger().info(f"Navigating lateral menu: {menu_itens}")
    self.wait_element(term=".tmenu", scrap_type=enum.ScrapType.CSS_SELECTOR, main_container="body")
    # ESC closes any submenu left open by a previous navigation.
    ActionChains(self.driver).key_down(Keys.ESCAPE).perform()
    self.wait_element(term=".tmenu", scrap_type=enum.ScrapType.CSS_SELECTOR, main_container="body")
    menu_itens = list(map(str.strip, menu_itens.split(">")))
    soup = self.get_current_DOM()
    menu_xpath = soup.select(".tmenu")
    menu = menu_xpath[0]
    child = menu
    count = 0
    try:
        # Descend one menu level per iteration.
        for menuitem in menu_itens:
            logger().info(f'Menu item: "{menuitem}"')
            self.wait_blocker()
            self.wait_until_to(expected_condition="element_to_be_clickable", element = ".tmenu", locator=By.CSS_SELECTOR )
            self.wait_until_to(expected_condition="presence_of_all_elements_located", element = ".tmenu .tmenuitem", locator = By.CSS_SELECTOR )
            menuitem_presence = self.wait_element_timeout(term=menuitem, scrap_type=enum.ScrapType.MIXED, timeout = self.config.time_out, optional_term=".tmenuitem", main_container="body")
            if not menuitem_presence and submenu:
                # Item not visible yet: re-click the parent to expand it.
                submenu().click()
            subMenuElements = menu.select(".tmenuitem")
            subMenuElements = list(filter(lambda x: self.element_is_displayed(x), subMenuElements))
            # Re-read the DOM until it reports as many children as expected.
            while not subMenuElements or len(subMenuElements) < self.children_element_count(f"#{child.attrs['id']}", ".tmenuitem"):
                menu = self.get_current_DOM().select(f"#{child.attrs['id']}")[0]
                subMenuElements = menu.select(".tmenuitem")
                if time.time() > endtime and (not subMenuElements or len(subMenuElements) < self.children_element_count(".tmenu", ".tmenuitem")):
                    self.restart_counter += 1
                    self.log_error(f"Couldn't find menu item: {menuitem}")
            child = list(filter(lambda x: x.text.startswith(menuitem) and EC.element_to_be_clickable((By.XPATH, xpath_soup(x))), subMenuElements))[0]
            # Lazy lookup keeps the Selenium reference fresh across DOM updates.
            submenu = lambda: self.driver.find_element_by_xpath(xpath_soup(child))
            if subMenuElements and submenu():
                self.expanded_menu(child)
                self.scroll_to_element(submenu())
                self.wait_until_to( expected_condition = "element_to_be_clickable", element = child, locator = By.XPATH )
                self.wait_blocker()
                ActionChains(self.driver).move_to_element(submenu()).click().perform()
                if count < len(menu_itens) - 1:
                    # NOTE(review): waits on menu_itens[count] (the item just
                    # clicked), not the next one — confirm intent.
                    self.wait_element(term=menu_itens[count], scrap_type=enum.ScrapType.MIXED, optional_term=".tmenuitem", main_container="body")
                    menu = self.get_current_DOM().select(f"#{child.attrs['id']}")[0]
            else:
                self.restart_counter += 1
                self.log_error(f"Error - Menu Item does not exist: {menuitem}")
            count+=1
        # Last item: retry the click unless the label ends with "(n)".
        if not re.search("\([0-9]\)$", child.text):
            self.slm_click_last_item(f"#{child.attrs['id']} > label")
        start_time = time.time()
        child_is_displayed = True
        child_attrs = f"#{child.attrs['id']} > label"
        child_object = next(iter(
            self.web_scrap(term=child_attrs, scrap_type=enum.ScrapType.CSS_SELECTOR,
                main_container="body")), None)
        counter_child = 1
        # Keep re-clicking until the menu label disappears (routine opened)
        # or retries are exhausted. `menuitem` still holds the last item here.
        if menuitem != self.language.menu_about.split('>')[1].strip():
            while (time.time() < endtime) and (child_is_displayed and counter_child <=3):
                time.sleep(1)
                try:
                    if child_object:
                        child_element = lambda: self.soup_to_selenium(child_object)
                        if hasattr(child_element(), 'is_displayed'):
                            child_is_displayed = child_element().is_displayed()
                            elapsed_time = time.time() - start_time
                            self.wait_blocker()
                            time.sleep(1)
                            # Every 20s of waiting, try one extra click.
                            if elapsed_time >= 20:
                                start_time = time.time()
                                logger().info(f'Trying an additional click in last menu item: "{menuitem}"')
                                if not re.search("\([0-9]\)$", child.text):
                                    self.slm_click_last_item(f"#{child.attrs['id']} > label")
                    else:
                        counter_child +=1
                except:
                    counter_child +=1
        if wait_screen and self.config.initial_program.lower() == 'sigaadv':
            self.close_warning_screen_after_routine()
            self.close_coin_screen_after_routine()
    except AssertionError as error:
        raise error
    except Exception as error:
        logger().exception(str(error))
        self.restart_counter += 1
        self.log_error(str(error))
def expanded_menu(self, element):
    """
    [Internal]
    Collapses a lateral-menu item that is currently expanded by clicking
    its label; does nothing when the item is not expanded.

    :param element: BeautifulSoup element of the menu item.
    """
    is_expanded = "expanded" in element.attrs['class']
    if not is_expanded:
        return
    label_soup = element.select('label')[0]
    menu_label = self.driver.find_element_by_xpath(xpath_soup(label_soup))
    self.wait_blocker()
    ActionChains(self.driver).move_to_element(menu_label).click().perform()
def tmenuitem_element(self, menu):
    """
    [Internal]
    Collects the currently displayed ".tmenuitem" entries of a menu container.

    :param menu: BeautifulSoup element of the menu container.

    :return: List of displayed ".tmenuitem" BeautifulSoup elements.
    :rtype: list

    Note: the previous version computed this list and discarded it
    (implicitly returning None); returning the result makes the helper usable.
    """
    sub_menu_elements = menu.select(".tmenuitem")
    return list(filter(lambda x: self.element_is_displayed(x), sub_menu_elements))
def children_element_count(self, element_selector, children_selector):
    """
    [Internal]
    Returns the count of elements of a certain CSS Selector that exists within a certain element located also via CSS Selector.

    :param element_selector: The selector to find the first element.
    :type element_selector: str
    :param children_selector: The selector to find the children elements inside of the first element.
    :type children_selector: str

    :return: The count of elements matching the children_selector inside of element_selector.
    :rtype: int

    Usage:

    >>> # Calling the method:
    >>> self.children_element_count(".tmenu", ".tmenuitem")
    """
    # Counting in the browser avoids pulling the subtree back into Python.
    js_query = (
        f"return document.querySelector('{element_selector}')"
        f".querySelectorAll('{children_selector}').length;"
    )
    return int(self.driver.execute_script(js_query))
def slm_click_last_item(self, sub_menu_child_label):
    """
    [Internal]
    SetLateralMenu helper: retries the click on the last submenu item's label.

    :param sub_menu_child_label: CSS selector of the submenu item's label.
    :type sub_menu_child_label: str
    """
    try:
        matches = self.web_scrap(term=sub_menu_child_label,
            scrap_type=enum.ScrapType.CSS_SELECTOR, main_container="body")
        target_label = next(iter(matches), None)
        self.soup_to_selenium(target_label).click()
    except Exception as e:
        # Best-effort retry: only log the failure in verbose modes.
        if self.config.smart_test or self.config.debug_log:
            logger().warning(f"Warning SetLateralMenu click last item method exception: {str(e)} ")
    def SetButton(self, button, sub_item="", position=1, check_error=True):
        """
        Method that clicks on a button on the screen.
        :param button: Button to be clicked.
        :type button: str
        :param sub_item: Sub item to be clicked inside the first button. - **Default:** "" (empty string)
        :type sub_item: str
        :param position: Position which element is located. - **Default:** 1
        :type position: int
        :param check_error: Whether errors should be checked while waiting for the element. - **Default:** True
        :type check_error: bool
        Usage:
        >>> # Calling the method to click on a regular button:
        >>> oHelper.SetButton("Add")
        >>> #-------------------------------------------------
        >>> # Calling the method to click on a sub item inside a button.
        >>> oHelper.SetButton("Other Actions", "Process")
        >>> #-------------------------------------------------
        >>> # Calling the method to click on a sub item inside a button, this form is an alternative.
        >>> oHelper.SetButton("Other Actions", "Process, Process_02, Process_03")
        """
        self.wait_blocker()
        container = self.get_current_container()
        # NOTE(review): id_container is only bound when the container has an id;
        # it is read further down for confirm-style buttons — confirm that path
        # cannot run without a container id.
        if container and 'id' in container.attrs:
            id_container = container.attrs['id']
        logger().info(f"Clicking on {button}")
        try:
            soup_element = ""
            if (button.lower() == "x"):
                # The window close button has its own dedicated flow.
                self.set_button_x(position, check_error)
                return
            else:
                self.wait_element_timeout(term=button, scrap_type=enum.ScrapType.MIXED, optional_term="button, .thbutton", timeout=10, step=0.1, check_error=check_error)
            position -= 1  # user-facing position is 1-based
            layers = 0
            if button in [self.language.confirm, self.language.save]:
                # Count modal layers before clicking; used below to wait for a new one.
                layers = len(self.driver.find_elements(By.CSS_SELECTOR, ".tmodaldialog"))
            success = False
            endtime = time.time() + self.config.time_out
            starttime = time.time()
            if self.config.smart_test:
                logger().debug(f"***System Info*** Before Clicking on button:")
                system_info()
            # Poll until the button is present and displayed (or timeout).
            while(time.time() < endtime and not soup_element):
                soup_objects = self.web_scrap(term=button, scrap_type=enum.ScrapType.MIXED, optional_term="button, .thbutton", main_container = self.containers_selectors["SetButton"], check_error=check_error)
                soup_objects = list(filter(lambda x: self.element_is_displayed(x), soup_objects ))
                if soup_objects and len(soup_objects) - 1 >= position:
                    self.wait_until_to( expected_condition = "element_to_be_clickable", element = soup_objects[position], locator = By.XPATH, timeout=True)
                    # Deferred so each use re-resolves the live Selenium element.
                    soup_element = lambda : self.soup_to_selenium(soup_objects[position])
                    parent_element = self.soup_to_selenium(soup_objects[0].parent)
                    # NOTE(review): id_parent_element stays unbound if this loop
                    # times out; it is read near the end of the method.
                    id_parent_element = parent_element.get_attribute('id')
            if self.config.smart_test:
                logger().debug(f"Clicking on Button {button} Time Spent: {time.time() - starttime} seconds")
            if not soup_element:
                # Button not directly visible: it may live under "Other Actions".
                other_action = next(iter(self.web_scrap(term=self.language.other_actions, scrap_type=enum.ScrapType.MIXED, optional_term="button", check_error=check_error)), None)
                if (other_action is None or not hasattr(other_action, "name") and not hasattr(other_action, "parent")):
                    self.log_error(f"Couldn't find element: {button}")
                other_action_element = lambda : self.soup_to_selenium(other_action)
                self.scroll_to_element(other_action_element())
                self.click(other_action_element())
                success = self.click_sub_menu(button if button.lower() != self.language.other_actions.lower() else sub_item)
                if success:
                    return
                else:
                    self.log_error(f"Element {button} not found!")
            if soup_element:
                self.scroll_to_element(soup_element())
                self.set_element_focus(soup_element())
                self.wait_until_to( expected_condition = "element_to_be_clickable", element = soup_objects[position], locator = By.XPATH )
                if button.lower() == self.language.other_actions.lower() and self.config.initial_program.lower() == 'sigaadv':
                    self.click(soup_element())
                else:
                    self.send_action(self.click, soup_element)
                    self.wait_element_is_not_focused(soup_element)
            if sub_item and ',' not in sub_item:
                # NOTE(review): lexicographic version compare — verify format.
                if self.driver.execute_script("return app.VERSION").split('-')[0] >= "4.6.4":
                    # Newer webapp renders popup menus outside the main iframe.
                    self.tmenu_out_iframe = True
                soup_objects_filtered = None
                # Wait for the popup item whose text matches the sub item exactly.
                while(time.time() < endtime and not soup_objects_filtered):
                    soup_objects = self.web_scrap(term=sub_item, scrap_type=enum.ScrapType.MIXED, optional_term=".tmenupopupitem", main_container="body", check_error=check_error)
                    soup_objects_filtered = self.filter_is_displayed(soup_objects)
                    contents = list(map(lambda x: x.contents, soup_objects_filtered))
                    soup_objects_filtered = next(iter(list(filter(lambda x: x[0].text.strip().lower() == sub_item.strip().lower(), contents))), None)
                if soup_objects_filtered:
                    soup_element = lambda : self.soup_to_selenium(soup_objects_filtered[0])
                    self.wait_until_to( expected_condition = "element_to_be_clickable", element = soup_objects_filtered[0], locator = By.XPATH )
                    self.click(soup_element())
                    self.tmenu_out_iframe = False
                else:
                    # Popup item not found: re-click the button and walk the menu tree.
                    result = False
                    self.tmenu_out_iframe = False
                    soup_objects = self.web_scrap(term=button, scrap_type=enum.ScrapType.MIXED, optional_term="button, .thbutton", main_container = self.containers_selectors["SetButton"], check_error=check_error)
                    soup_objects = list(filter(lambda x: self.element_is_displayed(x), soup_objects ))
                    if soup_objects and len(soup_objects) - 1 >= position:
                        soup_element = lambda : self.soup_to_selenium(soup_objects[position])
                    else:
                        self.log_error(f"Couldn't find element {button}")
                    self.scroll_to_element(soup_element())#posiciona o scroll baseado na height do elemento a ser clicado.
                    self.set_element_focus(soup_element())
                    self.wait_until_to( expected_condition = "element_to_be_clickable", element = soup_objects[position], locator = By.XPATH )
                    self.send_action(self.click, soup_element)
                    result = self.click_sub_menu(sub_item)
                    if not result:
                        self.log_error(f"Couldn't find element {sub_item}")
                    else:
                        return
            elif ',' in sub_item:
                # Comma-separated path: only the last item needs to be clicked here.
                list_sub_itens = sub_item.split(',')
                filtered_sub_itens = list(map(lambda x: x.strip(), list_sub_itens))
                self.click_sub_menu(filtered_sub_itens[len(filtered_sub_itens)-1])
            buttons = [self.language.Ok, self.language.confirm, self.language.finish,self.language.save, self.language.exit, self.language.next, "x"]
            buttons_filtered = list(map(lambda x: x.lower(), buttons))
            if button.lower() in buttons_filtered:
                # Confirmation-style buttons close the container: drop cached state.
                if self.used_ids:
                    self.used_ids = self.pop_dict_itens(self.used_ids, id_container)
                elif self.grid_counters:
                    self.grid_counters = {}
            if button == self.language.save and id_parent_element in self.get_enchoice_button_ids(layers):
                # Wait for the browse seek bar to appear, then disappear.
                self.wait_element_timeout(term="", scrap_type=enum.ScrapType.MIXED, optional_term="[style*='fwskin_seekbar_ico']", timeout=10, step=0.1, check_error=False, main_container="body")
                self.wait_element_timeout(term="", scrap_type=enum.ScrapType.MIXED, presence=False, optional_term="[style*='fwskin_seekbar_ico']", timeout=10, step=0.1, check_error=False, main_container="body")
            elif button == self.language.confirm and id_parent_element in self.get_enchoice_button_ids(layers):
                # Wait for a new modal layer on top of the ones counted earlier.
                self.wait_element_timeout(term=".tmodaldialog", scrap_type=enum.ScrapType.CSS_SELECTOR, position=layers + 1, main_container="body", timeout=10, step=0.1, check_error=False)
        except ValueError as error:
            logger().exception(str(error))
            logger().exception(f"Button {button} could not be located.")
        except AssertionError:
            # Assertion failures belong to the caller's expectations — re-raise.
            raise
        except Exception as error:
            logger().exception(str(error))
            self.log_error(str(error))
        if self.config.smart_test:
            logger().debug(f"***System Info*** After Clicking on button:")
            system_info()
def set_button_x(self, position=1, check_error=True):
position -= 1
term_button = ".ui-button.ui-dialog-titlebar-close[title='Close'], img[src*='fwskin_delete_ico.png'], img[src*='fwskin_modal_close.png']"
wait_button = self.wait_element(term=term_button, scrap_type=enum.ScrapType.CSS_SELECTOR, position=position, check_error=check_error)
soup = self.get_current_DOM() if not wait_button else self.get_current_container()
close_list = soup.select(term_button)
if not close_list:
self.log_error(f"Element not found")
if len(close_list) < position+1:
self.log_error(f"Element x position: {position} not found")
if position == 0:
element_soup = close_list.pop()
else:
element_soup = close_list.pop(position)
element_selenium = self.soup_to_selenium(element_soup)
self.scroll_to_element(element_selenium)
self.wait_until_to( expected_condition = "element_to_be_clickable", element = element_soup, locator = By.XPATH )
self.click(element_selenium)
    def click_sub_menu(self, sub_item):
        """
        [Internal]
        Clicks on the sub menu of buttons. Returns True if succeeded.
        Internal method of SetButton.
        :param sub_item: The menu item that should be clicked.
        :type sub_item: str
        :return: Boolean if click was successful.
        :rtype: bool
        Usage:
        >>> # Calling the method:
        >>> self.click_sub_menu("Process")
        """
        # NOTE(review): lexicographic compare of version strings — "4.10.0"
        # would sort below "4.6.4"; confirm the version format before relying on it.
        if self.driver.execute_script("return app.VERSION").split('-')[0] >= "4.6.4":
            self.driver.switch_to.default_content()
        content = self.driver.page_source
        soup = BeautifulSoup(content,"html.parser")
        # The active popup with the highest z-index is the menu currently open.
        menu_id = self.zindex_sort(soup.select(".tmenupopup.active"), True)[0].attrs["id"]
        menu = self.driver.find_element_by_id(menu_id)
        menu_itens = menu.find_elements(By.CSS_SELECTOR, ".tmenupopupitem")
        result = self.find_sub_menu_text(sub_item, menu_itens)
        item = ""
        if result[1]:
            # Container items exist: search the nested menu levels first.
            item = self.find_sub_menu_child(sub_item, result[1])
        elif result[0]:
            item = result[0]
        else:
            return False
        if item:
            self.scroll_to_element(item)
            time.sleep(0.5)
            self.click(item)
            return True
        else:
            return False
    def find_sub_menu_child(self, sub_item, containers):
        """
        [Internal]
        Finds the menu item inside child menu layers.
        :param sub_item: The menu item that should be clicked.
        :type sub_item: str
        :param containers: The menu itens of the current layer that have children.
        :type containers: List of Beautiful Soup objects
        :return: The item that was found. If None was found, it returns an empty string.
        :rtype: Selenium object
        Usage:
        >>> # Calling the method:
        >>> item = self.find_sub_menu_child("Process", container_list)
        """
        item = ""
        for child in containers:
            child_id = child.get_attribute("id")
            # Force the sub menu open by injecting the "expanded" CSS classes.
            old_class = self.driver.execute_script("return document.querySelector('#{}').className".format(child_id))
            new_class = old_class + " highlighted expanded, highlight expanded"
            self.driver.execute_script("document.querySelector('#{}').className = '{}'".format(child_id, new_class))
            child_itens = child.find_elements(By.CSS_SELECTOR, ".tmenupopupitem")
            result = self.find_sub_menu_text(sub_item, child_itens)
            if not result[0] and result[1]:
                # Not on this level, but deeper containers exist: recurse.
                item = self.find_sub_menu_child(sub_item, result[1])
            else:
                item = result[0]
            if item:
                # Keep this branch expanded so the caller can click the item.
                break
            # Restore the original classes before trying the next container.
            self.driver.execute_script("document.querySelector('#{}').className = '{}'".format(child_id, old_class))
        return item
def find_sub_menu_text(self, menu_item, current_itens):
"""
[Internal]
Returns a tuple containing a possible match of a menu item among the current itens.
If none was found it will be an empty string.
The second position will contain the itens that have children itens.
If none has children itens, it will be an empty list.
:param menu_item: The menu item that should be clicked.
:type menu_item: str
:param current_item: The menu itens in the current layer.
:type current_item: List of Selenium objects.
:return: Tuple containing a possible match of a menu item and the itens that have children itens.
:rtype: Tuple (selenium object, list of selenium objects)
Usage:
>>> # Calling the method:
>>> result = self.find_sub_menu_text(item, child_itens)
"""
submenu = ""
containers = []
for child in current_itens:
if "container" in child.get_attribute("class"):
containers.append(child)
elif child.text.startswith(menu_item):
submenu = child
return (submenu, containers)
def SetBranch(self, branch):
"""
Chooses the branch on the branch selection screen.
:param branch: The branch that would be chosen.
:type branch: str
Usage:
>>> # Calling the method:
>>> oHelper.SetBranch("D MG 01 ")
"""
logger().info(f"Setting branch: {branch}.")
self.wait_element(term="[style*='fwskin_seekbar_ico']", scrap_type=enum.ScrapType.CSS_SELECTOR, position=2, main_container="body")
Ret = self.fill_search_browse(branch, self.get_search_browse_elements())
if Ret:
self.SetButton('OK')
def WaitHide(self, string, timeout=None, throw_error = True):
"""
Search string that was sent and wait hide the element.
:param string: String that will hold the wait.
:type string: str
Usage:
>>> # Calling the method:
>>> oHelper.WaitHide("Processing")
"""
logger().info("Waiting processing...")
if not timeout:
timeout = 1200
endtime = time.time() + timeout
while(time.time() < endtime):
element = None
element = self.web_scrap(term=string, scrap_type=enum.ScrapType.MIXED, optional_term=".tsay, .tgroupbox", main_container = self.containers_selectors["AllContainers"], check_help=False)
if not element:
return
if endtime - time.time() < 1180:
time.sleep(0.5)
if not throw_error:
return False
else:
self.log_error(f"Element {string} not found")
def WaitShow(self, string, timeout=None, throw_error = True):
"""
Search string that was sent and wait show the elements.
:param string: String that will hold the wait.
:type string: str
Usage:
>>> # Calling the method:
>>> oHelper.WaitShow("Processing")
"""
logger().info(f"Waiting show text '{string}'...")
if not timeout:
timeout = 1200
endtime = time.time() + timeout
while(time.time() < endtime):
element = None
element = self.web_scrap(term=string, scrap_type=enum.ScrapType.MIXED, optional_term=".tsay, .tgroupbox", main_container = self.containers_selectors["AllContainers"], check_help=False)
if element:
return element
if endtime - time.time() < 1180:
time.sleep(0.5)
if not throw_error:
return False
else:
self.log_error(f"Element {string} not found")
def WaitProcessing(self, itens, timeout=None):
"""
Uses WaitShow and WaitHide to Wait a Processing screen
:param itens: List of itens that will hold the wait.
:type itens: str
Usage:
>>> # Calling the method:
>>> oHelper.WaitProcessing("Processing")
"""
if not timeout:
timeout = 1200
self.WaitShow(itens, timeout, throw_error = False)
self.WaitHide(itens, timeout, throw_error = False)
def SetTabEDAPP(self, table):
"""
Chooses the table on the generic query (EDAPP).
:param table: The table that would be chosen.
:type table: str
Usage:
>>> # Calling the method:
>>> oHelper.SetTabEDAPP("AAB")
"""
try:
field = self.get_field("cPesq", name_attr=True)
element = lambda: self.driver.find_element_by_xpath(xpath_soup(field))
self.click(element())
self.send_keys(element(), table)
time.sleep(0.5)
self.send_keys(element(), Keys.ENTER)
self.send_keys(element(), Keys.ENTER)
self.SetButton("Ok")
except:
logger().exception("Search field could not be located.")
    def ClickFolder(self, folder_name, position):
        """
        Clicks on folder elements on the screen.
        :param folder_name: Which folder item should be clicked.
        :type folder_name: str
        :param position: In case of two or more folders with the same name in the screen, you could choose by position in order (1-based).
        :type position: int
        Usage:
        >>> # Calling the method:
        >>> oHelper.ClickFolder("Folder1")
        >>> # Second folder named as Folder1 in the same screen
        >>> oHelper.ClickFolder("Folder1", position=2)
        """
        self.wait_blocker()
        element = ""
        position -= 1  # convert to 0-based index
        self.wait_element(term=folder_name, scrap_type=enum.ScrapType.MIXED, optional_term=".tfolder.twidget, .button-bar a")
        endtime = time.time() + self.config.time_out
        half_config_timeout = time.time() + self.config.time_out / 2
        while(time.time() < endtime and not element):
            panels = self.web_scrap(term=".button-bar a", scrap_type=enum.ScrapType.CSS_SELECTOR,main_container = self.containers_selectors["GetCurrentContainer"])
            panels_filtered = self.filter_is_displayed(list(filter(lambda x: x.text == folder_name, panels)))
            if time.time() >= half_config_timeout:
                # Half the timeout gone: relax the visibility filter as a fallback.
                panels_filtered = list(filter(lambda x: x.text == folder_name, panels))
            if panels_filtered:
                if position > 0:
                    panel = panels_filtered[position] if position < len(panels_filtered) else None
                else:
                    panel = next(iter(panels_filtered), None)
                element = self.soup_to_selenium(panel) if panel else None
                if element:
                    self.scroll_to_element(element)#posiciona o scroll baseado na height do elemento a ser clicado.
                    self.set_element_focus(element)
                    time.sleep(1)
                    # JS click avoids interception by overlapping elements.
                    self.driver.execute_script("arguments[0].click()", element)
        if not element:
            self.log_error("Couldn't find panel item.")
    def ClickBox(self, fields="", content_list="", select_all=False, grid_number=1, itens=False):
        """
        Clicks on Checkbox elements of a grid.
        :param fields: Comma divided string with values that must be checked, combine with content_list.
        :type fields: str
        :param content_list: Comma divided string with values that must be checked. - **Default:** "" (empty string)
        :type content_list: str
        :param select_all: Boolean if all options should be selected. - **Default:** False
        :type select_all: bool
        :param grid_number: Which grid should be used when there are multiple grids on the same screen. - **Default:** 1
        :type grid_number: int
        :param itens: Bool parameter that click in all itens based in the field and content reference.
        :type itens: bool
        Usage:
        >>> # Calling the method to select a specific checkbox:
        >>> oHelper.ClickBox("Branch", "D MG 01 ")
        >>> #--------------------------------------------------
        >>> # Calling the method to select multiple checkboxes:
        >>> oHelper.ClickBox("Branch", "D MG 01 , D RJ 02")
        >>> #--------------------------------------------------
        >>> # Calling the method to select all checkboxes:
        >>> oHelper.ClickBox("Branch", select_all=True)
        >>> #--------------------------------------------------
        >>> # Calling the method to performe click based in 2 fields and contens:
        >>> test_helper.ClickBox('Numero da SC, Item da SC', 'COM068, 0001')
        >>> #--------------------------------------------------
        >>> # Calling the method to click in all itens with this reference:
        >>> test_helper.ClickBox('Numero da SC', 'COM068', itens=True)
        """
        self.wait_blocker()
        logger().info(f"ClickBox - Clicking on {content_list}")
        grid_number -= 1  # user-facing grid number is 1-based
        if not select_all:
            # Normalize the comma-separated inputs into stripped lists.
            fields = list(map(lambda x: x.strip(), fields.split(',')))
            content_list = list(map(lambda x: x.strip(), content_list.split(',')))
        # Dispatch to the dataframe-based click depending on how many
        # fields/contents were supplied.
        if len(fields) == 2 and len(content_list) == 2 and not select_all:
            self.click_box_dataframe(*fields, *content_list, grid_number=grid_number)
            # self.click_box_dataframe(first_column=fields[0], second_column=fields[1], first_content=content_list[0], second_content=content_list[1], grid_number=grid_number)
        elif len(fields) == 1 and len(content_list) == 2 and not select_all:
            self.click_box_dataframe(first_column=fields, first_content=content_list[0], second_content=content_list[1], grid_number=grid_number)
        elif len(fields) == 1 and not select_all:
            self.click_box_dataframe(first_column=fields[0], first_content=content_list[0], grid_number=grid_number, itens=itens)
        if select_all:
            self.wait_element_timeout(term=self.language.invert_selection, scrap_type=enum.ScrapType.MIXED, optional_term="label span")
            grid = self.get_grid(grid_number)
            # Some screens have an "invert selection" checkbox; others toggle via the header.
            is_select_all_button = self.element_exists(term=self.language.invert_selection, scrap_type=enum.ScrapType.MIXED, optional_term="label span")
        if select_all and is_select_all_button:
            self.wait_element(term=self.language.invert_selection, scrap_type=enum.ScrapType.MIXED, optional_term="label span")
            element = next(iter(self.web_scrap(term="label.tcheckbox input", scrap_type=enum.ScrapType.CSS_SELECTOR)), None)
            if element:
                box = lambda: self.driver.find_element_by_xpath(xpath_soup(element))
                self.click(box())
        elif select_all and not is_select_all_button:
            # No dedicated checkbox: clicking the first header cell selects all rows.
            th = next(iter(grid.select('th')))
            th_element = self.soup_to_selenium(th)
            th_element.click()
    def performing_click(self, element_bs4, class_grid):
        """
        [Internal]
        Clicks on a grid cell, choosing the click strategy by the grid's CSS class.
        :param element_bs4: The cell to be clicked (BeautifulSoup object).
        :param class_grid: CSS class of the enclosing grid (e.g. "tgrid", "tmsselbr").
        """
        self.wait_until_to(expected_condition="element_to_be_clickable", element=element_bs4,
                           locator=By.XPATH)
        element = lambda: self.soup_to_selenium(element_bs4)
        self.set_element_focus(element())
        self.scroll_to_element(element())
        try:
            element().click()
        # NOTE(review): bare except — intentionally best-effort, falls back to
        # an ActionChains click when the native click is intercepted.
        except:
            ActionChains(self.driver).move_to_element(element()).click(element()).perform()
        time.sleep(1)
        if class_grid == 'tmsselbr':
            # tmsselbr grids toggle via a synthetic double-click MouseEvent.
            ActionChains(self.driver).move_to_element(element()).click(element()).perform()
            event = "var evt = document.createEvent('MouseEvents');\
                evt.initMouseEvent('dblclick',true, false, window, 0, 0, 0, 0, 0, false, false, false, false, 0,null);\
                arguments[0].dispatchEvent(evt);"
            self.driver.execute_script(event, element())
        elif class_grid != "tgrid":
            # Other grid flavors confirm the selection with ENTER.
            ActionChains(self.driver).move_to_element(element()).send_keys_to_element(
                element(), Keys.ENTER).perform()
        else:
            self.double_click(element(), click_type=enum.ClickType.ACTIONCHAINS)
    def click_box_dataframe(self, first_column=None, second_column=None, first_content=None, second_content=None, grid_number=0, itens=False):
        """
        [Internal]
        Locates grid rows via a pandas DataFrame snapshot and clicks their checkboxes.
        Pages down (up to 3 unchanged pages) while the content is not on screen.
        :param first_column: Column header to match (or list of headers in the one-column/two-content form).
        :param second_column: Second column header, when matching two columns.
        :param first_content: Value expected in the first column.
        :param second_content: Value expected in the second column (or an alternative value).
        :param grid_number: 0-based grid index. - **Default:** 0
        :param itens: When True, clicks every row matching first_column/first_content.
        """
        index_number = []
        count = 0
        endtime = time.time() + self.config.time_out
        # Keep scanning pages until a row index is found, the timeout expires,
        # or the grid stops changing after repeated PAGE_DOWNs.
        while time.time() < endtime and len(index_number) < 1 and count <= 3:
            try:
                df, grid = self.grid_dataframe(grid_number=grid_number)
                last_df = df
                class_grid = next(iter(grid.attrs['class']))
                if not df.empty:
                    if first_column and second_column:
                        # Row must match both columns simultaneously.
                        index_number = df.loc[(df[first_column] == first_content) & (df[second_column] == second_content)].index.array
                    elif first_column and (first_content and second_content):
                        # One column, either of two contents.
                        index_number = df.loc[(df[first_column[0]] == first_content) | (df[first_column[0]] == second_content)].index.array
                    elif itens:
                        # All rows matching the single column/content pair.
                        index_number = df.loc[(df[first_column] == first_content)].index.array
                    elif first_column and first_content:
                        # Fuzzy column match, then space-insensitive content match.
                        first_column = next(iter(list(filter(lambda x: first_column.lower().strip() in x.lower().strip(), df.columns))))
                        first_column_values = df[first_column].values
                        first_column_formatted_values = list(map(lambda x: x.replace(' ', ''), first_column_values))
                        content = next(iter(list(filter(lambda x: x == first_content.replace(' ', ''), first_column_formatted_values))), None)
                        if content:
                            index_number.append(first_column_formatted_values.index(content))
                        if len(index_number) > 0:
                            # Keep only the first match for the single-row form.
                            index_number = [index_number[0]]
                        else:
                            # NOTE(review): falls back to row 0 when the content is
                            # not on the current page — confirm this is intended.
                            index_number.append(0)
                if len(index_number) < 1 and count <= 3:
                    # Nothing found on this page: focus the grid and page down.
                    first_element_focus = next(iter(grid.select('tbody > tr > td')), None)
                    if first_element_focus:
                        self.wait_until_to(expected_condition="element_to_be_clickable", element=first_element_focus, locator=By.XPATH)
                        self.soup_to_selenium(first_element_focus).click()
                    ActionChains(self.driver).key_down(Keys.PAGE_DOWN).perform()
                    self.wait_blocker()
                    df, grid = self.grid_dataframe(grid_number=grid_number)
                    if df.equals(last_df):
                        # Page did not change: count towards the give-up limit.
                        count +=1
            except Exception as e:
                self.log_error(f"Content doesn't found on the screen! {str(e)}")
        if len(index_number) < 1:
            self.log_error(f"Content doesn't found on the screen! {first_content}")
        tr = grid.select('tbody > tr')
        # index_number is a list (iterable) or a plain index, depending on the branch above.
        if hasattr(index_number, '__iter__'):
            for index in index_number:
                element_bs4 = next(iter(tr[index].select('td')))
                self.wait_blocker()
                self.performing_additional_click(element_bs4, tr, index, class_grid, grid_number)
        else:
            index = index_number
            element_bs4 = next(iter(tr[index].select('td')))
            self.wait_blocker()
            self.performing_additional_click(element_bs4, tr, index, class_grid, grid_number)
    def performing_additional_click(self, element_bs4, tr, index, class_grid, grid_number):
        """
        [Internal]
        Clicks a checkbox cell and retries until its inline style changes
        (the style change is what signals the checkbox actually toggled).
        :param element_bs4: The cell to click (BeautifulSoup object).
        :param tr: List of grid rows (re-read after every click).
        :param index: Row index to verify.
        :param class_grid: CSS class of the grid, forwarded to performing_click.
        :param grid_number: 0-based grid index used to re-read the grid.
        """
        if element_bs4:
            success = False
            td = next(iter(tr[index].select('td')))
            if hasattr(td, 'style'):
                last_box_state = td.attrs['style']
                endtime = time.time() + self.config.time_out
                while time.time() < endtime and not success:
                    self.performing_click(element_bs4, class_grid)
                    self.wait_blocker()
                    time.sleep(2)
                    # If the click opened a modal dialog, stop verifying.
                    tmodal = self.element_exists(term=".tmodaldialog.twidget.active", scrap_type=enum.ScrapType.CSS_SELECTOR, main_container="body", check_error=False)
                    if tmodal:
                        return
                    # Re-read the grid: the DOM is rebuilt after a click.
                    grid = self.get_grid(grid_number=grid_number)
                    tr = grid.select('tbody > tr')
                    td = next(iter(tr[index].select('td')))
                    new_box_state = td.attrs['style']
                    success = last_box_state != new_box_state
            else:
                logger().debug(f"Couldn't check box element td: {str(td)}")
def grid_dataframe(self, grid_number=0):
self.wait_element(term=".tgetdados,.tgrid,.tcbrowse,.tmsselbr", scrap_type=enum.ScrapType.CSS_SELECTOR)
grid = self.get_grid(grid_number=grid_number)
df = (next(iter(pd.read_html(str(grid)))))
converters = {c: lambda x: str(x) for c in df.columns}
df, grid = (next(iter(pd.read_html(str(grid), converters=converters)), None), grid)
if not df.empty:
df = df.fillna('Not Value')
return (df, grid)
    def wait_element_is_blocked(self, parent_id):
        """
        [Internal]
        Waits (up to 10s) for the .tpanel identified by *parent_id* and reports
        whether it is blocked for input.
        :param parent_id: id attribute of the panel to inspect.
        :type parent_id: str
        :return: True when the panel carries a "readonly" or "hidden" class, False otherwise.
        :rtype: bool
        """
        logger().debug("Wait for element to be blocked...")
        element = False
        endtime = time.time() + 10
        while(time.time() < endtime and not element):
            tpanels = self.get_current_container().select(".tpanel")
            if tpanels:
                tpanels_filtered = list(filter(lambda x: self.element_is_displayed(x), tpanels))
                element = next(iter(list(filter(lambda x: x.attrs["id"] == parent_id, tpanels_filtered))), None)
        if element:
            return "readonly" in element.get_attribute_list("class") or "hidden" in element.get_attribute_list("class")
        else:
            return False
    def ScrollGrid(self, column, match_value, grid_number=1):
        """
        Scrolls Grid until a matching column is found.
        :param column: The column to be matched.
        :type column: str
        :param match_value: The value to be matched in defined column.
        :type match_value: str
        :param grid_number: Which grid should be used when there are multiple grids on the same screen. - **Default:** 1
        :type grid_number: int
        Usage:
        >>> # Calling the method to scroll to a column match:
        >>> oHelper.ScrollGrid(column="Branch",match_value="D MG 01 ")
        >>> #--------------------------------------------------
        >>> # Calling the method to scroll to a column match of the second grid:
        >>> oHelper.ScrollGrid(column="Branch", match_value="D MG 01 ", grid_number=2)
        """
        grid_number -= 1  # user-facing grid number is 1-based
        td_element = None
        actions = ActionChains(self.driver)
        self.wait_element_timeout(term = column, scrap_type = enum.ScrapType.TEXT, timeout = self.config.time_out , optional_term = 'label')
        endtime = time.time() + self.config.time_out
        grid = self.get_grid(grid_number)
        get_current = lambda: self.selected_row(grid_number)
        # Map the column header text to its positional index in the grid.
        column_enumeration = list(enumerate(grid.select("thead label")))
        chosen_column = next(iter(list(filter(lambda x: column in x[1].text, column_enumeration))), None)
        column_index = chosen_column[0] if chosen_column else self.log_error("Couldn't find chosen column.")
        current = get_current()
        td = lambda: next(iter(current.select(f"td[id='{column_index}']")), None)
        frozen_table = next(iter(grid.select('table.frozen-table')),None)
        # Select the column cell so keyboard scrolling acts on the grid.
        if (not self.click_grid_td(td()) and not frozen_table):
            self.log_error(" Couldn't click on column, td class or tr is not selected ")
        while( time.time() < endtime and not td_element ):
            grid = self.get_grid(grid_number)
            current = get_current()
            td_list = grid.select(f"td[id='{column_index}']")
            td_element_not_filtered = next(iter(td_list), None)
            td_list_filtered = list(filter(lambda x: x.text.strip() == match_value and self.element_is_displayed(x) ,td_list))
            td_element = next(iter(td_list_filtered), None)
            # No match on this page: page down if the DOM is still changing.
            if not td_element and next(self.scroll_grid_check_elements_change(xpath_soup(td_element_not_filtered))):
                actions.key_down(Keys.PAGE_DOWN).perform()
                self.wait_element_is_not_displayed(td().parent)
        if not td_element:
            self.log_error("Scroll Grid couldn't find the element")
        if frozen_table:
            # Frozen-column grids need the sibling cell clicked first.
            self.soup_to_selenium(td_element.next_sibling).click()
        self.click(self.soup_to_selenium(td_element))
def click_grid_td(self, td_soup):
"""
Click on a td element and checks if is selected
:param td: The column to be matched.
:type td: bs4 element
>>> # Calling the method to click on td and check if is selected:
>>> oHelper.click_grid_td(td)
"""
success = None
endtime = time.time() + 10
while ( not success and time.time() < endtime ):
try:
td_selenium = lambda: self.soup_to_selenium(td_soup)
tr_selenium_class = lambda: self.soup_to_selenium(td_soup.parent).get_attribute('class')
td_is_selected = lambda: True if 'selected' in td_selenium().get_attribute('class') or 'selected' in tr_selenium_class() else False
self.set_element_focus(td_selenium())
td_selenium().click()
if not td_is_selected():
self.wait_until_to( expected_condition = "visibility_of", element = td_selenium, timeout=True)
self.wait_until_to(expected_condition="element_to_be_clickable", element = td_selenium, locator = By.XPATH, timeout=True)
success = td_is_selected()
else:
success = td_is_selected()
except:
pass
return success
def scroll_grid_check_elements_change(self, xpath):
"""
[Internal]
Used to check PG_DOWN correct execute.
"""
elements_set = set()
elements_set.add(xpath)
yield True if xpath else False
while(True):
old_lenght = len(elements_set)
elements_set.add(xpath)
yield True if len(elements_set) > old_lenght and xpath else False
def selected_row(self, grid_number = 0):
"""
[Internal]
Returns the selected row in the grid.
:param grid_number: Which grid should be used when there are multiple grids on the same screen. - **Default:** 1
:type grid_number: int
Usage:
>>> # Calling the method to return the selected row:
>>> oHelper.selected_row(grid_number = 0)
"""
row_selected = None
grid = self.get_grid(grid_number)
if grid:
row = next(iter(grid.select('tbody tr.selected-row')), None)
column = next(iter(grid.select('td.selected-cell')), None)
row_selected = column.parent if column else row
return row_selected
def wait_selected_row(self, grid_number = 0, column_index = 0, text = "default-text", time_out = 5):
"""
[Internal]
This method expects the selected line to be the line with the text value entered.
:param grid_number: Which grid should be used when there are multiple grids on the same screen. - **Default:** 0
:type grid_number: int
:param column_index: The column index
:type column_index: int
:param text: The value of column to be matched
:type text: string
Usage:
>>> # Calling the method to wait the selected row:
>>> oHelper.wait_selected_row(grid_number = 0, column_index = 0, text= "value" )
"""
success = False
endtime = time.time() + time_out
while( time.time() < endtime and not success):
current = self.selected_row(grid_number)
td = next(iter(current.select(f"td[id='{column_index}']")), None)
success = td.text in text
def get_grid(self, grid_number=0, grid_element = None):
"""
[Internal]
Gets a grid BeautifulSoup object from the screen.
:param grid_number: The number of the grid on the screen.
:type: int
:param grid_element: Grid class name in HTML ex: ".tgrid".
:type: str
:return: Grid BeautifulSoup object
:rtype: BeautifulSoup object
Usage:
>>> # Calling the method:
>>> my_grid = self.get_grid()
"""
endtime = time.time() + self.config.time_out
grids = None
while(time.time() < endtime and not grids):
if not grid_element:
grids = self.web_scrap(term=".tgetdados,.tgrid,.tcbrowse,.tmsselbr", scrap_type=enum.ScrapType.CSS_SELECTOR)
else:
grids = self.web_scrap(term= grid_element, scrap_type=enum.ScrapType.CSS_SELECTOR)
if grids:
grids = list(filter(lambda x: self.element_is_displayed(x), grids))
if grids:
if len(grids) - 1 >= grid_number:
return grids[grid_number]
if not grids:
self.log_error("Couldn't find grid.")
def check_mask(self, element):
"""
[Internal]
Checks whether the element has a mask or not.
:param element: The element that must be checked.
:type element: Selenium object
:return: Boolean if element has a mask or not.
:rtype: bool
Usage:
>>> # Calling the method:
>>> self.check_mask(my_element)
"""
reg = (r"^[1-9.\/-:\+]+|(@. )[1-9.\/-:\+]+")
mask = element.get_attribute("picture")
if mask is None:
child = element.find_elements(By.CSS_SELECTOR, "input")
if child:
mask = child[0].get_attribute("picture")
return (mask != "" and mask is not None and (re.findall(reg, mask)))
def remove_mask(self, string):
"""
[Internal]
Removes special characters from received string.
:param string: The string that would have its characters removed.
:type string: str
:return: The string with its special characters removed.
:rtype: str
Usage:
>>> # Calling the method:
>>> value_without_mask = self.remove_mask("111-111.111")
>>> # value_without_mask == "111111111"
"""
if type(string) is str:
caracter = (r'[.\/+-]')
if string[0:4] != 'http':
match = re.findall(caracter, string)
if match:
string = re.sub(caracter, '', string)
return string
    def SetKey(self, key, grid=False, grid_number=1, additional_key="", wait_show = "", step = 3 ):
        """
        Press the desired key on the keyboard on the focused element.

        .. warning::
            If this methods is the first to be called, we strongly recommend using some wait methods like WaitShow().

        .. warning::
            Before using this method, set focus on any element.

        Supported keys: F1 to F12, CTRL+Key, ALT+Key, Up, Down, Left, Right, ESC, Enter and Delete ...

        :param key: Key that would be pressed
        :type key: str
        :param grid: Boolean if action must be applied on a grid. (Usually with DOWN key)
        :type grid: bool
        :param grid_number: Grid number of which grid should be used when there are multiple grids on the same screen. - **Default:** 1
        :type grid_number: int
        :param additional_key: Key additional that would be pressed.
        :type additional_key: str
        :param wait_show: String that will hold the wait after press a key.
        :type wait_show: str
        :param step: The amount of time each step should wait. - **Default:** 3
        :type step: float

        Usage:

        >>> # Calling the method:
        >>> oHelper.SetKey("ENTER")
        >>> #--------------------------------------
        >>> # Calling the method on a grid:
        >>> oHelper.SetKey("DOWN", grid=True)
        >>> #--------------------------------------
        >>> # Calling the method on the second grid on the screen:
        >>> oHelper.SetKey("DOWN", grid=True, grid_number=2)
        >>> #--------------------------------------
        >>> # Call the method with WaitShow when you expect a new window or text to appear on the screen:
        >>> oHelper.SetKey( key = "F12", wait_show="Parametros", step = 3 )
        >>> #--------------------------------------
        >>> # Calling the method with special keys (using parameter additional_key):
        >>> oHelper.SetKey(key="CTRL", additional_key="A")
        """
        self.wait_blocker()
        logger().info(f"Key pressed: {key + '+' + additional_key if additional_key != '' else key }")
        #JavaScript function to return focused element if DIV/Input OR empty if other element is focused
        script = """
        var getActiveElement = () => {
            if(document.activeElement.tagName.toLowerCase() == "input" || document.activeElement.tagName.toLowerCase() == "div"){
                if(document.activeElement.attributes["id"]){
                    return document.activeElement.attributes["id"].value
                }else if(document.activeElement.parentElement.attributes["id"]){
                    return document.activeElement.parentElement.attributes["id"].value
                }
            }
            return ""
        }
        return getActiveElement()
        """
        key = key.upper()
        endtime = time.time() + self.config.time_out
        # Modifier keys that require an additional_key to form a combination.
        hotkey = ["CTRL","ALT"]
        # Public API is 1-based; internal grid queue is 0-based.
        grid_number-=1
        tries = 0
        success = False
        try:
            if key in hotkey and not additional_key:
                self.log_error("Additional key is empty")
            # DOWN on a grid is queued for LoadGrid instead of being sent now.
            if key == "DOWN" and grid:
                grid_number = 0 if grid_number is None else grid_number
                self.grid_input.append(["", "", grid_number, True])
                return
            while(time.time() < endtime and not success):
                if key not in hotkey and self.supported_keys(key):
                    if grid:
                        if key != "DOWN":
                            # Flush any queued grid input before pressing the key.
                            self.LoadGrid()
                            self.send_action(action=ActionChains(self.driver).key_down(self.supported_keys(key)).perform)
                    elif tries > 0:
                        # Retry path: send the key without re-resolving the focused element.
                        ActionChains(self.driver).key_down(self.supported_keys(key)).perform()
                        tries = 0
                    else:
                        time.sleep(2)
                        # Resolve the currently focused input/div (or fall back to <html>).
                        Id = self.driver.execute_script(script)
                        element = lambda: self.driver.find_element_by_id(Id) if Id else self.driver.find_element(By.TAG_NAME, "html")
                        self.set_element_focus(element())
                        self.send_action(ActionChains(self.driver).move_to_element(element()).key_down(self.supported_keys(key)).perform)
                        tries +=1
                elif additional_key:
                    # Modifier combination: hold modifier, send the letter, release.
                    self.send_action(action=ActionChains(self.driver).key_down(self.supported_keys(key)).send_keys(additional_key.lower()).key_up(self.supported_keys(key)).perform)
                if wait_show:
                    # Consider the key press successful only once the expected text shows up.
                    success = self.WaitShow(wait_show, timeout=step, throw_error = False)
                else:
                    success = True
        except WebDriverException as e:
            self.log_error(f"SetKey - Screen is not load: {e}")
        except Exception as error:
            self.log_error(str(error))
def supported_keys(self, key = ""):
"""
[Internal]
"""
try:
supported_keys = {
"F1" : Keys.F1,
"F2" : Keys.F2,
"F3" : Keys.F3,
"F4" : Keys.F4,
"F5" : Keys.F5,
"F6" : Keys.F6,
"F7" : Keys.F7,
"F8" : Keys.F8,
"F9" : Keys.F9,
"F10" : Keys.F10,
"F11" : Keys.F11,
"F12" : Keys.F12,
"UP" : Keys.UP,
"DOWN" : Keys.DOWN,
"LEFT" : Keys.LEFT,
"RIGHT" : Keys.RIGHT,
"DELETE" : Keys.DELETE,
"ENTER" : Keys.ENTER,
"ESC" : Keys.ESCAPE,
"CTRL" : Keys.CONTROL,
"ALT" : Keys.ALT,
"NUMPAD0" : Keys.NUMPAD0,
"NUMPAD1" : Keys.NUMPAD1,
"NUMPAD2" : Keys.NUMPAD2,
"NUMPAD3" : Keys.NUMPAD3,
"NUMPAD4" : Keys.NUMPAD4,
"NUMPAD5" : Keys.NUMPAD5,
"NUMPAD6" : Keys.NUMPAD6,
"NUMPAD7" : Keys.NUMPAD7,
"NUMPAD8" : Keys.NUMPAD8,
"NUMPAD9" : Keys.NUMPAD9,
"MULTIPLY" : Keys.MULTIPLY,
"ADD" : Keys.ADD,
"SEPARATOR" : Keys.SEPARATOR,
"SUBTRACT" : Keys.SUBTRACT,
"DECIMAL" : Keys.DECIMAL,
"DIVIDE" : Keys.DIVIDE,
"META" : Keys.META,
"COMMAND" : Keys.COMMAND,
"NULL" : Keys.NULL,
"CANCEL" : Keys.CANCEL,
"HELP" : Keys.HELP,
"BACKSPACE" : Keys.BACKSPACE,
"TAB" : Keys.TAB,
"CLEAR" : Keys.CLEAR,
"RETURN" : Keys.RETURN,
"SHIFT" : Keys.SHIFT,
"PAUSE" : Keys.PAUSE,
"ESCAPE" : Keys.ESCAPE,
"SPACE" : Keys.SPACE,
"END" : Keys.END,
"HOME" : Keys.HOME,
"INSERT" : Keys.INSERT,
"SEMICOLON" : Keys.SEMICOLON,
"EQUALS" : Keys.EQUALS,
"ARROW_LEFT" : Keys.ARROW_LEFT,
"ARROW_UP" : Keys.ARROW_UP,
"ARROW_RIGHT" : Keys.ARROW_RIGHT,
"ARROW_DOWN" : Keys.ARROW_DOWN,
"BACK_SPACE" : Keys.BACK_SPACE,
"LEFT_SHIFT" : Keys.LEFT_SHIFT,
"LEFT_CONTROL" : Keys.LEFT_CONTROL,
"LEFT_ALT" : Keys.LEFT_ALT,
"PAGE_UP" : Keys.PAGE_UP ,
"PAGE_DOWN" : Keys.PAGE_DOWN
}
return supported_keys[key.upper()]
except KeyError:
self.log_error("Key is not supported")
    def SetFocus(self, field, grid_cell, row_number, position):
        """
        Sets the current focus on the desired field.

        :param field: The field that must receive the focus.
        :type field: str
        :param grid_cell: Indicates if the element that deserve focus is on a grid.
        :type grid_cell: bool
        :param row_number: Number of row in case of multiples rows.
        :type row_number: int
        :param position: Position which element is located. - **Default:** 1
        :type position: int

        Usage:

        >>> # Calling the method:
        >>> oHelper.SetFocus("A1_COD")
        >>> oHelper.SetFocus("A1_COD", grid_cell = True)
        """
        if grid_cell:
            # Grid path: click the cell and press ENTER to enter edit mode.
            self.wait_element(field)
            self.ClickGridCell(field, row_number)
            time.sleep(1)
            ActionChains(self.driver).key_down(Keys.ENTER).perform()
            time.sleep(1)
        else:
            logger().debug(f"Setting focus on element {field}.")
            # Names like "A1_COD" are field identifiers; anything else is treated as a label.
            label = False if re.match(r"\w+(_)", field) else True
            if label:
                container = self.get_current_container()
                labels = container.select('label')
                # Strip punctuation (":;*?") so decorated labels still match.
                label_text_filtered = re.sub(r"[:;*?]", "", field)
                label_filtered = next(iter(list(filter(
                    lambda x: re.sub(r"[:;*?]", "", x.text) == label_text_filtered, labels))), None)
                if label_filtered and not self.element_is_displayed(label_filtered):
                    self.scroll_to_element( self.soup_to_selenium(label_filtered) )
            # First try matching by label text, then fall back to the name attribute suffix.
            element = next(iter(self.web_scrap(field, scrap_type=enum.ScrapType.TEXT, optional_term="label", main_container = self.containers_selectors["Containers"], label=label, position=position)), None)
            if not element:
                element = next(iter(self.web_scrap(f"[name$='{field}']", scrap_type=enum.ScrapType.CSS_SELECTOR, main_container = self.containers_selectors["Containers"], label=label, position=position)), None)
            if element and not self.element_is_displayed(element):
                self.scroll_to_element( self.soup_to_selenium(element) )
            try:
                element = self.soup_to_selenium(element)
                self.set_element_focus(element)
                # NOTE(review): switch_to_active_element() is a deprecated Selenium API
                # (replaced by driver.switch_to.active_element) — presumably kept for the
                # pinned Selenium version; confirm before upgrading the dependency.
                if self.driver.switch_to_active_element() != element:
                    self.click(element, click_type=enum.ClickType.SELENIUM)
            except Exception as e:
                logger().exception(f"Warning: SetFocus: '{field}' - Exception {str(e)}")
    def click_check_radio_button(self, field, value, name_attr = False, position = 1):
        """
        [Internal]
        Identify and click on check or radio button.

        :param field: The field that would receive the input.
        :type field: str
        :param value: The value that must be on the checkbox or grid.
        :type value: bool
        :param name_attr: Boolean if search by name attribute must be forced.
        :type name_attr: bool
        :param position: 1-based position when multiple matches exist. - **Default:** 1
        :type position: int
        :return: The element that changed value.
        :rtype: Selenium object

        Usage:

        >>> # Calling the method:
        >>> element = self.check_checkbox("CheckBox1", True)
        """
        logger().info(f'Clicking in "{self.returns_printable_string(field)}"')
        # Public API is 1-based; list indexing is 0-based.
        position -= 1
        element_list = []
        endtime = time.time() + self.config.time_out
        while(time.time() < endtime and not element_list):
            if re.match(r"\w+(_)", field):
                # Field identifier (e.g. "A1_XXX"): match by name attribute suffix.
                self.wait_element(term=f"[name$='{field}']", scrap_type=enum.ScrapType.CSS_SELECTOR)
                element_list = self.web_scrap(term=f"[name$='{field}']", scrap_type=enum.ScrapType.CSS_SELECTOR, position=position)
            else:
                # Otherwise match by the visible label of the radio/checkbox item.
                self.wait_element(field, scrap_type=enum.ScrapType.MIXED, optional_term="label")
                #element = next(iter(self.web_scrap(term=field, scrap_type=enum.ScrapType.MIXED, optional_term=".tradiobutton .tradiobuttonitem label, .tcheckbox span")), None)
                element_list = self.web_scrap(term=field, scrap_type=enum.ScrapType.MIXED, optional_term=".tradiobutton .tradiobuttonitem label, .tcheckbox input", position=position)
        if not element_list:
            self.log_error("Couldn't find input element")
        if element_list and len(element_list) -1 >= position:
            element = element_list[position]
            # NOTE(review): "'input' not in element" on a BeautifulSoup Tag inspects the
            # tag's contents, not its name — presumably meant to skip elements that are
            # already inputs; confirm the intended semantics before changing.
            if 'input' not in element and element:
                input_element = next(iter(element.find_parent().select("input")), None)
                if not input_element:
                    self.log_error("Couldn't find input element")
                xpath_input = lambda: self.driver.find_element_by_xpath(xpath_soup(input_element))
                self.scroll_to_element(xpath_input())
                self.click(xpath_input())
def result_checkbox(self, field, value):
"""
[Internal]
Checks expected value of a Checkbox element.
:param field: The field whose value would be checked.
:type field: str
:param value: The expected value of the radio button.
:type value: bool
:return: Boolean if expected value was found on the element or not.
:rtype: bool
Usage:
>>> # Calling the method:
>>> assertion_value = self.result_checkbox("CheckBox1", True)
"""
result = False
time.sleep(1)
lista = self.driver.find_elements(By.CSS_SELECTOR, ".tcheckbox.twidget")
for line in lista:
if line.is_displayed() and line.get_attribute('name').split('->')[1] == field:
if "CHECKED" in line.get_attribute('class').upper():
result = True
return result
def clear_grid(self):
"""
[Internal]
Empties the global grid list variables.
Usage:
>>> # Calling the method:
>>> self.clear_grid()
"""
self.grid_input = []
self.grid_check = []
def input_grid_appender(self, column, value, grid_number=0, new=False, row=None, check_value = True, duplicate_fields=[]):
"""
[Internal]
Adds a value to the input queue of a grid.
:param column: The column of the grid that would receive the input.
:type column: str
:param value: The value that would be inputted.
:type value: str
:param grid_number: Which grid should be used when there are multiple grids on the same screen. - **Default:** 0
:type grid_number: int
:param new: Boolean value if this is a new line that should be added. - **Default:** 1
:type new: bool
:param row: Row number that will be filled
:type row: int
Usage:
>>> # Calling the method:
>>> self.input_grid_appender("A1_COD", "000001", 0)
>>> # ---------------------------------------------
>>> # Calling the method for a new line:
>>> self.input_grid_appender("", "", 0, True)
"""
if row is not None:
row -= 1
self.grid_input.append([column, value, grid_number, new, row, check_value, duplicate_fields])
def check_grid_appender(self, line, column, value, grid_number=0):
"""
[Internal]
Adds a value to the check queue of a grid.
:param line: The line of the grid that would be checked.
:type line: int
:param column: The column of the grid that would be checked.
:type column: str
:param value: The value that is expected.
:type value: str
:param grid_number: Which grid should be used when there are multiple grids on the same screen. - **Default:** 0
:type grid_number: int
Usage:
>>> # Calling the method:
>>> self.check_grid_appender(0,"A1_COD", "000001", 0)
"""
self.grid_check.append([line, column, value, grid_number])
    def LoadGrid(self):
        """
        This method is responsible for running all actions of the input and check queues
        of a grid. After running, the queues would be empty.

        Must be called after SetValue and CheckResult calls that has the grid parameter set to True.

        Usage:

        >>> # After SetValue:
        >>> oHelper.SetValue("A1_COD", "000001", grid=True)
        >>> oHelper.LoadGrid()
        >>> #--------------------------------------
        >>> # After CheckResult:
        >>> oHelper.CheckResult("A1_COD", "000001", grid=True, line=1)
        >>> oHelper.LoadGrid()
        """
        # Resolve x3 metadata once for every queued field (input + check).
        x3_dictionaries = self.create_x3_tuple()
        duplicate_fields=[]
        initial_layer = 0
        if self.grid_input:
            self.wait_element(term=".tgetdados, .tgrid, .tcbrowse", scrap_type=enum.ScrapType.CSS_SELECTOR)
            # Count the modal layers currently open so fill_grid can detect new ones.
            soup = self.get_current_DOM()
            container_soup = next(iter(soup.select('body')))
            container_element = self.driver.find_element_by_xpath(xpath_soup(container_soup))
            initial_layer = len(container_element.find_elements(By.CSS_SELECTOR, '.tmodaldialog'))
        for field in self.grid_input:
            # field layout: [column, value, grid_number, new, row, check_value, duplicate_fields]
            if field[3] and field[0] == "":
                # "new" entry with no column means: just add a new grid line.
                self.new_grid_line(field)
            else:
                self.wait_blocker()
                logger().info(f"Filling grid field: {field[0]}")
                if len(field[6]) > 0:
                    duplicate_fields=field[6]
                self.fill_grid(field, x3_dictionaries, initial_layer, duplicate_fields)
        for field in self.grid_check:
            logger().info(f"Checking grid field value: {field[1]}")
            self.check_grid(field, x3_dictionaries)
        # Both queues are always drained at the end of a LoadGrid run.
        self.clear_grid()
def create_x3_tuple(self):
"""
[Internal]
Returns a tuple of dictionaries of field information based on all fields in the grid queues.
:return: A tuple containing the needed x3 information.
:rtype: Tuple of Dictionaries
Usage:
>>> # Calling the method:
>>> x3_dictionaries = self.create_x3_tuple()
"""
x3_dictionaries = ()
inputs = list(map(lambda x: x[0], self.grid_input))
checks = list(map(lambda x: x[1], self.grid_check))
fields = list(filter(lambda x: "_" in x, inputs + checks))
if fields:
x3_dictionaries = self.get_x3_dictionaries(fields)
return x3_dictionaries
    def fill_grid(self, field, x3_dictionaries, initial_layer, duplicate_fields=[]):
        """
        [Internal]
        Fills the grid cell with the passed parameters.

        :param field: An item from the grid's input queue:
            [column, value, grid_number, new, row, check_value, duplicate_fields]
        :type field: List of values
        :param x3_dictionaries: Tuple of dictionaries containing information extracted from x3.
        :type x3_dictionaries: Tuple of dictionaries
        :param initial_layer: The initial layer of elements of Protheus Webapp
        :type initial_layer: int
        :param duplicate_fields: Column names that appear more than once in the header.
        :type duplicate_fields: List of str

        Usage:

        >>> # Calling the method:
        >>> self.fill_grid(["A1_COD", "000001", 0, False], x3_dictionaries, 0)
        """
        field_to_label = {}
        field_to_valtype = {}
        field_to_len = {}
        current_value = ""
        column_name = ""
        rows = ""
        headers = ""
        columns = ""
        grids = None
        try_counter = 1
        grid_reload = True
        # Whether the typed value must be read back and verified at the end.
        check_value = field[5]
        # Normalize the target value: True uses a sentinel (boolean cells are
        # toggled, not typed), False becomes empty, strings are unmasked.
        if(field[1] == True):
            field_one = 'is a boolean value'
        elif(field[1] == False):
            field_one = ''
        elif(isinstance(field[1],str)):
            field_one = self.remove_mask(field[1]).strip()
        if x3_dictionaries:
            field_to_label = x3_dictionaries[2]
            field_to_valtype = x3_dictionaries[0]
            field_to_len = x3_dictionaries[1]
        # Resolve the header text of the target column (x3 label or literal name).
        if "_" in field[0]:
            try:
                column_name = field_to_label[field[0]].lower().strip()
            except:
                self.log_error("Couldn't find column '" + field[0] + "' in sx3 file. Try with the field label.")
        else:
            column_name = field[0].lower().strip()
        self.wait_element_timeout(term = column_name,
            scrap_type = enum.ScrapType.MIXED, timeout = self.config.time_out, optional_term = 'th label', main_container = 'body')
        # Wait until no extra modal layer (above initial_layer) is blocking the grid.
        endtime = time.time() + self.config.time_out
        while(self.element_exists(term=".tmodaldialog", scrap_type=enum.ScrapType.CSS_SELECTOR, position=initial_layer+1, main_container="body") and time.time() < endtime):
            logger().debug("Waiting for container to be active")
            time.sleep(1)
        # Keep retyping until the cell shows the expected value (or timeout).
        endtime = time.time() + self.config.time_out
        while(self.remove_mask(current_value).strip().replace(',','') != field_one.replace(',','') and time.time() < endtime):
            endtime_row = time.time() + self.config.time_out
            # Locate the grid, its headers and its rows; retry while the DOM is unstable.
            while(time.time() < endtime_row and grid_reload):
                if not field[4]:
                    grid_reload = False
                container = self.get_current_container()
                if container:
                    try:
                        container_id = self.soup_to_selenium(container).get_attribute("id") if self.soup_to_selenium(container) else None
                    except Exception as err:
                        container_id = None
                        logger().exception(str(err))
                        pass
                    grids = container.select(".tgetdados, .tgrid, .tcbrowse")
                    grids = self.filter_displayed_elements(grids)
                if grids:
                    headers = self.get_headers_from_grids(grids, duplicate_fields)
                    if field[2] + 1 > len(grids):
                        grid_reload = True
                    else:
                        grid_id = grids[field[2]].attrs["id"]
                        if grid_id not in self.grid_counters:
                            self.grid_counters[grid_id] = 0
                        down_loop = 0
                        rows = grids[field[2]].select("tbody tr")
                else:
                    grid_reload = True
                # When an explicit row was requested, stop reloading once it is reachable.
                if (field[4] is not None) and not (field[4] > len(rows) - 1 or field[4] < 0):
                    grid_reload = False
            if (field[4] is not None) and (field[4] > len(rows) - 1 or field[4] < 0):
                self.log_error(f"Couldn't select the specified row: {field[4] + 1}")
            if grids:
                if field[2] + 1 > len(grids):
                    self.log_error(f'{self.language.messages.grid_number_error} Grid number: {field[2] + 1} Grids in the screen: {len(grids)}')
            else:
                self.log_error("Grid element doesn't appear in DOM")
            # Pick the target row: explicit row, the currently selected one, or the first.
            row = rows[field[4]] if field[4] else self.get_selected_row(rows) if self.get_selected_row(rows) else(next(iter(rows), None))
            if row:
                # Walk DOWN (at most twice) until the selected row id reaches the
                # per-grid counter maintained by add_grid_row_counter.
                while (int(row.attrs["id"]) < self.grid_counters[grid_id]) and (down_loop < 2) and self.down_loop_grid and field[4] is None and time.time() < endtime:
                    self.new_grid_line(field, False)
                    row = self.get_selected_row(self.get_current_DOM().select(f"#{grid_id} tbody tr"))
                    down_loop+=1
                self.down_loop_grid = False
                columns = row.select("td")
                if columns:
                    if column_name in headers[field[2]]:
                        column_number = headers[field[2]][column_name]
                        current_value = columns[column_number].text.strip()
                        xpath = xpath_soup(columns[column_number])
                        current_value = self.remove_mask(current_value).strip()
                        # Re-resolves the cell on every access; falls back to recovery when stale.
                        selenium_column = lambda: self.get_selenium_column_element(xpath) if self.get_selenium_column_element(xpath) else self.try_recover_lost_line(field, grid_id, row, headers, field_to_label)
                        self.scroll_to_element(selenium_column())
                        self.click(selenium_column())
                        self.set_element_focus(selenium_column())
                        soup = self.get_current_DOM()
                        tmodal_list = soup.select('.tmodaldialog')
                        tmodal_layer = len(tmodal_list) if tmodal_list else 0
                        # Memo fields open a full modal; regular cells open a borderless editor.
                        if self.grid_memo_field:
                            term = ".tmodaldialog"
                        else:
                            term = ".tmodaldialog.twidget.borderless"
                        # Keep clicking + ENTER until the cell editor layer shows up.
                        while (time.time() < endtime and not self.element_exists(term=term, scrap_type=enum.ScrapType.CSS_SELECTOR, position=tmodal_layer + 1, main_container="body")):
                            time.sleep(1)
                            self.scroll_to_element(selenium_column())
                            self.set_element_focus(selenium_column())
                            self.click(selenium_column())
                            try:
                                ActionChains(self.driver).move_to_element(selenium_column()).send_keys_to_element(selenium_column(), Keys.ENTER).perform()
                            except WebDriverException:
                                try:
                                    self.send_keys(selenium_column(), Keys.ENTER)
                                except WebDriverException:
                                    pass
                            except:
                                pass
                            time.sleep(1)
                            if(field[1] == True):
                                field_one = ''
                                break
                        if(field[1] == True): break # if boolean field finish here.
                        self.wait_element(term=".tmodaldialog", scrap_type=enum.ScrapType.CSS_SELECTOR, position=initial_layer+1, main_container="body")
                        # The editor is the topmost modal layer by z-index.
                        soup = self.get_current_DOM()
                        new_container = self.zindex_sort(soup.select(".tmodaldialog.twidget"), True)[0]
                        child = new_container.select("input, textarea")
                        child_type = "input"
                        option_text = ""
                        if not child:
                            # No input/textarea: the editor is a combobox.
                            child = new_container.select("select")
                            child_type = "select"
                        if child_type == "input":
                            time.sleep(2)
                            selenium_input = lambda: self.driver.find_element_by_xpath(xpath_soup(child[0]))
                            self.wait_element(term=xpath_soup(child[0]), scrap_type=enum.ScrapType.XPATH)
                            valtype = selenium_input().get_attribute("valuetype")
                            lenfield = len(self.get_element_value(selenium_input()))
                            user_value = field[1]
                            check_mask = self.check_mask(selenium_input())
                            if check_mask:
                                # "@D" picture = date mask; an empty date is typed as zeros.
                                if (check_mask[0].startswith('@D') and user_value == ''):
                                    user_value = '00000000'
                                user_value = self.remove_mask(user_value)
                            self.wait_until_to( expected_condition = "visibility_of", element = selenium_input, timeout=True)
                            self.set_element_focus(selenium_input())
                            self.click(selenium_input())
                            if 'tget' in self.get_current_container().next.attrs['class'] or 'tmultiget' in self.get_current_container().next.attrs['class']:
                                bsoup_element = self.get_current_container().next
                                self.wait_until_to(expected_condition="element_to_be_clickable", element = bsoup_element, locator = By.XPATH, timeout=True)
                            self.try_send_keys(selenium_input, user_value, try_counter)
                            if self.grid_memo_field:
                                # Memo editors are confirmed with their own Ok button and not re-read.
                                self.SetButton('Ok')
                                check_value = False
                                self.grid_memo_field = False
                            # Rotate the send-keys strategy used by try_send_keys between attempts.
                            if try_counter < 2:
                                try_counter += 1
                            else:
                                try_counter = 0
                            # ENTER is only needed when the typed value is shorter than the field.
                            if (("_" in field[0] and field_to_len != {} and int(field_to_len[field[0]]) > len(field[1])) or lenfield > len(field[1])):
                                if (("_" in field[0] and field_to_valtype != {} and field_to_valtype[field[0]] != "N") or valtype != "N"):
                                    self.send_keys(selenium_input(), Keys.ENTER)
                                else:
                                    if not (re.match(r"[0-9]+,[0-9]+", user_value)):
                                        self.send_keys(selenium_input(), Keys.ENTER)
                                    else:
                                        self.wait_element_timeout(term= ".tmodaldialog.twidget", scrap_type= enum.ScrapType.CSS_SELECTOR, position=initial_layer+1, presence=False, main_container="body")
                                        if self.element_exists(term=".tmodaldialog.twidget", scrap_type=enum.ScrapType.CSS_SELECTOR, position=initial_layer+1, main_container="body"):
                                            self.wait_until_to(expected_condition="element_to_be_clickable", element = bsoup_element, locator = By.XPATH, timeout=True)
                                            self.send_keys(selenium_input(), Keys.ENTER)
                            elif lenfield == len(field[1]) and self.get_current_container().attrs['id'] != container_id:
                                try:
                                    self.send_keys(selenium_input(), Keys.ENTER)
                                except:
                                    pass
                            # Wait (in 10s slices) for the editor to close, then read the cell back.
                            try_endtime = self.config.time_out / 4
                            while try_endtime > 0:
                                element_exist = self.wait_element_timeout(term=xpath_soup(child[0]), scrap_type=enum.ScrapType.XPATH, timeout = 10, presence=False)
                                time.sleep(1)
                                if element_exist:
                                    current_value = self.get_element_text(selenium_column())
                                    if current_value == None:
                                        current_value = ''
                                    break
                                else:
                                    try_endtime = try_endtime - 10
                            # If a different container took over (e.g. a popup), bail out.
                            containers = self.get_current_DOM().select(self.containers_selectors["GetCurrentContainer"])
                            if child[0].parent.parent in containers:
                                containers.remove(child[0].parent.parent)
                            container_current = next(iter(self.zindex_sort(containers, True)))
                            if container_current.attrs['id'] != container_id:
                                logger().debug("Consider using the waithide and setkey('ESC') method because the input can remain selected.")
                                return
                        else:
                            # Combobox cell: match options by text prefix.
                            option_text_list = list(filter(lambda x: field[1] == x[0:len(field[1])], map(lambda x: x.text ,child[0].select('option'))))
                            option_value_dict = dict(map(lambda x: (x.attrs["value"], x.text), child[0].select('option')))
                            option_value = self.get_element_value(self.driver.find_element_by_xpath(xpath_soup(child[0])))
                            option_text = next(iter(option_text_list), None)
                            if not option_text:
                                self.log_error("Couldn't find option")
                            if (option_text != option_value_dict[option_value]):
                                self.select_combo(child[0], field[1])
                                if field[1] in option_text[0:len(field[1])]:
                                    current_value = field[1]
                            else:
                                # Already selected: just confirm with ENTER.
                                self.send_keys(self.driver.find_element_by_xpath(xpath_soup(child[0])), Keys.ENTER)
                                current_value = field[1]
            if not check_value:
                break
        # Final verification: the cell must show the requested value.
        if ( check_value and self.remove_mask(current_value).strip().replace(',','') != field_one.replace(',','')):
            self.search_for_errors()
            self.check_grid_error(grids, headers, column_name, rows, columns, field)
            self.log_error(f"Current value: {current_value} | Couldn't fill input: {field_one} value in Column: '{column_name}' of Grid: '{headers[field[2]].keys()}'.")
def get_selenium_column_element(self, xpath):
"""
[Internal]
Tries to get the selenium element out of a grid column.
Workaround method to be used instead of a lambda function on fill_grid method.
:param xpath: The xpath to the column.
:type xpath: str
Usage:
>>> # Calling the method:
>>> self.get_selenium_column_element(xpath)
"""
try:
return self.driver.find_element_by_xpath(xpath)
except:
return False
    def try_recover_lost_line(self, field, grid_id, row, headers, field_to_label):
        """
        [Internal]
        Tries to recover the position if a new line is lost.

        Workaround method to keep trying to get the right row fill_grid method.

        :param field: An item from the grid's input queue
        :type field: List of values
        :param grid_id: The grid's ID
        :type grid_id: str
        :param row: The current row
        :type row: Beautiful Soup object
        :param headers: List of dictionaries with each grid's headers
        :type headers: List of Dictionaries
        :param field_to_label: Dictionary from the x3 containing the field to label relationship.
        :type field_to_label: Dict
        :return: The recovered selenium cell element, or None on timeout.
        :rtype: Selenium object or None

        Usage:

        >>> # Calling the method:
        >>> self.try_recover_lost_line(field, grid_id, row, headers, field_to_label)
        """
        ret = None
        endtime = time.time() + self.config.time_out
        while(time.time() < endtime and not ret):
            if self.config.debug_log:
                logger().debug("Recovering lost line")
            # Re-create new lines until the selected row id catches up with the counter.
            while ( time.time() < endtime and int(row.attrs["id"]) < self.grid_counters[grid_id]):
                self.new_grid_line(field, False)
                row = self.get_selected_row(self.get_current_DOM().select(f"#{grid_id} tbody tr"))
            columns = row.select("td")
            if columns:
                # Resolve the header label for the target column (x3 field or literal).
                if "_" in field[0]:
                    column_name = field_to_label[field[0]]
                else:
                    column_name = field[0]
                column_name = column_name.lower()
                if column_name not in headers[field[2]]:
                    self.log_error(f"{self.language.messages.grid_column_error} Coluna: '{column_name}' Grid: '{headers[field[2]].keys()}'")
                column_number = headers[field[2]][column_name]
                xpath = xpath_soup(columns[column_number])
                # May still be False/None; the outer loop keeps retrying until timeout.
                ret = self.get_selenium_column_element(xpath)
        return ret
    def check_grid(self, field, x3_dictionaries, get_value=False):
        """
        [Internal]
        Checks the grid cell with the passed parameters.

        :param field: An item from the grid's check queue: [line, column, value, grid_number]
        :type field: List of values
        :param x3_dictionaries: Tuple of dictionaries containing information extracted from x3.
        :type x3_dictionaries: Tuple of dictionaries
        :param get_value: Boolean value if check_grid should return its value.
        :type get_value: bool
        :return: If get_value flag is True, it will return the captured value.
        :return type: str

        Usage:

        >>> # Calling the method:
        >>> self.check_grid([0, "A1_COD", "000001", 0], x3_dictionaries, False)
        """
        text = ""
        column_name = ""
        field_to_label = {}
        grids = None
        columns = None
        headers = None
        rows = None
        success = False
        endtime = time.time() + self.config.time_out
        if x3_dictionaries:
            field_to_label = x3_dictionaries[2]
        # Wait while a dialog is stacked over the grid (position=3 = extra layer).
        while(self.element_exists(term=".tmodaldialog .ui-dialog", scrap_type=enum.ScrapType.CSS_SELECTOR, position=3, main_container="body") and time.time() < endtime):
            if self.config.debug_log:
                logger().debug("Waiting for container to be active")
            time.sleep(1)
        # Retry until the cell text is captured or the timeout expires.
        while(time.time() < endtime and not success):
            containers = self.web_scrap(term=".tmodaldialog", scrap_type=enum.ScrapType.CSS_SELECTOR, main_container="body")
            container = next(iter(self.zindex_sort(containers, True)), None)
            if container:
                grids = container.select(".tgetdados, .tgrid, .tcbrowse")
                grids = self.filter_displayed_elements(grids)
                if grids:
                    headers = self.get_headers_from_grids(grids)
                    column_name = ""
                    if field[3] > len(grids):
                        self.log_error(self.language.messages.grid_number_error)
                    rows = grids[field[3]].select("tbody tr")
                    if rows:
                        if field[0] > len(rows)-1:
                            # Requested line does not exist: empty value or hard error.
                            if get_value:
                                return ''
                            else:
                                self.log_error(self.language.messages.grid_line_error)
                        field_element = next(iter(field), None)
                        if field_element != None and len(rows) -1 >= field_element:
                            columns = rows[field_element].select("td")
                    if columns and rows:
                        # Map the x3 field name to its visible header label.
                        if "_" in field[1]:
                            column_name = field_to_label[field[1]].lower()
                        else:
                            column_name = field[1].lower()
                        if column_name in headers[field[3]]:
                            column_number = headers[field[3]][column_name]
                            if self.grid_memo_field:
                                # Memo cells need their modal opened to read the content.
                                text = self.check_grid_memo(columns[column_number])
                                self.grid_memo_field = False
                            else:
                                text = columns[column_number].text.strip()
                            success = True
        if success and get_value and text:
            return text
        field_name = f"({field[0]}, {column_name})"
        # Compare the captured text with the expected value (field[2]) and log the result.
        self.log_result(field_name, field[2], text)
        logger().info(f"Collected value: {text}")
        if not success:
            self.check_grid_error( grids, headers, column_name, rows, columns, field )
def check_grid_memo(self, element):
"""
[Internal]
:param element:
:return:
"""
self.soup_to_selenium(element).click()
ActionChains(self.driver).key_down(Keys.ENTER).perform()
container = self.get_current_container()
textarea = next(iter(container.select("textarea")), None)
content = self.driver.execute_script(f"return arguments[0].value",self.driver.find_element_by_xpath(xpath_soup(textarea))).strip()
self.SetButton('Ok')
return content
def check_grid_error(self, grid, headers, column_name, rows, columns, field):
"""
[Internal]
"""
error = False
if not grid:
self.log_error("Couldn't find grids.")
error = True
if not error and column_name not in headers[field[3]]:
self.log_error(f"{self.language.messages.grid_column_error} Coluna: '{column_name}' Grid: '{headers[field[3]].keys()}'")
error = True
if not error and not columns:
self.log_error("Couldn't find columns.")
if not error and not rows:
self.log_error("Couldn't find rows.")
error = True
return
    def new_grid_line(self, field, add_grid_line_counter=True):
        """
        [Internal]
        Creates a new line on the grid.

        :param field: An item from the grid's input queue (field[2] = grid number)
        :type field: List of values
        :param add_grid_line_counter: Boolean if counter should be incremented when method is called. - **Default:** True
        :type add_grid_line_counter: bool

        Usage:

        >>> # Calling the method:
        >>> self.new_grid_line(["", "", 0, True])
        """
        grids = ''
        endtime = time.time() + self.config.time_out
        self.down_loop_grid = True
        # Poll the DOM until a displayed grid shows up in the topmost container.
        while(not grids and time.time() < endtime):
            soup = self.get_current_DOM()
            containers = soup.select(".tmodaldialog.twidget")
            if containers:
                containers = self.zindex_sort(containers, True)
                grids = self.filter_displayed_elements(containers[0].select(".tgetdados, .tgrid"))
            time.sleep(1)
        if grids:
            if field[2] > len(grids):
                self.log_error(self.language.messages.grid_number_error)
            rows = grids[field[2]].select("tbody tr")
            row = self.get_selected_row(rows)
            if row:
                columns = row.select("td")
                if columns:
                    second_column = lambda: self.driver.find_element_by_xpath(xpath_soup(columns[1]))
                    # self.scroll_to_element(second_column())
                    # Scroll the grid viewport all the way left before interacting.
                    self.driver.execute_script("$('.horizontal-scroll').scrollLeft(-400000);")
                    self.set_element_focus(second_column())
                    self.wait_until_to(expected_condition="visibility_of_element_located", element = columns[0], locator=By.XPATH )
                    # DOWN on the selected row appends a new line in Protheus grids.
                    ActionChains(self.driver).move_to_element(second_column()).send_keys_to_element(second_column(), Keys.DOWN).perform()
                    endtime = time.time() + self.config.time_out
                    while (time.time() < endtime and not(self.element_exists(term=".tgetdados tbody tr, .tgrid tbody tr", scrap_type=enum.ScrapType.CSS_SELECTOR, position=len(rows)+1))):
                        if self.config.debug_log:
                            logger().debug("Waiting for the new line to show")
                        time.sleep(1)
                    if (add_grid_line_counter):
                        # Track how many lines this grid should have (used by fill_grid).
                        self.add_grid_row_counter(grids[field[2]])
                else:
                    self.log_error("Couldn't find columns.")
            else:
                self.log_error("Couldn't find rows.")
        else:
            self.log_error("Couldn't find grids.")
def ClickGridCell(self, column, row_number=1, grid_number=1):
    """
    Clicks on a Cell of a Grid.

    :param column: The column that should be clicked. Either a human title
        or an SX3 field name such as "B1_COD" (translated via the x3 file).
    :type column: str
    :param row_number: Grid line that contains the column field to be checked.- **Default:** 1
    :type row_number: int
    :param grid_number: Grid number of which grid should be checked when there are multiple grids on the same screen. - **Default:** 1
    :type grid_number: int

    Usage:

    >>> # Calling the method:
    >>> oHelper.ClickGridCell("Product", 1)
    """
    success = False
    grids = None
    # Public API is 1-based; everything below works with 0-based indexes.
    row_number -= 1
    grid_number -= 1
    column_name = ""
    column = column.strip()
    column_element_old_class = None
    columns = None
    rows = None
    same_location = False
    self.wait_blocker()
    self.wait_element(term=".tgetdados tbody tr, .tgrid tbody tr, .tcbrowse", scrap_type=enum.ScrapType.CSS_SELECTOR)
    self.wait_element_timeout(term = column, scrap_type = enum.ScrapType.TEXT, timeout = self.config.time_out , optional_term = 'label')
    endtime = time.time() + self.config.time_out
    # Field names like "B1_COD" are resolved to their localized title
    # through the SX3 dictionary; anything else is used as-is.
    if re.match(r"\w+(_)", column):
        column_name = self.get_x3_dictionaries([column])[2][column].lower()
    else:
        column_name = column.lower()
    # Retry the whole lookup-and-click until the click sticks or we time out.
    while(not success and time.time() < endtime):
        containers = self.web_scrap(term=".tmodaldialog,.ui-dialog", scrap_type=enum.ScrapType.CSS_SELECTOR, main_container="body")
        container = next(iter(self.zindex_sort(containers, True)), None)
        if container:
            grids = self.filter_displayed_elements(container.select(".tgetdados, .tgrid, .tcbrowse"))
            if grids:
                if len(grids) > 1:
                    # Overlapping grids: keep only the top-most at the same position.
                    grids, same_location = self.filter_non_obscured(grids, grid_number)
                    if same_location:
                        grid_number = 0
                grids = list(filter(lambda x:x.select("tbody tr"), grids))
                headers = self.get_headers_from_grids(grids)
                if grid_number < len(grids):
                    rows = grids[grid_number].select("tbody tr")
                    if rows:
                        if row_number < len(rows):
                            columns = rows[row_number].select("td")
                            if columns:
                                if column_name in headers[grid_number]:
                                    column_number = headers[grid_number][column_name]
                                    # Lazy lookup: re-resolve the live cell each time it is used.
                                    column_element = lambda : self.driver.find_element_by_xpath(xpath_soup(columns[column_number]))
                                    if column_element_old_class == None:
                                        column_element_old_class = column_element().get_attribute("class")
                                    self.wait_until_to(expected_condition="element_to_be_clickable", element = columns[column_number], locator = By.XPATH, timeout=True)
                                    self.click(column_element())
                                    self.wait_element_is_focused(element_selenium = column_element, time_out = 2)
                                    # The click only counts when the cell's class changed or it
                                    # is flagged as selected; otherwise the outer loop retries.
                                    if column_element_old_class != column_element().get_attribute("class") or 'selected' in column_element().get_attribute("class") :
                                        self.wait_until_to(expected_condition="element_to_be_clickable", element = columns[column_number], locator = By.XPATH, timeout=True)
                                        self.wait_blocker()
                                        success = True
                                elif grids[grid_number] and "tcbrowse" in grids[grid_number].attrs['class']:
                                    # Browse-style grids expose no matching header; settle briefly
                                    # and consider the click done.
                                    time.sleep(0.5)
                                    success = True
    if not success:
        self.log_error(f"Couldn't Click on grid cell \ngrids:{grids}\nrows: {rows} ")
def filter_non_obscured(self, elements, grid_number):
    """
    [Internal]
    Resolves overlap between grids rendered at the same screen position.

    Uses the grid at *grid_number* as the reference; any other grid whose
    live element sits at the exact same (x, y) is considered to obscure it.
    When overlaps exist, only the top-most (highest z-index) of that stack
    is returned together with True; otherwise the original list is
    returned together with False.
    """
    reference = self.soup_to_selenium(elements[grid_number])
    ref_x, ref_y = reference.location['x'], reference.location['y']
    overlapping = []
    for candidate in elements:
        live = self.soup_to_selenium(candidate)
        at_same_spot = (live.location['x'] == ref_x
                        and live.location['y'] == ref_y)
        if at_same_spot and not reference == live:
            overlapping.append(candidate)
    if not overlapping:
        return elements, False
    # Include the reference itself, then keep only the top-most element.
    overlapping.append(elements[grid_number])
    topmost = next(iter(self.zindex_sort(overlapping, reverse=True)))
    return [topmost], True
def ClickGridHeader( self, column = 1, column_name = '', grid_number = 1):
    """
    Clicks on a Cell of a Grid Header.

    :param column: The 1-based column index that should be clicked.
    :type column: int
    :param column_name: The column title that should be clicked (used
        instead of *column* when non-empty).
    :type column_name: str
    :param grid_number: Grid number of which grid should be checked when there are multiple grids on the same screen. - **Default:** 1
    :type grid_number: int

    Usage:

    >>> # Calling the method:
    >>> oHelper.ClickGridHeader(column = 1 , grid_number = 1)
    >>> oHelper.ClickGridHeader(column_name = 'Código' , grid_number = 1)
    >>> oHelper.ClickGridHeader(column = 1 , grid_number = 2)
    """
    grid_number -= 1
    # Convert the public 1-based index to 0-based; 0 or negative stays as-is.
    column -= 1 if column > 0 else 0
    self.wait_element(term=".tgetdados tbody tr, .tgrid tbody tr, .tcbrowse", scrap_type=enum.ScrapType.CSS_SELECTOR)
    grid = self.get_grid(grid_number)

    def _click_header_label(index):
        # Common click sequence: resolve the clickable header container
        # around the label at *index*, focus it and click.
        column_element = grid.select('thead label')[index].parent.parent
        column_element_selenium = self.soup_to_selenium(column_element)
        self.set_element_focus(column_element_selenium)
        self.wait_until_to(expected_condition="element_to_be_clickable", element = column_element, locator = By.XPATH )
        column_element_selenium.click()

    if not column_name:
        _click_header_label(column)
    else:
        column_name = column_name.lower()
        # Headers are computed once here (the original recomputed them).
        header = self.get_headers_from_grids(grid)
        if column_name in header[grid_number]:
            _click_header_label(header[grid_number][column_name])
def search_column_index(self, grid, column):
    """
    [Internal]
    Returns the index of the first grid header label whose text contains *column*.

    :param grid: The grid (BeautifulSoup object) whose headers are inspected.
    :param column: Substring searched within each header label's text.
    :type column: str

    :return: The 0-based index of the matching header, or None when no
        header matches (after reporting via log_error).
    :rtype: int or None
    """
    column_enumeration = list(enumerate(grid.select("thead label")))
    chosen_column = next(iter(list(filter(lambda x: column in x[1].text, column_enumeration))), None)
    # Initialize up front: the original raised UnboundLocalError at the
    # return statement whenever log_error did not abort the test.
    column_index = None
    if chosen_column:
        column_index = chosen_column[0]
    else:
        self.log_error("Couldn't find chosen column.")
    return column_index
def get_x3_dictionaries(self, fields):
    """
    [Internal]
    Generates the dictionaries with field comparisons from the x3 file.

    Dictionaries: Field to Type, Field to Size, Field to Title.

    :param fields: List of fields that must be located in x3.
    :type fields: List of str

    :return: The three x3 dictionaries in a Tuple (type, size, title).
    :rtype: Tuple of Dictionary

    Usage:

    >>> # Calling the method:
    >>> x3_dictionaries = self.get_x3_dictionaries(field_list)
    """
    # Collect the distinct table prefixes ("B1_", "A1_", ...) of the fields.
    prefixes = list(set(map(lambda x:x.split("_")[0] + "_" if "_" in x else "", fields)))
    regex = self.generate_regex_by_prefixes(prefixes)
    # Path to the bundled SX3 csv file, built from components so it is valid
    # on any OS (the previous raw-string literal hard-coded doubled Windows
    # backslashes and broke on POSIX systems).
    path = os.path.join(os.path.dirname(__file__), "core", "data", "sx3.csv")
    # DataFrame holding only the rows of the table(s) informed by the user.
    # NOTE(review): error_bad_lines is deprecated in pandas >= 1.3 and removed
    # in 2.0 (replaced by on_bad_lines="skip") — confirm the pinned pandas
    # version before upgrading.
    data = pd.read_csv(path, sep=';', encoding='latin-1', header=None, error_bad_lines=False,
                       index_col='Campo', names=['Campo', 'Tipo', 'Tamanho', 'Titulo', 'Titulo_Spa', 'Titulo_Eng', None], low_memory=False)
    df = pd.DataFrame(data, columns=['Campo', 'Tipo', 'Tamanho', 'Titulo', 'Titulo_Spa', 'Titulo_Eng', None])
    if not regex:
        # No prefixes supplied: keep every character/number/date field.
        df_filtered = df.query("Tipo=='C' or Tipo=='N' or Tipo=='D' ")
    else:
        df_filtered = df.filter(regex=regex, axis=0)
    # Pick the title column matching the configured repository language.
    if self.config.language == "es-es":
        df_filtered.Titulo = df_filtered.loc[:,('Titulo_Spa')].str.strip()
    elif self.config.language == "en-us":
        df_filtered.Titulo = df_filtered.loc[:,('Titulo_Eng')].str.strip()
    else:
        df_filtered.Titulo = df_filtered.loc[:,('Titulo')].str.strip()
    df_filtered.index = df_filtered.index.map(lambda x: x.strip())
    dict_ = df_filtered.to_dict()
    return (dict_['Tipo'], dict_['Tamanho'], dict_['Titulo'])
def generate_regex_by_prefixes(self, prefixes):
    """
    [Internal]
    Returns a regex string created by combining all field prefixes.

    Each non-empty prefix is anchored at the start of the line and the
    alternatives are joined with "|". An input with no usable prefixes
    yields an empty string.

    :param prefixes: Prefixes of fields to be combined in a regex.
    :type prefixes: List of str

    Usage:

    >>> # Calling the method:
    >>> regex = self.generate_regex_by_prefixes(field_prefixes)
    """
    anchored = ["^" + prefix for prefix in prefixes if prefix != ""]
    return "|".join(anchored)
def get_headers_from_grids(self, grids, duplicate_fields=None):
    """
    [Internal]
    Returns the headers of each grid in *grids* parameter.

    :param grids: The grids to extract the headers.
    :type grids: List of BeautifulSoup objects
    :param duplicate_fields: Optional pair [field_name, new_index] used to
        override the index stored for a duplicated header title.
    :type duplicate_fields: List

    :return: List of Dictionaries with each header value and index.
    :rtype: List of Dict

    Usage:

    >>> # Calling the method:
    >>> headers = self.get_headers_from_grids(grids)
    """
    # None sentinel instead of a mutable default argument (the original
    # used duplicate_fields=[], a shared-across-calls anti-pattern).
    if duplicate_fields is None:
        duplicate_fields = []
    headers = []
    for item in grids:
        labels = item.select("thead tr label")
        if labels:
            # Map each normalized (stripped, lowercased) title to its index.
            keys = [label.text.strip().lower() for label in labels]
            headers.append({key: index for index, key in enumerate(keys)})
    if len(duplicate_fields) > 0:
        duplicated_key = duplicate_fields[0].lower()
        duplicated_value = duplicate_fields[1]
        for header in headers:
            if duplicated_key in header:
                header[duplicated_key] = duplicated_value
    return headers
def add_grid_row_counter(self, grid):
    """
    [Internal]
    Adds the counter of rows to the global dictionary.

    The first call for a given grid id registers it at zero; every
    subsequent call for the same id bumps the stored counter by one.

    :param grid: The grid whose rows are being controlled.
    :type grid: BeautifulSoup object.

    Usage:

    >>> # Calling the method:
    >>> self.add_grid_row_counter(grid)
    """
    counters = self.grid_counters
    key = grid.attrs["id"]
    counters[key] = counters[key] + 1 if key in counters else 0
def wait_element_is_not_displayed(self, element_soup, timeout = 5 , step=0.3):
    """
    [Internal]
    Polls until element.is_displayed() returns False or *timeout* expires.

    :param element_soup: The element soup.
    :type element_soup: BeautifulSoup object.
    :param timeout: The maximum amount of time of wait. - **Default:** 5.0
    :type timeout: float
    :param step: The amount of time each step should wait. - **Default:** 0.3
    :type step: float

    Usage:

    >>> # Calling the method:
    >>> self.wait_element_is_not_displayed(soup_element, 10, 0.5)
    """
    deadline = time.time() + timeout
    try:
        logger().debug('Waiting for element to disappear')
        while time.time() <= deadline:
            if not self.element_is_displayed(element_soup):
                break
            time.sleep(step)
    except Exception:
        # Best-effort wait: any lookup failure means the element is gone.
        return
def wait_element_is_focused(self, element_selenium = None, time_out = 5, step = 0.1):
    """
    [Internal]
    Polls until *element_selenium* becomes the active (focused) element
    or *time_out* expires. A None element returns immediately.
    """
    deadline = time.time() + time_out
    while element_selenium and time.time() < deadline:
        if self.switch_to_active_element() == element_selenium():
            break
        time.sleep(step)
def wait_element_is_not_focused(self, element_selenium = None, time_out = 5, step = 0.1):
    """
    [Internal]
    Polls until *element_selenium* loses focus (is no longer the active
    element) or *time_out* expires. A None element returns immediately.
    """
    deadline = time.time() + time_out
    while element_selenium and time.time() < deadline:
        if self.switch_to_active_element() != element_selenium():
            break
        time.sleep(step)
def switch_to_active_element(self):
    """
    [Internal]
    Returns the element that currently has focus, or None when it cannot
    be retrieved.

    Bug fix: the original called the driver method but never returned its
    result, so this wrapper always evaluated to None and the focus-wait
    helpers (wait_element_is_focused / wait_element_is_not_focused) could
    never match the target element.

    :return: The active WebElement or None.
    """
    try:
        return self.driver.switch_to_active_element()
    except NoSuchElementException:
        return None
    except Exception as e:
        logger().exception(f"Warning switch_to_active_element() exception : {str(e)}")
        return None
def wait_element(self, term, scrap_type=enum.ScrapType.TEXT, presence=True, position=0, optional_term=None, main_container=".tmodaldialog,.ui-dialog", check_error=True):
    """
    [Internal]
    Waits until the desired element is located on the screen.

    :param term: The first search term. A text or a selector.
    :type term: str
    :param scrap_type: The type of webscraping. - **Default:** enum.ScrapType.TEXT
    :type scrap_type: enum.ScrapType.
    :param presence: If the element should exist or not in the screen. - **Default:** True
    :type presence: bool
    :param position: If the element should exist at a specific position. e.g. The fourth button. - **Default:** 0
    :type position: int
    :param optional_term: The second search term. A selector used in MIXED webscraping. - **Default:** None
    :type optional_term: str
    :param main_container: The selector of a container element that has all other elements. - **Default:** ".tmodaldialog,.ui-dialog"
    :type main_container: str

    Usage:

    >>> # Calling the method:
    >>> self.wait_element(term=".ui-button.ui-dialog-titlebar-close[title='Close']", scrap_type=enum.ScrapType.CSS_SELECTOR)
    """
    endtime = time.time() + self.config.time_out
    if self.config.debug_log:
        logger().debug("Waiting for element")
    # Poll for presence (or absence, when presence=False) until timeout.
    if presence:
        while (not self.element_exists(term, scrap_type, position, optional_term, main_container, check_error) and time.time() < endtime):
            time.sleep(0.1)
    else:
        while (self.element_exists(term, scrap_type, position, optional_term, main_container, check_error) and time.time() < endtime):
            time.sleep(0.1)
    if time.time() > endtime:
        # Timed out. The user-input field of the resolution screen gets
        # special handling; close buttons are best-effort and just report
        # failure; everything else aborts the test through log_error.
        if term == "[name='cGetUser']":
            self.close_resolution_screen()
        else:
            if ".ui-button.ui-dialog-titlebar-close[title='Close']" in term:
                return False
            self.restart_counter += 1
            self.log_error(f"Element {term} not found!")
    presence_endtime = time.time() + 10
    if presence:
        if self.config.debug_log:
            logger().debug("Element found! Waiting for element to be displayed.")
        element = next(iter(self.web_scrap(term=term, scrap_type=scrap_type, optional_term=optional_term, main_container=main_container, check_error=check_error)), None)
        if element is not None:
            # Lazily re-resolve the live element; the DOM may still be mutating.
            sel_element = lambda:self.soup_to_selenium(element)
            sel_element_isdisplayed = False
            # Give the element up to 10 extra seconds to actually be displayed.
            while(not sel_element_isdisplayed and time.time() < presence_endtime):
                try:
                    if sel_element != None:
                        sel_element_isdisplayed = sel_element().is_displayed()
                    else:
                        sel_element = lambda:self.soup_to_selenium(element)
                    time.sleep(0.1)
                except AttributeError:
                    pass
                except StaleElementReferenceException:
                    # Element re-rendered between lookups; retry on next pass.
                    pass
def wait_element_timeout(self, term, scrap_type=enum.ScrapType.TEXT, timeout=5.0, step=0.1, presence=True, position=0, optional_term=None, main_container=".tmodaldialog,.ui-dialog, body", check_error=True):
    """
    [Internal]
    Waits until the desired element is located on the screen or until the timeout is met.

    :param term: The first search term. A text or a selector.
    :type term: str
    :param scrap_type: The type of webscraping. - **Default:** enum.ScrapType.TEXT
    :type scrap_type: enum.ScrapType.
    :param timeout: The maximum amount of time of wait. - **Default:** 5.0
    :type timeout: float
    :param step: The amount of time each step should wait. - **Default:** 0.1
    :type step: float
    :param presence: If the element should exist or not in the screen. - **Default:** True
    :type presence: bool
    :param position: If the element should exist at a specific position. e.g. The fourth button. - **Default:** 0
    :type position: int
    :param optional_term: The second search term. A selector used in MIXED webscraping. - **Default:** None
    :type optional_term: str
    :param main_container: The selector of a container element that has all other elements. - **Default:** ".tmodaldialog,.ui-dialog, body"
    :type main_container: str

    :return: True when the presence/absence condition was met before the
        timeout, False otherwise.
    :rtype: bool

    Usage:

    >>> # Calling the method:
    >>> self.wait_element_timeout(term=button, scrap_type=enum.ScrapType.MIXED, optional_term="button", timeout=10, step=0.1)
    """
    success = False
    if presence:
        endtime = time.time() + timeout
        while time.time() < endtime:
            time.sleep(step)
            if self.element_exists(term, scrap_type, position, optional_term, main_container, check_error):
                success = True
                break
    else:
        endtime = time.time() + timeout
        while time.time() < endtime:
            time.sleep(step)
            if not self.element_exists(term, scrap_type, position, optional_term, main_container, check_error):
                success = True
                break
    if presence and success:
        if self.config.debug_log:
            logger().debug("Element found! Waiting for element to be displayed.")
        element = next(iter(self.web_scrap(term=term, scrap_type=scrap_type, optional_term=optional_term, main_container=main_container, check_error=check_error)), None)
        if element is not None:
            # Lazy live-element lookup, resilient to DOM re-renders.
            sel_element = lambda: self.driver.find_element_by_xpath(xpath_soup(element))
            endtime = time.time() + timeout
            # Scroll toward the element until it is actually displayed.
            while(time.time() < endtime and not self.element_is_displayed(element)):
                try:
                    time.sleep(0.1)
                    self.scroll_to_element(sel_element())
                    if(sel_element().is_displayed()):
                        break
                except:
                    # Bare except kept: any transient WebDriver failure here
                    # (stale element, detached node) should just retry.
                    continue
    return success
def get_selected_row(self, rows):
    """
    [Internal]
    From a list of rows, filter the selected one.

    A row is "selected" either when one of its cells carries the
    selected-cell class, or — as a fallback — when the live DOM element's
    class attribute is exactly "selected-row".

    :param rows: List of rows.
    :type rows: List of Beautiful Soup objects

    :return: The selected row, or None when no row is selected.
    :rtype: Beautiful Soup object.

    Usage:

    >>> # Calling the method:
    >>> selected_row = self.get_selected_row(rows)
    """
    selected_cells = list(filter(lambda x: len(x.select("td.selected-cell")), rows))
    if selected_cells:
        return next(iter(selected_cells))
    # Fallback via the live DOM. Computed once: the original evaluated this
    # filter twice, doubling the WebDriver round-trips per row.
    selected_rows = list(filter(lambda x: "selected-row" == self.soup_to_selenium(x).get_attribute('class'), rows))
    if selected_rows:
        return next(iter(selected_rows))
def SetFilePath(self, value, button = ""):
    """
    Fills the path screen with the desired path

    .. warning::
        Necessary informed the button name or the program will select the current button name.

    :param value: Path to be inputted.
    :type value: str
    :param button: Name button from path screen.
    :type button: str

    Usage:

    >>> # Calling the method:
    >>> oHelper.SetFilePath(r"C:\\folder")
    >>> oHelper.SetFilePath(r"C:\\folder","save")
    """
    self.wait_element(self.language.file_name)
    element = self.driver.find_element(By.CSS_SELECTOR, ".filepath input")
    if element:
        # Clear through JS first: the widget may ignore a regular clear().
        self.driver.execute_script("document.querySelector('#{}').value='';".format(element.get_attribute("id")))
        self.send_keys(element, value)
    elements = self.driver.find_elements(By.CSS_SELECTOR, ".tremoteopensave button")
    if elements:
        for line in elements:
            if button != "":
                # Explicit button name: click only the exact (case-insensitive) match.
                if line.text.strip().upper() == button.upper():
                    self.click(line)
                    break
            elif line.text.strip().upper() == self.language.open.upper():
                # No name given: fall back to the localized Open/Save captions.
                self.click(line)
                break
            elif line.text.strip().upper() == self.language.save.upper():
                self.click(line)
                break
        else:
            # for/else: the loop finished without a break, i.e. no button matched.
            self.log_error(f"Button: {button} not found")
def MessageBoxClick(self, button_text):
    """
    Clicks on desired button inside a Messagebox element.

    :param button_text: Desired button to click (case-insensitive match
        against the button's text).
    :type button_text: str

    Usage:

    >>> # Calling the method:
    >>> oHelper.MessageBoxClick("Ok")
    """
    self.wait_element(".messagebox-container", enum.ScrapType.CSS_SELECTOR)
    page = BeautifulSoup(self.driver.page_source, "html.parser")
    box = next(iter(page.select(".messagebox-container")), None)
    if box is None:
        return
    wanted = button_text.lower()
    matches = [btn for btn in box.select(".ui-button") if btn.text.lower() == wanted]
    if matches:
        target = self.driver.find_element_by_xpath(xpath_soup(matches[0]))
        self.click(target)
def get_enchoice_button_ids(self, layer):
    """
    [Internal]
    If current layer level has an enchoice, returns all buttons' ids.

    Best-effort: any failure (missing layer index, detached DOM, missing
    attributes) yields an empty list instead of raising, since callers
    treat "no enchoice" and "couldn't read enchoice" identically.

    :param layer: Current layer level that the application is.
    :type layer: int

    :return: List with enchoice's buttons' ids.
    :rtype: List of str

    Usage:

    >>> # Calling the method:
    >>> self.get_enchoice_button_ids(current_layer)
    """
    try:
        soup = self.get_current_DOM()
        # Dialog layers are stacked by z-index; pick the requested one (1-based).
        current_layer = self.zindex_sort(soup.select(".tmodaldialog"), False)[layer - 1]
        buttons = [button for button in current_layer.select(".tpanel button") if button.text.strip() != ""]
        return [button.parent.attrs["id"] for button in buttons]
    except Exception:
        # Deliberate silent fallback — see docstring.
        return []
def CheckView(self, text, element_type="help"):
    """
    Checks if a certain text is present in the screen at the time and takes an action.

    "help" - alerts with messages of errors.

    :param text: Text to be checked.
    :type text: str
    :param element_type: Type of element. - **Default:** "help"
    :type element_type: str

    Usage:

    >>> # Calling the method.
    >>> oHelper.CheckView("Processing")
    """
    # Only the "help" element type is supported; anything else is a no-op.
    if element_type != "help":
        return
    logger().info(f"Checking text on screen: {text}")
    self.wait_element_timeout(term=text, scrap_type=enum.ScrapType.MIXED, timeout=2.5, step=0.5, optional_term=".tsay", check_error=False)
    found = self.element_exists(term=text, scrap_type=enum.ScrapType.MIXED, optional_term=".tsay", check_error=False)
    if not found:
        # Record the miss as a soft error instead of aborting the test.
        self.errors.append(f"{self.language.messages.text_not_found}({text})")
def try_send_keys(self, element_function, key, try_counter=0):
    """
    [Internal]
    Tries to send value to element using different techniques.
    Meant to be used inside of a loop.

    :param element_function: The function that returns the element that would receive the value.
    :type element_function: function object
    :param key: The value that would be sent to the element.
    :type key: str or selenium.webdriver.common.keys
    :param try_counter: This counter will decide which technique should be used. - **Default:** 0
    :type try_counter: int

    Usage:

    >>> # Calling the method:
    >>> self.try_send_keys(selenium_input, user_value, try_counter)
    """
    self.wait_until_to( expected_condition = "visibility_of", element = element_function )
    # Common preamble (was triplicated across the branches): select the
    # current content (HOME, then SHIFT+END) so the new value replaces it.
    element_function().send_keys(Keys.HOME)
    ActionChains(self.driver).key_down(Keys.SHIFT).send_keys(Keys.END).key_up(Keys.SHIFT).perform()
    if try_counter == 0:
        # Technique 0: plain send_keys on the element.
        element_function().send_keys(key)
    elif try_counter == 1:
        # Technique 1: ActionChains targeting the element explicitly.
        ActionChains(self.driver).move_to_element(element_function()).send_keys_to_element(element_function(), key).perform()
    else:
        # Technique 2: ActionChains sending to whatever has focus after the move.
        ActionChains(self.driver).move_to_element(element_function()).send_keys(key).perform()
def find_label_element(self, label_text, container= None, position = 1, input_field=True, direction=None):
    """
    [Internal]
    Find input element next to label containing the label_text parameter.

    Tries a positional search first; if that fails, inspects the label's
    sibling <div> elements (two forward and two backward) looking for a
    tget or tcombobox widget that has not been consumed yet (tracked in
    self.used_ids).

    :param label_text: The label text to be searched
    :type label_text: str
    :param container: The main container object to be used
    :type container: BeautifulSoup object

    :return: A list containing a BeautifulSoup object next to the label
    :rtype: List of BeautifulSoup objects

    Usage:

    >>> self.find_label_element("User:", container_object)
    """
    try:
        if container:
            elements = self.filter_label_element(label_text, container)
            if elements:
                for element in elements:
                    elem = self.search_element_position(label_text, position, input_field, direction)
                    if elem:
                        return elem

                    #Checking previous and next element:
                    next_sibling = element.find_next_sibling("div")
                    second_next_sibling = next_sibling.find_next_sibling("div")

                    # Bug fix: these previously called find_next_sibling, which
                    # made the "previous sibling" checks below duplicates of the
                    # "next sibling" checks instead of looking backwards.
                    previous_sibling = element.find_previous_sibling("div")
                    second_previous_sibling = previous_sibling.find_previous_sibling("div")

                    #If current element is tsay and next or second next element is tget or tcombobox => return tget or tcombobox
                    if (hasattr(element, "attrs") and "class" in element.attrs
                            and "tsay" in element.attrs["class"]
                            and (hasattr(next_sibling, "attrs") and "class" in next_sibling.attrs and "id" in next_sibling.attrs
                            and ("tget" in next_sibling.attrs["class"] or "tcombobox" in next_sibling.attrs["class"])
                            and next_sibling.attrs["id"] not in self.used_ids)
                            or (hasattr(second_next_sibling, "attrs") and "class" in second_next_sibling.attrs and "id" in second_next_sibling.attrs
                            and ("tget" in second_next_sibling.attrs["class"] or "tcombobox" in second_next_sibling.attrs["class"])
                            and second_next_sibling.attrs["id"] not in self.used_ids)):
                        if (("tget" in next_sibling.attrs["class"]
                                or "tcombobox" in next_sibling.attrs["class"])
                                and next_sibling.attrs["id"] not in self.used_ids):
                            self.used_ids[next_sibling.attrs["id"]] = container.attrs["id"]
                            return [next_sibling]
                        elif (("tget" in second_next_sibling.attrs["class"]
                                or "tcombobox" in second_next_sibling.attrs["class"])
                                and second_next_sibling.attrs["id"] not in self.used_ids):
                            self.used_ids[second_next_sibling.attrs["id"]] = container.attrs["id"]
                            return [second_next_sibling]
                        else:
                            return[]
                    #If current element is tsay and previous or second previous element is tget or tcombobox => return tget or tcombobox
                    elif (hasattr(element, "attrs") and "class" in element.attrs
                            and "tsay" in element.attrs["class"]
                            and (hasattr(previous_sibling, "attrs") and "class" in previous_sibling.attrs and "id" in previous_sibling.attrs
                            and ("tget" in previous_sibling.attrs["class"] or "tcombobox" in previous_sibling.attrs["class"])
                            and previous_sibling.attrs["id"] not in self.used_ids)
                            or (hasattr(second_previous_sibling, "attrs") and "class" in second_previous_sibling.attrs and "id" in second_previous_sibling.attrs
                            and ("tget" in second_previous_sibling.attrs["class"] or "tcombobox" in second_previous_sibling.attrs["class"])
                            and second_previous_sibling.attrs["id"] not in self.used_ids)):
                        if (("tget" in previous_sibling.attrs["class"]
                                or "tcombobox" in previous_sibling.attrs["class"])
                                and previous_sibling.attrs["id"] not in self.used_ids):
                            self.used_ids[previous_sibling.attrs["id"]] = container.attrs["id"]
                            return [previous_sibling]
                        elif (("tget" in second_previous_sibling.attrs["class"]
                                or "tcombobox" in second_previous_sibling.attrs["class"])
                                and second_previous_sibling.attrs["id"] not in self.used_ids):
                            self.used_ids[second_previous_sibling.attrs["id"]] = container.attrs["id"]
                            return [second_previous_sibling]
                        else:
                            return []
                    #If element is not tsay => return it
                    elif (hasattr(element, "attrs") and "class" in element.attrs
                            and "tsay" not in element.attrs["class"]):
                        return self.search_element_position(label_text)
                #If label exists but there is no element associated with it => return empty list
                if not element:
                    return []
                else:
                    return self.search_element_position(label_text)
            else:
                return []
        else:
            return self.search_element_position(label_text)
    except AttributeError:
        # A missing sibling (find_*_sibling returned None) lands here:
        # fall back to the positional search.
        return self.search_element_position(label_text)
def log_error(self, message, new_log_line=True, skip_restart=False):
    """
    [Internal]
    Finishes execution of test case with an error and creates the log information for that test.

    Also decides whether to restart the application or close the driver,
    and finally fails the test through assertTrue(False).

    :param message: Message to be logged
    :type message: str
    :param new_log_line: Boolean value if Message should be logged as new line or not. - **Default:** True
    :type new_log_line: bool
    :param skip_restart: Accepted for API compatibility; the restart
        decision below reads self.config.skip_restart instead.
    :type skip_restart: bool

    Usage:

    >>> #Calling the method:
    >>> self.log_error("Element was not found")
    """
    self.clear_grid()
    logger().warning(f"Warning log_error {message}")
    if self.config.smart_test:
        logger().debug(f"***System Info*** in log_error():")
        system_info()
    # Routine may come as a "Menu > Submenu > Routine" breadcrumb; keep the tail.
    routine_name = self.config.routine if ">" not in self.config.routine else self.config.routine.split(">")[-1].strip()
    routine_name = routine_name if routine_name else "error"
    stack_item = self.log.get_testcase_stack()
    test_number = f"{stack_item.split('_')[-1]} -" if stack_item else ""
    log_message = f"{test_number} {message}"
    self.message = log_message
    self.expected = False
    self.log.seconds = self.log.set_seconds(self.log.initial_time)
    self.log.testcase_seconds = self.log.set_seconds(self.log.testcase_initial_time)
    self.log.ct_method, self.log.ct_number = self.log.ident_test()
    if self.config.new_log:
        self.execution_flow()
    if self.config.screenshot:
        self.log.take_screenshot_log(self.driver, stack_item, test_number)
    if new_log_line:
        self.log.new_line(False, log_message)
    # Persist the log except while still inside setUpClass (unless this is
    # already the final restart attempt).
    if ((stack_item != "setUpClass") or (stack_item == "setUpClass" and self.restart_counter == 3)):
        self.log.save_file()
    # Either restart the application for the next test case or close the browser.
    if not self.config.skip_restart and len(self.log.list_of_testcases()) > 1 and self.config.initial_program != '':
        self.restart()
    elif self.config.coverage and self.config.initial_program != '':
        self.restart()
    else:
        try:
            self.driver.close()
        except Exception as e:
            logger().exception(f"Warning Log Error Close {str(e)}")
    # After too many consecutive restarts, notify the execution server and
    # make sure the driver is closed.
    if self.restart_counter > 2:
        if self.config.num_exec and stack_item == "setUpClass" and self.log.checks_empty_line():
            if not self.num_exec.post_exec(self.config.url_set_end_exec, 'ErrorSetFimExec'):
                self.restart_counter = 3
                # Re-entrant call: reports the notification failure itself.
                self.log_error(f"WARNING: Couldn't possible send num_exec to server please check log.")
        if (stack_item == "setUpClass") :
            try:
                self.driver.close()
            except Exception as e:
                logger().exception(f"Warning Log Error Close {str(e)}")
    # Finally fail the test case with the composed message.
    if ((stack_item != "setUpClass") or (stack_item == "setUpClass" and self.restart_counter == 3)):
        if self.restart_counter >= 3:
            self.restart_counter = 0
        self.assertTrue(False, log_message)
def ClickIcon(self, icon_text, position=1):
    """
    Clicks on an Icon button based on its tooltip text or Alt attribute title.

    :param icon_text: The tooltip/title text.
    :type icon_text: str
    :param position: Position which element is located. - **Default:** 1
    :type position: int

    Usage:

    >>> # Call the method:
    >>> oHelper.ClickIcon("Add")
    >>> oHelper.ClickIcon("Edit")
    """
    icon = ""
    success = False
    filtered_buttons = None
    # Public API is 1-based; internals are 0-based.
    position -= 1
    endtime = time.time() + self.config.time_out
    while(time.time() < endtime and not icon and not success):
        self.wait_element(term=".ttoolbar, .tbtnbmp", scrap_type=enum.ScrapType.CSS_SELECTOR)
        soup = self.get_current_DOM()
        container = next(iter(self.zindex_sort(soup.select(".tmodaldialog"))), None)
        container = container if container else soup
        # First try image icons (matched by their "alt" attribute).
        tbtnbmp_img = self.on_screen_enabled(container.select(".tbtnbmp > img"))
        tbtnbmp_img_str = " ".join(str(x) for x in tbtnbmp_img) if tbtnbmp_img else ''
        if icon_text not in tbtnbmp_img_str:
            # Not in the modal's markup: retry within the current container.
            container = self.get_current_container()
            tbtnbmp_img = self.on_screen_enabled(container.select(".tbtnbmp > img"))
        if tbtnbmp_img and len(tbtnbmp_img) -1 >= position:
            icon = list(filter(lambda x: icon_text == self.soup_to_selenium(x).get_attribute("alt"), tbtnbmp_img))[position]
        else:
            # Fall back to styled buttons matched by tooltip text.
            buttons = self.on_screen_enabled(container.select("button[style]"))
            logger().info("Searching for Icon")
            if buttons:
                filtered_buttons = self.filter_by_tooltip_value(buttons, icon_text)
                if filtered_buttons and len(filtered_buttons) -1 >= position:
                    # NOTE(review): this branch checks that *position* is in
                    # range but then always takes the FIRST match instead of
                    # filtered_buttons[position] — confirm whether that is
                    # intentional before changing it.
                    icon = next(iter(filtered_buttons), None)
        if icon:
            element = lambda: self.soup_to_selenium(icon)
            self.set_element_focus(element())
            success = self.click(element())
    if not icon:
        self.log_error(f"Couldn't find Icon: {icon_text}.")
    if not success:
        self.log_error(f"Couldn't click Icon: {icon_text}.")
def AddParameter(self, parameter, branch, portuguese_value, english_value="", spanish_value=""):
    """
    Adds a parameter to the queue of parameters to be set by SetParameters method.

    In smart-test / parameter-url mode the parameter is applied immediately
    through the u_AddParameter web entry point; otherwise it is queued in
    self.parameters for a later SetParameters call.

    :param parameter: The parameter name.
    :type parameter: str
    :param branch: The branch to be filled in parameter edit screen.
    :type branch: str
    :param portuguese_value: The value for a portuguese repository.
    :type portuguese_value: str
    :param english_value: The value for an english repository.
    :type english_value: str
    :param spanish_value: The value for a spanish repository.
    :type spanish_value: str

    Usage:

    >>> # Calling the method:
    >>> oHelper.AddParameter("MV_MVCSA1", "", ".F.", ".F.", ".F.")
    """
    endtime = time.time() + self.config.time_out
    # Half of the full timeout: used below to decide when to re-issue the request.
    halftime = ((endtime - time.time()) / 2)
    if(self.config.smart_test or self.config.parameter_url):
        if self.tmenu_screen is None:
            self.tmenu_screen = self.check_tmenu_screen()
        # Pick the value matching the configured repository language.
        value = self.parameter_url_value( self.config.language.lower(),
            {'pt-br': portuguese_value, 'en-us': english_value, 'es-es': spanish_value})
        self.driver.get(f"""{self.config.url}/?StartProg=u_AddParameter&a={parameter}&a={
            branch}&a={value}&Env={self.config.environment}""")
        # Wait for the login field to come back; a tmessagebox meanwhile means
        # the server rejected the request.
        while ( time.time() < endtime and not self.wait_element_timeout(term="[name='cGetUser'] > input",
                scrap_type=enum.ScrapType.CSS_SELECTOR, main_container='body')):
            tmessagebox = self.web_scrap(".tmessagebox", scrap_type=enum.ScrapType.CSS_SELECTOR,
                optional_term=None, label=False, main_container="body")
            if( tmessagebox ):
                self.restart_counter = 3
                self.log_error(f" AddParameter error: {tmessagebox[0].text}")
            # No response after half the timeout: retry the request once more.
            if ( not tmessagebox and ((endtime) - time.time() < halftime) ):
                self.driver.get(f"""{self.config.url}/?StartProg=u_AddParameter&a={parameter}&a={
                    branch}&a={value}&Env={self.config.environment}""")
    else:
        self.parameters.append([parameter.strip(), branch, portuguese_value, english_value, spanish_value])
def SetParameters(self):
    """
    Sets the parameters in CFG screen. The parameters must be passed with calls for **AddParameter** method.

    Usage:

    >>> # Adding Parameter:
    >>> oHelper.AddParameter("MV_MVCSA1", "", ".F.", ".F.", ".F.")
    >>> # Calling the method:
    >>> oHelper.SetParameters()
    """
    # Smart-test / parameter-url mode applies parameters through the web
    # entry point; otherwise the CFG screen is driven directly.
    use_url_setup = self.config.smart_test or self.config.parameter_url
    if use_url_setup:
        self.parameter_url(restore_backup=False)
    else:
        self.parameter_screen(restore_backup=False)
def RestoreParameters(self):
    """
    Restores parameters to previous value in CFG screen. Should be used after a **SetParameters** call.

    Usage:

    >>> # Adding Parameter:
    >>> oHelper.AddParameter("MV_MVCSA1", "", ".F.", ".F.", ".F.")
    >>> # Setting parameters and, later, restoring them:
    >>> oHelper.SetParameters()
    >>> oHelper.RestoreParameters()
    """
    if(self.config.smart_test or self.config.parameter_url):
        self.tmenu_screen = self.check_tmenu_screen()
        self.parameter_url(restore_backup=True)
    else:
        self.parameter_screen(restore_backup=True)
    def parameter_url(self, restore_backup=False):
        """
        [Internal]

        Sets or restores CFG parameters by invoking a server-side user
        function (u_SetParam / u_RestorePar) directly through the WebApp URL
        instead of navigating the SIGACFG screens.

        :param restore_backup: True to restore the previously saved values instead of setting new ones.
        :type restore_backup: bool

        Usage:

        >>> # Calling the method:
        >>> self.parameter_url(restore_backup=False)
        """
        endtime = time.time() + self.config.time_out
        function_to_call = "u_SetParam" if restore_backup is False else "u_RestorePar"
        # NOTE(review): presumably a non-empty self.parameters means the queue
        # was never applied via the URL path, so there is nothing to restore —
        # confirm against AddParameter's branching.
        if restore_backup == True and self.parameters:
            return
        self.driver.get(f"""{self.config.url}/?StartProg={function_to_call}&a={self.config.group}&a={
            self.config.branch}&a={self.config.user}&a={self.config.password}&Env={self.config.environment}""")
        # Until the login field reappears, look for an error message box
        # raised by the server-side user function.
        while ( time.time() < endtime and not self.wait_element_timeout(term="[name='cGetUser'] > input", timeout = 1,
            scrap_type=enum.ScrapType.CSS_SELECTOR, main_container='body')):
            tmessagebox = self.web_scrap(".tmessagebox", scrap_type=enum.ScrapType.CSS_SELECTOR,
                optional_term=None, label=False, main_container="body")
            if( tmessagebox ):
                method = "SetParameters" if restore_backup is False else "RestoreParameters"
                self.restart_counter = 3
                self.log_error(f" {method} error: {tmessagebox[0].text}")
        # Go back to the application and reopen the routine under test.
        self.driver.get(self.config.url)
        self.Setup(self.config.initial_program, self.config.date, self.config.group,
            self.config.branch, save_input=not self.config.autostart)
        if not self.tmenu_screen:
            if ">" in self.config.routine:
                self.SetLateralMenu(self.config.routine, save_input=False)
            else:
                self.Program(self.config.routine)
        self.tmenu_screen = None
    def parameter_screen(self, restore_backup):
        """
        [Internal]

        Internal method of SetParameters and RestoreParameters. Navigates the
        SIGACFG parameter screens, edits the queued parameters and then
        reopens the routine under test.

        :param restore_backup: Boolean if method should restore the parameters.
        :type restore_backup: bool

        Usage:

        >>> # Calling the method:
        >>> self.parameter_screen(restore_backup=False)
        """
        label_param = None
        exception = None
        stack = None
        self.tmenu_screen = self.check_tmenu_screen()
        try:
            self.driver_refresh()
        except Exception as error:
            exception = error
        if not exception:
            if self.config.browser.lower() == "chrome":
                # Chrome may raise a confirmation alert after the refresh.
                try:
                    self.wait_until_to( expected_condition = "alert_is_present" )
                    self.driver.switch_to_alert().accept()
                except:
                    pass
            # Open the configuration module and walk the lateral menu to the
            # parameters tree.
            self.Setup("SIGACFG", self.config.date, self.config.group, self.config.branch, save_input=False)
            self.SetLateralMenu(self.config.parameter_menu if self.config.parameter_menu else self.language.parameter_menu, save_input=False)
            self.wait_element(term=".ttoolbar", scrap_type=enum.ScrapType.CSS_SELECTOR)
            self.wait_element_timeout(term="img[src*=bmpserv1]", scrap_type=enum.ScrapType.CSS_SELECTOR, timeout=5.0, step=0.5)
            if self.element_exists(term="img[src*=bmpserv1]", scrap_type=enum.ScrapType.CSS_SELECTOR):
                endtime = time.time() + self.config.time_out
                while(time.time() < endtime and not label_param):
                    # Expand the environment node (bmpserv1 icon) and then the
                    # parameters node (bmpparam icon) beneath it.
                    container = self.get_current_container()
                    img_serv1 = next(iter(container.select("img[src*='bmpserv1']")), None )
                    label_serv1 = next(iter(img_serv1.parent.select('label')), None)
                    if label_serv1:
                        self.ClickTree(label_serv1.text.strip())
                        self.wait_element_timeout(term="img[src*=bmpparam]", scrap_type=enum.ScrapType.CSS_SELECTOR, timeout=5.0, step=0.5)
                        container = self.get_current_container()
                        img_param = next(iter(container.select("img[src*='bmpparam']")), None )
                        if img_param and img_param.parent.__bool__():
                            label_param = next(iter(img_param.parent.select('label')), None)
                            self.ClickTree(label_param.text.strip())
            if not label_param:
                self.log_error(f"Couldn't find Icon")
            self.ClickIcon(self.language.search)
            self.fill_parameters(restore_backup=restore_backup)
            # Queue processed: clear it so a later restore starts clean.
            self.parameters = []
            self.ClickIcon(self.language.exit)
            time.sleep(1)
            if self.config.coverage:
                self.driver_refresh()
            else:
                self.Finish()
            # Reopen the original program and routine.
            self.Setup(self.config.initial_program, self.config.date, self.config.group,
                self.config.branch, save_input=not self.config.autostart)
            if not self.tmenu_screen:
                if ">" in self.config.routine:
                    self.SetLateralMenu(self.config.routine, save_input=False)
                else:
                    self.Program(self.config.routine)
        else:
            # Refresh failed: unless the suite is already in tearDownClass,
            # count a restart and report the failure.
            stack = next(iter(list(map(lambda x: x.function, filter(lambda x: re.search('tearDownClass', x.function), inspect.stack())))), None)
            if(stack and not stack.lower() == "teardownclass"):
                self.restart_counter += 1
                self.log_error(f"Wasn't possible execute parameter_screen() method Exception: {exception}")
def check_tmenu_screen(self):
"""
[Internal]
"""
try:
return self.element_is_displayed(
next(iter(self.web_scrap(term=".tmenu", scrap_type=enum.ScrapType.CSS_SELECTOR, main_container="body")),
None))
except:
return False
def parameter_url_value(self, language, values = {'pt-br': '', 'en-us': '','es-es': '' }):
"""
[Internal]
Internal method of AddParameters to filter the values.
:param language: The language of config file.
:type language: str
:param values: The values organized by language.
:type values: dict[str, str]
Usage:
>>> # Calling the method:
>>> self.parameter_url_value(language = self.config.language.lower(), values = {'pt-br': portuguese_value })
"""
value = values[language]
if not value:
for vl in values.values():
if vl:
value = vl
value = value.replace("=","/\\")
value = value.replace("|","\\/")
value = value.replace("+","[2B]")
return value
def fill_parameters(self, restore_backup):
"""
[Internal]
Internal method of fill_parameters.
Searches and edits all parameters in the queue.
:param restore_backup: Boolean if method should restore the parameters.
:type restore_backup: bool
Usage:
>>> # Calling the method:
>>> self.fill_parameters(restore_backup=False)
"""
parameter_list = self.backup_parameters if restore_backup else self.parameters
for parameter in parameter_list:
self.SetValue(self.language.search_by, parameter[0])
self.used_ids = []
self.SetButton(self.language.search2)
self.ClickIcon(self.language.edit)
if not restore_backup:
current_branch = self.GetValue("X6_FIL")
current_pt_value = self.GetValue("X6_CONTEUD")
current_en_value = self.GetValue("X6_CONTENG")
current_spa_value = self.GetValue("X6_CONTSPA")
self.backup_parameters.append([parameter[0], current_branch.strip(), current_pt_value.strip(), current_en_value.strip(), current_spa_value.strip()])
self.SetValue("X6_FIL", parameter[1]) if parameter[1] else None
self.SetValue("X6_CONTEUD", parameter[2]) if parameter[2] else None
self.SetValue("X6_CONTENG", parameter[3]) if parameter[3] else None
self.SetValue("X6_CONTSPA", parameter[4]) if parameter[4] else None
self.SetButton(self.language.save)
def filter_by_tooltip_value(self, element_list, expected_text):
"""
[Internal]
Filters elements by finding the tooltip value that is shown when mouseover event
is triggered.
:param element_list: The list to be filtered
:type element_list: Beautiful Soup object list
:param expected_text: The expected tooltip text.
:type expected_text: str
:return: The filtered list of elements.
:rtype: Beautiful Soup object list
Usage:
>>> # Calling the method:
>>> filtered_elements = self.filter_by_tooltip_value(my_element_list, "Edit")
"""
return list(filter(lambda x: self.check_element_tooltip(x, expected_text), element_list))
    def check_element_tooltip(self, element, expected_text, contains=False):
        """
        [Internal]
        Internal method of ClickIcon.

        Fires the MouseOver event of an element, checks tooltip text, fires the MouseOut event and
        returns a boolean whether the tooltip has the expected text value or not.

        :param element: The target element object.
        :type element: BeautifulSoup object
        :param expected_text: The text that is expected to exist in button's tooltip.
        :type expected_text: str
        :param contains: When True accepts a substring match; otherwise the whole tooltip text must match.
        :type contains: bool

        :return: Boolean value whether element has tooltip text or not.
        :rtype: bool

        Usage:

        >>> # Call the method:
        >>> has_add_text = self.check_element_tooltip(button_object, "Add")
        """
        has_text = False
        # Re-resolve the selenium element on each use to avoid stale references.
        element_function = lambda: self.driver.find_element_by_xpath(xpath_soup(element))
        # Trigger the tooltip through a jQuery mouseover event.
        self.driver.execute_script(f"$(arguments[0]).mouseover()", element_function())
        time.sleep(1)  # give the tooltip time to render
        tooltips = self.driver.find_elements(By.CSS_SELECTOR, ".ttooltip")
        if not tooltips:
            # Fallback: look for the tooltip in the parsed DOM snapshot.
            tooltips = self.get_current_DOM().select('.ttooltip')
        if tooltips:
            # Case-insensitive comparison; only the first tooltip is checked
            # for the exact-match (contains=False) branch.
            has_text = (len(list(filter(lambda x: expected_text.lower() in x.text.lower(), tooltips))) > 0 if contains else (tooltips[0].text.lower() == expected_text.lower()))
        # Dismiss the tooltip so it doesn't interfere with later interactions.
        self.driver.execute_script(f"$(arguments[0]).mouseout()", element_function())
        return has_text
def WaitFieldValue(self, field, expected_value):
"""
Wait until field has expected value.
Recommended for Trigger fields.
:param field: The desired field.
:type field: str
:param expected_value: The expected value.
:type expected_value: str
Usage:
>>> # Calling method:
>>> self.WaitFieldValue("CN0_DESCRI", "MY DESCRIPTION")
"""
logger().info(f"Waiting for field {field} value to be: {expected_value}")
field = re.sub(r"(\:*)(\?*)", "", field).strip()
self.wait_element(field)
field_soup = self.get_field(field)
if not field_soup:
self.log_error(f"Couldn't find field {field}")
field_element = lambda: self.driver.find_element_by_xpath(xpath_soup(field_soup))
success = False
endtime = time.time() + 60
while(time.time() < endtime and not success):
if ((field_element().text.strip() == expected_value) or
(field_element().get_attribute("value").strip() == expected_value)):
success = True
time.sleep(0.5)
    def assert_result(self, expected, script_message):
        """
        [Internal]

        Asserts the result based on the expected value, writing the outcome
        (and any accumulated field errors) to the log file.

        :param expected: Expected value
        :type expected: bool
        :param script_message: Optional custom message appended to the log on failure.
        :type script_message: str

        Usage :

        >>> #Calling the method:
        >>> self.assert_result(True)
        """
        # AssertFalse on the stack inverts the meaning of a failure message.
        assert_false = self.search_stack('AssertFalse')
        self.expected = expected
        log_message = f"{self.log.ident_test()[1]} - "
        self.log.seconds = self.log.set_seconds(self.log.initial_time)
        if self.grid_input or self.grid_check:
            self.log_error("Grid fields were queued for input/check but weren't added/checked. Verify the necessity of a LoadGrid() call.")
        if self.errors:
            # Accumulated errors flip the expectation: a test expected to pass
            # fails (with the error details), and vice versa.
            if self.expected:
                for field_msg in self.errors:
                    log_message += (" " + field_msg)
            else:
                log_message = ""
            self.expected = not self.expected
        if self.expected:
            self.message = "" if not self.errors else log_message
            self.log.new_line(True, self.message)
        elif script_message:
            self.message = f"{log_message}{script_message}"
            self.log.new_line(False, self.message)
        else:
            self.message = self.language.assert_false_message if assert_false and not self.errors else log_message
            self.log.new_line(False, self.message)
        self.log.save_file()
        # Reset the error queue for the next assertion.
        self.errors = []
        logger().info(self.message) if self.message else None
        if self.expected:
            self.assertTrue(True, "Passed")
        else:
            self.assertTrue(False, self.message)
        self.message = ""
def ClickCheckBox(self, label_box_name, position=1):
"""
Clicks on a Label in box on the screen.
:param label_box_name: The label box name
:type label_box_name: str
:param position: position label box on interface
:type position: int
Usage:
>>> # Call the method:
>>> oHelper.ClickCheckBox("Search",1)
"""
if position > 0:
self.wait_element(label_box_name)
container = self.get_current_container()
if not container:
self.log_error("Couldn't locate container.")
labels_boxs = container.select("span")
filtered_labels_boxs = list(filter(lambda x: label_box_name.lower() in x.text.lower(), labels_boxs))
if position <= len(filtered_labels_boxs):
position -= 1
label_box = filtered_labels_boxs[position].parent
if 'tcheckbox' in label_box.get_attribute_list('class'):
label_box_element = lambda: self.soup_to_selenium(label_box)
self.click(label_box_element())
else:
self.log_error("Index the Ckeckbox invalid.")
else:
self.log_error("Index the Ckeckbox invalid.")
else:
self.log_error("Index the Ckeckbox invalid.")
    def ClickLabel(self, label_name):
        """
        Clicks on a Label on the screen.

        :param label_name: The label name
        :type label_name: str

        Usage:

        >>> # Call the method:
        >>> oHelper.ClickLabel("Search")
        """
        label = ''
        self.wait_element(label_name)
        logger().info(f"Clicking on {label_name}")
        endtime = time.time() + self.config.time_out
        # Retry until a label whose text contains label_name shows up.
        while(not label and time.time() < endtime):
            container = self.get_current_container()
            if not container:
                self.log_error("Couldn't locate container.")
            labels = container.select("label")
            filtered_labels = list(filter(lambda x: label_name.lower() in x.text.lower(), labels))
            # NOTE(review): EC.element_to_be_clickable(...) only constructs the
            # condition object (always truthy), so this filter keeps every
            # element — confirm whether an actual clickability check was meant.
            filtered_labels = list(filter(lambda x: EC.element_to_be_clickable((By.XPATH, xpath_soup(x))), filtered_labels))
            label = next(iter(filtered_labels), None)
        if not label:
            self.log_error("Couldn't find any labels.")
        label_element = lambda: self.soup_to_selenium(label)
        time.sleep(2)
        self.scroll_to_element(label_element())
        self.wait_until_to(expected_condition="element_to_be_clickable", element = label, locator = By.XPATH )
        self.set_element_focus(label_element())
        self.wait_until_to(expected_condition="element_to_be_clickable", element = label, locator = By.XPATH )
        self.click(label_element())
def get_current_container(self):
"""
[Internal]
An internal method designed to get the current container.
Returns the BeautifulSoup object that represents this container or NONE if nothing is found.
:return: The container object
:rtype: BeautifulSoup object
Usage:
>>> # Calling the method:
>>> container = self.get_current_container()
"""
soup = self.get_current_DOM()
containers = self.zindex_sort(soup.select(self.containers_selectors["GetCurrentContainer"]), True)
return next(iter(containers), None)
def get_all_containers(self):
"""
[Internal]
An internal method designed to get all containers.
Returns the List of BeautifulSoup object that represents this containers or NONE if nothing is found.
:return: List containers object
:rtype: List BeautifulSoup object
Usage:
>>> # Calling the method:
>>> container = self.get_all_containers()
"""
soup = self.get_current_DOM()
containers = soup.select(self.containers_selectors["AllContainers"])
return containers
def ClickTree(self, treepath, right_click=False, position=1):
"""
Clicks on TreeView component.
:param treepath: String that contains the access path for the item separate by ">" .
:type string: str
:param right_click: Clicks with the right button of the mouse in the last element of the tree.
:type string: bool
Usage:
>>> # Calling the method:
>>> oHelper.ClickTree("element 1 > element 2 > element 3")
>>> # Right Click example:
>>> oHelper.ClickTree("element 1 > element 2 > element 3", right_click=True)
"""
self.click_tree(treepath, right_click, position)
    def click_tree(self, treepath, right_click, position):
        """
        [Internal]

        Walks a TreeView level by level: for each label in *treepath* it finds
        the matching visible tree node, clicks its toggler (or the node
        itself) to expand it, and performs the final — optionally right-button
        — click on the last label.

        :param treepath: Labels separated by ">".
        :param right_click: Right-click the last element of the tree.
        :param position: 1-based occurrence index for repeated labels.
        """
        logger().info(f"Clicking on Tree: {treepath}")
        hierarchy=None
        position -= 1  # convert to 0-based index
        labels = list(map(str.strip, treepath.split(">")))
        for row, label in enumerate(labels):
            self.wait_blocker()
            last_item = True if row == len(labels)-1 else False
            success = False
            try_counter = 0
            label_filtered = label.lower().strip()
            # If this label was visited before, scroll back to the cached element.
            try:
                if self.tree_base_element and label_filtered == self.tree_base_element[0]:
                    self.scroll_to_element(self.tree_base_element[1])
            except:
                pass
            endtime = time.time() + self.config.time_out
            while((time.time() < endtime) and (try_counter < 3 and not success)):
                tree_node = self.find_tree_bs(label_filtered)
                # Drop nodes whose ancestor row is hidden.
                tree_node_filtered = list(filter(lambda x: "hidden" not in x.parent.parent.parent.parent.attrs['class'], tree_node))
                elements = list(filter(lambda x: label_filtered in x.text.lower().strip() and self.element_is_displayed(x), tree_node_filtered))
                if elements:
                    if position:
                        # A specific occurrence was requested.
                        # NOTE(review): here `elements` becomes a single Tag and
                        # the `for element in elements` below iterates its
                        # children — confirm this is the intended behavior.
                        elements = elements[position] if len(elements) >= position + 1 else next(iter(elements))
                        if hierarchy:
                            # Keep only descendants of the previously clicked node.
                            elements = elements if elements.attrs['hierarchy'].startswith(hierarchy) and elements.attrs['hierarchy'] != hierarchy else None
                    else:
                        elements = list(filter(lambda x: self.element_is_displayed(x), elements))
                        if hierarchy:
                            elements = list(filter(lambda x: x.attrs['hierarchy'].startswith(hierarchy) and x.attrs['hierarchy'] != hierarchy, elements))
                    for element in elements:
                        if not success:
                            # Prefer the expand/collapse toggler; fall back to the data cell content.
                            element_class = next(iter(element.select(".toggler, .lastchild, .data")), None)
                            if "data" in element_class.get_attribute_list("class"):
                                element_class = element_class.select("img, span")
                            for element_class_item in element_class:
                                if not success:
                                    element_click = lambda: self.soup_to_selenium(element_class_item)
                                    try:
                                        if last_item:
                                            start_time = time.time()
                                            self.wait_blocker()
                                            self.scroll_to_element(element_click())
                                            element_click().click()
                                            if self.check_toggler(label_filtered, element):
                                                success = self.check_hierarchy(label_filtered)
                                                if success and right_click:
                                                    self.send_action(action=self.click, element=element_click, right_click=right_click)
                                            else:
                                                if right_click:
                                                    self.send_action(action=self.click, element=element_click, right_click=right_click)
                                                success = self.clicktree_status_selected(label_filtered)
                                        else:
                                            # Intermediate level: expand and cache the clicked element.
                                            self.tree_base_element = label_filtered, self.soup_to_selenium(element_class_item)
                                            self.scroll_to_element(element_click())
                                            element_click().click()
                                            success = self.check_hierarchy(label_filtered)
                                        try_counter += 1
                                    except:
                                        pass
                                    if not success:
                                        # Second chance: click the parent element instead.
                                        try:
                                            element_click = lambda: self.soup_to_selenium(element_class_item.parent)
                                            self.scroll_to_element(element_click())
                                            element_click().click()
                                            # NOTE(review): check_toggler() is declared with two
                                            # parameters but called here with one; the resulting
                                            # TypeError is swallowed by this except — confirm intent.
                                            success = self.clicktree_status_selected(label_filtered) if last_item and not self.check_toggler(label_filtered) else self.check_hierarchy(label_filtered)
                                        except:
                                            pass
            if not last_item:
                treenode_selected = self.treenode_selected(label_filtered)
                # Remember the hierarchy prefix so deeper levels only match descendants.
                hierarchy = treenode_selected.attrs['hierarchy']
            if not success:
                self.log_error(f"Couldn't click on tree element {label}.")
def find_tree_bs(self, label):
"""
[Internal]
Search the label string in current container and return a treenode element.
"""
tree_node = ""
self.wait_element(term=label, scrap_type=enum.ScrapType.MIXED, optional_term=".ttreenode, .data")
endtime = time.time() + self.config.time_out
while (time.time() < endtime and not tree_node):
container = self.get_current_container()
tree_node = container.select(".ttreenode")
if not tree_node:
self.log_error("Couldn't find tree element.")
return(tree_node)
    def clicktree_status_selected(self, label_filtered, check_expanded=False):
        """
        [Internal]

        Inspects the currently selected tree node. With check_expanded=False
        it returns whether the selected node's text equals *label_filtered*;
        with check_expanded=True it returns whether that node's toggler span
        is in the "expanded" state.
        """
        treenode_selected = None
        success = True
        # Alternate between the current container and the whole DOM on retries.
        container_function = lambda: self.get_current_container() if success else self.get_current_DOM()
        endtime = time.time() + self.config.time_out
        while ((time.time() < endtime) and not treenode_selected):
            container = container_function()
            tr = container.select("tr")
            tr_class = list(filter(lambda x: "class" in x.attrs, tr))
            ttreenode = list(filter(lambda x: "ttreenode" in x.attrs['class'], tr_class))
            treenode_selected = list(filter(lambda x: "selected" in x.attrs['class'], ttreenode))
            if not treenode_selected:
                # Flip so the next attempt scans the other scope.
                success = not success
        if not check_expanded:
            if list(filter(lambda x: label_filtered == x.text.lower().strip(), treenode_selected)):
                return True
            else:
                return False
        else:
            tree_selected = next(iter(list(filter(lambda x: label_filtered == x.text.lower().strip(), treenode_selected))), None)
            # The node's first following span is its toggler when expandable.
            if tree_selected.find_all_next("span"):
                if "toggler" in next(iter(tree_selected.find_all_next("span"))).attrs['class']:
                    return "expanded" in next(iter(tree_selected.find_all_next("span")), None).attrs['class']
            else:
                return False
def check_toggler(self, label_filtered, element):
"""
[Internal]
"""
element_id = element.get_attribute_list('id')
tree_selected = self.treenode_selected(label_filtered)
if tree_selected:
if tree_selected.find_all_next("span"):
first_span = next(iter(tree_selected.find_all_next("span"))).find_parent('tr')
if first_span:
if next(iter(element_id)) == next(iter(first_span.get_attribute_list('id'))):
try:
return "toggler" in next(iter(tree_selected.find_all_next("span")), None).attrs['class']
except:
return False
else:
return False
else:
return False
else:
return False
else:
return False
def treenode_selected(self, label_filtered):
"""
[Internal]
Returns a tree node selected by label
"""
ttreenode = self.treenode()
treenode_selected = list(filter(lambda x: "selected" in x.attrs['class'], ttreenode))
return next(iter(list(filter(lambda x: label_filtered == x.text.lower().strip(), treenode_selected))), None)
def treenode(self):
"""
:return: treenode bs4 object
"""
container = self.get_current_container()
tr = container.select("tr")
tr_class = list(filter(lambda x: "class" in x.attrs, tr))
return list(filter(lambda x: "ttreenode" in x.attrs['class'], tr_class))
def check_hierarchy(self, label):
"""
:param label:
:return: True or False
"""
counter = 1
node_check = None
while (counter <= 3 and not node_check):
treenode_parent_id = self.treenode_selected(label).attrs['id']
treenode = list(filter(lambda x: self.element_is_displayed(x), self.treenode()))
node_check = next(iter(list(filter(lambda x: treenode_parent_id == x.attrs['parentid'], treenode))), None)
counter += 1
return True if node_check else self.clicktree_status_selected(label, check_expanded=True)
    def GridTree(self, column , tree_path, right_click = False):
        """
        Clicks on Grid TreeView component.

        :param column: Name of the grid column holding the tree labels.
        :type column: str
        :param tree_path: String that contains the access path for the item separate by ">" .
        :type tree_path: str
        :param right_click: Clicks with the right button of the mouse in the last element of the tree.
        :type right_click: bool

        Usage:

        >>> # Calling the method:
        >>> oHelper.GridTree("element 1 > element 2 > element 3")
        >>> # Right GridTree example:
        >>> oHelper.GridTree("element 1 > element 2 > element 3", right_click=True)
        """
        endtime = time.time() + self.config.time_out
        tree_list = list(map(str.strip, tree_path.split(">")))
        # The last label is clicked, not expanded.
        last_item = tree_list.pop()
        grid = self.get_grid(grid_element = '.tcbrowse')
        column_index = self.search_column_index(grid, column)
        # Expand each intermediate level; a successful expansion grows the
        # grid line count.
        while(time.time() < endtime and tree_list ):
            len_grid_lines = self.expand_treeGrid(column, tree_list[0])
            grid = self.get_grid(grid_element = '.tcbrowse')
            column_index = self.search_column_index(grid, column)
            if self.lenght_grid_lines(grid) > len_grid_lines:
                tree_list.remove(tree_list[0])
            else:
                # Retry the expansion once before moving on.
                len_grid_lines = self.expand_treeGrid(column, tree_list[0])
                tree_list.remove(tree_list[0])
        grid = self.get_grid(grid_element = '.tcbrowse')
        column_index = self.search_column_index(grid, column)
        div = self.search_grid_by_text(grid, last_item, column_index)
        self.wait_until_to(expected_condition="element_to_be_clickable", element = div, locator = By.XPATH )
        div_s = self.soup_to_selenium(div)
        time.sleep(2)  # TODO: change before merging into master
        self.click((div_s), enum.ClickType.SELENIUM , right_click)
def expand_treeGrid(self, column, item):
"""
[Internal]
Search for a column and expand the tree
Returns len of grid lines
"""
grid = self.get_grid(grid_element = '.tcbrowse')
column_index = self.search_column_index(grid, column)
len_grid_lines = self.lenght_grid_lines(grid)
div = self.search_grid_by_text(grid, item, column_index)
line = div.parent.parent
td = next(iter(line.select('td')), None)
self.expand_tree_grid_line(td)
self.wait_gridTree(len_grid_lines)
return len_grid_lines
def expand_tree_grid_line(self, element_soup):
"""
[Internal]
Click on a column and send the ENTER key
"""
self.wait_until_to(expected_condition="element_to_be_clickable", element = element_soup, locator = By.XPATH )
element_selenium = lambda: self.soup_to_selenium(element_soup)
element_selenium().click()
self.wait_blocker()
self.wait_until_to(expected_condition="element_to_be_clickable", element = element_soup, locator = By.XPATH )
self.send_keys(element_selenium(), Keys.ENTER)
def wait_gridTree(self, n_lines):
"""
[Internal]
Wait until the GridTree line count increases or decreases.
"""
endtime = time.time() + self.config.time_out
grid = self.get_grid(grid_element = '.tcbrowse')
while (time.time() < endtime and n_lines == self.lenght_grid_lines(grid) ):
grid = self.get_grid(grid_element = '.tcbrowse')
def search_grid_by_text(self, grid, text, column_index):
"""
[Internal]
Searches for text in grid columns
Returns the div containing the text
"""
columns_list = grid.select('td')
columns_list_filtered = list(filter(lambda x: int(x.attrs['id']) == column_index ,columns_list))
div_list = list(map(lambda x: next(iter(x.select('div')), None) ,columns_list_filtered))
div = next(iter(list(filter(lambda x: (text.strip() == x.text.strip() and x.parent.parent.attrs['id'] != '0') ,div_list))), None)
return div
def lenght_grid_lines(self, grid):
"""
[Internal]
Returns the leght of grid.
"""
grid_lines = grid.select("tbody tr")
return len(grid_lines)
    def TearDown(self):
        """
        Closes the webdriver and ends the test case.

        Usage:

        >>> #Calling the method
        >>> self.TearDown()
        """
        if self.config.new_log:
            self.execution_flow()
        webdriver_exception = None
        timeout = 1500
        # Coverage-collection banner text shown by the server (pt-BR).
        string = "Aguarde... Coletando informacoes de cobertura de codigo."
        if self.config.coverage:
            try:
                self.driver_refresh()
            except WebDriverException as e:
                logger().exception(str(e))
                webdriver_exception = e
        if webdriver_exception:
            message = f"Wasn't possible execute self.driver.refresh() Exception: {next(iter(webdriver_exception.msg.split(':')), None)}"
            logger().debug(message)
        if not webdriver_exception and not self.tss:
            # Standard (non-TSS) environment: log back in, dismiss warning
            # screens until the menu shows up, then close through Finish().
            self.wait_element(term="[name='cGetUser']", scrap_type=enum.ScrapType.CSS_SELECTOR, main_container='body')
            self.user_screen()
            self.environment_screen()
            endtime = time.time() + self.config.time_out
            while (time.time() < endtime and (
                    not self.element_exists(term=".tmenu", scrap_type=enum.ScrapType.CSS_SELECTOR, main_container="body"))):
                self.close_warning_screen()
            self.Finish()
        elif not webdriver_exception:
            # TSS environment: re-run setup and leave through the exit dialog.
            self.SetupTSS(self.config.initial_program, self.config.environment )
            self.SetButton(self.language.exit)
            self.SetButton(self.language.yes)
        if (self.search_text(selector=".tsay", text=string) and not webdriver_exception):
            # Wait for the coverage collection message to disappear.
            self.WaitProcessing(string, timeout)
        if self.config.num_exec:
            if not self.num_exec.post_exec(self.config.url_set_end_exec, 'ErrorSetFimExec'):
                self.restart_counter = 3
                self.log_error(f"WARNING: Couldn't possible send num_exec to server please check log.")
        try:
            self.driver.close()
        except Exception as e:
            logger().exception(f"Warning tearDown Close {str(e)}")
def containers_filter(self, containers):
"""
[Internal]
Filter and remove tsvg class an return a container_filtered
Usage:
>>> #Calling the method
>>> containers = self.containers_filter(containers)
"""
class_remove = "tsvg"
container_filtered = []
for container in containers:
iscorrect = True
container_class = list(filter(lambda x: "class" in x.attrs, container.select("div")))
if list(filter(lambda x: class_remove in x.attrs['class'], container_class)):
iscorrect = False
if iscorrect:
container_filtered.append(container)
return container_filtered
    def filter_label_element(self, label_text, container):
        """
        [Internal]

        Finds text nodes in *container* that start with *label_text*
        (optionally followed by whitespace, "?", ":", "*" or "." decorations)
        and maps each to its first div parent. When more than one element
        matches, only the displayed ones are returned.

        Usage:

        >>> #Calling the method
        >>> elements = self.filter_label_element(label_text, container)
        """
        # re.escape protects regex metacharacters contained in the label text itself.
        elements = list(map(lambda x: self.find_first_div_parent(x), container.find_all(text=re.compile(f"^{re.escape(label_text)}" + r"([\s\?:\*\.]+)?"))))
        return list(filter(lambda x: self.element_is_displayed(x), elements)) if len(elements) > 1 else elements
def filter_is_displayed(self, elements):
"""
[Internal]
Returns only displayed elements.
Usage:
>>> #Calling the method
>>> elements = self.filter_is_displayed(elements)
"""
return list(filter(lambda x: self.element_is_displayed(x), elements))
def element_is_displayed(self, element):
"""
[Internal]
"""
element_selenium = self.soup_to_selenium(element)
if element_selenium:
return element_selenium.is_displayed()
else:
return False
def search_text(self, selector, text):
"""
[Internal]
Return a element based on text and selector.
Usage:
>>> #Calling the method
>>> element = self.search_text(selector, text)
"""
container = self.get_current_container()
if container:
container_selector = container.select(selector)
if container_selector:
self.wait_element_timeout(term=text, scrap_type=enum.ScrapType.MIXED,
optional_term=".tsay", timeout=10, step=1, main_container="body", check_error = False)
return next(iter(list(filter(lambda x: text in re.sub(r"\t|\n|\r", " ", x.text), container_selector))), None)
def pop_dict_itens(self, dict_, element_id):
"""
[Internal]
"""
new_dictionary = {k: v for k, v in dict_.items() if v == element_id}
for key in list(new_dictionary.keys()):
dict_.pop(key)
return dict_
    def get_program_name(self):
        """
        [Internal]

        Inspects the call stack for a file named *TESTSUITE.PY or
        *TESTCASE.PY and returns the program-name prefix of that file
        (e.g. "MATA010" for MATA010TESTCASE.PY), or None when it cannot
        be determined.
        """
        # NOTE(review): splits paths on backslash only — POSIX paths are not
        # split; confirm Windows-only usage.
        stack_item_splited = next(iter(map(lambda x: x.filename.split("\\"), filter(lambda x: "TESTSUITE.PY" in x.filename.upper() or "TESTCASE.PY" in x.filename.upper(), inspect.stack()))), None)
        if stack_item_splited:
            get_file_name = next(iter(list(map(lambda x: "TESTSUITE.PY" if "TESTSUITE.PY" in x.upper() else "TESTCASE.PY", stack_item_splited))))
            # Capture whatever precedes the suite/case suffix in the filename.
            program_name = next(iter(list(map(lambda x: re.findall(fr"(\w+)(?:{get_file_name})", x.upper()), filter(lambda x: ".PY" in x.upper(), stack_item_splited)))), None)
            if program_name:
                return next(iter(program_name))
            else:
                return None
        else:
            return None
def GetText(self, string_left="", string_right=""):
"""
This method returns a string from modal based on the string in the left or right position that you send on parameter.
If the string_left was filled then the right side content is return.
If the string_right was filled then the left side content is return.
If no parameter was filled so the full content is return.
:param string_left: String of the left side of content.
:type string_left: str
:param string_right: String of the right side of content.
:type string_right: str
Usage:
>>> # Calling the method:
>>> oHelper.GetText(string_left="Left Text", string_right="Right Text")
>>> oHelper.GetText(string_left="Left Text")
>>> oHelper.GetText()
"""
return self.get_text(string_left, string_right)
def get_text(self, string_left, string_right):
"""
:param string:
:return:
"""
if string_left:
string = string_left
else:
string = string_right
if string:
self.wait_element(string)
container = self.get_current_container()
labels = container.select('label')
label = next(iter(list(filter(lambda x: string.lower() in x.text.lower(), labels))))
return self.get_text_position(label.text, string_left, string_right)
def get_text_position(self, text="", string_left="", string_right=""):
"""
:param string_left:
:param srting_right:
:return:
"""
if string_left and string_right:
return text[len(string_left):text.index(string_right)].strip()
elif string_left:
return text[len(string_left):].strip()
elif string_right:
return text[:-len(string_right)].strip()
else:
return text.strip()
def wait_smart_erp_environment(self):
"""
[Internal]
"""
content = False
endtime = time.time() + self.config.time_out
logger().debug("Waiting for SmartERP environment assembly")
while not content and (time.time() < endtime):
try:
soup = self.get_current_DOM()
content = True if next(iter(soup.select("img[src*='resources/images/parametersform.png']")), None) else False
except AttributeError:
pass
    def wait_until_to(self, expected_condition = "element_to_be_clickable", element = None, locator = None , timeout=False):
        """
        [Internal]

        This method is responsible for encapsulating "wait.until".

        :param expected_condition: Name of the selenium expected condition (a key of the dictionary below).
        :param element: BeautifulSoup element (converted to an XPath string when locator is By.XPATH) or a callable returning a selenium element.
        :param locator: Selenium locator strategy (e.g. By.XPATH) when a (locator, element) pair should be waited on.
        :param timeout: When truthy, temporarily shortens the wait timeout to one tenth of config.time_out.
        """
        expected_conditions_dictionary = {
            "element_to_be_clickable" : EC.element_to_be_clickable,
            "presence_of_all_elements_located" : EC.presence_of_all_elements_located,
            "visibility_of" : EC.visibility_of,
            "alert_is_present" : EC.alert_is_present,
            "visibility_of_element_located" : EC.visibility_of_element_located
        }
        # alert_is_present is the only condition that needs no target element.
        if not element and expected_condition != "alert_is_present" : self.log_error("Error method wait_until_to() - element is None")
        element = xpath_soup(element) if locator == By.XPATH else element
        if timeout:
            setattr(self.wait, '_timeout', self.config.time_out / 10)
        try:
            if locator:
                self.wait.until(expected_conditions_dictionary[expected_condition]((locator, element)))
            elif element:
                # Here `element` is expected to be a callable returning a selenium element.
                self.wait.until(expected_conditions_dictionary[expected_condition]( element() ))
            elif expected_condition == "alert_is_present":
                self.wait.until(expected_conditions_dictionary[expected_condition]())
        except TimeoutException as e:
            # Timeouts are logged but deliberately not propagated.
            logger().exception(f"Warning waint_until_to TimeoutException - Expected Condition: {expected_condition}")
            pass
        if timeout:
            # Restore the configured timeout.
            setattr(self.wait, '_timeout', self.config.time_out)
    def CheckHelp(self, text, button, text_help, text_problem, text_solution, verbosity):
        """
        Checks if some help screen is present in the screen at the time and takes an action.

        :param text: Text to be checked (whole help window text).
        :type text: str
        :param button: Button to be clicked; when empty the single visible button is used.
        :type button: str
        :param text_help: Only the help text will be checked.
        :type text_help: str
        :param text_problem: Only the problem text will be checked.
        :type text_problem: str
        :param text_solution: Only the solution text will be checked.
        :type text_solution: str
        :param verbosity: Check the text with high accuracy (exact match, spaces included).
        :type verbosity: bool

        Usage:

        >>> # Calling method to check all window text.
        >>> oHelper.CheckHelp("TK250CADRE Problema: Essa reclamação já foi informada anteriormente. Solução: Informe uma reclamação que ainda não tenha sido cadastrada nessa tabela.", "Fechar")
        >>> # Calling method to check help text only.
        >>> oHelper.CheckHelp(text_help="TK250CADRE", button="Fechar")
        >>> # Calling method to check problem text only, with high precision.
        >>> oHelper.CheckHelp(text_problem="Problema: Essa reclamação já foi informada anteriormente.", button="Fechar", verbosity=True)
        """
        text_help_extracted = ""
        text_problem_extracted = ""
        text_solution_extracted = ""
        text_extracted = ""

        if not button:
            # Fall back to the only non-empty button in the current container.
            button = self.get_single_button().text

        endtime = time.time() + self.config.time_out
        while(time.time() < endtime and not text_extracted):
            logger().info(f"Checking Help on screen: {text}")
            self.wait_element_timeout(term=text_help, scrap_type=enum.ScrapType.MIXED, timeout=2.5, step=0.5,
                optional_term=".tsay", check_error=False)
            container = self.get_current_container()
            container_filtered = container.select(".tsay")
            # Concatenate the text of every .tsay label in the help window.
            container_text = ''
            for x in range(len(container_filtered)):
                container_text += container_filtered[x].text + ' '
            try:
                # Split the window text into help / problem / solution sections
                # using the language-specific marker strings.
                text_help_extracted = container_text[container_text.index(self.language.checkhelp):container_text.index(self.language.checkproblem)]
                text_problem_extracted = container_text[container_text.index(self.language.checkproblem):container_text.index(self.language.checksolution)]
                text_solution_extracted = container_text[container_text.index(self.language.checksolution):]
            except:
                # Markers absent: sections stay empty; the whole text may be used below.
                pass
            if text_help:
                text = text_help
                text_extracted = text_help_extracted
            elif text_problem:
                text = text_problem
                text_extracted = text_problem_extracted
            elif text_solution:
                text = text_solution
                text_extracted = text_solution_extracted
            else:
                text_extracted = container_text
            if text_extracted:
                self.check_text_container(text, text_extracted, container_text, verbosity)
                # Close the help window and wait for it to disappear.
                self.SetButton(button, check_error=False)
                self.wait_element(term=text, scrap_type=enum.ScrapType.MIXED,
                    optional_term=".tsay", presence=False, main_container = self.containers_selectors["AllContainers"], check_error=False)
        if not text_extracted:
            self.log_error(f"Couldn't find: '{text}', text on display window is: '{container_text}'")
def check_text_container(self, text_user, text_extracted, container_text, verbosity):
if verbosity == False:
if text_user.replace(" ","") in text_extracted.replace(" ",""):
logger().info(f"Help on screen Checked: {text_user}")
return
else:
logger().info(f"Couldn't find: '{text_user}', text on display window is: '{container_text}'")
else:
if text_user in text_extracted:
logger().info(f"Help on screen Checked: {text_user}")
return
else:
logger().info(f"Couldn't find: '{text_user}', text on display window is: '{container_text}'")
    def get_single_button(self):
        """
        [Internal]

        Returns the first button of the current container that has a
        non-empty label.

        :return: soup element of the button.
        """
        container = self.get_current_container()
        buttons = container.select("button")
        # NOTE(review): next() without a default raises StopIteration when every
        # button label is empty, so the log_error below is effectively unreachable.
        button_filtered = next(iter(filter(lambda x: x.text != "", buttons)))
        if not button_filtered:
            self.log_error(f"Couldn't find button")

        return button_filtered
    def ClickMenuPopUpItem(self, label, right_click, position = 1):
        """
        Clicks on MenuPopUp Item based in a text

        :param label: Text in MenuPopUp to be clicked.
        :type label: str
        :param right_click: Whether the click should be a right click.
        :type right_click: bool
        :param position: 1-based index among items with the same text.
        :type position: int

        Usage:

        >>> # Calling the method.
        >>> oHelper.ClickMenuPopUpItem("Label")
        >>> # Calling the method using position.
        >>> oHelper.ClickMenuPopUpItem("Label", position = 2)
        """
        # Convert the user's 1-based position to a 0-based index.
        position -= 1

        self.wait_element(term=label, scrap_type=enum.ScrapType.MIXED, main_container="body", optional_term=".tmenupopup")

        label = label.lower().strip()

        endtime = time.time() + self.config.time_out
        tmenupopupitem_filtered = ""
        while(time.time() < endtime and not tmenupopupitem_filtered):
            tmenupopupitem = self.tmenupopupitem()
            if tmenupopupitem:
                # Only consider items that are actually rendered on screen.
                tmenupopupitem_displayed = list(filter(lambda x: self.element_is_displayed(x), tmenupopupitem))
                tmenupopupitem_filtered = list(filter(lambda x: x.text.lower().strip() == label, tmenupopupitem_displayed))
                if tmenupopupitem_filtered and len(tmenupopupitem_filtered) -1 >= position:
                    tmenupopupitem_filtered = tmenupopupitem_filtered[position]

        if not tmenupopupitem_filtered:
            self.log_error(f"Couldn't find tmenupopupitem: {label}")

        # Lazily resolve the Selenium element right before the click.
        tmenupopupitem_element = lambda: self.soup_to_selenium(tmenupopupitem_filtered)

        if right_click:
            self.click(tmenupopupitem_element(), right_click=right_click)
        else:
            self.click(tmenupopupitem_element())
def tmenupopupitem(self):
"""
:return:
"""
soup = self.get_current_DOM()
body = next(iter(soup.select("body")))
return body.select(".tmenupopupitem")
    def get_release(self):
        """
        Gets the current release of the Protheus.

        :return: The current release of the Protheus.
        :rtype: str

        Usage:

        >>> # Calling the method:
        >>> oHelper.get_release()
        >>> # Conditional with method:
        >>> # Situation: Have a input that only appears in release greater than or equal to 12.1.023
        >>> if self.oHelper.get_release() >= '12.1.023':
        >>>     self.oHelper.SetValue('AK1_CODIGO', 'codigoCT001')
        """
        # The release string is captured by the logging component during login.
        return self.log.release
    def try_click(self, element):
        """
        [Internal]

        Best-effort click on a soup *element*: any exception (stale element,
        not clickable, conversion failure) is silently ignored.
        """
        try:
            self.soup_to_selenium(element).click()
        except:
            # Deliberate best-effort behavior; callers retry or proceed.
            pass
def on_screen_enabled(self, elements):
"""
[Internal]
Returns a list if selenium displayed and enabled methods is True.
"""
if elements:
is_displayed = list(filter(lambda x: self.element_is_displayed(x), elements))
return list(filter(lambda x: self.soup_to_selenium(x).is_enabled(), is_displayed))
    def update_password(self):
        """
        [Internal]

        Update the password in the Protheus password change request screen.
        Re-uses the configured password for the current/new/confirm fields
        when the logged user matches the configured user (or is admin).
        """
        container = self.get_current_container()
        if container and self.element_exists(term=self.language.change_password, scrap_type=enum.ScrapType.MIXED, main_container=".tmodaldialog", optional_term=".tsay"):
            user_login = self.GetValue(self.language.user_login)
            if user_login == self.config.user or self.config.user.lower() == "admin":
                self.SetValue(self.language.current_password, self.config.password)
                # NOTE(review): "nem_password" looks like a typo for "new_password",
                # but it must match the attribute defined in the language module.
                self.SetValue(self.language.nem_password, self.config.password)
                self.SetValue(self.language.confirm_new_password, self.config.password)
                self.SetButton(self.language.finish)
                # Wait for the environment/database screen to come back.
                self.wait_element(self.language.database, main_container=".twindow")
    def ClickListBox(self, text):
        """
        Clicks on Item based in a text in a window tlistbox

        :param text: Text in windows to be clicked.
        :type text: str

        Usage:

        >>> # Calling the method.
        >>> oHelper.ClickListBox("text")
        """
        self.wait_element(term='.tlistbox', scrap_type=enum.ScrapType.CSS_SELECTOR, main_container=".tmodaldialog")
        container = self.get_current_container()
        tlist = container.select(".tlistbox")
        # First tlistbox that actually contains <option> entries.
        list_option = next(iter(list(filter(lambda x: x.select('option'), tlist))))
        list_option_filtered = list(filter(lambda x: self.element_is_displayed(x), list_option))
        # Exact (trimmed) text match selects the target option.
        element = next(iter(filter(lambda x: x.text.strip() == text.strip(), list_option_filtered)), None)
        element_selenium = self.soup_to_selenium(element)
        self.wait_until_to(expected_condition="element_to_be_clickable", element = element, locator = By.XPATH )
        element_selenium.click()
    def ClickImage(self, img_name, double_click=False):
        """
        Clicks in an Image button. They must be used only in case that 'ClickIcon' doesn't support.

        :param img_name: Image to be clicked (file name without path/extension).
        :type img_name: str
        :param double_click: Whether to double click instead of single click.
        :type double_click: bool

        Usage:

        >>> # Call the method:
        >>> oHelper.ClickImage("img_name")
        >>> oHelper.ClickImage("img_name",double_click=True)
        """
        self.wait_element(term="div.tbtnbmp > img, div.tbitmap > img", scrap_type=enum.ScrapType.CSS_SELECTOR, main_container = self.containers_selectors["ClickImage"])

        success = None
        endtime = time.time() + self.config.time_out

        while(time.time() < endtime and not success):
            img_list = self.web_scrap(term="div.tbtnbmp > img, div.tbitmap > img", scrap_type=enum.ScrapType.CSS_SELECTOR , main_container = self.containers_selectors["ClickImage"])
            # Match on the base file name extracted from the src attribute.
            img_list_filtered = list(filter(lambda x: img_name == self.img_src_filtered(x),img_list))
            img_soup = next(iter(img_list_filtered), None)
            if img_soup:
                element_selenium = lambda: self.soup_to_selenium(img_soup)
                self.set_element_focus(element_selenium())
                self.wait_until_to(expected_condition="element_to_be_clickable", element = img_soup, locator = By.XPATH, timeout=True)
                if double_click:
                    success = self.double_click(element_selenium())
                else:
                    success = self.click(element_selenium())
        return success
def img_src_filtered(self, img_soup):
"""
[Internal]
Return an image source filtered.
"""
img_src_string = self.soup_to_selenium(img_soup).get_attribute("src")
return next(iter(re.findall('[\w\_\-]+\.', img_src_string)), None).replace('.','')
    def try_element_to_be_clickable(self, element):
        """
        Try expected condition element_to_be_clickable by XPATH or ID.

        Falls back to the id of the element's previous <div> when the XPath
        wait fails and the parent carries an id attribute.
        """
        try:
            self.wait_until_to(expected_condition="element_to_be_clickable", element = element, locator = By.XPATH)
        except:
            if 'id' in element.find_parent('div').attrs:
                # NOTE(review): the fallback reads the id from find_previous("div"),
                # while the guard checks find_parent('div') - confirm this asymmetry.
                self.wait_until_to(expected_condition="element_to_be_clickable", element = element.find_previous("div").attrs['id'], locator = By.ID )
            else:
                pass
    def open_csv(self, csv_file, delimiter, column, header, filter_column, filter_value):
        """
        Returns a dictionary when the file has a header; otherwise returns a list.
        The folder must be entered in the CSVPath parameter in the config.json. Ex:

        .. note::
            This method returns data as strings; if necessary use some method to convert data like int().

        >>> config.json
        >>> "CSVPath" : "C:\\temp"

        :param csv_file: .csv file name
        :type csv_file: str
        :param delimiter: Delimiter option such like ';' or ',' or '|'
        :type delimiter: str
        :param column: For files with a header it's possible to return only one column by header name; use an int index for headerless files
        :type column: str
        :param header: Indicates whether the file contains a header; default is no header
        :type header: bool
        :param filter_column: It's possible to filter a specific value by column; if int it is 1-based
        :type filter_column: str or int
        :param filter_value: Value used in pair with filter_column parameter
        :type filter_value: str

        >>> # Call the method:
        >>> file_csv = test_helper.OpenCSV(delimiter=";", csv_file="no_header.csv")
        >>> file_csv_header = self.oHelper.OpenCSV(delimiter=";", csv_file="header.csv", header=True)
        >>> file_csv_header_filter = self.oHelper.OpenCSV(delimiter=";", csv_file="header.csv", header=True, filter_column='CAMPO', filter_value='A00_FILIAL')
        """
        # pandas 'infer' auto-detects the header row; None means headerless.
        has_header = 'infer' if header else None

        if self.config.csv_path:
            # NOTE(review): error_bad_lines is deprecated in pandas >= 1.3
            # (replaced by on_bad_lines) - confirm the pinned pandas version.
            data = pd.read_csv(f"{self.config.csv_path}\\{csv_file}", sep=delimiter, encoding='latin-1', error_bad_lines=False, header=has_header, index_col=False, dtype=str)
            df = pd.DataFrame(data)
            df = df.dropna(axis=1, how='all')
            filter_column_user = filter_column

            if filter_column and filter_value:
                if isinstance(filter_column, int):
                    # User-facing column indexes are 1-based; pandas is 0-based.
                    filter_column_user = filter_column - 1
                df = self.filter_dataframe(df, filter_column_user, filter_value)
            elif (filter_column and not filter_value) or (filter_value and not filter_column):
                logger().warning('WARNING: filter_column and filter_value is necessary to filter rows by column content. Data wasn\'t filtered')

            return self.return_data(df, has_header, column)
        else:
            self.log_error("CSV Path wasn't found, please check 'CSVPath' key in the config.json.")
def filter_dataframe(self, df, column, value):
"""
[Internal]
"""
return df[df[column] == value]
def return_data(self, df, has_header, column):
"""
[Internal]
"""
if has_header == 'infer':
return df[column].to_dict() if column else df.to_dict()
else:
return df[column].values.tolist() if isinstance(column, int) else df.values.tolist()
    def open_url_coverage(self, url='', initial_program='', environment=''):
        """
        [Internal]

        Open a webapp url with line parameters (coverage entry point via CASIGAADV).

        :param url: server url.
        :type url: str
        :param initial_program: program name.
        :type initial_program: str
        :param environment: environment server.
        :type environment: str

        Usage:

        >>> # Call the method:
        >>> self.open_url_coverage(url=self.config.url, initial_program=initial_program, environment=self.config.environment)
        """
        # StartProg=CASIGAADV wraps the target program for coverage collection.
        self.driver.get(f"{url}/?StartProg=CASIGAADV&A={initial_program}&Env={environment}")
def returns_printable_string(self, string):
"""
Returns a string only is printable characters
[Internal]
:param string: string value
:type string: str
"""
return "".join(list(filter(lambda x: x.isprintable(), string)))
def get_config_value(self, json_key):
"""
:param json_key: Json Key in config.json
:type json_key: str
:return: Json Key item in config.json
"""
json_key = json_key.lower()
config_dict = dict((k.lower(), v) for k, v in self.config.json_data.items())
if list(filter(lambda x: json_key in x, config_dict.keys())):
return config_dict[json_key]
else:
self.log_error("Doesn't contain that key in json object")
    def send_action(self, action = None, element = None, value = None, right_click=False):
        """
        Sends an action to element and compare it object state change.

        :param action: selenium function as a reference like click, actionchains or send_keys.
        :param element: callable returning the selenium element as a reference.
        :param value: send keys value
        :param right_click: True if you want a right click
        :return: True if there was a change in the object (DOM differs from before the action)
        """
        soup_before_event = self.get_current_DOM()
        soup_after_event = soup_before_event
        soup_select = None

        endtime = time.time() + self.config.time_out
        try:
            # Keep retrying the action until the DOM changes or we time out.
            while ((time.time() < endtime) and (soup_before_event == soup_after_event)):
                if right_click:
                    # For right click, a popup menu appearing is the success signal.
                    soup_select = self.get_soup_select(".tmenupopupitem")
                    if not soup_select:
                        action(element(), right_click=right_click)
                        self.wait_blocker()
                elif value:
                    action(element(), value)
                elif element:
                    self.set_element_focus(element())
                    action(element())
                elif action:
                    action()

                if soup_select:
                    soup_after_event = soup_select
                elif soup_select == []:
                    # Popup not found yet: force another iteration.
                    soup_after_event = soup_before_event
                else:
                    soup_after_event = self.get_current_DOM()

                time.sleep(1)
        except Exception as e:
            if self.config.smart_test or self.config.debug_log:
                logger().exception(f"Warning Exception send_action {str(e)}")
            return False

        if self.config.smart_test or self.config.debug_log:
            logger().debug(f"send_action method result = {soup_before_event != soup_after_event}")
        return soup_before_event != soup_after_event
def get_soup_select(self, selector):
"""
Get a soup select object.
:param selector: Css selector
:return: Return a soup select object
"""
soup = self.get_current_DOM()
return soup.select(selector)
    def check_mot_exec(self):
        """
        Check MotExec key content.

        Fails the test run (and closes the driver) when the configured issue
        ("MotExec") starts with the reserved prefix: exactly "TIR" or "TIR_...".

        :return:
        """
        m = re.match(pattern='((^TIR$)|(^TIR_))', string=self.config.issue)
        if m:
            self.driver.close()
            self.assertTrue(False, f'Current "MotExec" are using a reserved word: "{m.group(0)}", please check "config.json" key and execute again.')
    def report_comparison(self, base_file="", current_file=""):
        """
        Compare two report files and, if they differ, record the difference.

        .. warning::
            Important to use BaseLine_Spool key in config.json to work appropriately. Baseline_Spool is the path of report spool in your environment

        .. warning::
            Some words are changed to this pattern below:

            'Emissão: 01-01-2015'
            'Emision: 01-01-2015'
            'DT.Ref.: 01-01-2015'
            'Fc.Ref.: 01-01-2015'
            'Hora...: 00:00:00'
            'Hora Término: 00:00:00'
            '/' to '@'

            Only .xml

            'encoding=""'
            '"DateTime">2015-01-01T00:00:00'
            'ss:Width="100"'

        :param base_file: Base file that reflects the expected. If it doesn't exist make a copy of auto and then rename to base
        :param current_file: Current file recently impressed, this file is used to generate the auto file automatically.

        >>> # Calling the method:
        >>> self.oHelper.ReportComparison(base_file="acda080rbase.##r", current_file="acda080rauto.##r")
        :return:
        """
        message = ""

        if not self.config.baseline_spool:
            self.log_error("No path in BaseLine_Spool in config.json! Please make sure to put a valid path in this key")

        if not current_file:
            self.log_error("Report current file not found! Please inform a valid file")
        else:
            # Normalize the current file into "<name>auto<ext>" before diffing.
            auto_file = self.create_auto_file(current_file)
            logger().warning(
                f'We created a "auto" based in current file in "{self.config.baseline_spool}\\{current_file}". please, if you dont have a base file, make a copy of auto and rename to base then run again.')
            self.check_file(base_file, current_file)

            with open(f'{self.config.baseline_spool}\\{base_file}') as base_file:
                with open(auto_file) as auto_file:
                    # Line-by-line comparison; stop at the first divergence.
                    for line_base_file, line_auto_file in zip(base_file, auto_file):
                        if line_base_file != line_auto_file:
                            logger().warning("Make sure you are comparing two treated files")
                            message = f'Base line content: "{line_base_file}" is different of Auto line content: "{line_auto_file}"'
                            self.errors.append(message)
                            break
def create_auto_file(self, file=""):
"""
:param file:
:return:
"""
file_extension = file[-4:].lower()
full_path = f'{self.config.baseline_spool}\\{file}'
auto_file_path = f'{self.config.baseline_spool}\\{next(iter(file.split(".")))}auto{file_extension}'
if pathlib.Path(f'{auto_file_path}').exists():
pathlib.Path(f'{auto_file_path}').unlink()
with open(full_path) as file_obj:
readlines = file_obj.readlines()
for line in readlines:
content = self.sub_string(line, file_extension)
with open(
rf'{self.config.baseline_spool}\\{next(iter(file.split(".")))}auto{file_extension}',
"a") as write_file:
write_file.write(content)
logger().warning(
f'Auto file created in: "{auto_file_path}"')
return auto_file_path
def sub_string(self, line, file_extension):
"""
:param line:
:param file_extension:
:return:
"""
if not file_extension == '.xml':
emissao = re.search(r'(Emissão: )(?:(\d{2}-\d{2}-\d{4}))', line)
emision = re.search(r'(Emision: )(?:(\d{2}-\d{2}-\d{4}))', line)
dtref = re.search(r'(DT\.Ref\.: )(?:(\d{2}-\d{2}-\d{4}))', line)
fcref = re.search(r'(Fc\.Ref\.: )(?:(\d{2}-\d{2}-\d{4}))', line)
hora = re.search(r'(Hora\.\.\.: )(?:(\d{2}:\d{2}:\d{2}))', line)
hora_termino = re.search(r'(Hora Término: )(?:(\d{2}:\d{2}:\d{2}))', line)
slash = re.search(r'(/)', line)
if emissao:
line = re.sub(emissao.group(0), 'Emissão: 01-01-2015', line)
if emision:
line = re.sub(emision.group(0), 'Emision: 01-01-2015', line)
if dtref:
line = re.sub(dtref.group(0), 'DT.Ref.: 01-01-2015', line)
if fcref:
line = re.sub(fcref.group(0), 'Fc.Ref.: 01-01-2015', line)
if hora:
line = re.sub(hora.group(0), 'Hora...: 00:00:00', line)
if hora_termino:
line = re.sub(hora_termino.group(0), 'Hora Término: 00:00:00', line)
if slash:
line = re.sub(slash.group(0), '@', line)
else:
encoding = re.search(r'(encoding=)(?:("UTF-8")|(""))', line)
datetime = re.search(r'("DateTime">)(?:(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}))', line)
width = re.search(r'(ss:Width=)"(?:(\d+))"', line)
if encoding:
line = re.sub(encoding.group(0), 'encoding=""', line)
if datetime:
line = re.sub(datetime.group(0), '"DateTime">2015-01-01T00:00:00', line)
if width:
line = re.sub(datetime.group(0), 'ss:Width="100"', line)
return line
    def check_file(self, base_file="", current_file=""):
        """
        [Internal]

        Validates that both the base and the current spool files exist under
        config.baseline_spool, aborting via log_error otherwise.

        :param base_file: expected ("base") report file name.
        :param current_file: newly generated report file name.
        :return:
        """
        if not base_file:
            # An empty name is normalized to None; the path check below then
            # fails for "...\None" and triggers the base-file error message.
            base_file = None

        if not pathlib.Path(f'{self.config.baseline_spool}\\{base_file}').exists():
            self.log_error("Base file doesn't exist! Please confirm the file name and path. Now you can use auto file to rename to base.")

        if not pathlib.Path(f'{self.config.baseline_spool}\\{current_file}').exists():
            self.log_error("Current file doesn't exist! Please confirm the file name and path.")
    def set_multilanguage(self):
        """
        [Internal]

        Selects the configured language on the login screen: via the POUI
        select component when poui_login is enabled, otherwise via the
        classic .tcombobox element.
        """
        if self.config.poui_login:
            soup = self.get_current_DOM()
            po_select = next(iter(soup.select(".po-select-container")), None)
            if po_select:
                span_label = next(iter(po_select.select('span')), None)
                if span_label:
                    language = self.return_select_language()
                    # Only switch when the displayed label isn't already one
                    # of the accepted spellings for the configured language.
                    if not span_label.text.lower() in language:
                        self.set_language_poui(language, po_select)
        elif self.element_exists(term='.tcombobox', scrap_type=enum.ScrapType.CSS_SELECTOR, main_container="body", check_error=False):
            tcombobox = next(iter(self.web_scrap(term='.tcombobox', scrap_type=enum.ScrapType.CSS_SELECTOR, main_container='body')))
            selects = next(iter(tcombobox.select('select')))
            language = self.return_select_language()
            self.select_combo(selects, language, index=True)
    def set_language_poui(self, language, container):
        """
        [Internal]

        Opens the POUI language dropdown inside *container* and clicks the
        list item whose text matches one of the *language* spellings.

        :param language: accepted lower-case labels (see return_select_language).
        :param container: soup element of the .po-select-container.
        """
        # The clickable toggle is the first <span> that carries a class attribute.
        icon = next(iter(list(filter(lambda x: "class" in x.attrs, container.select('span')))), None)
        if icon:
            icon_element = self.soup_to_selenium(icon)
            icon_element.click()
        container_ul = next(iter(container.select('ul')), None)
        if container_ul:
            item = next(iter(list(filter(lambda x: x.text.lower() in language ,container_ul.select('li')))), None)
            element = self.soup_to_selenium(item)
            element.click()
def return_select_language(self):
if self.config.language == 'pt-br':
language = ['português', 'portugués', 'portuguese']
elif self.config.language == 'es-es':
language = ['espanhol', 'español', 'spanish']
elif self.config.language == 'en-us':
language = ['inglês', 'inglés', 'english']
return language
    def get_grid_content(self, grid_number, grid_element):
        """
        [Internal]

        Returns the <tr> rows of the requested grid.

        :param grid_number: 1-based grid index on screen.
        :type grid_number: int
        :param grid_element: selector/element identifying the grid container.
        :return: list of soup <tr> elements from the grid body.
        """
        # Convert the user's 1-based grid number to a 0-based index.
        grid_number -= 1

        self.wait_element(term=".tgetdados tbody tr, .tgrid tbody tr, .tcbrowse",
            scrap_type=enum.ScrapType.CSS_SELECTOR)

        grid = self.get_grid(grid_number, grid_element)
        return grid.select('tbody tr')
    def LengthGridLines(self, grid):
        """
        Returns the length of the grid.

        :param grid: grid rows (as returned by get_grid_content).
        :type grid: list
        :return: number of rows in the grid.
        :rtype: int
        """
        return len(grid)
| 42.348272
| 236
| 0.584527
|
8ed31b3dfc0a48329c092b14d8eb5cb43cfefe82
| 4,692
|
py
|
Python
|
template/learning.py
|
liyuan9988/IVOPEwithACME
|
d77fab09b2e1cb8d3dbd8b2ab88adcce6a853558
|
[
"MIT"
] | 1
|
2020-09-05T01:25:39.000Z
|
2020-09-05T01:25:39.000Z
|
template/learning.py
|
liyuan9988/IVOPEwithACME
|
d77fab09b2e1cb8d3dbd8b2ab88adcce6a853558
|
[
"MIT"
] | null | null | null |
template/learning.py
|
liyuan9988/IVOPEwithACME
|
d77fab09b2e1cb8d3dbd8b2ab88adcce6a853558
|
[
"MIT"
] | null | null | null |
# Lint as: python3
"""DFIV Learner implementation."""
from typing import Dict, List
import acme
from acme.tf import savers as tf2_savers
from acme.tf import utils as tf2_utils
from acme.utils import counting
from acme.utils import loggers
import numpy as np
import sonnet as snt
import tensorflow as tf
class DFIVLearner(acme.Learner, tf2_savers.TFSaveable):
  """DFIV learner.

  This is the learning component of a DFIV agent. IE it takes a dataset as input
  and implements update functionality to learn from this dataset. Optionally
  it takes a replay client as well to allow for updating of priorities.
  """

  def __init__(self,
               treatment_net: snt.Module,
               instrumental_net: snt.Module,
               policy_net: snt.Module,
               treatment_learning_rate: float,
               instrumental_learning_rate: float,
               policy_learning_rate: float,
               dataset: tf.data.Dataset,
               counter: counting.Counter = None,
               logger: loggers.Logger = None,
               checkpoint: bool = True):
    """Initializes the learner.

    Args:
      treatment_net: treatment network.
      instrumental_net: instrumental network.
      policy_net: policy network.
      treatment_learning_rate: learning rate for the treatment_net update.
      instrumental_learning_rate: learning rate for the instrumental_net update.
      policy_learning_rate: learning rate for the policy_net update.
      dataset: dataset to learn from.
      counter: Counter object for (potentially distributed) counting.
      logger: Logger object for writing logs to.
      checkpoint: boolean indicating whether to checkpoint the learner.
    """
    self._counter = counter or counting.Counter()
    self._logger = logger or loggers.TerminalLogger('learner', time_delta=1.)

    # Get an iterator over the dataset.
    self._iterator = iter(dataset)  # pytype: disable=wrong-arg-types
    # TODO(b/155086959): Fix type stubs and remove.

    self._treatment_net = treatment_net
    self._instrumental_net = instrumental_net
    self._policy_net = policy_net
    # One optimizer per network so each can use its own learning rate.
    self._treatment_optimizer = snt.optimizers.Adam(treatment_learning_rate)
    self._instrumental_optimizer = snt.optimizers.Adam(
        instrumental_learning_rate)
    self._policy_optimizer = snt.optimizers.Adam(policy_learning_rate)

    self._variables = [
        treatment_net.trainable_variables,
        instrumental_net.trainable_variables,
        policy_net.trainable_variables,
    ]
    self._num_steps = tf.Variable(0, dtype=tf.int32)

    # Create a snapshotter object.
    if checkpoint:
      self._snapshotter = tf2_savers.Snapshotter(
          objects_to_save={'treatment_net': treatment_net,
                           'instrumental_net': instrumental_net,
                           'policy_net': policy_net,
                           }, time_delta_minutes=60.)
    else:
      self._snapshotter = None

  @tf.function
  def _step(self) -> Dict[str, tf.Tensor]:
    """Do a step of SGD and update the priorities."""
    # TODO(liyuan): add the learning algorithm in this method.

    # Pull out the data needed for updates/priorities.
    inputs = next(self._iterator)
    o_tm1, a_tm1, r_t, d_t, o_t = inputs.data
    del o_tm1, a_tm1, r_t, d_t, o_t  # Unused until the update rule is written.

    # Template of the eventual update, kept for reference:
    # with tf.GradientTape() as tape:
    #   logits = self._network(o_tm1)
    #   cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    #   loss = cce(a_tm1, logits)
    # gradients = tape.gradient(loss, self._network.trainable_variables)
    # self._optimizer.apply(gradients, self._network.trainable_variables)

    self._num_steps.assign_add(1)

    # Bug fix: the original returned the undefined name `fetches`
    # (NameError when traced). Report the step counter until real losses
    # are implemented.
    fetches = {'num_steps': self._num_steps}
    return fetches

  def step(self):
    """Runs one SGD step, updates counters, snapshots and logs."""
    # Do a batch of SGD.
    result = self._step()

    # Update our counts and record it.
    counts = self._counter.increment(steps=1)
    result.update(counts)

    # Snapshot and attempt to write logs.
    if self._snapshotter is not None:
      self._snapshotter.save()
    self._logger.write(result)

  def get_variables(self, names: List[str]) -> List[np.ndarray]:
    """Returns the network variables as numpy arrays (names are ignored)."""
    return tf2_utils.to_numpy(self._variables)

  @property
  def state(self):
    """Returns the stateful parts of the learner for checkpointing."""
    # Bug fix: the original referenced self._optimizer, which is never
    # created; expose the three optimizers that actually exist.
    return {
        'treatment_net': self._treatment_net,
        'instrumental_net': self._instrumental_net,
        'policy_net': self._policy_net,
        'treatment_optimizer': self._treatment_optimizer,
        'instrumental_optimizer': self._instrumental_optimizer,
        'policy_optimizer': self._policy_optimizer,
        'num_steps': self._num_steps
    }
| 34.755556
| 80
| 0.684569
|
b56b3f5817de71b581872d829ccb6bffd06868e9
| 297
|
py
|
Python
|
service/__init__.py
|
volbil/ethereum-indexer
|
5efef3afef07a7afed558983385ebb666f27d5d9
|
[
"MIT"
] | null | null | null |
service/__init__.py
|
volbil/ethereum-indexer
|
5efef3afef07a7afed558983385ebb666f27d5d9
|
[
"MIT"
] | null | null | null |
service/__init__.py
|
volbil/ethereum-indexer
|
5efef3afef07a7afed558983385ebb666f27d5d9
|
[
"MIT"
] | null | null | null |
from flask_cors import CORS
from flask import Flask
import config
def create_app():
    """Application factory: builds and configures the Flask app."""
    application = Flask(__name__)
    application.config["SECRET_KEY"] = config.secret
    CORS(application)

    with application.app_context():
        # Imported inside the context so the blueprint module can rely on
        # the application during its setup.
        from .history import history
        application.register_blueprint(history)

    return application
| 18.5625
| 44
| 0.680135
|
84248feacf7542a6bf2634a042cdeb8e970b3bfa
| 6,027
|
py
|
Python
|
discovery-infra/test_infra/helper_classes/nodes.py
|
rollandf/assisted-test-infra
|
f2d3411ceb0838f3045e4ad88f2686bed516cf8f
|
[
"Apache-2.0"
] | 27
|
2020-06-26T13:38:14.000Z
|
2022-03-13T11:06:34.000Z
|
discovery-infra/test_infra/helper_classes/nodes.py
|
cdvultur/assisted-test-infra
|
a3deeeac975c9be087c7177827d991d1680b720a
|
[
"Apache-2.0"
] | 1,559
|
2020-06-27T15:36:37.000Z
|
2022-03-31T22:58:50.000Z
|
discovery-infra/test_infra/helper_classes/nodes.py
|
rollandf/assisted-test-infra
|
f2d3411ceb0838f3045e4ad88f2686bed516cf8f
|
[
"Apache-2.0"
] | 83
|
2020-06-26T09:24:43.000Z
|
2022-03-08T23:19:59.000Z
|
import json
import logging
import random
from typing import Dict, Iterator, List
from munch import Munch
from test_infra.controllers.node_controllers.node import Node
from test_infra.controllers.node_controllers.node_controller import NodeController
from test_infra.tools.concurrently import run_concurrently
class NodeMapping:
    """Pairs a controller node with its matching cluster host record."""

    def __init__(self, node, cluster_host):
        # The node's name is mirrored as a direct attribute for easy lookups.
        self.name = node.name
        self.node = node
        self.cluster_host = cluster_host
class Nodes:
    """
    Collection wrapper around a NodeController: exposes the cluster's nodes
    (masters + workers) with lazy caching, lookup helpers, and bulk
    operations (start/stop/reboot/...) executed concurrently.
    """

    DEFAULT_STATIC_IPS_CONFIG = False

    def __init__(self, node_controller: NodeController):
        self.controller = node_controller
        # Lazy caches; invalidated via drop_cache().
        self._nodes = None
        self._nodes_as_dict = None

    @property
    def masters_count(self):
        return self.controller.masters_count

    @property
    def workers_count(self):
        return self.controller.workers_count

    @property
    def nodes_count(self):
        # Total number of nodes managed by the controller.
        return self.workers_count + self.masters_count

    @property
    def nodes(self) -> List[Node]:
        # Cached node list; use get_nodes(refresh=True) to re-query.
        if not self._nodes:
            self._nodes = self.controller.list_nodes()
        return self._nodes

    @property
    def is_ipv4(self):
        return self.controller.is_ipv4

    @property
    def is_ipv6(self):
        return self.controller.is_ipv6

    def __getitem__(self, i):
        return self.nodes[i]

    def __len__(self):
        return len(self.nodes)

    def __iter__(self) -> Iterator[Node]:
        for n in self.nodes:
            yield n

    def drop_cache(self):
        # Invalidate both caches so the next access re-queries the controller.
        self._nodes = None
        self._nodes_as_dict = None

    def get_nodes(self, refresh=False) -> List[Node]:
        if refresh:
            self.drop_cache()
        return self.nodes

    def get_masters(self):
        # Node role is derived from its name.
        return [node for node in self.nodes if node.is_master_in_name()]

    def get_workers(self):
        return [node for node in self.nodes if node.is_worker_in_name()]

    @property
    def nodes_as_dict(self):
        # Cached name -> Node mapping.
        if not self._nodes_as_dict:
            self._nodes_as_dict = {node.name: node for node in self.nodes}
        return self._nodes_as_dict

    @property
    def setup_time(self):
        return self.controller.setup_time

    def get_random_node(self):
        return random.choice(self.nodes)

    def shutdown_all(self):
        self.run_for_all_nodes("shutdown")

    def notify_iso_ready(self):
        self.controller.notify_iso_ready()

    def start_all(self, check_ips=True):
        self.run_for_all_nodes("start", check_ips)

    def start_given(self, nodes):
        self.run_for_given_nodes(nodes, "start")

    def shutdown_given(self, nodes):
        self.run_for_given_nodes(nodes, "shutdown")

    def format_all_disks(self):
        self.run_for_all_nodes("format_disk")

    def destroy_all(self):
        # NOTE(review): currently identical to shutdown_all(); actual VM
        # deletion is delegated to destroy_all_nodes().
        self.run_for_all_nodes("shutdown")

    def destroy_all_nodes(self):
        self.controller.destroy_all_nodes()

    def prepare_nodes(self):
        self.controller.prepare_nodes()

    def reboot_all(self):
        self.run_for_all_nodes("restart")

    def reboot_given(self, nodes):
        self.run_for_given_nodes(nodes, "restart")

    def get_cluster_network(self):
        return self.controller.get_cluster_network()

    def set_correct_boot_order(self, nodes=None, start_nodes=False):
        nodes = nodes or self.nodes
        logging.info("Going to set correct boot order to nodes: %s", nodes)
        self.run_for_given_nodes(nodes, "set_boot_order_flow", False, start_nodes)

    def run_for_all_nodes(self, func_name, *args):
        return self.run_for_given_nodes(self.nodes, func_name, *args)

    @staticmethod
    def run_for_given_nodes(nodes, func_name, *args):
        # Invokes node.<func_name>(*args) on every node concurrently.
        logging.info("Running <%s> on nodes: %s", func_name, [node.name for node in nodes])
        return run_concurrently([(getattr(node, func_name), *args) for node in nodes])

    def run_for_given_nodes_by_cluster_hosts(self, cluster_hosts, func_name, *args):
        return self.run_for_given_nodes([self.get_node_from_cluster_host(host) for
                                         host in cluster_hosts], func_name, *args)

    @staticmethod
    def run_ssh_command_on_given_nodes(nodes, command) -> Dict:
        # Returns {node_name: command_output} gathered concurrently.
        return run_concurrently({node.name: (node.run_command, command) for node in nodes})

    def set_wrong_boot_order(self, nodes=None, start_nodes=True):
        nodes = nodes or self.nodes
        logging.info("Setting wrong boot order for %s", self.nodes_as_dict.keys())
        self.run_for_given_nodes(nodes, "set_boot_order_flow", True, start_nodes)

    def get_bootstrap_node(self, cluster) -> Node:
        # Returns None implicitly when no host is flagged as bootstrap.
        for cluster_host_object in cluster.get_hosts():
            if cluster_host_object.get("bootstrap", False):
                node = self.get_node_from_cluster_host(cluster_host_object)
                logging.info("Bootstrap node is %s", node.name)
                return node

    def create_nodes_cluster_hosts_mapping(self, cluster):
        # hostname -> NodeMapping(Node, Munch-wrapped cluster host record).
        node_mapping_dict = {}
        for cluster_host_object in cluster.get_hosts():
            name = self.get_cluster_hostname(cluster_host_object)
            node_mapping_dict[name] = NodeMapping(self.nodes_as_dict[name],
                                                  Munch.fromDict(cluster_host_object))
        return node_mapping_dict

    def get_node_from_cluster_host(self, cluster_host_object):
        hostname = self.get_cluster_hostname(cluster_host_object)
        return self.get_node_by_hostname(hostname)

    def get_node_by_hostname(self, get_node_by_hostname):
        return self.nodes_as_dict[get_node_by_hostname]

    def get_cluster_host_obj_from_node(self, cluster, node):
        mapping = self.create_nodes_cluster_hosts_mapping(cluster=cluster)
        return mapping[node.name].cluster_host

    @staticmethod
    def get_cluster_hostname(cluster_host_object):
        # The hostname lives inside the JSON-encoded "inventory" field.
        inventory = json.loads(cluster_host_object["inventory"])
        return inventory["hostname"]

    def set_single_node_ip(self, ip):
        self.controller.set_single_node_ip(ip)
| 32.058511
| 91
| 0.684088
|
30cef8b2a5d8d175df736de6c4ce2fc2cd8c9987
| 1,000
|
py
|
Python
|
mk42/apps/core/querysets/group.py
|
vint21h/mk42
|
1574d1143ea829212203f2be0b11b44de1e7c722
|
[
"WTFPL"
] | 5
|
2017-06-18T17:04:49.000Z
|
2017-11-02T11:44:36.000Z
|
mk42/apps/core/querysets/group.py
|
vint21h/mk42
|
1574d1143ea829212203f2be0b11b44de1e7c722
|
[
"WTFPL"
] | 13
|
2017-07-05T06:35:42.000Z
|
2017-09-06T02:04:04.000Z
|
mk42/apps/core/querysets/group.py
|
vint21h/mk42
|
1574d1143ea829212203f2be0b11b44de1e7c722
|
[
"WTFPL"
] | 10
|
2017-06-29T05:31:52.000Z
|
2017-10-27T09:31:32.000Z
|
# -*- coding: utf-8 -*-
# mk42
# mk42/apps/core/querysets/groups.py
from __future__ import unicode_literals
from django.db import models
__all__ = [
"GroupQuerySet",
]
class GroupQuerySet(models.QuerySet):
    """
    Custom queryset for the Group model with active/inactive shortcuts.
    """

    def _with_active_flag(self, flag):
        # Shared filter used by both public helpers below.
        return self.filter(active=flag)

    def active(self, *args, **kwargs):
        """
        Return groups whose ``active`` flag is set.

        Extra positional/keyword args are accepted for interface
        compatibility and ignored.

        :return: queryset with active groups.
        :rtype: django.db.models.query.QuerySet.
        """
        return self._with_active_flag(True)

    def inactive(self, *args, **kwargs):
        """
        Return groups whose ``active`` flag is cleared.

        Extra positional/keyword args are accepted for interface
        compatibility and ignored.

        :return: queryset with inactive groups.
        :rtype: django.db.models.query.QuerySet.
        """
        return self._with_active_flag(False)
| 20.833333
| 48
| 0.586
|
2c854c75dc13261a567a8ab760e6cd35c32062db
| 2,644
|
py
|
Python
|
Space-Debris-Detection/spaceDebrisDetection.py
|
simransuresh/Geospatial-CV
|
d3c6aab70cbfedc5567701478872b51ed5742426
|
[
"MIT"
] | null | null | null |
Space-Debris-Detection/spaceDebrisDetection.py
|
simransuresh/Geospatial-CV
|
d3c6aab70cbfedc5567701478872b51ed5742426
|
[
"MIT"
] | null | null | null |
Space-Debris-Detection/spaceDebrisDetection.py
|
simransuresh/Geospatial-CV
|
d3c6aab70cbfedc5567701478872b51ed5742426
|
[
"MIT"
] | null | null | null |
""" Import the master rockstar first of all """
import cv2 as cv
""" Import numpy lib for kernel operations """
import numpy as np
""" All the area except debris is black in space. So create a boundary from black (0) to white (255) """
lowerBoundary = np.array([0, 0, 0])
upperBoundary = np.array([0, 0, 255])
""" Capture the video. Video courtesy: Youtube, Movie courtesy: Gravity """
capture = cv.VideoCapture("./assets/debris.mp4")
""" Use two kernels opening and closing for filtering identity matrix of range row*column """
kernelOpen = np.ones((5, 5))
kernelClose = np.ones((20, 20))
""" Read the capture frame-by-frame """
while True:
isTrue, frame = capture.read()
""" Resizing for better visualization """
frame = cv.resize(frame, (1366,768))
""" Convert BGR to HSV
BGR represents color luminance or intensity. It is hard to separate colors from it
HSV Hue Saturation Value separates image luminance from color information
HSV is a rearrangement of RGB in a cylindrical shape
"""
hsv_image= cv.cvtColor(frame, cv.COLOR_BGR2HSV)
""" Create the Mask that uses kernel for filtering """
mask = cv.inRange(hsv_image, lowerBoundary, upperBoundary)
""" Pass the mask through the kernel for morphological manipulation """
maskOpen = cv.morphologyEx(mask, cv.MORPH_OPEN, kernelOpen)
maskClose = cv.morphologyEx(maskOpen, cv.MORPH_CLOSE, kernelClose)
""" Using the mask obtained and chain-approx-none algorithm, find the contours in it
RETR_EXTERNAL - only eldest contour is given preference """
contours, hierachy = cv.findContours(maskClose.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
""" Draw the contours found in the frame. Color is blue and thickness of 3 """
cv.drawContours(frame, contours, -1, (255,0,0), 3)
""" Draw red rectangles to distinguish the debris and add yellow text on it """
for index in range(len(contours)):
""" Plain x and y coordinate with its width and height """
x, y, w, h = cv.boundingRect(contours[index])
cv.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv.putText(frame, str(index + 1), (x, y + h), cv.FONT_HERSHEY_SIMPLEX, 1, (0,255,255))
""" Detection of space debris shown.
Anything other than black in the boundary is detected as blue but only debris is marked as text"""
cv.imshow("Space debris detection output", frame)
""" Play till q key is pressed """
if cv.waitKey(10) & 0xFF == ord('q'):
break
""" Release the video capture and destroy window object """
capture.release()
cv.destroyAllWindows()
| 42.645161
| 106
| 0.677761
|
4cfdd673478ba99b7ab61721556c67accf50dd2b
| 3,243
|
py
|
Python
|
tests/loggers/test_neptune.py
|
akreuzer/pytorch-lightning
|
34bc1493596697a2dfc8c76036921b2bb2fb5013
|
[
"Apache-2.0"
] | null | null | null |
tests/loggers/test_neptune.py
|
akreuzer/pytorch-lightning
|
34bc1493596697a2dfc8c76036921b2bb2fb5013
|
[
"Apache-2.0"
] | null | null | null |
tests/loggers/test_neptune.py
|
akreuzer/pytorch-lightning
|
34bc1493596697a2dfc8c76036921b2bb2fb5013
|
[
"Apache-2.0"
] | null | null | null |
from unittest.mock import patch, MagicMock
import torch
import tests.base.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import NeptuneLogger
from tests.base import LightningTestModel
@patch('pytorch_lightning.loggers.neptune.neptune')
def test_neptune_online(neptune):
    """Check that online mode initializes the neptune SDK with the given credentials."""
    logger = NeptuneLogger(api_key='test', offline_mode=False, project_name='project')
    # The constructor must forward api_key/project_name to neptune.init.
    neptune.init.assert_called_once_with(api_token='test', project_qualified_name='project')
    # name/version are expected to mirror the created experiment's name/id.
    assert logger.name == neptune.create_experiment().name
    assert logger.version == neptune.create_experiment().id
@patch('pytorch_lightning.loggers.neptune.neptune')
def test_neptune_additional_methods(neptune):
    """Verify each NeptuneLogger convenience method forwards to the experiment mock.

    Each call is followed by reset_mock() so the next assert_called_once_with
    checks exactly one call; the order of blocks below is therefore significant.
    """
    logger = NeptuneLogger(offline_mode=True)
    # log_metric: tensors and floats pass through unchanged; a step becomes x=/y=.
    logger.log_metric('test', torch.ones(1))
    neptune.create_experiment().log_metric.assert_called_once_with('test', torch.ones(1))
    neptune.create_experiment().log_metric.reset_mock()
    logger.log_metric('test', 1.0)
    neptune.create_experiment().log_metric.assert_called_once_with('test', 1.0)
    neptune.create_experiment().log_metric.reset_mock()
    logger.log_metric('test', 1.0, step=2)
    neptune.create_experiment().log_metric.assert_called_once_with('test', x=2, y=1.0)
    neptune.create_experiment().log_metric.reset_mock()
    # log_text is implemented on top of log_metric.
    logger.log_text('test', 'text')
    neptune.create_experiment().log_metric.assert_called_once_with('test', 'text')
    neptune.create_experiment().log_metric.reset_mock()
    # log_image: plain call and step-ed call.
    logger.log_image('test', 'image file')
    neptune.create_experiment().log_image.assert_called_once_with('test', 'image file')
    neptune.create_experiment().log_image.reset_mock()
    logger.log_image('test', 'image file', step=2)
    neptune.create_experiment().log_image.assert_called_once_with('test', x=2, y='image file')
    neptune.create_experiment().log_image.reset_mock()
    # Artifact / property / tag helpers.
    logger.log_artifact('file')
    neptune.create_experiment().log_artifact.assert_called_once_with('file', None)
    logger.set_property('property', 10)
    neptune.create_experiment().set_property.assert_called_once_with('property', 10)
    logger.append_tags('one tag')
    neptune.create_experiment().append_tags.assert_called_once_with('one tag')
    neptune.create_experiment().append_tags.reset_mock()
    # A list of tags is unpacked into positional arguments.
    logger.append_tags(['two', 'tags'])
    neptune.create_experiment().append_tags.assert_called_once_with('two', 'tags')
def test_neptune_leave_open_experiment_after_fit(tmpdir):
    """Verify that neptune experiment was closed after training"""
    tutils.reset_seed()
    hparams = tutils.get_default_hparams()
    model = LightningTestModel(hparams)

    def _run_training(logger):
        # Replace the real experiment with a mock so stop() calls can be counted.
        logger._experiment = MagicMock()
        trainer = Trainer(
            default_root_dir=tmpdir,
            max_epochs=1,
            train_percent_check=0.05,
            logger=logger
        )
        trainer.fit(model)
        return logger

    # Default behavior: the experiment is stopped exactly once after fit().
    logger_close_after_fit = _run_training(NeptuneLogger(offline_mode=True))
    assert logger_close_after_fit._experiment.stop.call_count == 1
    # With close_after_fit=False the experiment must be left open.
    logger_open_after_fit = _run_training(NeptuneLogger(offline_mode=True, close_after_fit=False))
    assert logger_open_after_fit._experiment.stop.call_count == 0
| 98
| 0.753006
|
cba1f87518ef9ecc5da79255c4b60ed7a00bf61c
| 4,606
|
py
|
Python
|
parser/fase2/team08/Tytus_SQLPARSER_G8/Instrucciones/FunctionBinaryString/Convert.py
|
Gabriel-15/tytus
|
fb00718bf3fcc5211a3604fba1a551f44bdc6deb
|
[
"MIT"
] | 35
|
2020-12-07T03:11:43.000Z
|
2021-04-15T17:38:16.000Z
|
parser/fase2/team08/Tytus_SQLPARSER_G8/Instrucciones/FunctionBinaryString/Convert.py
|
Gabriel-15/tytus
|
fb00718bf3fcc5211a3604fba1a551f44bdc6deb
|
[
"MIT"
] | 47
|
2020-12-09T01:29:09.000Z
|
2021-01-13T05:37:50.000Z
|
parser/fase2/team08/Tytus_SQLPARSER_G8/Instrucciones/FunctionBinaryString/Convert.py
|
Gabriel-15/tytus
|
fb00718bf3fcc5211a3604fba1a551f44bdc6deb
|
[
"MIT"
] | 556
|
2020-12-07T03:13:31.000Z
|
2021-06-17T17:41:10.000Z
|
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.TablaSimbolos.Tipo import Tipo_Dato, Tipo
from Instrucciones.Excepcion import Excepcion
from Instrucciones.Expresiones.Aritmetica import Aritmetica
from Instrucciones.Expresiones.Primitivo import Primitivo
from decimal import Decimal
from datetime import date, datetime
import time
#from dateutil.parser import parse
class Convert(Instruccion):
    """AST node for SQL ``CONVERT(value AS type)``.

    Evaluates the wrapped expression and casts the result to the requested
    output type, reporting a semantic Excepcion on invalid input.
    """

    def __init__(self, valor, tipo, tipo_salida, strGram, linea, columna):
        Instruccion.__init__(self, tipo, linea, columna, strGram)
        self.valor = valor              # expression whose value is converted
        self.tipo = tipo                # resulting type (updated on success)
        self.tipo_salida = tipo_salida  # requested target type

    def ejecutar(self, tabla, arbol):
        """Evaluate the inner expression and convert it to the target type.

        :return: the converted value, or an Excepcion (also appended to the
            tree's error list and console) when the conversion is invalid.
        """
        super().ejecutar(tabla, arbol)
        resultado = self.valor.ejecutar(tabla, arbol)
        if isinstance(resultado, Excepcion):
            return resultado
        try:
            if self.tipo_salida.tipo == Tipo_Dato.INTEGER:
                # Bug fix: the cast result used to be discarded and the raw,
                # unconverted value returned; return the converted value.
                convertido = int(resultado)
                self.tipo = Tipo(Tipo_Dato.INTEGER)
                return convertido
            elif self.tipo_salida.tipo == Tipo_Dato.SMALLINT:
                convertido = int(resultado)
                self.tipo = Tipo(Tipo_Dato.SMALLINT)
                return convertido
            elif self.tipo_salida.tipo == Tipo_Dato.DECIMAL:
                convertido = float(resultado)
                self.tipo = Tipo(Tipo_Dato.DECIMAL)
                return convertido
            elif self.tipo_salida.tipo == Tipo_Dato.BOOLEAN:
                # NOTE(review): empty/zero input falls through and returns None,
                # as in the original implementation.
                if bool(resultado):
                    verdadero = ("true", "t", "1", "yes")
                    false = ("false", "f", "0", "not")
                    if resultado in (verdadero + false):
                        self.tipo = Tipo(Tipo_Dato.BOOLEAN)
                        return str(resultado).lower() in verdadero
                    else:
                        error = Excepcion('22P02', "Semántico", f"La sintaxis de entrada no es válida para tipo {self.valor.tipo.toString()}: << {resultado} >> a Boolean", self.linea, self.columna)
                        arbol.excepciones.append(error)
                        arbol.consola.append(error.toString())
                        return error
            elif self.tipo_salida.tipo == Tipo_Dato.DATE:
                # Try the accepted date layouts until one parses.
                formats = ("%d-%m-%Y", "%Y-%m-%d", "%d-%M-%Y", "%Y-%M-%d", "%Y-%b-%d", "%d-%b-%Y")
                for fmt in formats:
                    try:
                        valid_date = time.strptime(resultado, fmt)
                        if isinstance(valid_date, time.struct_time):
                            self.tipo = Tipo(Tipo_Dato.DATE)
                            return time.strftime('%Y-%m-%d', valid_date)
                    except ValueError:
                        pass
                error = Excepcion('22007', "Semántico", f"la sintaxis de entrada no es válida para tipo date: << {resultado} >>", self.linea, self.columna)
                arbol.excepciones.append(error)
                arbol.consola.append(error.toString())
                return error
        except Exception:
            # Was a bare except:; narrowed so SystemExit/KeyboardInterrupt escape.
            error = Excepcion('22P02', "Semántico", f"La sintaxis de entrada no es válida para tipo {self.valor.tipo.toString()}: << {resultado} >>", self.linea, self.columna)
            arbol.excepciones.append(error)
            arbol.consola.append(error.toString())
            return error

    def analizar(self, tabla, arbol):
        """Semantic-analysis pass; delegates to the base class."""
        return super().analizar(tabla, arbol)

    def traducir(self, tabla, arbol):
        """Emit the three-address-code string ``CONVERT(<value> AS <type>)``."""
        super().traducir(tabla, arbol)
        valor = self._traducir_operando(self.valor, tabla, arbol)
        tipo_salida = self._traducir_operando(self.tipo_salida, tabla, arbol)
        return f"CONVERT({valor} AS {tipo_salida})"

    @staticmethod
    def _traducir_operando(operando, tabla, arbol):
        # The value and target type followed identical, duplicated translation
        # rules in the original; share them through this helper.
        if isinstance(operando, Primitivo):
            return operando.traducir(tabla, arbol).temporalAnterior
        if isinstance(operando, Aritmetica):
            return operando.concatenar(tabla, arbol)
        if isinstance(operando, (str, int)):
            return operando
        return operando.traducir(tabla, arbol)
| 47
| 193
| 0.574034
|
a46f41e2ef00264f5f317759795ef5ec6e91845b
| 1,432
|
py
|
Python
|
src/py/4.3.4-Task-API-Multi-label.py
|
saibaldas/automl-in-action-notebooks
|
4ddd105c7fccf2382091afaf05884ab816ad4b38
|
[
"MIT"
] | 1
|
2022-03-29T19:47:09.000Z
|
2022-03-29T19:47:09.000Z
|
src/py/4.3.4-Task-API-Multi-label.py
|
qingquansong/automl-in-action-notebooks
|
4ddd105c7fccf2382091afaf05884ab816ad4b38
|
[
"MIT"
] | null | null | null |
src/py/4.3.4-Task-API-Multi-label.py
|
qingquansong/automl-in-action-notebooks
|
4ddd105c7fccf2382091afaf05884ab816ad4b38
|
[
"MIT"
] | null | null | null |
"""shell
pip install -r https://raw.githubusercontent.com/datamllab/automl-in-action-notebooks/master/requirements.txt
"""
import tensorflow as tf
import autokeras as ak
"""
### Create synthetic multi-label dataset
"""
from sklearn.datasets import make_multilabel_classification
X, Y = make_multilabel_classification(
n_samples=100,
n_features=64,
n_classes=3,
n_labels=2,
allow_unlabeled=False,
random_state=1,
)
X = X.reshape((100, 8, 8))
X.shape, Y.shape
"""invisible
"""
x_train, x_test, y_train, y_test = X[:80], X[80:], Y[:80], Y[80:]
"""
### Run the ImageClassifier for multi-label classification
"""
# Initialize the image classifier.
clf = ak.ImageClassifier(
max_trials=10, multi_label=True, overwrite=True
) # It tries two different pipelines.
# Feed the image classifier with training data
# 20% of the data is used as validation data by default for tuning
# the process may run for a bit long time, please try to use GPU
clf.fit(x_train, y_train, epochs=3, verbose=2) # each model is trained for three epochs
"""
### Predict with the best model.
"""
predicted_y = clf.predict(x_test)
print("The prediction shape is: {}".format(predicted_y.shape))
print(
"The predicted labels of the first five instances are:\n {}".format(
predicted_y[:5, :]
)
)
"""invisible
"""
test_loss, test_acc = clf.evaluate(x_test, y_test, verbose=0)
print("Test accuracy: ", test_acc)
| 23.47541
| 109
| 0.711592
|
43229af607708505bd71d9b2f35372b263e56a94
| 16,564
|
py
|
Python
|
VL-BERT/cls/data/datasets/cls_v2.py
|
weihezhai/HatefulMemesChallenge
|
04f52643c0864d1efb6c0a9c674db42764f6834c
|
[
"MIT"
] | 63
|
2020-12-09T18:58:16.000Z
|
2022-03-21T02:34:35.000Z
|
VL-BERT/cls/data/datasets/cls_v2.py
|
weihezhai/HatefulMemesChallenge
|
04f52643c0864d1efb6c0a9c674db42764f6834c
|
[
"MIT"
] | 5
|
2021-01-29T10:33:04.000Z
|
2021-08-25T14:15:27.000Z
|
VL-BERT/cls/data/datasets/cls_v2.py
|
weihezhai/HatefulMemesChallenge
|
04f52643c0864d1efb6c0a9c674db42764f6834c
|
[
"MIT"
] | 18
|
2020-12-11T20:36:04.000Z
|
2021-12-12T07:04:20.000Z
|
import os
import json
import re
import base64
import numpy as np
import csv
import sys
import time
import pprint
import logging
import collections
import random
import torch
from torch.utils.data import Dataset
from external.pytorch_pretrained_bert import BertTokenizer
from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast, AutoTokenizer
import _pickle as cPickle
from PIL import Image
from common.utils.zipreader import ZipReader
from common.utils.create_logger import makedirsExist
# from pycocotools.coco import COCO
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes', 'boxes', 'features']
class CLS2(Dataset):
def __init__(self, root_path=None, image_set='train',
transform=None, test_mode=False,
zip_mode=False, cache_mode=False, cache_db=True,
tokenizer=None, pretrained_model_name=None,
add_image_as_a_box=False, mask_size=(14, 14),
aspect_grouping=False, **kwargs):
"""
Visual Question Answering Dataset
:param root_path: root path to cache database loaded from annotation file
:param data_path: path to vcr dataset
:param transform: transform
:param test_mode: test mode means no labels available
:param zip_mode: reading images and metadata in zip archive
:param cache_mode: cache whole dataset to RAM first, then __getitem__ read them from RAM
:param ignore_db_cache: ignore previous cached database, reload it from annotation file
:param tokenizer: default is BertTokenizer from pytorch_pretrained_bert
:param add_image_as_a_box: add whole image as a box
:param mask_size: size of instance mask of each object
:param aspect_grouping: whether to group images via their aspect
:param kwargs:
"""
super(CLS2, self).__init__()
cache_dir = False
assert not cache_mode, 'currently not support cache mode!'
categories = ['__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck',
'boat',
'trafficlight', 'firehydrant', 'stopsign', 'parkingmeter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sportsball', 'kite', 'baseballbat', 'baseballglove',
'skateboard', 'surfboard', 'tennisracket', 'bottle', 'wineglass', 'cup', 'fork', 'knife', 'spoon',
'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hotdog', 'pizza', 'donut',
'cake', 'chair', 'couch', 'pottedplant', 'bed', 'diningtable', 'toilet', 'tv', 'laptop', 'mouse',
'remote', 'keyboard', 'cellphone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book',
'clock', 'vase', 'scissors', 'teddybear', 'hairdrier', 'toothbrush']
self.category_to_idx = {c: i for i, c in enumerate(categories)}
self.data_split = image_set # HACK: reuse old parameter
self.periodStrip = re.compile("(?!<=\d)(\.)(?!\d)")
self.commaStrip = re.compile("(\d)(\,)(\d)")
self.punct = [';', r"/", '[', ']', '"', '{', '}',
'(', ')', '=', '+', '\\', '_', '-',
'>', '<', '@', '`', ',', '?', '!']
self.test_mode = test_mode
self.root_path = root_path
self.box_bank = {}
self.transform = transform
self.zip_mode = zip_mode
self.aspect_grouping = aspect_grouping
self.add_image_as_a_box = add_image_as_a_box
self.cache_dir = os.path.join(root_path, 'cache')
# return_offsets_mapping
model_name = 'bert-base-uncased' if pretrained_model_name is None else pretrained_model_name
self.fast_tokenizer = AutoTokenizer.from_pretrained(
'bert-base-uncased',
cache_dir=self.cache_dir, use_fast=True, return_offsets_mapping=True)
self.tokenizer = tokenizer if tokenizer is not None \
else BertTokenizer.from_pretrained(
model_name,
cache_dir=self.cache_dir)
self.max_txt_token = 120
if zip_mode:
self.zipreader = ZipReader()
self.database = self.load_annotations()
self.use_img_box = True
self.random_drop_tags = False
# if self.aspect_grouping:
# self.group_ids = self.group_aspect(self.database)
@property
def data_names(self):
if self.use_img_box:
if self.test_mode:
return ['image', 'boxes', 'im_info', 'text', 'img_boxes', 'text_tags', 'id',]
else:
return ['image', 'boxes', 'im_info', 'text', 'img_boxes', 'text_tags', 'label', 'id']
else:
if self.test_mode:
return ['image', 'boxes', 'im_info', 'text', 'id', ]
else:
return ['image', 'boxes', 'im_info', 'text', 'label', 'id']
@property
def weights_by_class(self):
labels = []
num_per_class = collections.defaultdict(lambda: 0)
for data in self.database:
labels.append(data['label'])
num_per_class[data['label']] += 1
weight_per_class = {k: 1 / len(num_per_class) / v for k, v in num_per_class.items()}
sampling_weight = [weight_per_class[label] for label in labels]
return sampling_weight
def clip_box_and_score(self, box_and_score):
new_list = []
for box_sc in box_and_score:
cliped = {k: min(max(v, 0), 1) for k, v in box_sc.items()}
new_list.append(cliped)
return new_list
def __getitem__(self, index):
idb = self.database[index]
# image, boxes, im_info
image = self._load_image(os.path.join(self.root_path, idb['img']))
w0, h0 = image.size
if len(idb['boxes_and_score']) == 0:
boxes = torch.as_tensor([[0.0, 0.0, w0 - 1, h0 - 1, 0]])
else:
w_scale = w0 if idb['boxes_and_score'][0]['xmax'] < 1 else 1.0
h_scale = h0 if idb['boxes_and_score'][0]['xmax'] < 1 else 1.0
boxes = torch.as_tensor([
[
box_sc['xmin'] * w_scale,
box_sc['ymin'] * h_scale,
box_sc['xmax'] * w_scale,
box_sc['ymax'] * h_scale,
box_sc['class_id'],
]
for box_sc in idb['boxes_and_score']
])
if self.add_image_as_a_box:
boxes = torch.cat(
(torch.as_tensor([[0.0, 0.0, w0 - 1, h0 - 1, 0]]), boxes), dim=0)
im_info = torch.tensor([w0, h0, 1.0, 1.0])
# clamp boxes
w = im_info[0].item()
h = im_info[1].item()
boxes[:, [0, 2]] = boxes[:, [0, 2]].clamp(min=0, max=w - 1)
boxes[:, [1, 3]] = boxes[:, [1, 3]].clamp(min=0, max=h - 1)
flipped = False
if self.transform is not None:
image, boxes, _, im_info, flipped = self.transform(image, boxes, None, im_info, flipped)
# question
if 'token_id' not in idb:
main_txt = idb['text']
img_tags = [' '.join(des) for des in idb['partition_description']]
img_tags_str = ''
img_tags_part = []
if not self.random_drop_tags or (self.random_drop_tags and random.random() > 0.5):
for p, img_tag in enumerate(img_tags):
append_str = img_tag + ' '
img_tags_str += append_str
img_tags_part += [p] * len(append_str)
text_with_tag = f"{main_txt} [SEP] {img_tags_str}"
# print(f"[{index}] {text_with_tag}")
result = self.fast_tokenizer(
text_with_tag, return_offsets_mapping=True, add_special_tokens=False)
token_id = result['input_ids']
token_offset = result['offset_mapping']
if self.use_img_box:
text_partition = idb['text_char_partition_id']
text_partition += [0] * len(" [SEP] ") + img_tags_part # additinoal partition id for [SEP]
assert len(text_partition) == len(text_with_tag), \
F"{len(text_partition)} != {len(text_with_tag)}"
token_tags = []
for a, b in filter(lambda x: x[1] - x[0] > 0, token_offset):
char_tags = text_partition[a: b]
# print(a, b, char_tags)
cnt = collections.Counter(char_tags)
token_tags.append(cnt.most_common(1)[0][0])
idb['text_tags'] = token_tags
idb['image_partition'] = np.asarray(idb['image_partition'], dtype=np.float32)[..., :4] # HACK: remove det score from mmdet
else:
idb['text_tags'] = [0] * len(token_id)
# token_id = self.tokenizer.convert_tokens_to_ids(text_tokens)
if token_id[-1] == self.fast_tokenizer.sep_token_id:
token_id = token_id[:-1]
idb['text_tags'] = idb['text_tags'][:-1]
if len(token_id) > self.max_txt_token:
token_id = token_id[:self.max_txt_token]
idb['text_tags'] = idb['text_tags'][:self.max_txt_token]
idb['token_id'] = token_id
assert len(idb['token_id']) == len(idb['text_tags'])
else:
token_id = idb['token_id']
if self.use_img_box:
if self.test_mode:
return (
image, boxes, im_info, token_id,
idb['image_partition'], idb['text_tags'], idb['id'],
)
else:
# print([(self.answer_vocab[i], p.item()) for i, p in enumerate(label) if p.item() != 0])
label = torch.Tensor([float(idb['label'])])
return (
image, boxes, im_info, token_id,
idb['image_partition'], idb['text_tags'],
label, idb['id'],
)
else:
if self.test_mode:
return image, boxes, im_info, token_id, idb['id']
else:
# print([(self.answer_vocab[i], p.item()) for i, p in enumerate(label) if p.item() != 0])
label = torch.Tensor([float(idb['label'])])
return image, boxes, im_info, token_id, label, idb['id']
@staticmethod
def b64_decode(string):
return base64.decodebytes(string.encode())
def load_annotations(self):
tic = time.time()
img_name_to_annos = collections.defaultdict(list)
test_json = os.path.join(self.root_path, 'test_unseen.entity.jsonl')
dev_json = os.path.join(self.root_path, 'dev_seen.entity.jsonl')
dev_train_json = os.path.join(self.root_path, 'dev_all.entity.jsonl')
train_json = os.path.join(self.root_path, 'train.entity.jsonl')
# box_annos_json = os.path.join(self.root_path, 'clean_img_boxes_gqa.json')
box_annos_json = os.path.join(self.root_path, 'box_annos.json')
test_sample = []
dev_sample = []
train_sample = []
dev_train_sample = []
with open(train_json, mode='r') as f:
for line in f.readlines():
train_sample.append(json.loads(line))
with open(dev_train_json, mode='r') as f:
for line in f.readlines():
dev_train_sample.append(json.loads(line))
with open(test_json, mode='r') as f:
for line in f.readlines():
test_sample.append(json.loads(line))
with open(dev_json, mode='r') as f:
for line in f.readlines():
dev_sample.append(json.loads(line))
with open(box_annos_json, mode='r') as f:
box_annos = json.load(f)
sample_sets = []
if self.data_split == 'train':
sample_sets.append(train_sample)
elif self.data_split == 'val':
sample_sets.append(dev_sample)
elif self.data_split == 'train+val':
sample_sets.append(train_sample)
sample_sets.append(dev_train_sample)
elif self.data_split == 'test':
sample_sets.append(test_sample)
else:
raise RuntimeError(f"Unknown dataset split: {self.data_split}")
for sample_set in sample_sets:
for sample in sample_set:
img_name = os.path.basename(sample['img'])
img_name_to_annos[img_name].append(sample)
for box_anno in box_annos:
img_name = box_anno['img_name']
if img_name in img_name_to_annos:
for sample in img_name_to_annos[img_name]:
sample.update(box_anno)
print('Done (t={:.2f}s)'.format(time.time() - tic))
flatten = []
for annos in img_name_to_annos.values():
flatten += annos
return flatten
@staticmethod
def group_aspect(database):
print('grouping aspect...')
t = time.time()
# get shape of all images
widths = torch.as_tensor([idb['width'] for idb in database])
heights = torch.as_tensor([idb['height'] for idb in database])
# group
group_ids = torch.zeros(len(database))
horz = widths >= heights
vert = 1 - horz
group_ids[horz] = 0
group_ids[vert] = 1
print('Done (t={:.2f}s)'.format(time.time() - t))
return group_ids
def load_precomputed_boxes(self, box_file):
if box_file in self.box_bank:
return self.box_bank[box_file]
else:
in_data = {}
with open(box_file, "r") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames=FIELDNAMES)
for item in reader:
item['image_id'] = int(item['image_id'])
item['image_h'] = int(item['image_h'])
item['image_w'] = int(item['image_w'])
item['num_boxes'] = int(item['num_boxes'])
for field in (['boxes', 'features'] if self.with_precomputed_visual_feat else ['boxes']):
item[field] = np.frombuffer(base64.decodebytes(item[field].encode()),
dtype=np.float32).reshape((item['num_boxes'], -1))
in_data[item['image_id']] = item
self.box_bank[box_file] = in_data
return in_data
def __len__(self):
return len(self.database)
    def _load_image(self, path):
        """Open an image as RGB, transparently supporting paths inside zip archives.

        Paths containing '.zip@' are read through self.zipreader (only set when
        zip_mode is enabled); anything else is opened from the filesystem.
        """
        if '.zip@' in path:
            return self.zipreader.imread(path).convert('RGB')
        else:
            return Image.open(path).convert('RGB')
def _load_json(self, path):
if '.zip@' in path:
f = self.zipreader.read(path)
return json.loads(f.decode())
else:
with open(path, 'r') as f:
return json.load(f)
if __name__ == '__main__':
    # Manual smoke-test / visualization harness; not part of the library API.
    import numpy as np
    from loguru import logger

    def hight_light_boxes(data):
        """Brighten each detected box region of a dataset sample for visual inspection."""
        image, boxes = data[:2]
        boxes = boxes.numpy()
        np_img = np.array(image).astype(np.int32)
        for box in boxes:
            # Lift the pixels inside the box by a constant to make it stand out.
            x_slice = slice(int(box[0]), int(box[2]))
            y_slice = slice(int(box[1]), int(box[3]))
            np_img[y_slice, x_slice] += 20
            print(box)
        np_img = np.clip(np_img, 0, 255).astype(np.uint8)
        return np_img

    with logger.catch(reraise=True):
        # NOTE(review): hard-coded local paths — adjust before running elsewhere.
        pretrained_model_name = '/home/ron/Projects/VL-BERT/model/pretrained_model/bert-large-uncased'
        cls_data = CLS2(
            '/home/ron/Downloads/hateful_meme_data/',
            pretrained_model_name=pretrained_model_name
        )
        print(cls_data[0])
        tk_len = []
        # Walk the whole dataset, collecting token lengths.
        for i in range(len(cls_data)):
            if i % 100 == 0:
                print(i)
            tmp = cls_data[i]
            print(tmp[4].shape)
            tk_len.append(len(tmp[3]))
        # HACK: interactive debugger left in for manual inspection.
        import pdb; pdb.set_trace()
        Image.fromarray(hight_light_boxes(cls_data[3])).show()
| 40.009662
| 139
| 0.553248
|
0cb99e2dd2a898cb718bb9bea2576ce3423e3d6a
| 1,375
|
py
|
Python
|
models/bert_base_uncased.py
|
chrisAS12/specseminars-2021-mi
|
9f1bfa6dc5abac6d3b9f8b9cb2224431c78f0365
|
[
"MIT"
] | null | null | null |
models/bert_base_uncased.py
|
chrisAS12/specseminars-2021-mi
|
9f1bfa6dc5abac6d3b9f8b9cb2224431c78f0365
|
[
"MIT"
] | null | null | null |
models/bert_base_uncased.py
|
chrisAS12/specseminars-2021-mi
|
9f1bfa6dc5abac6d3b9f8b9cb2224431c78f0365
|
[
"MIT"
] | null | null | null |
from transformers import pipeline, AutoTokenizer, AutoModelForMaskedLM
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForMaskedLM.from_pretrained("bert-base-uncased")
def predict(sentence, symbol_to_replace, model, tokenizer):
    """Replace `symbol_to_replace` in `sentence` with [MASK] and suggest a word.

    :param sentence: text containing exactly one marker symbol.
    :param symbol_to_replace: marker character to substitute with [MASK].
    :param model: masked-LM model for the fill-mask pipeline.
    :param tokenizer: matching tokenizer.
    :return: the second-ranked completed sentence, or -1 (legacy error code)
        when prediction fails — e.g. when the sentence ends up with more or
        fewer than one [MASK] token.
    """
    try:
        fill_mask = pipeline(
            "fill-mask",
            model=model,
            tokenizer=tokenizer
        )
        masked = sentence.replace(symbol_to_replace, "[MASK]")
        # Bug fix: the pipeline was invoked twice on the same input (once to
        # print, once to return); run it once and reuse the result.
        predictions = fill_mask(masked)
        print(predictions)
        return predictions[1]['sequence']
    except Exception:
        # Was a bare except:, which also swallowed SystemExit/KeyboardInterrupt.
        print("Add only one symbol to replace, please.")
        return -1
def full_prediction(sentence, symbol_to_replace):
    """Print the sentence and the model's suggested replacement for the marker symbol."""
    print(sentence)
    print(f"In the sentence above, we can replace {symbol_to_replace} to this: ")
    suggestion = predict(sentence, symbol_to_replace, model, tokenizer)
    print('\033[0m', "\033[1m", suggestion, '\033[0m', '\033[92m')
def test_cases():
    """Run two demo predictions against fixed sentences."""
    demos = (
        ("The University of Latvia is so *.", '*'),
        ("This course is so -!", '-'),
    )
    for sentence, marker in demos:
        full_prediction(sentence, marker)
def make_text_green():
    """Switch subsequent terminal output to green via an ANSI escape code."""
    print('\033[92m')
# Interactive REPL: keep prompting until the user enters an empty line or -1.
while(True):
    sentence = input("Enter your sentence and add one symbol you will replace with a word later: ")
    # Empty input or the sentinel '-1' terminates the loop.
    if(sentence is None or sentence == '' or sentence == '-1'):
        break
    symbol = input("Symbol to replace: ")
    full_prediction(sentence, symbol)
    print("Enter -1 to break!")
| 106
| 0.666909
|
8b6db77ed6de5e88ef9a82a6aeff74b8ee68a8ed
| 3,053
|
py
|
Python
|
saleor/graphql/app/tests/queries/test_apps_pagination.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 15,337
|
2015-01-12T02:11:52.000Z
|
2021-10-05T19:19:29.000Z
|
saleor/graphql/app/tests/queries/test_apps_pagination.py
|
fairhopeweb/saleor
|
9ac6c22652d46ba65a5b894da5f1ba5bec48c019
|
[
"CC-BY-4.0"
] | 7,486
|
2015-02-11T10:52:13.000Z
|
2021-10-06T09:37:15.000Z
|
saleor/graphql/app/tests/queries/test_apps_pagination.py
|
aminziadna/saleor
|
2e78fb5bcf8b83a6278af02551a104cfa555a1fb
|
[
"CC-BY-4.0"
] | 5,864
|
2015-01-16T14:52:54.000Z
|
2021-10-05T23:01:15.000Z
|
import pytest
from .....app.models import App
from ....tests.utils import get_graphql_content
@pytest.fixture()
def apps_for_pagination():
    """Create a deterministic set of active App rows for the pagination tests."""
    names = [
        "Account1",
        "AccountAccount1",
        "AccountAccount2",
        "Account2",
        "Account3",
    ]
    return App.objects.bulk_create([App(name=name, is_active=True) for name in names])
QUERY_APP_PAGINATION = """
query (
$first: Int, $last: Int, $after: String, $before: String,
$sortBy: AppSortingInput, $filter: AppFilterInput
){
apps(
first: $first, last: $last, after: $after, before: $before,
sortBy: $sortBy, filter: $filter
) {
edges{
node{
name
}
}
pageInfo{
startCursor
endCursor
hasNextPage
hasPreviousPage
}
}
}
"""
@pytest.mark.parametrize(
    "sort_by, apps_order",
    [
        ({"field": "NAME", "direction": "ASC"}, ["Account1", "Account2", "Account3"]),
        (
            {"field": "NAME", "direction": "DESC"},
            ["AccountAccount2", "AccountAccount1", "Account3"],
        ),
    ],
)
def test_apps_pagination_with_sorting(
    sort_by,
    apps_order,
    staff_api_client,
    apps_for_pagination,
    permission_manage_apps,
):
    """Fetch the first page of apps sorted by name and verify its order."""
    staff_api_client.user.user_permissions.add(permission_manage_apps)
    page_size = 3
    variables = {"first": page_size, "after": None, "sortBy": sort_by}
    response = staff_api_client.post_graphql(
        QUERY_APP_PAGINATION,
        variables,
    )
    content = get_graphql_content(response)
    nodes = content["data"]["apps"]["edges"]
    # The page must contain exactly page_size apps in the expected order.
    assert apps_order[0] == nodes[0]["node"]["name"]
    assert apps_order[1] == nodes[1]["node"]["name"]
    assert apps_order[2] == nodes[2]["node"]["name"]
    assert len(nodes) == page_size
@pytest.mark.parametrize(
    "filter_by, apps_order",
    [
        ({"search": "Account"}, ["Account1", "Account2"]),
        ({"search": "AccountAccount"}, ["AccountAccount1", "AccountAccount2"]),
        ({"search": "accountaccount"}, ["AccountAccount1", "AccountAccount2"]),
        ({"search": "Account1"}, ["Account1", "AccountAccount1"]),
    ],
)
def test_apps_pagination_with_filtering(
    filter_by,
    apps_order,
    staff_api_client,
    apps_for_pagination,
    permission_manage_apps,
):
    """Fetch the first page of apps matching a (case-insensitive) search filter."""
    staff_api_client.user.user_permissions.add(permission_manage_apps)
    page_size = 2
    variables = {"first": page_size, "after": None, "filter": filter_by}
    response = staff_api_client.post_graphql(
        QUERY_APP_PAGINATION,
        variables,
    )
    content = get_graphql_content(response)
    nodes = content["data"]["apps"]["edges"]
    # Only the first page is checked; it must hold the two expected names.
    assert apps_order[0] == nodes[0]["node"]["name"]
    assert apps_order[1] == nodes[1]["node"]["name"]
    assert len(nodes) == page_size
| 28.53271
| 86
| 0.586964
|
4bb199db9d8b7091619d4946c6f35bc0baef2624
| 738
|
py
|
Python
|
adafruit-circuitpython-bundle-py-20201107/examples/circuitplayground_tapdetect_single_double.py
|
rantler/AdaFruit
|
9b0aa56ede9ac358b835162cad4c6531c09ba5b0
|
[
"CC0-1.0"
] | null | null | null |
adafruit-circuitpython-bundle-py-20201107/examples/circuitplayground_tapdetect_single_double.py
|
rantler/AdaFruit
|
9b0aa56ede9ac358b835162cad4c6531c09ba5b0
|
[
"CC0-1.0"
] | null | null | null |
adafruit-circuitpython-bundle-py-20201107/examples/circuitplayground_tapdetect_single_double.py
|
rantler/AdaFruit
|
9b0aa56ede9ac358b835162cad4c6531c09ba5b0
|
[
"CC0-1.0"
] | null | null | null |
"""This example shows how you can use single-tap and double-tap together with a delay between.
Single-tap the board twice and then double-tap the board twice to complete the program."""
from adafruit_circuitplayground import cp
# Set to check for single-taps.
cp.detect_taps = 1
tap_count = 0
# We're looking for 2 single-taps before moving on.
while tap_count < 2:
    if cp.tapped:
        tap_count += 1
print("Reached 2 single-taps!")
# Now switch to checking for double-taps
tap_count = 0
cp.detect_taps = 2
# We're looking for 2 double-taps before moving on.
while tap_count < 2:
    if cp.tapped:
        tap_count += 1
print("Reached 2 double-taps!")
print("Done.")
# Signal completion by latching the red LED on; the board busy-loops here
# until it is reset.
while True:
    cp.red_led = True
| 95
| 0.692412
|
bf0a93b4c0a0a5c40490e0e2359bbe388da07698
| 119
|
py
|
Python
|
HARK/__init__.py
|
jdice/HARK
|
16361c6aa3790fb0f09a53c40d46f01cb8634a9a
|
[
"Apache-2.0"
] | null | null | null |
HARK/__init__.py
|
jdice/HARK
|
16361c6aa3790fb0f09a53c40d46f01cb8634a9a
|
[
"Apache-2.0"
] | null | null | null |
HARK/__init__.py
|
jdice/HARK
|
16361c6aa3790fb0f09a53c40d46f01cb8634a9a
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from .core import *
# Package version string (PEP 440 development pre-release of 0.10.1).
__version__ = '0.10.1.dev3'
# NOTE(review): wildcard import intentionally placed after __version__ so the
# BayerLuetticke names are re-exported from the package root — confirm order matters.
from .BayerLuetticke import *
| 17
| 38
| 0.773109
|
42f8fef54f2f840c878ed599bf7f3c4e5aa7120b
| 2,916
|
py
|
Python
|
test/functional/mempool_resurrect.py
|
ogfuncoin/ogfuncoin
|
18d00bc1d93335c86ae6f2971321e93e627ae570
|
[
"MIT"
] | null | null | null |
test/functional/mempool_resurrect.py
|
ogfuncoin/ogfuncoin
|
18d00bc1d93335c86ae6f2971321e93e627ae570
|
[
"MIT"
] | null | null | null |
test/functional/mempool_resurrect.py
|
ogfuncoin/ogfuncoin
|
18d00bc1d93335c86ae6f2971321e93e627ae570
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test resurrection of mined transactions when the blockchain is re-organized."""
from test_framework.blocktools import create_raw_transaction
from test_framework.test_framework import ogfuncoinTestFramework
from test_framework.util import assert_equal
class MempoolCoinbaseTest(ogfuncoinTestFramework):
    """Verify mined transactions re-enter the mempool after a re-org."""
    def set_test_params(self):
        # One node suffices: the re-org is simulated via invalidateblock.
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        """Spend coinbases, mine, re-org, and check mempool resurrection."""
        node0_address = self.nodes[0].getnewaddress()
        # Spend block 1/2/3's coinbase transactions
        # Mine a block.
        # Create three more transactions, spending the spends
        # Mine another block.
        # ... make sure all the transactions are confirmed
        # Invalidate both blocks
        # ... make sure all the transactions are put back in the mempool
        # Mine a new block
        # ... make sure all the transactions are confirmed again.
        b = [self.nodes[0].getblockhash(n) for n in range(1, 4)]
        coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b]
        # First generation of spends: one tx per coinbase, 49.99 each.
        spends1_raw = [create_raw_transaction(self.nodes[0], txid, node0_address, amount=49.99) for txid in coinbase_txids]
        spends1_id = [self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw]
        blocks = []
        blocks.extend(self.nodes[0].generate(1))
        # Second generation: spend the first-generation outputs.
        spends2_raw = [create_raw_transaction(self.nodes[0], txid, node0_address, amount=49.98) for txid in spends1_id]
        spends2_id = [self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw]
        blocks.extend(self.nodes[0].generate(1))
        # mempool should be empty, all txns confirmed
        assert_equal(set(self.nodes[0].getrawmempool()), set())
        for txid in spends1_id+spends2_id:
            tx = self.nodes[0].gettransaction(txid)
            assert(tx["confirmations"] > 0)
        # Use invalidateblock to re-org back
        for node in self.nodes:
            node.invalidateblock(blocks[0])
        # All txns should be back in mempool with 0 confirmations
        assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id))
        for txid in spends1_id+spends2_id:
            tx = self.nodes[0].gettransaction(txid)
            assert(tx["confirmations"] == 0)
        # Generate another block, they should all get mined
        self.nodes[0].generate(1)
        # mempool should be empty, all txns confirmed
        assert_equal(set(self.nodes[0].getrawmempool()), set())
        for txid in spends1_id+spends2_id:
            tx = self.nodes[0].gettransaction(txid)
            assert(tx["confirmations"] > 0)
# Standard functional-test entry point.
if __name__ == '__main__':
    MempoolCoinbaseTest().main()
| 41.070423
| 123
| 0.674211
|
0b04217b97b68a98f8ad34bb79e894b479a89302
| 123
|
py
|
Python
|
sciwing/models/__init__.py
|
sean-dingxu/sciwing
|
75eca1ea43be165eab20cf8bd81bbc19cecda74c
|
[
"MIT"
] | 50
|
2019-09-13T10:32:29.000Z
|
2022-02-14T16:52:53.000Z
|
sciwing/models/__init__.py
|
sean-dingxu/sciwing
|
75eca1ea43be165eab20cf8bd81bbc19cecda74c
|
[
"MIT"
] | 31
|
2019-09-03T11:06:03.000Z
|
2021-08-20T14:57:09.000Z
|
sciwing/models/__init__.py
|
sean-dingxu/sciwing
|
75eca1ea43be165eab20cf8bd81bbc19cecda74c
|
[
"MIT"
] | 9
|
2019-09-16T03:25:15.000Z
|
2021-05-11T10:28:25.000Z
|
from sciwing.models.simpleclassifier import SimpleClassifier
from sciwing.models.rnn_seq_crf_tagger import RnnSeqCrfTagger
| 41
| 61
| 0.902439
|
485e9a85206fd5ce9356be87fa2983d1489f2245
| 3,444
|
py
|
Python
|
tests/components/knx/test_init.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | null | null | null |
tests/components/knx/test_init.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | null | null | null |
tests/components/knx/test_init.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | null | null | null |
"""Test KNX init."""
import pytest
from xknx import XKNX
from xknx.io import (
DEFAULT_MCAST_GRP,
DEFAULT_MCAST_PORT,
ConnectionConfig,
ConnectionType,
)
from homeassistant.components.knx.const import (
CONF_KNX_AUTOMATIC,
CONF_KNX_CONNECTION_TYPE,
CONF_KNX_DEFAULT_RATE_LIMIT,
CONF_KNX_DEFAULT_STATE_UPDATER,
CONF_KNX_INDIVIDUAL_ADDRESS,
CONF_KNX_LOCAL_IP,
CONF_KNX_MCAST_GRP,
CONF_KNX_MCAST_PORT,
CONF_KNX_RATE_LIMIT,
CONF_KNX_ROUTE_BACK,
CONF_KNX_ROUTING,
CONF_KNX_STATE_UPDATER,
CONF_KNX_TUNNELING,
DOMAIN as KNX_DOMAIN,
)
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.core import HomeAssistant
from .conftest import KNXTestKit
from tests.common import MockConfigEntry
@pytest.mark.parametrize(
    "config_entry_data,connection_config",
    [
        (
            {
                CONF_KNX_CONNECTION_TYPE: CONF_KNX_AUTOMATIC,
                CONF_KNX_RATE_LIMIT: CONF_KNX_DEFAULT_RATE_LIMIT,
                CONF_KNX_STATE_UPDATER: CONF_KNX_DEFAULT_STATE_UPDATER,
                CONF_KNX_MCAST_PORT: DEFAULT_MCAST_PORT,
                CONF_KNX_MCAST_GRP: DEFAULT_MCAST_GRP,
                CONF_KNX_INDIVIDUAL_ADDRESS: XKNX.DEFAULT_ADDRESS,
            },
            ConnectionConfig(threaded=True),
        ),
        (
            {
                CONF_KNX_CONNECTION_TYPE: CONF_KNX_ROUTING,
                CONF_KNX_LOCAL_IP: "192.168.1.1",
                CONF_KNX_RATE_LIMIT: CONF_KNX_DEFAULT_RATE_LIMIT,
                CONF_KNX_STATE_UPDATER: CONF_KNX_DEFAULT_STATE_UPDATER,
                CONF_KNX_MCAST_PORT: DEFAULT_MCAST_PORT,
                CONF_KNX_MCAST_GRP: DEFAULT_MCAST_GRP,
                CONF_KNX_INDIVIDUAL_ADDRESS: XKNX.DEFAULT_ADDRESS,
            },
            ConnectionConfig(
                connection_type=ConnectionType.ROUTING,
                local_ip="192.168.1.1",
                threaded=True,
            ),
        ),
        (
            {
                CONF_KNX_CONNECTION_TYPE: CONF_KNX_TUNNELING,
                CONF_HOST: "192.168.0.2",
                CONF_PORT: 3675,
                CONF_KNX_ROUTE_BACK: False,
                CONF_KNX_LOCAL_IP: "192.168.1.112",
                CONF_KNX_RATE_LIMIT: CONF_KNX_DEFAULT_RATE_LIMIT,
                CONF_KNX_STATE_UPDATER: CONF_KNX_DEFAULT_STATE_UPDATER,
                CONF_KNX_MCAST_PORT: DEFAULT_MCAST_PORT,
                CONF_KNX_MCAST_GRP: DEFAULT_MCAST_GRP,
                CONF_KNX_INDIVIDUAL_ADDRESS: XKNX.DEFAULT_ADDRESS,
            },
            ConnectionConfig(
                connection_type=ConnectionType.TUNNELING,
                route_back=False,
                gateway_ip="192.168.0.2",
                gateway_port=3675,
                local_ip="192.168.1.112",
                auto_reconnect=True,
                threaded=True,
            ),
        ),
    ],
)
async def test_init_connection_handling(
    hass: HomeAssistant, knx: KNXTestKit, config_entry_data, connection_config
):
    """Test correctly generating connection config."""
    # Install a config entry with the raw data, set the integration up, and
    # compare the xknx ConnectionConfig the integration builds with the
    # expected one.
    config_entry = MockConfigEntry(
        title="KNX",
        domain=KNX_DOMAIN,
        data=config_entry_data,
    )
    knx.mock_config_entry = config_entry
    await knx.setup_integration({})
    assert hass.data.get(KNX_DOMAIN) is not None
    # NOTE(review): compared via __dict__ — presumably ConnectionConfig does
    # not implement __eq__; confirm against the xknx version in use.
    assert (
        hass.data[KNX_DOMAIN].connection_config().__dict__ == connection_config.__dict__
    )
| 31.888889
| 88
| 0.631823
|
314eb5150682e75213243cd890d64f7b63af63ef
| 31,734
|
py
|
Python
|
qtapps/skrf_qtwidgets/widgets.py
|
DavidLutton/scikit-rf
|
1e0dfb2c560058ae21ddf255f395a753b6ea696f
|
[
"BSD-3-Clause"
] | null | null | null |
qtapps/skrf_qtwidgets/widgets.py
|
DavidLutton/scikit-rf
|
1e0dfb2c560058ae21ddf255f395a753b6ea696f
|
[
"BSD-3-Clause"
] | null | null | null |
qtapps/skrf_qtwidgets/widgets.py
|
DavidLutton/scikit-rf
|
1e0dfb2c560058ae21ddf255f395a753b6ea696f
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
import traceback
from qtpy import QtWidgets, QtCore, QtGui
import skrf
from . import numeric_inputs
from . import qt
from .analyzers import analyzers
def load_network_file(caption="load network file", filter="touchstone file (*.s*p)"):
    """Prompt for a single touchstone file and return it as an skrf.Network.

    Returns None when the dialog is cancelled or the file cannot be parsed
    (an error popup is shown in the latter case).
    """
    path = qt.getOpenFileName_Global(caption, filter)
    if not path:
        return None
    try:
        return skrf.Network(path)
    except Exception as e:
        qt.error_popup(e)
        return None
def load_network_files(caption="load network file", filter="touchstone file (*.s*p)"):
    """Prompt for multiple touchstone files and return them as skrf.Networks.

    Files that fail to parse are skipped; their errors are collected and
    shown in a single popup at the end. Returns None when the dialog is
    cancelled, otherwise the list of successfully loaded networks.
    """
    fnames = qt.getOpenFileNames_Global(caption, filter)
    if not fnames:
        return None

    ntwks = []
    errors = []

    for fname in fnames:
        try:
            ntwks.append(skrf.Network(fname))
        except Exception:
            etype, value, tb = sys.exc_info()
            # format_exception_only returns a *list* of strings; the original
            # concatenated it directly onto a str, raising a TypeError in the
            # very path that was meant to report the error.
            errors.append(fname + ": " + "".join(traceback.format_exception_only(etype, value)))

    if errors:
        qt.error_popup(errors)

    return ntwks
def save_multiple_networks(ntwk_list):
    """Save every skrf.Network in ntwk_list as a touchstone file.

    The user picks a target directory. On a name collision an
    OverwriteFilesQuery dialog asks whether to overwrite; "yes to all" is
    remembered for the rest of the list and "cancel" aborts the whole save.
    When the user declines to overwrite, a save-file dialog asks for an
    alternative filename instead.
    """
    dirname = qt.getDirName_Global("select directory to save network files")
    if not dirname:
        return
    # remember: stop prompting (user chose "yes to all");
    # overwrite: most recent overwrite decision, reused while remember is set.
    remember = False
    overwrite = False
    for ntwk in ntwk_list:
        if isinstance(ntwk, skrf.Network):
            # The extension encodes the port count, e.g. ".s2p" for 2-port data.
            fname = os.path.join(dirname, ntwk.name) + ".s{:d}p".format(ntwk.s.shape[1])
            if os.path.isfile(fname):
                if not remember:
                    msg = "The file:\n" + fname + "\nalready exists.\n\nDo you want to overwrite the file?"
                    dialog = OverwriteFilesQuery(title="File Already Exists", msg=msg)
                    dialog.exec_()
                    if dialog.choice == "yes":
                        overwrite = True
                    elif dialog.choice == "yes to all":
                        overwrite = True
                        remember = True
                    elif dialog.choice == "no":
                        overwrite = False
                    elif dialog.choice == "cancel":
                        return
                    else:
                        raise ValueError("did not recognize dialog choice")
                if not overwrite:
                    # NOTE(review): `filter` shadows the builtin; left as-is here.
                    filter = "Touchstone file (*.s{:d}p)".format(ntwk.s.shape[1])
                    fname = qt.getSaveFileName_Global("save network file", filter)
            ntwk.write_touchstone(fname)
class OverwriteFilesQuery(QtWidgets.QDialog):
    """Modal Yes / Yes-to-all / No / Cancel dialog for file-overwrite prompts.

    After exec_() returns, the user's selection is available in self.choice
    as one of "yes", "yes to all", "no", "cancel" (or None if no button
    slot fired).
    """
    def __init__(self, title="", msg="", parent=None):
        super(OverwriteFilesQuery, self).__init__(parent)
        self.verticalLayout = QtWidgets.QVBoxLayout(self)
        self.textBrowser = QtWidgets.QTextBrowser(self)
        self.verticalLayout.addWidget(self.textBrowser)
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.buttonBox = QtWidgets.QDialogButtonBox(self)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(
            QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.No | QtWidgets.QDialogButtonBox.Yes | QtWidgets.QDialogButtonBox.YesToAll)
        # Track which button fired; exec_()'s return value alone cannot
        # distinguish Yes from Yes-to-all.
        self.choice = None
        self.yes = self.buttonBox.button(QtWidgets.QDialogButtonBox.Yes)
        self.yesToAll = self.buttonBox.button(QtWidgets.QDialogButtonBox.YesToAll)
        self.no = self.buttonBox.button(QtWidgets.QDialogButtonBox.No)
        self.cancel = self.buttonBox.button(QtWidgets.QDialogButtonBox.Cancel)
        self.yes.clicked.connect(self.set_yes)
        self.yesToAll.clicked.connect(self.set_yesToAll)
        self.no.clicked.connect(self.set_no)
        self.cancel.clicked.connect(self.set_cancel)
        self.horizontalLayout.addWidget(self.buttonBox)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.setWindowTitle(title)
        self.textBrowser.setText(msg)
    def set_yes(self):
        """Slot: record that Yes was pressed."""
        self.choice = "yes"
    def set_yesToAll(self):
        """Slot: record that Yes-to-all was pressed."""
        self.choice = "yes to all"
    def set_no(self):
        """Slot: record that No was pressed."""
        self.choice = "no"
    def set_cancel(self):
        """Slot: record that Cancel was pressed."""
        self.choice = "cancel"
class NetworkParameterEditor(QtWidgets.QDialog):
    """Form dialog for editing a network item's name and parameters.

    One form row is built per entry of ``item_parameters``; the created
    widgets are kept in self.inputs keyed by parameter name so the caller
    can read the edited values back after the dialog is accepted.
    """
    def __init__(self, item, item_parameters, window_title="Edit Parameters", parent=None):
        super(NetworkParameterEditor, self).__init__(parent)
        self.setWindowTitle(window_title)
        vlay = QtWidgets.QVBoxLayout(self)
        form = QtWidgets.QFormLayout(None)
        vlay.addLayout(form)
        self.inputs = dict()
        # NOTE(review): `input` shadows the builtin throughout; left as-is.
        input = QtWidgets.QLineEdit()
        input.setText(item.ntwk.name)
        form.addRow("name", input)
        self.inputs["name"] = input
        for name, param in item_parameters.items():
            value = item.parameters[name]
            if param["combo_list"]:
                # Closed set of options -> combo box preselected to the value.
                input = QtWidgets.QComboBox()
                input.addItems(param["combo_list"])
                input.setCurrentIndex(input.findText(value))
                row_name = name
            elif param["units"] and param["type"] in ("int", "float"):
                # Numeric parameter with units -> unit-aware line edit.
                input = numeric_inputs.InputWithUnits(param["units"])
                input.setText(str(value))
                row_name = "{:} ({:})".format(name, param["units"])
            else:
                input = QtWidgets.QLineEdit()
                input.setText(str(value))
                row_name = name
            self.inputs[name] = input
            form.addRow(row_name, input)
        ok = QtWidgets.QPushButton("Ok")
        # ok.setAutoDefault(False)
        cancel = QtWidgets.QPushButton("Cancel")
        # cancel.setAutoDefault(False)
        hlay = QtWidgets.QHBoxLayout()
        hlay.addWidget(ok)
        hlay.addWidget(cancel)
        vlay.addLayout(hlay)
        ok.clicked.connect(self.accept)
        cancel.clicked.connect(self.reject)
    def showEvent(self, event):
        """Widen the dialog slightly and center it on the available screen."""
        self.resize(self.width() * 1.25, self.height())
        qr = self.frameGeometry()
        cp = QtWidgets.QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())
class SwitchTermsDialog(QtWidgets.QDialog):
    """Dialog for acquiring forward/reverse switch terms.

    Terms can be measured via the attached analyzer or loaded from 1-port
    touchstone files; the OK button stays disabled until both self.forward
    and self.reverse hold skrf.Network objects.
    """
    def __init__(self, analyzer=None, parent=None):
        super(SwitchTermsDialog, self).__init__(parent)
        self.setWindowTitle("Measure Switch Terms")
        self.verticalLayout = QtWidgets.QVBoxLayout(self)
        self.btn_measureSwitch = QtWidgets.QPushButton("Measure Switch Terms")
        self.label_measureSwitch = QtWidgets.QLabel("Not Measured")
        self.btn_loadForwardSwitch = QtWidgets.QPushButton("Load Forward Switch Terms")
        self.label_loadForwardSwitch = QtWidgets.QLabel("Not Measured")
        self.btn_loadReverseSwitch = QtWidgets.QPushButton("Load Reverse Switch Terms")
        self.label_loadReverseSwitch = QtWidgets.QLabel("Not Measured")
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.addWidget(self.btn_measureSwitch, 0, 0)
        self.gridLayout.addWidget(self.label_measureSwitch, 0, 1)
        self.gridLayout.addWidget(self.btn_loadForwardSwitch, 1, 0)
        self.gridLayout.addWidget(self.label_loadForwardSwitch, 1, 1)
        self.gridLayout.addWidget(self.btn_loadReverseSwitch, 2, 0)
        self.gridLayout.addWidget(self.label_loadReverseSwitch, 2, 1)
        self.verticalLayout.addLayout(self.gridLayout)
        self.buttonBox = QtWidgets.QDialogButtonBox(self)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
        self.verticalLayout.addWidget(self.buttonBox)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.analyzer = analyzer
        # Without an analyzer only the file-loading path is available.
        if self.analyzer is None:
            self.btn_measureSwitch.setEnabled(False)
        self.forward = None
        self.reverse = None
        self._ready = False
        self.current_item = None
        self.btn_measureSwitch.clicked.connect(self.measure_switch)
        self.btn_loadForwardSwitch.clicked.connect(self.load_forward_switch)
        self.btn_loadReverseSwitch.clicked.connect(self.load_reverse_switch)
        self.ok = self.buttonBox.button(QtWidgets.QDialogButtonBox.Ok)  # type: QtWidgets.QPushButton
        self.ok.setEnabled(False)
    def measure_switch(self):
        """Measure both switch terms through the analyzer."""
        self.forward, self.reverse = self.analyzer.get_switch_terms(**self.analyzer.params_twoport)
        self.evaluate()
    def load_forward_switch(self):
        """Load the forward switch term from a 1-port touchstone file."""
        self.forward = load_network_file("Load Forward Switch Terms", "Touchstone 1-port (*.s1p)")
        if type(self.forward) is not skrf.Network:
            self.forward = None
        self.evaluate()
    def load_reverse_switch(self):
        """Load the reverse switch term from a 1-port touchstone file."""
        self.reverse = load_network_file("Load Reverse Switch Terms", "Touchstone 1-port (*.s1p)")
        if type(self.reverse) is not skrf.Network:
            self.reverse = None
        self.evaluate()
    @property
    def ready(self):
        # True once both terms are present; mirrored into the OK button state.
        return self._ready
    @ready.setter
    def ready(self, val):
        if val is True:
            self._ready = True
            self.ok.setEnabled(True)
        else:
            self._ready = False
            self.ok.setEnabled(False)
    def evaluate(self):
        """Refresh the status labels and enable OK when both terms exist."""
        if type(self.forward) is skrf.Network:
            self.label_loadForwardSwitch.setText("forward - measured")
        else:
            self.label_loadForwardSwitch.setText("forward - not measured")
        if type(self.reverse) is skrf.Network:
            self.label_loadReverseSwitch.setText("reverse - measured")
        else:
            self.label_loadReverseSwitch.setText("reverse - not measured")
        if type(self.forward) is skrf.Network and type(self.reverse) is skrf.Network:
            self.label_measureSwitch.setText("measured")
            self.ready = True
class VnaControllerDialog(QtWidgets.QDialog):
    """
    a convenience Dialog class that contains a VnaController Widget
    """
    def __init__(self, vna, parent=None):
        super(VnaControllerDialog, self).__init__(parent)
        self.controller = VnaController(vna, self)
        # The original stored the button in self.close, shadowing the
        # inherited QDialog.close and then connecting the clicked signal to
        # the button object itself (not a callable slot). Keep the button
        # under its own name and connect it to the dialog's close() slot.
        self.btn_close = QtWidgets.QPushButton("Close")
        self.btn_close.clicked.connect(self.close)
        self.layout = QtWidgets.QVBoxLayout(self)
        self.layout.addWidget(self.controller)
        self.layout.addWidget(self.btn_close)
class VnaController(QtWidgets.QWidget):
    """Widget controlling basic VNA sweep parameters.

    Frequency range, point count, sweep spacing and channel are edited in
    the widget and pushed to the instrument only when the
    "Set Freq. Sweep" button is pressed.
    """
    FUNITS = ["Hz", "kHz", "MHz", "GHz", "THz", "PHz"]
    FCONVERSIONS = {"Hz": 1., "kHz": 1e-3, "MHz": 1e-6, "GHz": 1e-9, "THz": 1e-12, "PHz": 1e-15}

    def __init__(self, vna, parent=None):
        """
        set the instrument state for the given vna

        Parameters
        ----------
        vna : skrf.vi.vna.VNA
            skrf vna virtual instrument driver object
        parent: QtWidgets.QWidget
            parent widget
        """
        super(VnaController, self).__init__(parent)
        self.vna = vna
        self.verticalLayout = QtWidgets.QVBoxLayout(self)

        # --- Channel Control --- #
        # set_frequency_sweep() reads self.spinBox_channel, but the original
        # class never created the widget (AttributeError at runtime).
        self.label_channel = QtWidgets.QLabel("Channel:")
        self.spinBox_channel = QtWidgets.QSpinBox()
        self.spinBox_channel.setMinimum(1)
        self.spinBox_channel.setMaximum(256)

        # --- Frequency Controls --- #
        default_funit = "GHz"
        self.label_startFreq = QtWidgets.QLabel("Frequency:")
        self.lineEdit_startFrequency = numeric_inputs.InputWithUnits(default_funit, 0.01)
        self.lineEdit_startFrequency.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        self.label_stopFreq = QtWidgets.QLabel("to:")
        self.lineEdit_stopFrequency = numeric_inputs.InputWithUnits(default_funit, 40)
        self.lineEdit_stopFrequency.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        self.comboBox_funit = QtWidgets.QComboBox()
        self.comboBox_funit.addItems(self.FUNITS)
        self.comboBox_funit.setCurrentIndex(self.comboBox_funit.findText(default_funit))
        self.label_numberOfPoints = QtWidgets.QLabel("Num Points:")
        self.spinBox_numberOfPoints = QtWidgets.QSpinBox()
        self.spinBox_numberOfPoints.setMinimum(1)
        self.spinBox_numberOfPoints.setMaximum(100000)
        self.spinBox_numberOfPoints.setSingleStep(100)
        self.spinBox_numberOfPoints.setValue(401)
        self.checkBox_logf = QtWidgets.QCheckBox("Log")
        self.checkBox_logf.setChecked(False)
        self.btn_setAnalyzerFreqSweep = QtWidgets.QPushButton("Set Freq. Sweep")

        # --- adding Frequency Controls --- #
        # The original referenced self.layout_row2 without ever creating it;
        # build the row layout here and attach it to the main layout.
        self.layout_row2 = QtWidgets.QHBoxLayout()
        self.layout_row2.addWidget(self.label_channel)
        self.layout_row2.addWidget(self.spinBox_channel)
        self.layout_row2.addWidget(qt.QVLine())
        self.layout_row2.addWidget(self.label_startFreq)
        self.layout_row2.addWidget(self.lineEdit_startFrequency)
        self.layout_row2.addWidget(self.label_stopFreq)
        self.layout_row2.addWidget(self.lineEdit_stopFrequency)
        self.layout_row2.addWidget(self.comboBox_funit)
        self.layout_row2.addWidget(qt.QVLine())
        self.layout_row2.addWidget(self.label_numberOfPoints)
        self.layout_row2.addWidget(self.spinBox_numberOfPoints)
        self.layout_row2.addWidget(self.checkBox_logf)
        self.layout_row2.addWidget(self.btn_setAnalyzerFreqSweep)
        self.verticalLayout.addLayout(self.layout_row2)

        # Cached line-edit values, kept in sync by the start_frequency /
        # stop_frequency properties below.
        self._start_frequency = float(self.lineEdit_startFrequency.text())
        self._stop_frequency = float(self.lineEdit_stopFrequency.text())
        self.funit = self.comboBox_funit.currentText()
        self.comboBox_funit.currentIndexChanged.connect(self.frequency_changed)
        self.btn_setAnalyzerFreqSweep.clicked.connect(self.set_frequency_sweep)

    def set_frequency_sweep(self):
        """Push the channel / frequency / point-count settings to the VNA."""
        channel = self.spinBox_channel.value()
        f_unit = self.comboBox_funit.currentText()
        f_start = self.start_frequency
        f_stop = self.stop_frequency
        f_npoints = self.spinBox_numberOfPoints.value()
        self.vna.set_frequency_sweep(channel=channel, f_unit=f_unit, f_start=f_start, f_stop=f_stop,
                                     f_npoints=f_npoints)

    # NOTE(review): the "freequency" spelling of the setters is kept for
    # backward compatibility with any external callers.
    def set_start_freequency(self, value):
        self._start_frequency = float(value)
        self.lineEdit_startFrequency.setText("{:g}".format(self._start_frequency))

    def get_start_frequency(self):
        self._start_frequency = float(self.lineEdit_startFrequency.text())
        return self._start_frequency

    start_frequency = property(get_start_frequency, set_start_freequency)

    def set_stop_freequency(self, value):
        self._stop_frequency = float(value)
        self.lineEdit_stopFrequency.setText("{:g}".format(self._stop_frequency))

    def get_stop_frequency(self):
        self._stop_frequency = float(self.lineEdit_stopFrequency.text())
        return self._stop_frequency

    stop_frequency = property(get_stop_frequency, set_stop_freequency)

    def frequency_changed(self):
        """Slot: propagate a change of frequency unit to both line edits."""
        self.funit = self.comboBox_funit.currentText()
        self.lineEdit_startFrequency.set_units(self.funit)
        self.lineEdit_stopFrequency.set_units(self.funit)
class VnaSelector(QtWidgets.QWidget):
    """Widget for choosing an analyzer driver, VISA address and sweep options.

    get_analyzer() instantiates the selected driver and applies the
    port/channel/sweep settings currently shown in the widget.
    """
    # Re-emitted whenever setEnabled() is called so containers can react.
    enableStateToggled = QtCore.Signal(bool)

    def __init__(self, parent=None):
        super(VnaSelector, self).__init__(parent)
        # --- Setup UI Elements --- #
        self.verticalLayout = QtWidgets.QVBoxLayout(self)  # primary widget layout
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)  # normally this will be embedded in another application

        self.checkBox_SweepNew = QtWidgets.QCheckBox("Sweep New", self)
        self.checkBox_SweepNew.setLayoutDirection(QtCore.Qt.RightToLeft)
        self.checkBox_RawData = QtWidgets.QCheckBox("Raw Data", self)
        self.checkBox_RawData.setLayoutDirection(QtCore.Qt.RightToLeft)
        self.label_ports = QtWidgets.QLabel("ports 1,2:")
        self.spinBox_port1 = QtWidgets.QSpinBox(self)
        self.spinBox_port2 = QtWidgets.QSpinBox(self)
        for port in (self.spinBox_port1, self.spinBox_port2):
            port.setMinimum(1)
            port.setMaximum(2)
        self.spinBox_port1.setValue(1)
        self.spinBox_port2.setValue(2)

        self.label_analyzerList = QtWidgets.QLabel("Select Analyzer", self)
        self.comboBox_analyzer = QtWidgets.QComboBox(self)
        self.hlay_analyzerList = QtWidgets.QHBoxLayout()
        self.hlay_analyzerList.addWidget(self.label_analyzerList)
        self.hlay_analyzerList.addWidget(self.comboBox_analyzer)
        self.label_visaString = QtWidgets.QLabel("Visa String", self)
        self.lineEdit_visaString = QtWidgets.QLineEdit(self)
        self.row1 = QtWidgets.QHBoxLayout()
        self.row1.addLayout(self.hlay_analyzerList)
        self.row1.addWidget(self.label_visaString)
        self.row1.addWidget(self.lineEdit_visaString)
        self.row1.insertStretch(-1)

        self.label_channel = QtWidgets.QLabel("Channel:")
        self.spinBox_channel = QtWidgets.QSpinBox()
        self.spinBox_channel.setMinimum(1)
        self.spinBox_channel.setMaximum(256)
        self.btn_controlVna = QtWidgets.QPushButton("Set VNA State")
        self.row2 = QtWidgets.QHBoxLayout()
        self.row2.addWidget(self.label_channel)
        self.row2.addWidget(self.spinBox_channel)
        self.row2.addWidget(qt.QVLine())
        self.row2.addWidget(self.checkBox_SweepNew)
        self.row2.addWidget(qt.QVLine())
        self.row2.addWidget(self.checkBox_RawData)
        self.row2.addWidget(qt.QVLine())
        self.row2.addWidget(self.label_ports)
        self.row2.addWidget(self.spinBox_port1)
        self.row2.addWidget(self.spinBox_port2)
        self.row2.addWidget(qt.QVLine())
        self.row2.addWidget(self.btn_controlVna)
        self.row2.insertStretch(-1)

        self.verticalLayout.addLayout(self.row1)
        self.verticalLayout.addLayout(self.row2)

        # Connect before populating so the first addItem triggers the slot
        # and the defaults are filled in for the initially selected driver.
        self.comboBox_analyzer.currentIndexChanged.connect(self.update_selected_analyzer)
        for key, val in analyzers.items():
            self.comboBox_analyzer.addItem(key)
        # --- End Setup UI Elements --- #

        self.btn_controlVna.clicked.connect(self.control_vna)
        self.btn_controlVna.setEnabled(False)

    def setEnabled(self, enabled):
        """Enable/disable the widget and broadcast the new state."""
        super(VnaSelector, self).setEnabled(enabled)
        self.enableStateToggled.emit(enabled)

    def update_selected_analyzer(self):
        """Slot: refresh defaults when a different analyzer driver is chosen."""
        cls = analyzers[self.comboBox_analyzer.currentText()]
        self.lineEdit_visaString.setText(cls.DEFAULT_VISA_ADDRESS)
        # Both port selectors must track the driver's port count; the
        # original only updated port 2, leaving port 1 capped at 2 even for
        # analyzers with more ports.
        self.spinBox_port1.setMaximum(cls.NPORTS)
        self.spinBox_port2.setMaximum(cls.NPORTS)
        self.spinBox_channel.setMaximum(cls.NCHANNELS)

    def get_analyzer(self):
        """Instantiate the selected driver and apply the widget's settings."""
        nwa = analyzers[self.comboBox_analyzer.currentText()](self.lineEdit_visaString.text())
        nwa.set_measurement_parameters(
            port1=self.port1, port2=self.port2, sweep=self.sweep_new,
            channel=self.channel, raw_data=self.raw_data)
        return nwa

    @property
    def port1(self):
        return self.spinBox_port1.value()

    @port1.setter
    def port1(self, val):
        self.spinBox_port1.setValue(val)

    @property
    def port2(self):
        return self.spinBox_port2.value()

    @port2.setter
    def port2(self, val):
        self.spinBox_port2.setValue(val)

    @property
    def sweep_new(self):
        return self.checkBox_SweepNew.isChecked()

    @sweep_new.setter
    def sweep_new(self, val):
        self.checkBox_SweepNew.setChecked(val)

    @property
    def raw_data(self):
        return self.checkBox_RawData.isChecked()

    @raw_data.setter
    def raw_data(self, val):
        self.checkBox_RawData.setChecked(val)

    @property
    def channel(self):
        return self.spinBox_channel.value()

    @channel.setter
    def channel(self, val):
        self.spinBox_channel.setValue(val)

    def control_vna(self):
        """Placeholder: interactive VNA control is not implemented yet."""
        qt.warnMissingFeature()
        # with self.get_analyzer() as vna:
        #     VnaControllerDialog(vna).exec_()
class ReflectDialog(QtWidgets.QDialog):
    """Dialog for acquiring the two reflect standards of a calibration.

    The reflect data can be measured through the attached analyzer or
    loaded from touchstone files (one .s1p per port, or one combined
    .s2p). The OK button is enabled once a complete 2-port reflect
    network is available in self.reflect_2port.
    """

    def __init__(self, analyzer=None, parent=None, **kwargs):
        super(ReflectDialog, self).__init__(parent)
        self.setWindowTitle("Measure Reflect Standards")
        self.verticalLayout = QtWidgets.QVBoxLayout(self)
        self.gridLayout = QtWidgets.QGridLayout()
        self.label_port2 = QtWidgets.QLabel(self)
        self.gridLayout.addWidget(self.label_port2, 1, 2, 1, 1)
        self.btn_loadPort1 = QtWidgets.QPushButton(self)
        self.gridLayout.addWidget(self.btn_loadPort1, 0, 1, 1, 1)
        self.btn_loadPort2 = QtWidgets.QPushButton(self)
        self.gridLayout.addWidget(self.btn_loadPort2, 1, 1, 1, 1)
        self.label_port1 = QtWidgets.QLabel(self)
        self.gridLayout.addWidget(self.label_port1, 0, 2, 1, 1)
        self.btn_measurePort2 = QtWidgets.QPushButton(self)
        self.gridLayout.addWidget(self.btn_measurePort2, 1, 0, 1, 1)
        self.btn_measurePort1 = QtWidgets.QPushButton(self)
        self.gridLayout.addWidget(self.btn_measurePort1, 0, 0, 1, 1)
        self.btn_measureBoth = QtWidgets.QPushButton(self)
        self.gridLayout.addWidget(self.btn_measureBoth, 2, 0, 1, 1)
        self.btn_loadBoth = QtWidgets.QPushButton(self)
        self.gridLayout.addWidget(self.btn_loadBoth, 2, 1, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout)
        self.buttonBox = QtWidgets.QDialogButtonBox(self)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
        self.verticalLayout.addWidget(self.buttonBox)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.label_port2.setText("port2 - not ready")
        self.btn_loadPort1.setText("Load Port 1 (.s1p)")
        # Fixed label: the original said "Load Port (.s1p)", dropping the "2".
        self.btn_loadPort2.setText("Load Port 2 (.s1p)")
        self.label_port1.setText("port1 - not ready")
        self.btn_measurePort2.setText("Measure Port2")
        self.btn_measurePort1.setText("Measure Port1")
        self.btn_measureBoth.setText("Measure Both")
        self.btn_loadBoth.setText("Load Both (.s2p)")

        self._ready = False
        self.analyzer = analyzer
        # Without an analyzer only the file-loading path is available.
        if self.analyzer is None:
            for btn in (self.btn_measureBoth, self.btn_measurePort1, self.btn_measurePort2):
                btn.setEnabled(False)

        # Analyzer port numbers used by measure_s11/measure_s22; the original
        # accepted **kwargs but never stored ports, so those methods raised
        # AttributeError on self.port1 / self.port2.
        self.port1 = kwargs.get("port1", 1)
        self.port2 = kwargs.get("port2", 2)

        self.reflect_2port = None
        self.s11 = None
        self.s22 = None
        self.ok = self.buttonBox.button(QtWidgets.QDialogButtonBox.Ok)  # type: QtWidgets.QPushButton
        self.ok.setEnabled(False)
        self.btn_measureBoth.clicked.connect(self.measure_both)
        self.btn_measurePort1.clicked.connect(self.measure_s11)
        self.btn_measurePort2.clicked.connect(self.measure_s22)
        self.btn_loadBoth.clicked.connect(self.load_both)
        self.btn_loadPort1.clicked.connect(self.load_s11)
        self.btn_loadPort2.clicked.connect(self.load_s22)

    def measure_s11(self):
        """Measure the port-1 reflect standard through the analyzer."""
        self.s11 = self.analyzer.get_oneport(port=self.port1)
        self.evaluate()

    def measure_s22(self):
        """Measure the port-2 reflect standard through the analyzer."""
        self.s22 = self.analyzer.get_oneport(port=self.port2)
        self.evaluate()

    def measure_both(self):
        """Measure both reflects in a single 2-port acquisition."""
        self.reflect_2port = self.analyzer.get_twoport(**self.analyzer.params_twoport)
        self.evaluate()

    def load_s11(self):
        """Load the port-1 reflect from a 1-port touchstone file."""
        self.s11 = load_network_file("load port 1 reflect", "1-port touchstone (*.s1p)")
        self.evaluate()

    def load_s22(self):
        """Load the port-2 reflect from a 1-port touchstone file."""
        self.s22 = load_network_file("load port 2 reflect", "1-port touchstone (*.s1p)")
        self.evaluate()

    def load_both(self):
        """Load a combined 2-port reflect measurement."""
        self.reflect_2port = load_network_file("load reflect cal standard")
        self.evaluate()

    @property
    def ready(self):
        """True once a complete 2-port reflect is available (enables OK)."""
        return self._ready

    @ready.setter
    def ready(self, val):
        if val is True:
            self._ready = True
            self.ok.setEnabled(True)
        else:
            self._ready = False
            self.ok.setEnabled(False)

    def evaluate(self):
        """Combine the collected data and refresh the UI labels."""
        if type(self.reflect_2port) is skrf.Network:
            self.ready = True
            self.label_port1.setText("port1 - measured")
            self.label_port2.setText("port2 - measured")
        else:
            if type(self.s11) is skrf.Network and type(self.s22) is skrf.Network:
                # Build the 2-port reflect from the two 1-port measurements.
                self.reflect_2port = skrf.two_port_reflect(self.s11, self.s22)
                # self.reflect_2port = skrf.four_oneports_2_twoport(self.s11, self.s11, self.s22, self.s22)
                # self.reflect_2port.s[:, 0, 1] = 0
                # self.reflect_2port.s[:, 1, 0] = 0
                self.ready = True
                self.label_port1.setText("port1 - measured")
                self.label_port2.setText("port2 - measured")
            else:
                self.ready = False
                if type(self.s11) is skrf.Network:
                    self.label_port1.setText("port1 - measured")
                else:
                    self.label_port1.setText("port1 - not measured")
                if type(self.s22) is skrf.Network:
                    self.label_port2.setText("port2 - measured")
                else:
                    self.label_port2.setText("port2 - not measured")
class MeasurementDialog(QtWidgets.QDialog):
    """Dialog for retrieving measurements from a network analyzer.

    Offers general options (name prefix, VISA timeout, channel), a one-click
    n-port snp acquisition, and a selectable list of the instrument's traces.
    Acquired networks are delivered through the `measurements_available`
    signal so callers can stay decoupled from the dialog.
    """

    # Emits the measured network(s) (a Network or list of Networks).
    measurements_available = QtCore.Signal(object)

    def __init__(self, nwa, parent=None):
        """Build the dialog UI for the analyzer driver `nwa`."""
        super(MeasurementDialog, self).__init__(parent)
        self.setWindowTitle("MeasurementDialog")
        self.horizontalLayout_main = QtWidgets.QHBoxLayout(self)
        self.verticalLayout_left = QtWidgets.QVBoxLayout()

        # --- options group: name prefix, timeout, sweep/channel controls ---
        self.groupBox_options = QtWidgets.QGroupBox("Options", self)
        self.lineEdit_namePrefix = QtWidgets.QLineEdit(self)
        self.label_namePrefix = QtWidgets.QLabel("Name Prefix:")
        self.horizontalLayout_namePrefix = QtWidgets.QHBoxLayout()
        self.horizontalLayout_namePrefix.addWidget(self.label_namePrefix)
        self.horizontalLayout_namePrefix.addWidget(self.lineEdit_namePrefix)
        self.label_timeout = QtWidgets.QLabel("Timeout (ms)", self)
        self.spinBox_timeout = QtWidgets.QSpinBox(self)
        self.spinBox_timeout.setMinimum(100)
        self.spinBox_timeout.setMaximum(600000)
        # Bug fix: a bare `except:` here also swallowed KeyboardInterrupt /
        # SystemExit; only driver/attribute errors should fall back.
        try:
            self.spinBox_timeout.setValue(nwa.resource.timeout)
        except Exception:
            # Driver may not expose a VISA resource; fall back to 3 s.
            self.spinBox_timeout.setValue(3000)
        self.spinBox_timeout.setSingleStep(1000)
        self.horizontalLayout_timeout = QtWidgets.QHBoxLayout()
        self.horizontalLayout_timeout.addWidget(self.label_timeout)
        self.horizontalLayout_timeout.addWidget(self.spinBox_timeout)
        self.checkBox_sweepNew = QtWidgets.QCheckBox("Sweep New", self.groupBox_options)
        self.checkBox_autoTimeOut = QtWidgets.QCheckBox("Auto Timeout", self.groupBox_options)
        self.horizonatlLayout_sweep = QtWidgets.QHBoxLayout()
        self.horizonatlLayout_sweep.addWidget(self.checkBox_sweepNew)
        self.horizonatlLayout_sweep.addWidget(self.checkBox_autoTimeOut)
        self.label_channel = QtWidgets.QLabel("Channel", self.groupBox_options)
        self.spinBox_channel = QtWidgets.QSpinBox(self.groupBox_options)
        self.horizontalLayout_channel = QtWidgets.QHBoxLayout()
        self.horizontalLayout_channel.addWidget(self.label_channel)
        self.horizontalLayout_channel.addWidget(self.spinBox_channel)
        self.verticalLayout_options = QtWidgets.QVBoxLayout(self.groupBox_options)
        self.verticalLayout_options.addLayout(self.horizontalLayout_namePrefix)
        self.verticalLayout_options.addLayout(self.horizontalLayout_timeout)
        self.verticalLayout_options.addLayout(self.horizonatlLayout_sweep)
        self.verticalLayout_options.addLayout(self.horizontalLayout_channel)
        self.verticalLayout_left.addWidget(self.groupBox_options)

        # --- n-port network acquisition group ---
        self.groupBox_snp = QtWidgets.QGroupBox("Get N-Port Network", self)
        self.verticalLayout_snp = QtWidgets.QVBoxLayout(self.groupBox_snp)
        self.label_ports = QtWidgets.QLabel("Ports:", self.groupBox_snp)
        self.lineEdit_ports = QtWidgets.QLineEdit(self.groupBox_snp)
        self.btn_measureSnp = QtWidgets.QPushButton("Measure Network", self.groupBox_snp)
        self.horizontalLayout_nports = QtWidgets.QHBoxLayout()
        self.horizontalLayout_nports.addWidget(self.label_ports)
        self.horizontalLayout_nports.addWidget(self.lineEdit_ports)
        self.verticalLayout_snp.addWidget(self.btn_measureSnp)
        self.verticalLayout_snp.addLayout(self.horizontalLayout_nports)
        self.spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout_snp.addItem(self.spacerItem)
        self.verticalLayout_left.addWidget(self.groupBox_snp)

        # --- instrument trace list group ---
        self.groupBox_traces = QtWidgets.QGroupBox("Available Traces", self)
        self.listWidget_traces = QtWidgets.QListWidget(self.groupBox_traces)
        self.listWidget_traces.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
        self.btn_updateTraces = QtWidgets.QPushButton("Update", self.groupBox_traces)
        self.btn_measureTraces = QtWidgets.QPushButton("Measure Traces", self.groupBox_traces)
        self.horizontalLayout_tracesButtons = QtWidgets.QHBoxLayout()
        self.horizontalLayout_tracesButtons.addWidget(self.btn_updateTraces)
        self.horizontalLayout_tracesButtons.addWidget(self.btn_measureTraces)
        self.verticalLayout_traces = QtWidgets.QVBoxLayout(self.groupBox_traces)
        self.verticalLayout_traces.addWidget(self.listWidget_traces)
        self.verticalLayout_traces.addLayout(self.horizontalLayout_tracesButtons)
        self.horizontalLayout_main.addLayout(self.verticalLayout_left)
        self.horizontalLayout_main.addWidget(self.groupBox_traces)

        self.nwa = nwa
        self.btn_updateTraces.clicked.connect(self.update_traces)
        self.btn_measureSnp.clicked.connect(self.measure_snp)
        self.btn_measureTraces.clicked.connect(self.measure_traces)
        if self.nwa.NCHANNELS:
            self.spinBox_channel.setValue(1)
            self.spinBox_channel.setMinimum(1)
            self.spinBox_channel.setMaximum(self.nwa.NCHANNELS)
        else:
            # Driver does not support channels; grey out the selector.
            self.spinBox_channel.setEnabled(False)
        # Default to all ports, 1-based as seen by the instrument.
        self.lineEdit_ports.setText(",".join([str(port + 1) for port in range(self.nwa.nports)]))
        self.spinBox_timeout.valueChanged.connect(self.set_timeout)

    def set_timeout(self):
        """Push the spin-box value (ms) down to the instrument's VISA timeout."""
        self.nwa.resource.timeout = self.spinBox_timeout.value()

    def measure_traces(self):
        """Acquire the currently selected traces and emit them as networks."""
        items = self.listWidget_traces.selectedItems()
        if len(items) < 1:
            print("nothing to measure")
            return
        traces = []
        for item in items:
            traces.append(item.trace)
        ntwks = self.nwa.get_traces(traces, name_prefix=self.lineEdit_namePrefix.text())
        self.measurements_available.emit(ntwks)

    def measure_snp(self):
        """Acquire an n-port network for the listed ports and emit it."""
        ports = self.lineEdit_ports.text().replace(" ", "").split(",")
        try:
            ports = [int(port) for port in ports]
        except Exception:
            qt.error_popup("Ports must be a comma separated list of integers")
            return
        kwargs = {"ports": ports,
                  "channel": self.spinBox_channel.value(),
                  "sweep": self.checkBox_sweepNew.isChecked(),
                  "name": self.lineEdit_namePrefix.text()}
        if self.checkBox_autoTimeOut.isChecked():
            kwargs["timeout"] = self.spinBox_timeout.value()
        ntwk = self.nwa.get_snp_network(**kwargs)
        self.measurements_available.emit(ntwk)

    def update_traces(self):
        """Refresh the trace list from the instrument's current display."""
        traces = self.nwa.get_list_of_traces()
        self.listWidget_traces.clear()
        for trace in traces:
            item = QtWidgets.QListWidgetItem()
            item.setText(trace["label"])
            # Stash the raw trace descriptor on the item for measure_traces().
            item.trace = trace
            self.listWidget_traces.addItem(item)
| 40.528736
| 149
| 0.673757
|
349ba3465e9e6a1999cf5255a95b83cf9ee09c98
| 8,893
|
py
|
Python
|
examples/Nolan/AFRL/Carts/cart76.py
|
Rapid-Design-of-Systems-Laboratory/beluga-legacy
|
d14713d8211b64293c4427005cf02fbd58630598
|
[
"MIT"
] | 1
|
2019-03-26T03:00:03.000Z
|
2019-03-26T03:00:03.000Z
|
examples/Nolan/AFRL/Carts/cart76.py
|
Rapid-Design-of-Systems-Laboratory/beluga-legacy
|
d14713d8211b64293c4427005cf02fbd58630598
|
[
"MIT"
] | null | null | null |
examples/Nolan/AFRL/Carts/cart76.py
|
Rapid-Design-of-Systems-Laboratory/beluga-legacy
|
d14713d8211b64293c4427005cf02fbd58630598
|
[
"MIT"
] | 1
|
2019-07-14T22:53:52.000Z
|
2019-07-14T22:53:52.000Z
|
if __name__ == "__main__":
    import numpy as np
    import beluga.Beluga as Beluga
    import beluga.bvpsol as bvpsol
    import beluga.bvpsol.algorithms as algorithms
    import beluga.optim.Problem
    from beluga.optim.problem import *
    from beluga.continuation import *
    import logging

    # Symbolic derivation of the EKF covariance propagation equations.
    from sympy import symbols, Matrix, Transpose, simplify, diff, diag
    from sympy import sin
    from sympy import cos, acos
    from sympy import sqrt
    from sympy import exp
    from sympy import atan
    from numpy import pi

    writeEqn = True    # regenerate eqns.txt / eqnsUnscaled.txt below
    simpList = False   # run sympy.simplify on each scaled equation (slow)

    if writeEqn:
        writeList = []

        # Problem constants (symbolic): speed, turn authority, beacon
        # position, time step and noise standard deviations.
        v, u_max = symbols('v, u_max')
        xb, yb = symbols('xb, yb')
        Dt, sigv, sigw, sigr = symbols('Dt, sigv, sigw, sigr')

        # Primary states of the planar unicycle model.
        x, y, theta = symbols('x, y, theta')

        # Control (turn input, wrapped through sin() to bound it).
        w = symbols('w')

        # Primary state rates.
        x_dot = v * cos(theta)
        y_dot = v * sin(theta)
        theta_dot = u_max * sin(w)
        writeList = [x_dot, y_dot, theta_dot]

        # Covariance states: upper triangle of the symmetric 3x3 matrix P.
        p11, p12, p13, p22, p23, p33 = symbols('p11 p12 p13 p22 p23 p33')

        # BUG FIX: the last row previously read [p13, p13, p33]; symmetry
        # of a covariance matrix requires P[2,1] == P[1,2] == p23.
        P = Matrix([[p11, p12, p13],
                    [p12, p22, p23],
                    [p13, p23, p33]])

        # Jacobian of the dynamics with respect to the primary states.
        F = Matrix([[diff(x_dot, x), diff(x_dot, y), diff(x_dot, theta)],
                    [diff(y_dot, x), diff(y_dot, y), diff(y_dot, theta)],
                    [diff(theta_dot, x), diff(theta_dot, y), diff(theta_dot, theta)]])

        # Process noise enters through the speed and turn-rate channels.
        G = Matrix([[cos(theta), 0],
                    [sin(theta), 0],
                    [0, 1]])

        # Range-only measurement to the beacon at (xb, yb).
        h = sqrt((x - xb)**2 + (y - yb)**2)
        H = Matrix([[diff(h, x), diff(h, y), diff(h, theta)]])

        Q = Dt*diag(sigv**2, sigw**2)
        R = Dt*diag(sigr**2)

        # Continuous-time EKF covariance propagation (Riccati equation).
        P_dot = (F*P + P*F.T - P*H.T*(R**-1)*H*P + G*Q*G.T)
        Dim = P_dot.shape

        # Path-cost integrand: growth rate of the y-position variance,
        # without the measurement-update and noise terms.
        PP = F * P + P * F.T
        obj = PP[1, 1]

        # Append the upper triangle of P_dot to the state-rate list.
        for i in range(0, Dim[0]):
            for j in range(i, Dim[1]):
                writeList.append(P_dot[i, j])

        states = [x, y, theta,
                  p11, p12, p13,
                  p22, p23,
                  p33]

        # Scale factors used to nondimensionalize each state.
        x_s, y_s, theta_s, p11_s, p12_s, p13_s, p22_s, p23_s, p33_s = \
            symbols('x_s, y_s, theta_s, p11_s, p12_s, p13_s, p22_s, p23_s, p33_s')
        scales = [x_s, y_s, theta_s,
                  p11_s, p12_s, p13_s,
                  p22_s, p23_s,
                  p33_s]

        # Scaled (nondimensional) state symbols.
        x_n, y_n, theta_n, p11_n, p12_n, p13_n, p22_n, p23_n, p33_n = \
            symbols('x_n, y_n, theta_n, p11_n, p12_n, p13_n, p22_n, p23_n, p33_n')
        states_new = [x_n, y_n, theta_n,
                      p11_n, p12_n, p13_n,
                      p22_n, p23_n,
                      p33_n]

        # Nondimensionalize: divide each rate by its own scale and
        # substitute state -> scale * scaled_state throughout.
        scaledList = []
        for item, item_scale in zip(writeList, scales):
            item = item / item_scale
            for state, new, scale in zip(states, states_new, scales):
                item = item.subs(state, scale * new)
            scaledList.append(item)

        # Express the cost integrand in the scaled states as well.
        for state, new, scale in zip(states, states_new, scales):
            obj = obj.subs(state, scale * new)

        with open("eqns.txt", "w") as my_file:
            for item in scaledList:
                if simpList:
                    item = simplify(item)
                my_file.write(str(item) + "\n")

        with open("eqnsUnscaled.txt", "w") as my_file:
            for item in writeList:
                my_file.write(str(item) + "\n")

    ''' Start Optimal Control Calculations '''

    # Read the covariance state rates generated above.
    with open("eqns.txt", "r") as f:
        eqnsList = list(f)

    problem = beluga.optim.Problem('carts0')

    # Independent variable.
    problem.independent('t', 's')

    # Equations of motion (scaled).  The small ep*u_max*cos(w) term in the
    # x-rate regularizes the control.
    problem\
        .state('x_n', eqnsList[0] + '+ ep*u_max*cos(w)', '1') \
        .state('y_n', eqnsList[1], '1') \
        .state('theta_n', eqnsList[2], '1') \
        .state('p11_n', eqnsList[3], '1') \
        .state('p12_n', eqnsList[4], '1') \
        .state('p13_n', eqnsList[5], '1') \
        .state('p22_n', eqnsList[6], '1') \
        .state('p23_n', eqnsList[7], '1') \
        .state('p33_n', eqnsList[8], '1')

    # Control variable.
    problem.control('w', '1')

    # Path cost: minimize the variance-growth expression derived above.
    problem.cost['path'] = Expression(str(obj), 's')

    # Boundary conditions: fully specified initial state, fixed final position.
    problem.constraints() \
        .initial('x_n-x_n_0', '1') \
        .initial('y_n-y_n_0', '1') \
        .initial('theta_n-theta_n_0', '1') \
        .initial('p11_n-p11_n_0', '1') \
        .initial('p12_n-p12_n_0', '1') \
        .initial('p13_n-p13_n_0', '1') \
        .initial('p22_n-p22_n_0', '1') \
        .initial('p23_n-p23_n_0', '1') \
        .initial('p33_n-p33_n_0', '1') \
        .terminal('x_n-x_n_f', '1') \
        .terminal('y_n-y_n_f', '1')

    # Numeric constants (units are nondimensional throughout).
    problem.constant('Dt', 0.1, '1')
    problem.constant('sigv', 0.1, '1')
    problem.constant('sigw', 0.1, '1')
    problem.constant('sigr', 0.1, '1')
    problem.constant('xb', 5, '1')
    problem.constant('yb', 5, '1')
    problem.constant('u_max', 0.1, '1')
    problem.constant('v', 30, '1')
    problem.constant('x_s', 1, '1')
    problem.constant('y_s', 1, '1')
    problem.constant('theta_s', 1, '1')
    problem.constant('p11_s', 1e-3, '1')
    problem.constant('p12_s', 1e-3, '1')
    problem.constant('p13_s', 1e-3, '1')
    problem.constant('p22_s', 1e-1, '1')
    problem.constant('p23_s', 1e-2, '1')
    problem.constant('p33_s', 1e-3, '1')
    problem.constant('ep', 5, '1')

    problem.bvp_solver = algorithms.MultipleShooting(derivative_method='fd', tolerance=1e-4, max_iterations=1000, verbose=True, cached=False, number_arcs=16)

    problem.scale.unit('m', 1) \
        .unit('s', 1) \
        .unit('kg', 1) \
        .unit('rad', 1)

    problem.guess.setup('auto', start=[0, 0, 0, 0, 0, 0, 0, 0, 0], time_integrate=1, costate_guess=[0, 0, 0.001, -0.0001, 0.0, 0.0, 0.001, 0.0, 0.])

    # Continuation: march the target out to (20, 0), then push x to 250.
    problem.steps.add_step().num_cases(5) \
        .terminal('x_n', 20) \
        .terminal('y_n', 0)

    problem.steps.add_step().num_cases(40) \
        .terminal('x_n', 250)

    Beluga.run(problem, display_level=logging.DEBUG)
| 30.248299
| 288
| 0.505116
|
5060f3f800088af76811aca7b4b7329105e95ce4
| 1,767
|
py
|
Python
|
quality_manager/measurements/test_measurement.py
|
MOOCworkbench/MOOCworkbench
|
c478dd4f185c50e0a48319e2b30d418533c32a34
|
[
"MIT"
] | null | null | null |
quality_manager/measurements/test_measurement.py
|
MOOCworkbench/MOOCworkbench
|
c478dd4f185c50e0a48319e2b30d418533c32a34
|
[
"MIT"
] | 1
|
2017-07-09T17:38:21.000Z
|
2017-07-09T17:38:22.000Z
|
quality_manager/measurements/test_measurement.py
|
MOOCworkbench/MOOCworkbench
|
c478dd4f185c50e0a48319e2b30d418533c32a34
|
[
"MIT"
] | null | null | null |
from coverage_manager.helpers.coveralls_helper import CoverallsHelper
from git_manager.helpers.github_helper import GitHubHelper
from quality_manager.measurements.measurement import MeasurementAbstraction
from quality_manager.models import (ExperimentMeasure, ExperimentMeasureResult,
RawMeasureResult)
class TestMeasurement(MeasurementAbstraction):
    """Quality measurement that grades a repository by its test coverage."""

    def __init__(self, experiment_step):
        super().__init__(experiment_step)
        self.measurement = ExperimentMeasure.objects.get(name='Testing')
        self.github_helper = GitHubHelper(self.experiment.owner, self.experiment.git_repo.name)
        self.raw = RawMeasureResult()

    def measure(self):
        """Fetch coverage from Coveralls and map it onto LOW/MEDIUM/HIGH."""
        coverage = self.measure_code_coverage()
        if not coverage:
            # No coverage data available: grade conservatively.
            self.result.result = ExperimentMeasureResult.LOW
            return
        percentage = float(coverage)
        self.raw.key = 'code_coverage'
        self.raw.value = percentage
        if percentage < 50:
            self.result.result = ExperimentMeasureResult.LOW
        elif percentage < 80:
            self.result.result = ExperimentMeasureResult.MEDIUM
        elif percentage >= 80:
            self.result.result = ExperimentMeasureResult.HIGH

    def measure_code_coverage(self):
        """Return the repository's coverage percentage from Coveralls."""
        helper = CoverallsHelper(user=self.github_helper.owner, repo_name=self.github_helper.repo_name)
        return helper.code_coverage_data()

    def save_and_get_result(self):
        """Persist the result (and raw value, when present) and return it."""
        self.result.measurement = self.measurement
        self.result.save()
        if self.raw.key:
            self.raw.save()
            self.result.raw_values.add(self.raw)
            self.result.save()
        return self.result
| 40.159091
| 106
| 0.682513
|
a8e89b3e5ea2d6af21bd75d221ed07b8dd13f4ae
| 5,575
|
py
|
Python
|
check_in/github_checks_requests.py
|
JayBatra/check-in
|
4d11a0a70e62a44318a562de03fcf5c4f868e988
|
[
"MIT"
] | null | null | null |
check_in/github_checks_requests.py
|
JayBatra/check-in
|
4d11a0a70e62a44318a562de03fcf5c4f868e988
|
[
"MIT"
] | null | null | null |
check_in/github_checks_requests.py
|
JayBatra/check-in
|
4d11a0a70e62a44318a562de03fcf5c4f868e988
|
[
"MIT"
] | null | null | null |
from functools import partial
from typing import List, Optional
import attr
__all__ = 'NewCheckRequest', 'UpdateCheckRequest', 'to_gh_query'

# Pre-configured attr.ib factories shared by the request models below.

# Required string field: non-None values are coerced to str; None becomes ''.
str_attrib = partial(attr.ib, converter=lambda s: str(s) if s is not None else '')
# Required integer field.
int_attrib = partial(attr.ib, converter=int)
# Optional field defaulting to None.
optional_attrib = partial(
    attr.ib,
    default=None,
)
# Optional string field; validator is skipped for None values.
optional_str_attrib = partial(
    optional_attrib,
    validator=attr.validators.optional(lambda *_: str(_[-1])),
)
# Optional list field defaulting to [].
# NOTE(review): a shared mutable default would normally be attr.Factory(list);
# in practice the per-field converters replace the default, so the shared []
# is not stored — confirm before changing.
optional_list_attrib = partial(
    attr.ib,
    default=[],
    validator=attr.validators.optional(lambda *_: list(_[-1])),
)
def optional_converter(kwargs_dict, convert_to_cls):
    """Coerce *kwargs_dict* into an instance of *convert_to_cls*.

    ``None`` and values that are already instances of the target class are
    passed through untouched; anything else is expanded into the class
    constructor as keyword arguments.
    """
    if kwargs_dict is None or isinstance(kwargs_dict, convert_to_cls):
        return kwargs_dict
    return convert_to_cls(**kwargs_dict)
def optional_list_converter(args_list, convert_to_cls):
    """Apply :func:`optional_converter` across a list of kwargs dicts.

    Non-list values (including ``None``) pass through unchanged.  An empty
    list deliberately collapses to ``None`` (the ``or None``), which the
    attrs validators above treat as "field absent".
    """
    if args_list is None or not isinstance(args_list, list):
        return args_list
    converted = [optional_converter(entry, convert_to_cls) for entry in args_list]
    return converted or None
@attr.dataclass
class CheckAnnotation:
    """A single file/line annotation attached to a check run's output."""
    filename: str = str_attrib()
    blob_href: str = str_attrib()
    start_line: int = int_attrib()
    end_line: int = int_attrib()
    # GitHub accepts only these three severity levels.
    warning_level: str = str_attrib(
        validator=attr.validators.in_(
            (
                'notice',
                'warning',
                'failure',
            )
        )
    )
    message: str = str_attrib()
    title: Optional[str] = optional_str_attrib()
    raw_details: Optional[str] = optional_str_attrib()
@attr.dataclass
class CheckImage:
    """An image displayed in a check run's output."""
    alt: str = str_attrib()
    image_url: str = str_attrib()
    caption: Optional[str] = optional_str_attrib()
@attr.dataclass
class CheckActions:
    """A requested action button on a check run (GitHub-imposed length limits)."""
    label: str = str_attrib()
    description: str = str_attrib()
    identifier: str = str_attrib()
    @label.validator
    def label_up_to_20(self, attribute, value):
        if len(value) > 20:
            raise ValueError(f'`{attribute.name}` must not exceed 20 characters.')
    @description.validator
    def description_up_to_40(self, attribute, value):
        if len(value) > 40:
            raise ValueError(f'`{attribute.name}` must not exceed 40 characters.')
    @identifier.validator
    def identifier_up_to_20(self, attribute, value):
        if len(value) > 20:
            raise ValueError(f'`{attribute.name}` must not exceed 20 characters.')
@attr.dataclass
class CheckOutput:
    """The output section of a check run: summary text plus optional
    annotations and images."""
    title: str = str_attrib()
    summary: str = str_attrib()
    text: str = str_attrib(default="")
    annotations: List[CheckAnnotation] = optional_list_attrib(converter=partial(optional_list_converter, convert_to_cls=CheckAnnotation))
    images: List[CheckImage] = optional_list_attrib(converter=partial(optional_list_converter, convert_to_cls=CheckImage))
@attr.dataclass
class BaseCheckRequestMixin:
    """Fields shared by create- and update-check requests, with the
    cross-field requirements GitHub enforces (status/conclusion/completed_at)."""
    name: str = str_attrib()
    details_url: Optional[str] = optional_str_attrib()
    external_id: Optional[str] = optional_str_attrib()
    status: Optional[str] = attr.ib(
        default='queued',
        validator=attr.validators.optional(
            attr.validators.in_(
                (
                    'queued',
                    'in_progress',
                    'completed',
                )
            )
        )
    )
    started_at: Optional[str] = optional_str_attrib()  # '2018-05-27T14:30:33Z', datetime.isoformat()
    conclusion: Optional[str] = attr.ib(
        # [required] if 'status' is set to 'completed', should be missing if it's unset
        default=None,
        validator=attr.validators.optional(
            attr.validators.in_(
                (
                    'success',
                    'failure',
                    'neutral',
                    'cancelled',
                    'timed_out',
                    'action_required',
                )
            )
        ),
    )
    completed_at: Optional[str] = optional_str_attrib()  # [required] if 'conclusion' is set # '2018-05-27T14:30:33Z',
    output: Optional[CheckOutput] = optional_attrib(converter=partial(optional_converter, convert_to_cls=CheckOutput))
    actions: List[CheckActions] = optional_list_attrib(converter=partial(optional_list_converter, convert_to_cls=CheckActions))
    @conclusion.validator
    def depends_on_status(self, attribute, value):
        # GitHub rejects completed runs without a conclusion.
        if self.status == 'completed' and not value:
            raise ValueError(f'`{attribute.name}` must be provided if status is completed')
    @completed_at.validator
    def depends_on_conclusion(self, attribute, value):
        if self.conclusion is not None and not value:
            raise ValueError(f'`{attribute.name}` must be provided if conclusion is present')
    @actions.validator
    def actions_up_to_3(self, attribute, value):
        if value is not None and len(value) > 3:
            raise ValueError(f'`{attribute.name}` must not exceed 3 items.')
@attr.dataclass
class NewCheckRequestMixin:
    """Fields required only when creating (not updating) a check run."""
    head_branch: str = str_attrib()
    head_sha: str = str_attrib()
@attr.dataclass
class NewCheckRequest(NewCheckRequestMixin, BaseCheckRequestMixin):
    """Payload for POST /repos/:owner/:repo/check-runs."""
    pass
@attr.dataclass
class UpdateCheckRequest(BaseCheckRequestMixin):
    """Payload for PATCH /repos/:owner/:repo/check-runs/:id."""
    pass
def conditional_to_gh_query(req):
    """Recursively serialize attrs instances and lists; pass scalars through."""
    if hasattr(req, '__attrs_attrs__'):
        return to_gh_query(req)
    if isinstance(req, list):
        return [conditional_to_gh_query(item) for item in req]
    return req
def to_gh_query(req):
    """Convert an attrs request object into a GitHub API payload dict.

    Keys whose value is None are dropped; nested attrs objects and lists
    are serialized recursively via conditional_to_gh_query.
    """
    payload = {}
    for key, value in attr.asdict(req).items():
        if value is None:
            continue
        payload[key] = conditional_to_gh_query(value)
    return payload
| 30.464481
| 137
| 0.655426
|
2c6cfd8766d897bcf19758203b8a815d035e32dd
| 75,306
|
py
|
Python
|
xnu-4903.241.1/tools/lldbmacros/net.py
|
DogeCoding/iOSCompiledRuntime
|
81bada7472a3470d3424f2d0bbcc414d755c3620
|
[
"MIT"
] | 672
|
2019-10-09T11:15:13.000Z
|
2021-09-21T10:02:33.000Z
|
xnu-4903.221.2/tools/lldbmacros/net.py
|
KaiserFeng/KFAppleOpenSource
|
7ea6ab19f1492a2da262d3554f90882393f975a4
|
[
"MIT"
] | 19
|
2019-11-05T03:32:31.000Z
|
2021-07-15T11:16:25.000Z
|
xnu-4903.221.2/tools/lldbmacros/net.py
|
KaiserFeng/KFAppleOpenSource
|
7ea6ab19f1492a2da262d3554f90882393f975a4
|
[
"MIT"
] | 216
|
2019-10-10T01:47:36.000Z
|
2021-09-23T07:56:54.000Z
|
""" Please make sure you read the README COMPLETELY BEFORE reading anything below.
It is very critical that you read coding guidelines in Section E in README file.
"""
from xnu import *
from utils import *
from string import *
from socket import *
import xnudefines
from netdefines import *
from routedefines import *
def GetIfFlagsAsString(if_flags):
    """ Return a formatted string description of the interface flags
        params:
            if_flags - value of (struct ifnet).if_flags
        returns: comma separated flag names, e.g. "UP,BROADCAST"
    """
    out_string = ""
    flags = (unsigned)(if_flags & 0xffff)
    i = 0
    num = 1
    # Walk one bit at a time; if_flags_strings (netdefines) is indexed
    # by bit position.
    while num <= flags:
        if flags & num:
            out_string += if_flags_strings[i] + ","
        i += 1
        num = num << 1
    # Trim the trailing comma (rstrip comes from 'from string import *').
    return rstrip(out_string, ",")
def ShowIfConfiguration(ifnet):
    """ Display ifconfig-like output for the ifnet
        params:
            ifnet - value of type (struct ifnet *)
    """
    iface = Cast(ifnet, 'ifnet *')
    dlifnet = Cast(ifnet, 'dlil_ifnet *')
    out_string = ""
    format_string = "{0: <s}: flags={1: <x} <{2: <s}> index {3: <d} mtu {4: <d}"
    if iface :
        out_string += format_string.format(iface.if_xname, (iface.if_flags & 0xffff), GetIfFlagsAsString(iface.if_flags), iface.if_index, iface.if_data.ifi_mtu)
        out_string += "\n\t(struct ifnet *)" + hex(ifnet)
        # Non-zero send-queue / input-queue depths are appended for inspection.
        if iface.if_snd.ifcq_len :
            out_string += "\n\t" + str(iface.if_snd.ifcq_len)
        if dlifnet.dl_if_inpstorage.rcvq_pkts.qlen :
            out_string += "\n\t" + str(dlifnet.dl_if_inpstorage.rcvq_pkts.qlen)
    print out_string
def GetIfConfiguration(ifname):
    """ Return ifnet structure corresponding to the ifname passed in
        params:
            ifname - interface name string, e.g. "en0"
        returns: (struct ifnet *) value, or None when not found
    """
    global kern
    ifnets = kern.globals.ifnet_head
    for ifnet in IterateTAILQ_HEAD(ifnets, "if_link") :
        if str(ifnet.if_xname) == ifname :
            return ifnet
    return None
# Macro: ifconfig
@lldb_command('ifconfig')
def ShowIfconfig(cmd_args=None) :
    """ Display ifconfig-like output, and print the (struct ifnet *) pointers for further inspection
        usage: ifconfig [showall] -- any argument also dumps each interface's address list
    """
    if cmd_args != None and len(cmd_args) > 0:
        showall = 1
    else:
        showall = 0
    ifnets = kern.globals.ifnet_head
    for ifnet in IterateTAILQ_HEAD(ifnets, "if_link"):
        ShowIfConfiguration(ifnet)
        if (showall == 1):
            print GetIfaddrs(ifnet)
# EndMacro: ifconfig
def GetAddressAsStringColonHex(addr, count):
    """ Format the first count bytes of addr as colon-separated two-digit
        hex, e.g. "0a:1b:2c".  Values wider than a byte are truncated to
        their low two hex digits, matching the original behavior.
    """
    pieces = []
    for idx in range(count):
        pieces.append("{0:02x}".format(addr[idx])[-2:])
    return ":".join(pieces)
def GetSocketAddrAsStringUnspec(sockaddr):
    """ Format an AF_UNSPEC sockaddr: raw payload bytes as colon-hex.
    """
    out_string = ""
    out_string += GetAddressAsStringColonHex(sockaddr.sa_data, sockaddr.sa_len - 2)
    return out_string
def GetSocketAddrAsStringUnix(sockaddr):
    """ Format an AF_UNIX sockaddr as its filesystem path.
    """
    sock_unix = Cast(sockaddr, 'sockaddr_un *')
    if (sock_unix == 0):
        return "(null)"
    else:
        if (len(str(sock_unix.sun_path)) > 0):
            return str(sock_unix.sun_path)
        else:
            # Unbound (anonymous) socket: no path to show.
            return "\"\""
def GetInAddrAsString(ia):
    """ Format a (struct in_addr *) as dotted-quad IPv4 text.
    """
    out_string = ""
    inaddr = Cast(ia, 'in_addr *')
    # inet_ntoa wants the 32-bit address packed into 4 bytes.
    packed_value = struct.pack('I', unsigned(ia.s_addr))
    out_string = inet_ntoa(packed_value)
    return out_string
def GetIn6AddrAsString(ia):
    """ Format a 16-byte IPv6 address array as grouped hex text.
        params:
            ia - indexable 16-element byte array (__u6_addr8)
    """
    out_string = ""
    addr = ia
    addr_format_string = "{0:02x}:{1:02x}:{2:02x}:{3:02x}{4:02x}:{5:02x}:{6:02x}:{7:02x}{8:02x}:{9:02x}:{10:02x}:{11:02x}{12:02x}:{13:02x}:{14:02x}:{15:02x}"
    out_string += addr_format_string.format(unsigned(addr[0]), unsigned(addr[1]), unsigned(addr[2]), unsigned(addr[3]), unsigned(addr[4]), unsigned(addr[5]), unsigned(addr[6]), unsigned(addr[7]), unsigned(addr[8]), unsigned(addr[9]), unsigned(addr[10]), unsigned(addr[11]), unsigned(addr[12]), unsigned(addr[13]), unsigned(addr[14]), unsigned(addr[15]))
    return out_string
def GetSocketAddrAsStringInet(sockaddr):
    """ Format an AF_INET sockaddr as dotted-quad IPv4 text.
    """
    sock_in = Cast(sockaddr, 'sockaddr_in *')
    return GetInAddrAsString(sock_in.sin_addr)
def GetSocketAddrAsStringInet6(sockaddr):
    """ Format an AF_INET6 sockaddr as IPv6 hex text.
    """
    sock_in6 = Cast(sockaddr, 'sockaddr_in6 *')
    return GetIn6AddrAsString(sock_in6.sin6_addr.__u6_addr.__u6_addr8)
def GetSocketAddrAsStringLink(sockaddr):
    """ Format an AF_LINK sockaddr: "link#<index>" when empty, otherwise
        the hardware address bytes as colon-hex.
    """
    sock_link = Cast(sockaddr, 'sockaddr_dl *')
    if sock_link is None:
        return "(null)"
    else:
        out_string = ""
        if (sock_link.sdl_nlen == 0 and sock_link.sdl_alen == 0 and sock_link.sdl_slen == 0):
            out_string = "link#" + str(int(sock_link.sdl_index))
        else:
            # Hardware address bytes follow the sdl_nlen-long name in sdl_data.
            out_string += GetAddressAsStringColonHex(addressof(sock_link.sdl_data[sock_link.sdl_nlen]), sock_link.sdl_alen)
    return out_string
def GetSocketAddrAsStringAT(sockaddr):
    """ Format an AF_APPLETALK sockaddr: payload bytes as colon-hex.
    """
    out_string = ""
    sock_addr = Cast(sockaddr, 'sockaddr *')
    out_string += GetAddressAsStringColonHex(sockaddr.sa_data, sockaddr.sa_len - 2)
    return out_string
def GetSocketAddrAsString(sockaddr):
    """ Format a (struct sockaddr *) according to its address family.
        params:
            sockaddr - value of type (struct sockaddr *)
        returns: family tag followed by the formatted address, or "(null)"
    """
    if sockaddr is None :
        return "(null)"
    out_string = ""
    if (sockaddr.sa_family == 0):
        out_string += "UNSPC "
        # BUG FIX: the formatted payload used to be computed here and
        # discarded; append it like every other family branch does.
        out_string += GetSocketAddrAsStringUnspec(sockaddr)
    elif (sockaddr.sa_family == 1):
        out_string += "UNIX "
        out_string += GetSocketAddrAsStringUnix(sockaddr)
    elif (sockaddr.sa_family == 2):
        out_string += "INET "
        out_string += GetSocketAddrAsStringInet(sockaddr)
    elif (sockaddr.sa_family == 30):
        out_string += "INET6 "
        out_string += GetSocketAddrAsStringInet6(sockaddr)
    elif (sockaddr.sa_family == 18):
        out_string += "LINK "
        out_string += GetSocketAddrAsStringLink(sockaddr)
    elif (sockaddr.sa_family == 16):
        out_string += "ATLK "
        out_string += GetSocketAddrAsStringAT(sockaddr)
    else:
        # Unknown family: dump the raw bytes.
        out_string += "FAM " + str(sockaddr.sa_family)
        out_string += GetAddressAsStringColonHex(sockaddr.sa_data, sockaddr.sa_len)
    return out_string
# Macro: showifaddrs
@lldb_command('showifaddrs')
def ShowIfaddrs(cmd_args=None):
    """ Show the (struct ifnet).if_addrhead list of addresses for the given ifp
        usage: showifaddrs <ifnet *>
    """
    if cmd_args != None and len(cmd_args) > 0 :
        ifp = kern.GetValueFromAddress(cmd_args[0], 'ifnet *')
        if not ifp:
            print "Unknown value passed as argument."
            return
        i = 1
        for ifaddr in IterateTAILQ_HEAD(ifp.if_addrhead, "ifa_link"):
            # index, ifaddr pointer, formatted address, refcount
            format_string = "\t{0: <d}: 0x{1: <x} {2: <s} [{3: <d}]"
            print format_string.format(i, ifaddr, GetSocketAddrAsString(ifaddr.ifa_addr), ifaddr.ifa_refcnt)
            i += 1
    else :
        print "Missing argument 0 in user function."
# EndMacro: showifaddrs
def GetIfaddrs(ifp):
    """ Return the interface's address list as one formatted string
        (same layout as showifaddrs, one address per line).
    """
    out_string = ""
    if (ifp != 0):
        i = 1
        for ifaddr in IterateTAILQ_HEAD(ifp.if_addrhead, "ifa_link"):
            format_string = "\t{0: <d}: 0x{1: <x} {2: <s} [{3: <d}]"
            out_string += format_string.format(i, ifaddr, GetSocketAddrAsString(ifaddr.ifa_addr), ifaddr.ifa_refcnt) + "\n"
            i += 1
    else:
        out_string += "Missing argument 0 in user function."
    return out_string
def GetCapabilitiesAsString(flags):
    """ Return a formatted string description of the interface capability
        flags (if_capenable), comma separated.
    """
    out_string = ""
    i = 0
    num = 1
    # Walk one bit at a time; if_capenable_strings is indexed by bit position.
    while num <= flags:
        if flags & num:
            out_string += if_capenable_strings[i] + ","
        i += 1
        num = num << 1
    return rstrip(out_string, ",")
def GetIfEflagsAsString(if_eflags):
    """ Return a formatted string description of the interface extended
        flags (if_eflags), comma separated.
    """
    out_string = ""
    flags = unsigned(if_eflags)
    i = 0
    num = 1
    while num <= flags:
        if flags & num:
            out_string += if_eflags_strings[i] + ","
        i += 1
        num = num << 1
    return rstrip(out_string, ",")
def ShowDlilIfnetConfiguration(dlil_ifnet, show_all) :
    """ Formatted display of dlil_ifnet structures
        params:
            dlil_ifnet - value of type (struct dlil_ifnet *)
            show_all   - nonzero to also dump the interface's address list
    """
    DLIF_INUSE = 0x1
    DLIF_REUSE = 0x2
    if dlil_ifnet is None :
        return
    dlil_iface = Cast(dlil_ifnet, 'dlil_ifnet *')
    iface = Cast(dlil_ifnet, 'ifnet *')
    out_string = ""
    # A leading '*' marks an interface slot flagged for reuse.
    if (dlil_iface.dl_if_flags & DLIF_REUSE) :
        out_string += "*"
    format_string = "{0: <s}: flags={1: <x} <{2: <s}> index {3: <d} mtu {4: <d}"
    extended_flags_format_string = "\n\teflags={0: <x} <{1: <s}>"
    capenabled_format_string = "\n\toptions={0: <x} <{1: <s}>"
    # Detached (not in-use) interfaces print with their name bracketed.
    if (dlil_iface.dl_if_flags & DLIF_INUSE) :
        out_string += format_string.format(iface.if_xname, (iface.if_flags & 0xffff), GetIfFlagsAsString(iface.if_flags), iface.if_index, iface.if_data.ifi_mtu)
    else :
        out_string += format_string.format("[" + str(iface.if_name) + str(int(iface.if_unit)) + "]", (iface.if_flags & 0xffff), GetIfFlagsAsString(iface.if_flags), iface.if_index, iface.if_data.ifi_mtu)
    if (iface.if_eflags) :
        out_string += extended_flags_format_string.format(iface.if_eflags, GetIfEflagsAsString(iface.if_eflags))
    if (iface.if_capenable) :
        out_string += capenabled_format_string.format(iface.if_capenable, GetCapabilitiesAsString(iface.if_capenable))
    out_string += "\n\t(struct ifnet *)" + hex(dlil_ifnet) + "\n"
    if show_all :
        out_string += GetIfaddrs(iface)
        out_string += "\n"
    print out_string
# Macro: showifnets
@lldb_command('showifnets')
def ShowIfnets(cmd_args=None) :
    """ Display ifconfig-like output for all attached and detached interfaces
        usage: showifnets [showall]
    """
    showall = 0
    if cmd_args != None and len(cmd_args) > 0 :
        showall = 1
    dlil_ifnets = kern.globals.dlil_ifnet_head
    for dlil_ifnet in IterateTAILQ_HEAD(dlil_ifnets, "dl_if_link"):
        ShowDlilIfnetConfiguration(dlil_ifnet, showall)
# EndMacro: showifnets
# Macro: showifmultiaddrs
@lldb_command('showifmultiaddrs')
def ShowIfMultiAddrs(cmd_args=None) :
    """ Show the list of multicast addresses for the given ifp
        usage: showifmultiaddrs <ifnet *>
    """
    out_string = ""
    if cmd_args != None and len(cmd_args) > 0 :
        ifp = kern.GetValueFromAddress(cmd_args[0], 'ifnet *')
        if not ifp:
            print "Unknown value passed as argument."
            return
        ifmulti = cast(ifp.if_multiaddrs.lh_first, 'ifmultiaddr *')
        i = 0
        while ifmulti != 0:
            ifma_format_string = "\t{0: <d}: 0x{1: <x} "
            out_string += (ifma_format_string.format(i + 1, ifmulti))
            # Per family: print the link-layer mapping (when present)
            # followed by the multicast address itself.
            if (ifmulti.ifma_addr.sa_family == 2):
                if (ifmulti.ifma_ll != 0):
                    out_string += GetSocketAddrAsStringLink(ifmulti.ifma_ll.ifma_addr) + " "
                out_string += GetSocketAddrAsStringInet(ifmulti.ifma_addr)
            if (ifmulti.ifma_addr.sa_family == 30):
                if (ifmulti.ifma_ll != 0):
                    out_string += GetSocketAddrAsStringLink(ifmulti.ifma_ll.ifma_addr) + " "
                out_string += GetSocketAddrAsStringInet6(ifmulti.ifma_addr) + " "
            if (ifmulti.ifma_addr.sa_family == 18):
                out_string += GetSocketAddrAsStringLink(ifmulti.ifma_addr) + " "
            if (ifmulti.ifma_addr.sa_family == 0):
                out_string += GetSocketAddrAsStringUnspec(ifmulti.ifma_addr) + " "
            out_string += "[" + str(int(ifmulti.ifma_refcount)) + "]\n"
            ifmulti = cast(ifmulti.ifma_link.le_next, 'ifmultiaddr *')
            i += 1
        print out_string
    else :
        print "Missing argument 0 in user function."
# EndMacro: showifmultiaddrs
# Macro: showinmultiaddrs
@lldb_command('showinmultiaddrs')
def ShowInMultiAddrs(cmd_args=None) :
    """ Show the contents of IPv4 multicast address records
        (walks the global in_multihead list)
    """
    out_string = ""
    inmultihead = kern.globals.in_multihead
    inmulti = cast(inmultihead.lh_first, 'in_multi *')
    i = 0
    while inmulti != 0:
        ifp = inmulti.inm_ifp
        inma_format_string = "\t{0: <d}: 0x{1: <x} "
        out_string += inma_format_string.format(i + 1, inmulti) + " "
        out_string += GetInAddrAsString(addressof(inmulti.inm_addr)) + " "
        ifma_format_string = "(ifp 0x{0: <x} [{1: <s}] ifma {2: <x})"
        out_string += ifma_format_string.format(ifp, ifp.if_xname, inmulti.inm_ifma) + "\n"
        inmulti = cast(inmulti.inm_link.le_next, 'in_multi *')
        i += 1
    print out_string
# EndMacro: showinmultiaddrs
# Macro: showin6multiaddrs
@lldb_command('showin6multiaddrs')
def ShowIn6MultiAddrs(cmd_args=None) :
    """ Show the contents of IPv6 multicast address records
        (walks the global in6_multihead list)
    """
    out_string = ""
    in6multihead = kern.globals.in6_multihead
    in6multi = cast(in6multihead.lh_first, 'in6_multi *')
    i = 0
    while in6multi != 0:
        ifp = in6multi.in6m_ifp
        inma_format_string = "\t{0: <d}: 0x{1: <x} "
        out_string += inma_format_string.format(i + 1, in6multi) + " "
        out_string += GetIn6AddrAsString((in6multi.in6m_addr.__u6_addr.__u6_addr8)) + " "
        ifma_format_string = "(ifp 0x{0: <x} [{1: <s}] ifma {2: <x})"
        out_string += ifma_format_string.format(ifp, ifp.if_xname, in6multi.in6m_ifma) + "\n"
        in6multi = cast(in6multi.in6m_entry.le_next, 'in6_multi *')
        i += 1
    print out_string
# EndMacro: showin6multiaddrs
# tcpcb t_state numbers -> symbolic TCP state names.  The trailing tab is
# part of the original column layout and is preserved verbatim.
TCP_STATE_NAMES = {
    0: "CLOSED\t",
    1: "LISTEN\t",
    2: "SYN_SENT\t",
    3: "SYN_RCVD\t",
    4: "ESTABLISHED\t",
    5: "CLOSE_WAIT\t",
    6: "FIN_WAIT_1\t",
    7: "CLOSING\t",
    8: "LAST_ACK\t",
    9: "FIN_WAIT_2\t",
    10: "TIME_WAIT\t",
}

def GetTcpState(tcpcb):
    """ Return the symbolic connection state of a TCP control block.
        params:
            tcpcb - value object representing a tcpcb * (may be NULL)
        returns:
            str - state name followed by a tab, or "" for a NULL tcpcb or
                  an out-of-range t_state (same as the original if-chain)
    """
    tp = Cast(tcpcb, 'tcpcb *')
    if (int(tp) == 0):
        return ""
    # Single table lookup replaces the original chain of eleven ifs.
    return TCP_STATE_NAMES.get(int(tp.t_state), "")
def GetSocketProtocolAsString(sock):
    """ Return a short label for a socket's IP protocol; for TCP the
        connection state (via GetTcpState) is appended.
    """
    pcb = Cast(sock.so_pcb, 'inpcb *')
    proto = sock.so_proto.pr_protocol
    # Protocol numbers are mutually exclusive, so an elif chain suffices.
    if proto == 6:
        return " TCP " + GetTcpState(pcb.inp_ppcb)
    elif proto == 17:
        return " UDP "
    elif proto == 1:
        return " ICMP "
    elif proto == 254:
        return " DIVERT "
    elif proto == 255:
        return " RAW "
    return ""
def GetInAddr4to6AsString(inaddr):
    """ Format a 4-byte IPv4 address as a dotted quad, or "" for None. """
    if inaddr is None:
        return ""
    octets = Cast(inaddr, 'char *')
    return "{0: <d}.{1: <d}.{2: <d}.{3: <d}".format(
        octets[0], octets[1], octets[2], octets[3])
def GetInPortAsString(port):
    """ Format a 16-bit port number as ":<port>" in host byte order.
        params:
            port - address of an in_port_t stored in network byte order
    """
    out_string = ""
    port_string = Cast(port, 'char *')
    port_unsigned = dereference(Cast(port, 'unsigned short *'))
    # Compare the individual bytes against the 16-bit read to decide whether
    # the value already reads as big-endian here; otherwise swap the bytes.
    # NOTE(review): the comparison mixes (possibly signed) char values with
    # masked ints - confirm it behaves for port bytes >= 0x80.
    if ((((port_unsigned & 0xff00) >> 8) == port_string[0])) and (((port_unsigned & 0x00ff) == port_string[1])):
        out_string += ":" + str(int(port_unsigned))
    else:
        out_string += ":" + str(int(((port_unsigned & 0xff00) >> 8) | ((port_unsigned & 0x00ff) << 8)))
    return out_string
def GetIPv4SocketAsString(sock) :
    """ Describe an IPv4 socket: inpcb pointer, protocol/state, and the
        local -> foreign address:port pair.
    """
    pcb = Cast(sock.so_pcb, 'inpcb *')
    if (pcb == 0):
        return "inpcb: (null) "
    pieces = [
        "inpcb: " + hex(pcb),
        GetSocketProtocolAsString(sock),
        GetInAddr4to6AsString(addressof(pcb.inp_dependladdr.inp46_local)),
        GetInPortAsString(addressof(pcb.inp_lport)),
        " -> ",
        GetInAddr4to6AsString(addressof(pcb.inp_dependfaddr.inp46_foreign)),
        GetInPortAsString(addressof(pcb.inp_fport)),
    ]
    return "".join(pieces)
def GetIPv6SocketAsString(sock) :
    """ Describe an IPv6 socket: inpcb pointer, protocol/state, and the
        local -> foreign address:port pair.
    """
    out_string = ""
    pcb = Cast(sock.so_pcb, 'inpcb *')
    if (pcb == 0):
        out_string += "inpcb: (null) "
    else:
        out_string += "inpcb: " + hex(pcb) + " "
        out_string += GetSocketProtocolAsString(sock)
        # Addresses come from the 16-byte in6_addr union inside the inpcb.
        out_string += GetIn6AddrAsString((pcb.inp_dependladdr.inp6_local.__u6_addr.__u6_addr8))
        out_string += GetInPortAsString(addressof(pcb.inp_lport))
        out_string += " -> "
        out_string += GetIn6AddrAsString((pcb.inp_dependfaddr.inp6_foreign.__u6_addr.__u6_addr8))
        out_string += GetInPortAsString(addressof(pcb.inp_fport))
    return out_string
def GetUnixDomainSocketAsString(sock) :
    """ Describe a UNIX-domain socket: unpcb, vnode, peer unpcb, and bound
        filesystem address.
    """
    pcb = Cast(sock.so_pcb, 'unpcb *')
    if (pcb == 0):
        return "unpcb: (null) "
    return ("unpcb: " + hex(pcb) + " "
            + "unp_vnode: " + hex(pcb.unp_vnode) + " "
            + "unp_conn: " + hex(pcb.unp_conn) + " "
            + "unp_addr: " + GetSocketAddrAsStringUnix(pcb.unp_addr))
def GetSocket(socket) :
    """ Return a one-line description of a socket: pointer, domain name, and
        the domain-specific details (UNIX / IPv4 / IPv6).
        params:
            socket - address of a socket structure
        returns:
            str - description, or "(null)" if the pointer is invalid
    """
    # Fix: out_string was previously only initialized inside the `if (so):`
    # branch, so a NULL socket raised UnboundLocalError in the else path.
    out_string = ""
    so = kern.GetValueFromAddress(unsigned(socket), 'socket *')
    if (so):
        sock_format_string = "so: 0x{0:<x}"
        out_string += sock_format_string.format(so)
        domain = so.so_proto.pr_domain
        domain_name_format_string = " {0:<s} "
        out_string += domain_name_format_string.format(domain.dom_name)
        # dom_family: 1 = PF_LOCAL, 2 = PF_INET, 30 = PF_INET6
        if (domain.dom_family == 1):
            out_string += GetUnixDomainSocketAsString(so)
        if (domain.dom_family == 2):
            out_string += GetIPv4SocketAsString(so)
        if (domain.dom_family == 30):
            out_string += GetIPv6SocketAsString(so)
    else:
        out_string += "(null)"
    return out_string
# EndMacro: showsocket
# Macro: showsocket
@lldb_command('showsocket')
def ShowSocket(cmd_args=None) :
    """ Show the contents of a socket
        Usage: showsocket <socket *>
    """
    if (cmd_args == None or len(cmd_args) == 0):
        print "Missing argument 0 in user function."
        return
    so = kern.GetValueFromAddress(cmd_args[0], 'socket *')
    if (len(str(cmd_args[0])) > 0):
        # NOTE(review): this duplicates the body of GetSocket() above;
        # consider delegating so the two stay in sync.
        out_string = ""
        sock_format_string = "so: 0x{0:<x}"
        out_string += sock_format_string.format(so)
        domain = so.so_proto.pr_domain
        domain_name_format_string = " {0:<s} "
        out_string += domain_name_format_string.format(domain.dom_name)
        # dom_family: 1 = PF_LOCAL, 2 = PF_INET, 30 = PF_INET6
        if (domain.dom_family == 1):
            out_string += GetUnixDomainSocketAsString(so)
        if (domain.dom_family == 2):
            out_string += GetIPv4SocketAsString(so)
        if (domain.dom_family == 30):
            out_string += GetIPv6SocketAsString(so)
        print out_string
    else:
        print "Unknown value passed as argument."
        return
# EndMacro: showsocket
# Macro: showprocsockets
@lldb_command('showprocsockets')
def ShowProcSockets(cmd_args=None):
""" Given a proc_t pointer, display information about its sockets
"""
out_string = ""
if cmd_args != None and len(cmd_args) > 0 :
proc = kern.GetValueFromAddress(cmd_args[0], 'proc *')
proc_fd = proc.p_fd
if not proc:
print "Unknown value passed as argument."
return
else:
count = 0
fpp = Cast(proc_fd.fd_ofiles, 'fileproc **')
while (count < proc_fd.fd_nfiles):
fp = Cast(dereference(fpp), 'fileproc *')
if (fp != 0):
fg = Cast(fp.f_fglob, 'fileglob *')
if (int(fg.fg_ops.fo_type) == 2):
if (proc_fd.fd_ofileflags[count] & 4):
out_string += "U: "
else:
out_string += " "
out_string += "fd = " + str(count) + " "
if (fg.fg_data != 0):
out_string += GetSocket(unsigned(fg.fg_data))
out_string += "\n"
else:
out_string += ""
fpp = kern.GetValueFromAddress(unsigned(fpp + 8), 'fileproc **')
count += 1
print out_string
else:
print "Missing argument 0 in user function."
# EndMacro: showprocsockets
def GetProcSockets(proc):
    """ Given a proc_t pointer, display information about its sockets
        params:
            proc - value object representing a proc *
        returns:
            str - one line per open socket descriptor ("U:" marks fds with
                  the UF_RESERVED-style flag bit 4 set)
    """
    out_string = ""
    # NOTE(review): p_fd is dereferenced before the None check below;
    # a None proc would fault here first.
    proc_fd = proc.p_fd
    if proc is None:
        out_string += "Unknown value passed as argument."
    else:
        count = 0
        # Scan the per-process open-file table for sockets (fo_type == 2).
        fpp = Cast(proc_fd.fd_ofiles, 'fileproc **')
        while (count < proc_fd.fd_nfiles):
            fp = Cast(dereference(fpp), 'fileproc *')
            if (fp != 0):
                fg = Cast(fp.f_fglob, 'fileglob *')
                if (int(fg.fg_ops.fo_type) == 2):
                    if (proc_fd.fd_ofileflags[count] & 4):
                        out_string += "U: "
                    else:
                        out_string += " "
                    out_string += "fd = " + str(count) + " "
                    if (fg.fg_data != 0):
                        out_string += GetSocket(unsigned(fg.fg_data))
                        out_string += "\n"
                    else:
                        out_string += ""
            # Advance to the next fileproc slot.
            # NOTE(review): `fpp + 8` relies on the debugger value's pointer
            # arithmetic semantics - confirm it steps by one 8-byte slot.
            fpp = kern.GetValueFromAddress(unsigned(fpp + 8), 'fileproc **')
            count += 1
    return out_string
# Macro: showallprocsockets
@lldb_command('showallprocsockets')
def ShowAllProcSockets(cmd_args=None):
"""Display information about the sockets of all the processes
"""
for proc in kern.procs:
print "================================================================================"
print GetProcInfo(proc)
print GetProcSockets(proc)
# EndMacro: showallprocsockets
def GetRtEntryPrDetailsAsString(rte):
    """ Format one rtentry: destination, gateway, parent (if cloned),
        refcount, packets sent, flag letters, and the interface name.
        params:
            rte - value object representing an rtentry *
        returns:
            str - one formatted line terminated by "\\n"
    """
    out_string = ""
    rt = Cast(rte, 'rtentry *')
    dst = Cast(rt.rt_nodes[0].rn_u.rn_leaf.rn_Key, 'sockaddr *')
    isv6 = 0
    dst_string_format = "{0:<18s}"
    # Destination column, formatted per address family.
    if (dst.sa_family == AF_INET):
        out_string += dst_string_format.format(GetSocketAddrAsStringInet(dst)) + " "
    else:
        if (dst.sa_family == AF_INET6):
            out_string += dst_string_format.format(GetSocketAddrAsStringInet6(dst)) + " "
            isv6 = 1
        else:
            if (dst.sa_family == AF_LINK):
                out_string += dst_string_format.format(GetSocketAddrAsStringLink(dst))
                if (isv6 == 1):
                    out_string += " "
                else:
                    out_string += " "
            else:
                out_string += dst_string_format.format(GetSocketAddrAsStringUnspec(dst)) + " "
    # Gateway column.
    gw = Cast(rt.rt_gateway, 'sockaddr *')
    if (gw.sa_family == AF_INET):
        out_string += dst_string_format.format(GetSocketAddrAsStringInet(gw)) + " "
    else:
        if (gw.sa_family == 30):
            out_string += dst_string_format.format(GetSocketAddrAsStringInet6(gw)) + " "
            isv6 = 1
        else:
            if (gw.sa_family == 18):
                out_string += dst_string_format.format(GetSocketAddrAsStringLink(gw)) + " "
                if (isv6 == 1):
                    out_string += " "
                else:
                    out_string += " "
            else:
                # Fix: the formatted string was computed but never appended,
                # so unspecified-family gateways vanished from the output.
                out_string += dst_string_format.format(GetSocketAddrAsStringUnspec(gw)) + " "
    # Parent pointer is only meaningful for cloned routes.
    if (rt.rt_flags & RTF_WASCLONED):
        if (kern.ptrsize == 8):
            rt_flags_string_format = "0x{0:<16x}"
            out_string += rt_flags_string_format.format(rt.rt_parent) + " "
        else:
            rt_flags_string_format = "0x{0:<8x}"
            out_string += rt_flags_string_format.format(rt.rt_parent) + " "
    else:
        if (kern.ptrsize == 8):
            out_string += " "
        else:
            out_string += " "
    rt_refcnt_rmx_string_format = "{0:<d} {1:>10d} "
    out_string += rt_refcnt_rmx_string_format.format(rt.rt_refcnt, rt.rt_rmx.rmx_pksent) + " "
    # One letter per set RTF_* flag, netstat-style.
    rtf_string_format = "{0:>s}"
    if (rt.rt_flags & RTF_UP):
        out_string += rtf_string_format.format("U")
    if (rt.rt_flags & RTF_GATEWAY):
        out_string += rtf_string_format.format("G")
    if (rt.rt_flags & RTF_HOST):
        out_string += rtf_string_format.format("H")
    if (rt.rt_flags & RTF_REJECT):
        out_string += rtf_string_format.format("R")
    if (rt.rt_flags & RTF_DYNAMIC):
        out_string += rtf_string_format.format("D")
    if (rt.rt_flags & RTF_MODIFIED):
        out_string += rtf_string_format.format("M")
    if (rt.rt_flags & RTF_CLONING):
        out_string += rtf_string_format.format("C")
    if (rt.rt_flags & RTF_PRCLONING):
        out_string += rtf_string_format.format("c")
    if (rt.rt_flags & RTF_LLINFO):
        out_string += rtf_string_format.format("L")
    if (rt.rt_flags & RTF_STATIC):
        out_string += rtf_string_format.format("S")
    if (rt.rt_flags & RTF_PROTO1):
        out_string += rtf_string_format.format("1")
    if (rt.rt_flags & RTF_PROTO2):
        out_string += rtf_string_format.format("2")
    if (rt.rt_flags & RTF_PROTO3):
        out_string += rtf_string_format.format("3")
    if (rt.rt_flags & RTF_WASCLONED):
        out_string += rtf_string_format.format("W")
    if (rt.rt_flags & RTF_BROADCAST):
        out_string += rtf_string_format.format("b")
    if (rt.rt_flags & RTF_MULTICAST):
        out_string += rtf_string_format.format("m")
    if (rt.rt_flags & RTF_XRESOLVE):
        out_string += rtf_string_format.format("X")
    if (rt.rt_flags & RTF_BLACKHOLE):
        out_string += rtf_string_format.format("B")
    if (rt.rt_flags & RTF_IFSCOPE):
        out_string += rtf_string_format.format("I")
    if (rt.rt_flags & RTF_CONDEMNED):
        out_string += rtf_string_format.format("Z")
    if (rt.rt_flags & RTF_IFREF):
        out_string += rtf_string_format.format("i")
    if (rt.rt_flags & RTF_PROXY):
        out_string += rtf_string_format.format("Y")
    if (rt.rt_flags & RTF_ROUTER):
        out_string += rtf_string_format.format("r")
    out_string += "/"
    out_string += str(rt.rt_ifp.if_name)
    out_string += str(int(rt.rt_ifp.if_unit))
    out_string += "\n"
    return out_string
RNF_ROOT = 2  # radix_node rn_flags bit: node is a tree root/boundary node; used below to skip non-route leaves
def GetRtTableAsString(rt_tables):
    """ Walk a routing table's radix tree and return one formatted line per
        rtentry (pointer plus GetRtEntryPrDetailsAsString details).
        params:
            rt_tables - value object for a radix_node_head (one rt_tables[] slot)
    """
    out_string = ""
    rn = Cast(rt_tables.rnh_treetop, 'radix_node *')
    rnh_cnt = rt_tables.rnh_cnt
    # Descend to the leftmost leaf (internal nodes have rn_bit >= 0).
    while (rn.rn_bit >= 0):
        rn = rn.rn_u.rn_node.rn_L
    while 1:
        base = Cast(rn, 'radix_node *')
        # Find the in-order successor leaf: climb while we are a right
        # child, step right once, then descend to the leftmost leaf.
        while ((rn.rn_parent.rn_u.rn_node.rn_R == rn) and (rn.rn_flags & RNF_ROOT == 0)):
            rn = rn.rn_parent
        rn = rn.rn_parent.rn_u.rn_node.rn_R
        while (rn.rn_bit >= 0):
            rn = rn.rn_u.rn_node.rn_L
        next_rn = rn
        # Emit every route on the current leaf's duplicate-key chain.
        while (base != 0):
            rn = base
            base = rn.rn_u.rn_leaf.rn_Dupedkey
            if ((rn.rn_flags & RNF_ROOT) == 0):
                rt = Cast(rn, 'rtentry *')
                if (kern.ptrsize == 8):
                    rtentry_string_format = "0x{0:<18x}"
                    out_string += rtentry_string_format.format(rt) + " "
                else:
                    rtentry_string_format = "0x{0:<10x}"
                    out_string += rtentry_string_format.format(rt) + " "
                out_string += GetRtEntryPrDetailsAsString(rt) + " "
        rn = next_rn
        # The walk terminates once the successor search reaches a root node.
        if ((rn.rn_flags & RNF_ROOT) != 0):
            break
    return out_string
def GetRtInetAsString():
rt_tables = kern.globals.rt_tables[2]
if (kern.ptrsize == 8):
rt_table_header_format_string = "{0:<18s} {1: <16s} {2:<20s} {3:<16s} {4:<8s} {5:<8s} {6:<8s}"
print rt_table_header_format_string.format("rtentry", " dst", "gw", "parent", "Refs", "Use", "flags/if")
print rt_table_header_format_string.format("-" * 18, "-" * 16, "-" * 16, "-" * 16, "-" * 8, "-" * 8, "-" * 8)
print GetRtTableAsString(rt_tables)
else:
rt_table_header_format_string = "{0:<8s} {1:<16s} {2:<18s} {3:<8s} {4:<8s} {5:<8s} {6:<8s}"
print rt_table_header_format_string.format("rtentry", "dst", "gw", "parent", "Refs", "Use", "flags/if")
print rt_table_header_format_string.format("-" * 8, "-" * 16, "-" * 16, "-" * 8, "-" * 8, "-" * 8, "-" * 8)
print GetRtTableAsString(rt_tables)
def GetRtInet6AsString():
    """ Print the IPv6 (AF_INET6 = rt_tables[30]) routing table with a header
        sized for the current pointer width.  Output goes directly to stdout;
        there is no return value.
    """
    rt_tables = kern.globals.rt_tables[30]
    if (kern.ptrsize == 8):
        rt_table_header_format_string = "{0:<18s} {1: <16s} {2:<20s} {3:<16s} {4:<8s} {5:<8s} {6:<8s}"
        print rt_table_header_format_string.format("rtentry", " dst", "gw", "parent", "Refs", "Use", "flags/if")
        print rt_table_header_format_string.format("-" * 18, "-" * 16, "-" * 16, "-" * 16, "-" * 8, "-" * 8, "-" * 8)
        print GetRtTableAsString(rt_tables)
    else:
        rt_table_header_format_string = "{0:<8s} {1:<16s} {2:<18s} {3:<8s} {4:<8s} {5:<8s} {6:<8s}"
        print rt_table_header_format_string.format("rtentry", "dst", "gw", "parent", "Refs", "Use", "flags/if")
        print rt_table_header_format_string.format("-" * 8, "-" * 16, "-" * 18, "-" * 8, "-" * 8, "-" * 8, "-" * 8)
        print GetRtTableAsString(rt_tables)
# Macro: show_rt_inet
@lldb_command('show_rt_inet')
def ShowRtInet(cmd_args=None):
    """ Display the IPv4 routing table
    """
    # GetRtInetAsString prints the table itself and returns None, so the
    # original `print GetRtInetAsString()` emitted a spurious "None" line.
    GetRtInetAsString()
# EndMacro: show_rt_inet
# Macro: show_rt_inet6
@lldb_command('show_rt_inet6')
def ShowRtInet6(cmd_args=None):
    """ Display the IPv6 routing table
    """
    # GetRtInet6AsString prints the table itself and returns None, so the
    # original `print GetRtInet6AsString()` emitted a spurious "None" line.
    GetRtInet6AsString()
# EndMacro: show_rt_inet6
# Macro: rtentry_showdbg
@lldb_command('rtentry_showdbg')
def ShowRtEntryDebug(cmd_args=None):
    """ Print the debug information of a route entry
        Usage: rtentry_showdbg <rtentry_dbg *>
    """
    if (cmd_args == None or len(cmd_args) == 0):
        print "Missing argument 0 in user function."
        return
    out_string = ""
    cnt = 0
    rtd = kern.GetValueFromAddress(cmd_args[0], 'rtentry_dbg *')
    rtd_summary_format_string = "{0:s} {1:d}"
    out_string += rtd_summary_format_string.format("Total holds : ", rtd.rtd_refhold_cnt) + "\n"
    out_string += rtd_summary_format_string.format("Total releases : ", rtd.rtd_refrele_cnt) + "\n"
    # Allocation backtrace (one line per saved pc).
    ix = 0
    while (ix < CTRACE_STACK_SIZE):
        kgm_pc = rtd.rtd_alloc.pc[ix]
        if (kgm_pc != 0):
            if (ix == 0):
                out_string += "\nAlloc: (thread " + hex(rtd.rtd_alloc.th) + "):\n"
            out_string += str(int(ix + 1)) + ": "
            out_string += GetSourceInformationForAddress(kgm_pc)
            out_string += "\n"
        ix += 1
    # Free backtrace.
    ix = 0
    while (ix < CTRACE_STACK_SIZE):
        kgm_pc = rtd.rtd_free.pc[ix]
        if (kgm_pc != 0):
            if (ix == 0):
                out_string += "\nFree: (thread " + hex(rtd.rtd_free.th) + "):\n"
            out_string += str(int(ix + 1)) + ": "
            out_string += GetSourceInformationForAddress(kgm_pc)
            out_string += "\n"
        ix += 1
    # Refhold history: RTD_TRACE_HIST_SIZE saved backtraces.
    while (cnt < RTD_TRACE_HIST_SIZE):
        ix = 0
        while (ix < CTRACE_STACK_SIZE):
            kgm_pc = rtd.rtd_refhold[cnt].pc[ix]
            if (kgm_pc != 0):
                if (ix == 0):
                    out_string += "\nHold [" + str(int(cnt)) + "] (thread " + hex(rtd.rtd_refhold[cnt].th) + "):\n"
                out_string += str(int(ix + 1)) + ": "
                out_string += GetSourceInformationForAddress(kgm_pc)
                out_string += "\n"
            ix += 1
        cnt += 1
    # Refrele history.
    cnt = 0
    while (cnt < RTD_TRACE_HIST_SIZE):
        ix = 0
        while (ix < CTRACE_STACK_SIZE):
            kgm_pc = rtd.rtd_refrele[cnt].pc[ix]
            if (kgm_pc != 0):
                if (ix == 0):
                    out_string += "\nRelease [" + str(int(cnt)) + "] (thread " + hex(rtd.rtd_refrele[cnt].th) + "):\n"
                out_string += str(int(ix + 1)) + ": "
                out_string += GetSourceInformationForAddress(kgm_pc)
                out_string += "\n"
            ix += 1
        cnt += 1
    out_string += "\nTotal locks : " + str(int(rtd.rtd_lock_cnt))
    out_string += "\nTotal unlocks : " + str(int(rtd.rtd_unlock_cnt))
    # Lock history.
    cnt = 0
    while (cnt < RTD_TRACE_HIST_SIZE):
        ix = 0
        while (ix < CTRACE_STACK_SIZE):
            kgm_pc = rtd.rtd_lock[cnt].pc[ix]
            if (kgm_pc != 0):
                if (ix == 0):
                    out_string += "\nLock [" + str(int(cnt)) + "] (thread " + hex(rtd.rtd_lock[cnt].th) + "):\n"
                out_string += str(int(ix + 1)) + ": "
                out_string += GetSourceInformationForAddress(kgm_pc)
                out_string += "\n"
            ix += 1
        cnt += 1
    # Unlock history.
    cnt = 0
    while (cnt < RTD_TRACE_HIST_SIZE):
        ix = 0
        while (ix < CTRACE_STACK_SIZE):
            kgm_pc = rtd.rtd_unlock[cnt].pc[ix]
            if (kgm_pc != 0):
                if (ix == 0):
                    out_string += "\nUnlock [" + str(int(cnt)) + "] (thread " + hex(rtd.rtd_unlock[cnt].th) + "):\n"
                out_string += str(int(ix + 1)) + ": "
                out_string += GetSourceInformationForAddress(kgm_pc)
                out_string += "\n"
            ix += 1
        cnt += 1
    print out_string
# EndMacro: rtentry_showdbg
# Macro: inifa_showdbg
@lldb_command('inifa_showdbg')
def InIfaShowDebug(cmd_args=None):
    """ Print the debug information of an IPv4 interface address
        Usage: inifa_showdbg <in_ifaddr_dbg *>
    """
    if (cmd_args == None or len(cmd_args) == 0):
        print "Missing argument 0 in user function."
        return
    out_string = ""
    cnt = 0
    inifa = kern.GetValueFromAddress(cmd_args[0], 'in_ifaddr_dbg *')
    in_ifaddr_summary_format_string = "{0:s} {1:d}"
    out_string += in_ifaddr_summary_format_string.format("Total holds : ", inifa.inifa_refhold_cnt) + "\n"
    out_string += in_ifaddr_summary_format_string.format("Total releases : ", inifa.inifa_refrele_cnt) + "\n"
    # Allocation backtrace.
    ix = 0
    while (ix < CTRACE_STACK_SIZE):
        kgm_pc = inifa.inifa_alloc.pc[ix]
        if (kgm_pc != 0):
            if (ix == 0):
                out_string += "\nAlloc: (thread " + hex(inifa.inifa_alloc.th) + "):\n"
            out_string += str(int(ix + 1)) + ": "
            out_string += GetSourceInformationForAddress(kgm_pc)
            out_string += "\n"
        ix += 1
    # Free backtrace.
    ix = 0
    while (ix < CTRACE_STACK_SIZE):
        kgm_pc = inifa.inifa_free.pc[ix]
        if (kgm_pc != 0):
            if (ix == 0):
                out_string += "\nFree: (thread " + hex(inifa.inifa_free.th) + "):\n"
            out_string += str(int(ix + 1)) + ": "
            out_string += GetSourceInformationForAddress(kgm_pc)
            out_string += "\n"
        ix += 1
    # Refhold history.
    while (cnt < INIFA_TRACE_HIST_SIZE):
        ix = 0
        while (ix < CTRACE_STACK_SIZE):
            kgm_pc = inifa.inifa_refhold[cnt].pc[ix]
            if (kgm_pc != 0):
                if (ix == 0):
                    out_string += "\nHold [" + str(int(cnt)) + "] (thread " + hex(inifa.inifa_refhold[cnt].th) + "):\n"
                out_string += str(int(ix + 1)) + ": "
                out_string += GetSourceInformationForAddress(kgm_pc)
                out_string += "\n"
            ix += 1
        cnt += 1
    # Refrele history.
    cnt = 0
    while (cnt < INIFA_TRACE_HIST_SIZE):
        ix = 0
        while (ix < CTRACE_STACK_SIZE):
            kgm_pc = inifa.inifa_refrele[cnt].pc[ix]
            if (kgm_pc != 0):
                if (ix == 0):
                    out_string += "\nRelease [" + str(int(cnt)) + "] (thread " + hex(inifa.inifa_refrele[cnt].th) + "):\n"
                out_string += str(int(ix + 1)) + ": "
                out_string += GetSourceInformationForAddress(kgm_pc)
                out_string += "\n"
            ix += 1
        cnt += 1
    print out_string
# EndMacro: inifa_showdbg
# Macro: in6ifa_showdbg
@lldb_command('in6ifa_showdbg')
def In6IfaShowDebug(cmd_args=None):
    """ Print the debug information of an IPv6 interface address
        Usage: in6ifa_showdbg <in6_ifaddr_dbg *>
    """
    if (cmd_args == None or len(cmd_args) == 0):
        print "Missing argument 0 in user function."
        return
    out_string = ""
    cnt = 0
    in6ifa = kern.GetValueFromAddress(cmd_args[0], 'in6_ifaddr_dbg *')
    in6_ifaddr_summary_format_string = "{0:s} {1:d}"
    # Unlike the sibling macros, the summary is printed immediately rather
    # than buffered into out_string.
    print in6_ifaddr_summary_format_string.format("Total holds : ", in6ifa.in6ifa_refhold_cnt)
    print in6_ifaddr_summary_format_string.format("Total releases : ", in6ifa.in6ifa_refrele_cnt)
    # Allocation backtrace.
    ix = 0
    while (ix < CTRACE_STACK_SIZE):
        kgm_pc = in6ifa.in6ifa_alloc.pc[ix]
        if (kgm_pc != 0):
            if (ix == 0):
                out_string += "\nAlloc: (thread " + hex(in6ifa.in6ifa_alloc.th) + "):\n"
            out_string += str(int(ix + 1)) + ": "
            out_string += GetSourceInformationForAddress(kgm_pc)
            out_string += "\n"
        ix += 1
    # Free backtrace.
    ix = 0
    while (ix < CTRACE_STACK_SIZE):
        kgm_pc = in6ifa.in6ifa_free.pc[ix]
        if (kgm_pc != 0):
            if (ix == 0):
                out_string += "\nFree: (thread " + hex(in6ifa.in6ifa_free.th) + "):\n"
            out_string += str(int(ix + 1)) + ": "
            out_string += GetSourceInformationForAddress(kgm_pc)
            out_string += "\n"
        ix += 1
    # Refhold history.
    while (cnt < IN6IFA_TRACE_HIST_SIZE):
        ix = 0
        while (ix < CTRACE_STACK_SIZE):
            kgm_pc = in6ifa.in6ifa_refhold[cnt].pc[ix]
            if (kgm_pc != 0):
                if (ix == 0):
                    out_string += "\nHold [" + str(int(cnt)) + "] (thread " + hex(in6ifa.in6ifa_refhold[cnt].th) + "):\n"
                out_string += str(int(ix + 1)) + ": "
                out_string += GetSourceInformationForAddress(kgm_pc)
                out_string += "\n"
            ix += 1
        cnt += 1
    # Refrele history.
    cnt = 0
    while (cnt < IN6IFA_TRACE_HIST_SIZE):
        ix = 0
        while (ix < CTRACE_STACK_SIZE):
            kgm_pc = in6ifa.in6ifa_refrele[cnt].pc[ix]
            if (kgm_pc != 0):
                if (ix == 0):
                    out_string += "\nRelease [" + str(int(cnt)) + "] (thread " + hex(in6ifa.in6ifa_refrele[cnt].th) + "):\n"
                out_string += str(int(ix + 1)) + ": "
                out_string += GetSourceInformationForAddress(kgm_pc)
                out_string += "\n"
            ix += 1
        cnt += 1
    print out_string
# EndMacro: in6ifa_showdbg
# Macro: inm_showdbg
@lldb_command('inm_showdbg')
def InmShowDebug(cmd_args=None):
""" Print the debug information of an IPv4 multicast address
"""
if (cmd_args == None or len(cmd_args) == 0):
print "Missing argument 0 in user function."
return
out_string = ""
cnt = 0
inm = kern.GetValueFromAddress(cmd_args[0], 'in_multi_dbg *')
in_multi_summary_format_string = "{0:s} {1:d}"
out_string += in_multi_summary_format_string.format("Total holds : ", inm.inm_refhold_cnt)
out_string += in_multi_summary_format_string.format("Total releases : ", inm.inm_refrele_cnt)
while (cnt < INM_TRACE_HIST_SIZE):
ix = 0
while (ix < CTRACE_STACK_SIZE):
kgm_pc = inm.inm_refhold[cnt].pc[ix]
if (kgm_pc != 0):
if (ix == 0):
out_string += "\nHold [" + str(int(cnt)) + "] (thread " + hex(inm.inm_refhold[cnt].th) + "):\n"
out_string += str(int(ix + 1)) + ": "
out_string += GetSourceInformationForAddress(kgm_pc)
out_string += "\n"
ix += 1
cnt += 1
cnt = 0
while (cnt < INM_TRACE_HIST_SIZE):
ix = 0
while (ix < CTRACE_STACK_SIZE):
kgm_pc = inm.inm_refrele[cnt].pc[ix]
if (kgm_pc != 0):
if (ix == 0):
out_string += "\nRelease [" + str(int(cnt)) + "] (thread " + hex(inm.inm_refrele[cnt].th) + "):\n"
out_string += str(int(ix + 1)) + ": "
out_string += GetSourceInformationForAddress(kgm_pc)
out_string += "\n"
ix += 1
cnt += 1
print out_string
# EndMacro: inm_showdbg
# Macro: ifma_showdbg
@lldb_command('ifma_showdbg')
def IfmaShowDebug(cmd_args=None):
    """ Print the debug information of a link multicast address
        Usage: ifma_showdbg <ifmultiaddr_dbg *>
    """
    if (cmd_args == None or len(cmd_args) == 0):
        print "Missing argument 0 in user function."
        return
    out_string = ""
    cnt = 0
    ifma = kern.GetValueFromAddress(cmd_args[0], 'ifmultiaddr_dbg *')
    link_multi_summary_format_string = "{0:s} {1:d}"
    out_string += link_multi_summary_format_string.format("Total holds : ", ifma.ifma_refhold_cnt) + "\n"
    out_string += link_multi_summary_format_string.format("Total releases : ", ifma.ifma_refrele_cnt) + "\n"
    # Refhold history: one saved backtrace per slot.
    while (cnt < IFMA_TRACE_HIST_SIZE):
        ix = 0
        while (ix < CTRACE_STACK_SIZE):
            kgm_pc = ifma.ifma_refhold[cnt].pc[ix]
            if (kgm_pc != 0):
                if (ix == 0):
                    out_string += "\nHold [" + str(int(cnt)) + "] (thread " + hex(ifma.ifma_refhold[cnt].th) + "):\n"
                out_string += str(int(ix + 1)) + ": "
                out_string += GetSourceInformationForAddress(kgm_pc)
                out_string += "\n"
            ix += 1
        cnt += 1
    # Refrele history.
    cnt = 0
    while (cnt < IFMA_TRACE_HIST_SIZE):
        ix = 0
        while (ix < CTRACE_STACK_SIZE):
            kgm_pc = ifma.ifma_refrele[cnt].pc[ix]
            if (kgm_pc != 0):
                if (ix == 0):
                    out_string += "\nRelease [" + str(int(cnt)) + "] (thread " + hex(ifma.ifma_refrele[cnt].th) + "):\n"
                out_string += str(int(ix + 1)) + ": "
                out_string += GetSourceInformationForAddress(kgm_pc)
                out_string += "\n"
            ix += 1
        cnt += 1
    print out_string
# EndMacro: ifma_showdbg
# Macro: ifpref_showdbg
@lldb_command('ifpref_showdbg')
def IfpRefShowDebug(cmd_args=None):
""" Print the debug information of an interface ref count
"""
if (cmd_args == None or len(cmd_args) == 0):
print "Missing argument 0 in user function."
return
out_string = ""
cnt = 0
dl_if = kern.GetValueFromAddress(cmd_args[0], 'dlil_ifnet_dbg *')
dl_if_summary_format_string = "{0:s} {1:d}"
out_string += dl_if_summary_format_string.format("Total holds : ", dl_if.dldbg_if_refhold_cnt)
out_string += dl_if_summary_format_string.format("Total releases : ", dl_if.dldbg_if_refrele_cnt)
while (cnt < IF_REF_TRACE_HIST_SIZE):
ix = 0
while (ix < CTRACE_STACK_SIZE):
kgm_pc = dl_if.dldbg_if_refhold[cnt].pc[ix]
if (kgm_pc != 0):
if (ix == 0):
out_string += "\nHold [" + str(int(cnt)) + "] (thread " + hex(dl_if.dldbg_if_refhold[cnt].th) + "):\n"
out_string += str(int(ix + 1)) + ": "
out_string += GetSourceInformationForAddress(kgm_pc)
out_string += "\n"
ix += 1
cnt += 1
cnt = 0
while (cnt < IF_REF_TRACE_HIST_SIZE):
ix = 0
while (ix < CTRACE_STACK_SIZE):
kgm_pc = dl_if.dldbg_if_refrele[cnt].pc[ix]
if (kgm_pc != 0):
if (ix == 0):
out_string += "\nRelease [" + str(int(cnt)) + "] (thread " + hex(dl_if.dldbg_if_refrele[cnt].th) + "):\n"
out_string += str(int(ix + 1)) + ": "
out_string += GetSourceInformationForAddress(kgm_pc)
out_string += "\n"
ix += 1
cnt += 1
print out_string
# EndMacro: ifpref_showdbg
# Macro: ndpr_showdbg
@lldb_command('ndpr_showdbg')
def ndprShowDebug(cmd_args=None):
""" Print the debug information of a nd_prefix structure
"""
if (cmd_args == None or len(cmd_args) == 0):
print "Missing argument 0 in user function."
return
out_string = ""
cnt = 0
ndpr = kern.GetValueFromAddress(cmd_args[0], 'nd_prefix_dbg *')
ndpr_summary_format_string = "{0:s} {1:d}"
out_string += ndpr_summary_format_string.format("Total holds : ", ndpr.ndpr_refhold_cnt)
out_string += ndpr_summary_format_string.format("Total releases : ", ndpr.ndpr_refrele_cnt)
while (cnt < NDPR_TRACE_HIST_SIZE):
ix = 0
while (ix < CTRACE_STACK_SIZE):
kgm_pc = ndpr.ndpr_refhold[cnt].pc[ix]
if (kgm_pc != 0):
if (ix == 0):
out_string += "\nHold [" + str(int(cnt)) + "] (thread " + hex(ndpr.ndpr_refhold[cnt].th) + "):\n"
out_string += str(int(ix + 1)) + ": "
out_string += GetSourceInformationForAddress(kgm_pc)
out_string += "\n"
ix += 1
cnt += 1
cnt = 0
while (cnt < NDPR_TRACE_HIST_SIZE):
ix = 0
while (ix < CTRACE_STACK_SIZE):
kgm_pc = ndpr.ndpr_refrele[cnt].pc[ix]
if (kgm_pc != 0):
if (ix == 0):
out_string += "\nRelease [" + str(int(cnt)) + "] (thread " + hex(ndpr.ndpr_refrele[cnt].th) + "):\n"
out_string += str(int(ix + 1)) + ": "
out_string += GetSourceInformationForAddress(kgm_pc)
out_string += "\n"
ix += 1
cnt += 1
print out_string
# EndMacro: ndpr_showdbg
# Macro: nddr_showdbg
@lldb_command('nddr_showdbg')
def nddrShowDebug(cmd_args=None):
""" Print the debug information of a nd_defrouter structure
"""
if (cmd_args == None or len(cmd_args) == 0):
print "Missing argument 0 in user function."
return
out_string = ""
cnt = 0
nddr = kern.GetValueFromAddress(cmd_args[0], 'nd_defrouter_dbg *')
nddr_summary_format_string = "{0:s} {1:d}"
out_string += nddr_summary_format_string.format("Total holds : ", nddr.nddr_refhold_cnt)
out_string += nddr_summary_format_string.format("Total releases : ", nddr.nddr_refrele_cnt)
while (cnt < NDDR_TRACE_HIST_SIZE):
ix = 0
while (ix < CTRACE_STACK_SIZE):
kgm_pc = nddr.nddr_refhold[cnt].pc[ix]
if (kgm_pc != 0):
if (ix == 0):
out_string += "\nHold [" + str(int(cnt)) + "] (thread " + hex(nddr.nddr_refhold[cnt].th) + "):\n"
out_string += str(int(ix + 1)) + ": "
out_string += GetSourceInformationForAddress(kgm_pc)
out_string += "\n"
ix += 1
cnt += 1
cnt = 0
while (cnt < NDDR_TRACE_HIST_SIZE):
ix = 0
while (ix < CTRACE_STACK_SIZE):
kgm_pc = nddr.nddr_refrele[cnt].pc[ix]
if (kgm_pc != 0):
if (ix == 0):
out_string += "\nRelease [" + str(int(cnt)) + "] (thread " + hex(nddr.nddr_refrele[cnt].th) + "):\n"
out_string += str(int(ix + 1)) + ": "
out_string += GetSourceInformationForAddress(kgm_pc)
out_string += "\n"
ix += 1
cnt += 1
print out_string
# EndMacro: nddr_showdbg
# Macro: imo_showdbg
@lldb_command('imo_showdbg')
def IpmOptions(cmd_args=None):
""" Print the debug information of a ip_moptions structure
"""
if (cmd_args == None or len(cmd_args) == 0):
print "Missing argument 0 in user function."
return
out_string = ""
cnt = 0
imo = kern.GetValueFromAddress(cmd_args[0], 'ip_moptions_dbg *')
imo_summary_format_string = "{0:s} {1:d}"
out_string += imo_summary_format_string.format("Total holds : ", imo.imo_refhold_cnt)
out_string += imo_summary_format_string.format("Total releases : ", imo.imo_refrele_cnt)
while (cnt < IMO_TRACE_HIST_SIZE):
ix = 0
while (ix < CTRACE_STACK_SIZE):
kgm_pc = imo.imo_refhold[cnt].pc[ix]
if (kgm_pc != 0):
if (ix == 0):
out_string += "\nHold [" + str(int(cnt)) + "] (thread " + hex(imo.imo_refhold[cnt].th) + "):\n"
out_string += str(int(ix + 1)) + ": "
out_string += GetSourceInformationForAddress(kgm_pc)
out_string += "\n"
ix += 1
cnt += 1
cnt = 0
while (cnt < IMO_TRACE_HIST_SIZE):
ix = 0
while (ix < CTRACE_STACK_SIZE):
kgm_pc = imo.imo_refrele[cnt].pc[ix]
if (kgm_pc != 0):
if (ix == 0):
out_string += "\nRelease [" + str(int(cnt)) + "] (thread " + hex(imo.imo_refrele[cnt].th) + "):\n"
out_string += str(int(ix + 1)) + ": "
out_string += GetSourceInformationForAddress(kgm_pc)
out_string += "\n"
ix += 1
cnt += 1
print out_string
# EndMacro: imo_showdbg
# Macro: im6o_showdbg
@lldb_command('im6o_showdbg')
def IpmOptions(cmd_args=None):
""" Print the debug information of a ip6_moptions structure
"""
if (cmd_args == None or len(cmd_args) == 0):
print "Missing argument 0 in user function."
return
out_string = ""
cnt = 0
im6o = kern.GetValueFromAddress(cmd_args[0], 'ip6_moptions_dbg *')
im6o_summary_format_string = "{0:s} {1:d}"
out_string += im6o_summary_format_string.format("Total holds : ", im6o.im6o_refhold_cnt)
out_string += im6o_summary_format_string.format("Total releases : ", im6o.im6o_refrele_cnt)
while (cnt < IM6O_TRACE_HIST_SIZE):
ix = 0
while (ix < CTRACE_STACK_SIZE):
kgm_pc = im6o.im6o_refhold[cnt].pc[ix]
if (kgm_pc != 0):
if (ix == 0):
out_string += "\nHold [" + str(int(cnt)) + "] (thread " + hex(im6o.im6o_refhold[cnt].th) + "):\n"
out_string += str(int(ix + 1)) + ": "
out_string += GetSourceInformationForAddress(kgm_pc)
out_string += "\n"
ix += 1
cnt += 1
cnt = 0
while (cnt < IM6O_TRACE_HIST_SIZE):
ix = 0
while (ix < CTRACE_STACK_SIZE):
kgm_pc = im6o.im6o_refrele[cnt].pc[ix]
if (kgm_pc != 0):
if (ix == 0):
out_string += "\nRelease [" + str(int(cnt)) + "] (thread " + hex(im6o.im6o_refrele[cnt].th) + "):\n"
out_string += str(int(ix + 1)) + ": "
out_string += GetSourceInformationForAddress(kgm_pc)
out_string += "\n"
ix += 1
cnt += 1
print out_string
# EndMacro: im6o_showdbg
# Macro: rtentry_trash
@lldb_command('rtentry_trash')
def RtEntryTrash(cmd_args=None):
    """ Walk the list of trash route entries
    """
    out_string = ""
    rt_trash_head = kern.globals.rttrash_head
    rtd = Cast(rt_trash_head.tqh_first, 'rtentry_dbg *')
    rt_trash_format_string = "{0:4d}: {1:x} {2:3d} {3:6d} {4:6d}"
    cnt = 0
    while (int(rtd) != 0):
        # Header row is only printed once, before the first entry; width
        # depends on the target's pointer size.
        if (cnt == 0):
            if (kern.ptrsize == 8):
                print " rtentry ref hold rele dst gw parent flags/if\n"
                print " ----------------- --- ------ ------ --------------- ----- ------------------ -----------\n"
            else:
                print " rtentry ref hold rele dst gw parent flags/if\n"
                print " --------- --- ------ ------ --------------- ----- ---------- -----------\n"
        # Live refcount is holds minus releases.
        out_string += rt_trash_format_string.format(cnt, rtd, rtd.rtd_refhold_cnt - rtd.rtd_refrele_cnt, rtd.rtd_refhold_cnt, rtd.rtd_refrele_cnt) + " "
        out_string += GetRtEntryPrDetailsAsString(rtd) + "\n"
        rtd = rtd.rtd_trash_link.tqe_next
        cnt += 1
    print out_string
# EndMacro: rtentry_trash
# Macro: show_rtentry
@lldb_command('show_rtentry')
def ShRtEntry(cmd_args=None):
""" Print rtentry.
"""
out_string = ""
rt = kern.GetValueFromAddress(cmd_args[0], 'rtentry *')
out_string += GetRtEntryPrDetailsAsString(rt) + "\n"
print out_string
# EndMacro: show_rtentry
# Macro: inifa_trash
@lldb_command('inifa_trash')
def InIfaTrash(cmd_args=None):
    """ Walk the list of trash in_ifaddr entries
    """
    out_string = ""
    ifa_trash_head = kern.globals.inifa_trash_head
    ifa = Cast(ifa_trash_head.tqh_first, 'in_ifaddr_dbg *')
    inifa_trash_format_string = "{0:4d}: {1:x} {2:3d} {3:6d} {4:6d}"
    cnt = 0
    while (int(ifa) != 0):
        # Header row is only printed once, before the first entry.
        if (cnt == 0):
            if (kern.ptrsize == 8):
                print " in_ifa ref hold rele"
                print " ------------------ --- ------ ----"
            else:
                print " in_ifa ref hold rele"
                print " ---------- --- ----- ------"
        # Live refcount is holds minus releases.
        out_string += inifa_trash_format_string.format(cnt + 1, ifa, ifa.inifa_refhold_cnt - ifa.inifa_refrele_cnt, ifa.inifa_refhold_cnt, ifa.inifa_refrele_cnt) + " "
        out_string += GetSocketAddrAsStringInet(ifa.inifa.ia_ifa.ifa_addr) + "\n"
        ifa = ifa.inifa_trash_link.tqe_next
        cnt += 1
    print out_string
# EndMacro: inifa_trash
# Macro: in6ifa_trash
@lldb_command('in6ifa_trash')
def In6IfaTrash(cmd_args=None):
    """ Walk the list of trash in6_ifaddr entries
    """
    out_string = ""
    in6ifa_trash_head = kern.globals.in6ifa_trash_head
    ifa = Cast(in6ifa_trash_head.tqh_first, 'in6_ifaddr_dbg *')
    in6ifa_trash_format_string = "{0:4d}: 0x{1:x} {2:3d} {3:6d} {4:6d}"
    cnt = 0
    while (int(ifa) != 0):
        # Header row is only printed once, before the first entry.
        if (cnt == 0):
            if (kern.ptrsize == 8):
                print " in6_ifa ref hold rele"
                print " ------------------ --- ------ ------"
            else:
                print " in6_ifa ref hold rele"
                print " ---------- --- ------ ------"
        # Live refcount is holds minus releases.
        out_string += in6ifa_trash_format_string.format(cnt + 1, ifa, ifa.in6ifa_refhold_cnt - ifa.in6ifa_refrele_cnt, ifa.in6ifa_refhold_cnt, ifa.in6ifa_refrele_cnt) + " "
        out_string += GetSocketAddrAsStringInet6(ifa.in6ifa.ia_ifa.ifa_addr) + "\n"
        ifa = ifa.in6ifa_trash_link.tqe_next
        cnt += 1
    print out_string
# EndMacro: in6ifa_trash
# Macro: inm_trash
@lldb_command('inm_trash')
def InmTrash(cmd_args=None):
    """ Walk the list of trash in_multi entries
        Prints each trashed IPv4 multicast group record with its net
        refcount (holds - releases), hold/release counts and group address.
    """
    out_string = ""
    inm_trash_head = kern.globals.inm_trash_head
    inm = Cast(inm_trash_head.tqh_first, 'in_multi_dbg *')
    inm_trash_format_string = "{0:4d}: {1:x} {2:3d} {3:6d} {4:6d}"
    cnt = 0
    while (int(inm) != 0):
        # Column header printed once; layout depends on pointer width.
        if (cnt == 0):
            if (kern.ptrsize == 8):
                print " inm ref hold rele"
                print " ------------------ --- ------ ------"
            else:
                print " inm ref hold rele"
                print " ---------- --- ------ ------"
        out_string += inm_trash_format_string.format(cnt + 1, inm, inm.inm_refhold_cnt - inm.inm_refrele_cnt, inm.inm_refhold_cnt, inm.inm_refrele_cnt) + " "
        out_string += GetInAddrAsString(addressof(inm.inm.inm_addr)) + "\n"
        inm = inm.inm_trash_link.tqe_next
        cnt += 1
    print out_string
# EndMacro: inm_trash
# Macro: in6m_trash
@lldb_command('in6m_trash')
def In6mTrash(cmd_args=None):
    """ Walk the list of trash in6_multi entries
        IPv6 counterpart of inm_trash: prints each trashed multicast group
        record with refcounts and the IPv6 group address.
    """
    out_string = ""
    in6m_trash_head = kern.globals.in6m_trash_head
    in6m = Cast(in6m_trash_head.tqh_first, 'in6_multi_dbg *')
    in6m_trash_format_string = "{0:4d}: {1:x} {2:3d} {3:6d} {4:6d}"
    cnt = 0
    while (int(in6m) != 0):
        # Column header printed once; layout depends on pointer width.
        if (cnt == 0):
            if (kern.ptrsize == 8):
                print " in6m ref hold rele"
                print " ------------------ --- ------ ------"
            else:
                print " in6m ref hold rele"
                print " ---------- --- ------ ------"
        out_string += in6m_trash_format_string.format(cnt + 1, in6m, in6m.in6m_refhold_cnt - in6m.in6m_refrele_cnt, in6m.in6m_refhold_cnt, in6m.in6m_refrele_cnt) + " "
        out_string += GetIn6AddrAsString(addressof(in6m.in6m.in6m_addr)) + "\n"
        in6m = in6m.in6m_trash_link.tqe_next
        cnt += 1
    print out_string
# EndMacro: in6m_trash
# Macro: ifma_trash
@lldb_command('ifma_trash')
def IfmaTrash(cmd_args=None):
    """ Walk the list of trash ifmultiaddr entries
        Prints each trashed link-layer multicast address with refcounts,
        the multicast address and the interface name it was attached to.
    """
    out_string = ""
    ifma_trash_head = kern.globals.ifma_trash_head
    ifma = Cast(ifma_trash_head.tqh_first, 'ifmultiaddr_dbg *')
    ifma_trash_format_string = "{0:4d}: {1:x} {2:3d} {3:6d} {4:6d}"
    cnt = 0
    while (int(ifma) != 0):
        # Column header printed once; layout depends on pointer width.
        if (cnt == 0):
            if (kern.ptrsize == 8):
                print " ifma ref hold rele"
                print " ------------------ --- ------ ------"
            else:
                print " ifma ref hold rele"
                print " ---------- --- ------ ------"
        out_string += ifma_trash_format_string.format(cnt + 1, ifma, ifma.ifma_refhold_cnt - ifma.ifma_refrele_cnt, ifma.ifma_refhold_cnt, ifma.ifma_refrele_cnt) + " "
        out_string += GetSocketAddrAsString(ifma.ifma.ifma_addr) + "\n"
        # NOTE(review): unlike the other trash walkers, the "@ ifname" suffix is
        # appended after the newline and without its own newline — subsequent
        # entries run together on one line; confirm whether that is intended.
        out_string += " @ " + ifma.ifma.ifma_ifp.if_xname
        ifma = ifma.ifma_trash_link.tqe_next
        cnt += 1
    print out_string
# EndMacro: ifma_trash
def GetInPcb(pcb, proto):
    """ Build a one-line, netstat-like summary of an Internet protocol
        control block (inpcb).
        params:
            pcb   - value object referencing an 'inpcb *'
            proto - protocol number; IPPROTO_TCP and IPPROTO_UDP are shown
                    symbolically, anything else numerically
        returns: str - protocol, v4/v6 local and foreign address:port,
            TCP state (TCP only), decoded inp_flags/inp_flags2, socket
            buffer counts and the pcb's inuse/dead state.
    """
    out_string = ""
    out_string += hex(pcb)
    if (proto == IPPROTO_TCP):
        out_string += " tcp"
    else:
        if (proto == IPPROTO_UDP):
            out_string += " udp"
        else:
            out_string += str(proto) + "."
    # Address-family suffix: "4", "6" or both if the pcb is dual-stack.
    if (pcb.inp_vflag & INP_IPV4):
        out_string += "4 "
    if (pcb.inp_vflag & INP_IPV6):
        out_string += "6 "
    # Local address:port — picked from the v4 or v6 member of the union.
    if (pcb.inp_vflag & INP_IPV4):
        out_string += " "
        out_string += GetInAddrAsString(addressof(pcb.inp_dependladdr.inp46_local.ia46_addr4))
    else:
        out_string += GetIn6AddrAsString((pcb.inp_dependladdr.inp6_local.__u6_addr.__u6_addr8))
    out_string += " "
    out_string += Getntohs(pcb.inp_lport)
    out_string += " "
    # Foreign address:port, same selection as above.
    if (pcb.inp_vflag & INP_IPV4):
        out_string += " "
        out_string += GetInAddrAsString(addressof(pcb.inp_dependfaddr.inp46_foreign.ia46_addr4))
    else:
        out_string += GetIn6AddrAsString((pcb.inp_dependfaddr.inp6_foreign.__u6_addr.__u6_addr8))
    out_string += " "
    out_string += Getntohs(pcb.inp_fport)
    out_string += " "
    if (proto == IPPROTO_TCP):
        out_string += GetTcpState(pcb.inp_ppcb)
    # Decode inp_flags (and later inp_flags2) into human-readable tokens.
    # Flag constants are defined elsewhere in the lldbmacros environment.
    if (pcb.inp_flags & INP_RECVOPTS):
        out_string += "recvopts "
    if (pcb.inp_flags & INP_RECVRETOPTS):
        out_string += "recvretopts "
    if (pcb.inp_flags & INP_RECVDSTADDR):
        out_string += "recvdstaddr "
    if (pcb.inp_flags & INP_HDRINCL):
        out_string += "hdrincl "
    if (pcb.inp_flags & INP_HIGHPORT):
        out_string += "highport "
    if (pcb.inp_flags & INP_LOWPORT):
        out_string += "lowport "
    if (pcb.inp_flags & INP_ANONPORT):
        out_string += "anonport "
    if (pcb.inp_flags & INP_RECVIF):
        out_string += "recvif "
    if (pcb.inp_flags & INP_MTUDISC):
        out_string += "mtudisc "
    if (pcb.inp_flags & INP_STRIPHDR):
        out_string += "striphdr "
    if (pcb.inp_flags & INP_RECV_ANYIF):
        out_string += "recv_anyif "
    if (pcb.inp_flags & INP_INADDR_ANY):
        out_string += "inaddr_any "
    if (pcb.inp_flags & INP_RECVTTL):
        out_string += "recvttl "
    if (pcb.inp_flags & INP_UDP_NOCKSUM):
        out_string += "nocksum "
    if (pcb.inp_flags & INP_BOUND_IF):
        out_string += "boundif "
    if (pcb.inp_flags & IN6P_IPV6_V6ONLY):
        out_string += "v6only "
    if (pcb.inp_flags & IN6P_PKTINFO):
        out_string += "pktinfo "
    if (pcb.inp_flags & IN6P_HOPLIMIT):
        out_string += "hoplimit "
    if (pcb.inp_flags & IN6P_HOPOPTS):
        out_string += "hopopts "
    if (pcb.inp_flags & IN6P_DSTOPTS):
        out_string += "dstopts "
    if (pcb.inp_flags & IN6P_RTHDR):
        out_string += "rthdr "
    if (pcb.inp_flags & IN6P_RTHDRDSTOPTS):
        out_string += "rthdrdstopts "
    if (pcb.inp_flags & IN6P_TCLASS):
        out_string += "rcv_tclass "
    if (pcb.inp_flags & IN6P_AUTOFLOWLABEL):
        out_string += "autoflowlabel "
    if (pcb.inp_flags & IN6P_BINDV6ONLY):
        out_string += "bindv6only "
    if (pcb.inp_flags & IN6P_RFC2292):
        out_string += "RFC2292 "
    if (pcb.inp_flags & IN6P_MTU):
        out_string += "rcv_pmtu "
    if (pcb.inp_flags & INP_PKTINFO):
        out_string += "pktinfo "
    if (pcb.inp_flags & INP_FLOW_SUSPENDED):
        out_string += "suspended "
    if (pcb.inp_flags & INP_NO_IFT_CELLULAR):
        out_string += "nocellular "
    if (pcb.inp_flags & INP_FLOW_CONTROLLED):
        out_string += "flowctld "
    if (pcb.inp_flags & INP_FC_FEEDBACK):
        out_string += "fcfeedback "
    if (pcb.inp_flags2 & INP2_TIMEWAIT):
        out_string += "timewait "
    if (pcb.inp_flags2 & INP2_IN_FCTREE):
        out_string += "in_fctree "
    if (pcb.inp_flags2 & INP2_WANT_APP_POLICY):
        out_string += "want_app_policy "
    # Attached socket: address, send/receive buffer byte counts, use count.
    so = pcb.inp_socket
    if (so != 0):
        out_string += "[so=" + str(so) + " s=" + str(int(so.so_snd.sb_cc)) + " r=" + str(int(so.so_rcv.sb_cc)) + " usecnt=" + str(int(so.so_usecount)) + "] "
    # Lifecycle state: a pcb with state 0 is treated the same as INUSE.
    if (pcb.inp_state == 0 or pcb.inp_state == INPCB_STATE_INUSE):
        out_string += "inuse, "
    else:
        if (pcb.inp_state == INPCB_STATE_DEAD):
            out_string += "dead, "
        else:
            out_string += "unknown (" + str(int(pcb.inp_state)) + "), "
    return out_string
def GetPcbInfo(pcbi, proto):
    """ Summarize an inpcbinfo hash table (TCP or UDP).
        params:
            pcbi  - value object referencing an 'inpcbinfo' structure
            proto - IPPROTO_TCP or IPPROTO_UDP, forwarded to GetInPcb
        returns: str - ephemeral-port state, per-pcb summaries from the
            connection hash, aggregate socket-buffer byte/allocation totals,
            and the contents of the port hash.
    """
    out_string = ""
    snd_cc = 0
    snd_buf = unsigned(0)
    rcv_cc = 0
    rcv_buf = unsigned(0)
    pcbseen = 0
    out_string += "lastport " + str(int(pcbi.ipi_lastport)) + " lastlow " + str(int(pcbi.ipi_lastlow)) + " lasthi " + str(int(pcbi.ipi_lasthi)) + "\n"
    out_string += "active pcb count is " + str(int(pcbi.ipi_count)) + "\n"
    # Hash mask is (size - 1), so the bucket count is mask + 1.
    hashsize = pcbi.ipi_hashmask + 1
    out_string += "hash size is " + str(int(hashsize)) + "\n"
    out_string += str(pcbi.ipi_hashbase) + " has the following inpcb(s):\n"
    if (kern.ptrsize == 8):
        out_string += "pcb proto source address port destination address port\n"
    else:
        out_string += "pcb proto source address port destination address port\n\n"
    # Walk every bucket of the connection hash, printing each pcb and
    # accumulating socket send/receive buffer statistics.
    i = 0
    hashbase = pcbi.ipi_hashbase
    while (i < hashsize):
        head = hashbase[i]
        pcb = cast(head.lh_first, 'inpcb *')
        while pcb != 0:
            pcbseen += 1
            out_string += GetInPcb(pcb, proto) + "\n"
            so = pcb.inp_socket
            if so != 0:
                snd_cc += so.so_snd.sb_cc
                mp = so.so_snd.sb_mb
                # Estimate allocated buffer space: 256 bytes per mbuf plus
                # any external cluster size (mh_flags & 0x01 == M_EXT).
                while mp != 0:
                    snd_buf += 256
                    if (mp.m_hdr.mh_flags & 0x01):
                        snd_buf += mp.M_dat.MH.MH_dat.MH_ext.ext_size
                    mp = mp.m_hdr.mh_next
                rcv_cc += so.so_rcv.sb_cc
                mp = so.so_rcv.sb_mb
                while mp != 0:
                    rcv_buf += 256
                    if (mp.m_hdr.mh_flags & 0x01):
                        rcv_buf += mp.M_dat.MH.MH_dat.MH_ext.ext_size
                    mp = mp.m_hdr.mh_next
            pcb = cast(pcb.inp_hash.le_next, 'inpcb *')
        i += 1
    out_string += "total seen " + str(int(pcbseen)) + " snd_cc " + str(int(snd_cc)) + " rcv_cc " + str(int(rcv_cc)) + "\n"
    out_string += "total snd_buf " + str(int(snd_buf)) + " rcv_buf " + str(int(rcv_buf)) + "\n"
    out_string += "port hash base is " + hex(pcbi.ipi_porthashbase) + "\n"
    # NOTE(review): this walk reuses the connection-hash size for the port
    # hash; confirm ipi_porthashbase is sized by the same mask as ipi_hashbase.
    i = 0
    hashbase = pcbi.ipi_porthashbase
    while (i < hashsize):
        head = hashbase[i]
        pcb = cast(head.lh_first, 'inpcbport *')
        while pcb != 0:
            out_string += "\t"
            out_string += GetInPcbPort(pcb)
            out_string += "\n"
            pcb = cast(pcb.phd_hash.le_next, 'inpcbport *')
        i += 1
    return out_string
def GetInPcbPort(ppcb):
    """ Return a one-line summary of an inpcbport entry: its address
        followed by the local port in host byte order.
    """
    return hex(ppcb) + ": lport " + Getntohs(ppcb.phd_port)
def Getntohs(port):
    """ Convert a 16-bit port value from network to host byte order
        (swap the two low-order bytes) and return it as a decimal string.
    """
    #p = unsigned(int(port) & 0x0000ffff)
    high_byte = (port & 0x0000ff00) >> 8
    low_byte = (port & 0x000000ff) << 8
    return str(high_byte | low_byte)
# Macro: show_kern_event_pcbinfo
def GetKernEventPcbInfo(kev_pcb_head):
    """ Format the list of kernel event protocol control blocks.
        params:
            kev_pcb_head - value object for the list head of 'kern_event_pcb'
        returns: str - one table row per pcb: socket address and the
            vendor/class/subclass event filters.
    """
    out_string = ""
    pcb = Cast(kev_pcb_head.lh_first, 'kern_event_pcb *')
    # Header / row widths depend on pointer size.
    if (kern.ptrsize == 8):
        kev_pcb_format_string = "0x{0:<16x} {1:12d} {2:16d} {3:16d}"
        out_string += " evp socket vendor code class filter subclass filter\n"
        out_string += "-------------- ----------- ------------ ---------------\n"
    else:
        kev_pcb_format_string = "0x{0:<8x} {1:12d} {2:16d} {3:16d}"
        out_string += "evp socket vendor code class filter subclass filter\n"
        out_string += "---------- ----------- ------------ ---------------\n"
    while (pcb != 0):
        out_string += kev_pcb_format_string.format(pcb.evp_socket, pcb.evp_vendor_code_filter, pcb.evp_class_filter, pcb.evp_subclass_filter)
        out_string += "\n"
        pcb = pcb.evp_link.le_next
    return out_string
@lldb_command('show_kern_event_pcbinfo')
def ShowKernEventPcbInfo(cmd_args=None):
    """ Display the list of Kernel Event protocol control block information
        Usage: show_kern_event_pcbinfo
    """
    # Walk the global kern_event_head list.
    print GetKernEventPcbInfo(addressof(kern.globals.kern_event_head))
# EndMacro: show_kern_event_pcbinfo
# Macro: show_kern_control_pcbinfo
def GetKernControlPcbInfo(ctl_head):
    """ Format the list of kernel control protocol control blocks.
        params:
            ctl_head - value object for the tail queue head of 'kctl'
        returns: str - for each controller, its name followed by a table of
            attached control blocks (socket, unit, usecount).
    """
    out_string = ""
    kctl = Cast(ctl_head.tqh_first, 'kctl *')
    # Row width depends on pointer size.
    if (kern.ptrsize == 8):
        kcb_format_string = "0x{0:<16x} {1:4d} {2:10d}\n"
    else:
        kcb_format_string = "0x{0:<8x} {1:4d} {2:10d}\n"
    while unsigned(kctl) != 0:
        kctl_name = "controller: " + str(kctl.name) + "\n"
        out_string += kctl_name
        kcb = Cast(kctl.kcb_head.tqh_first, 'ctl_cb *')
        # Only print the table header when the controller has attached cbs.
        if unsigned(kcb) != 0:
            if (kern.ptrsize == 8):
                out_string += "socket unit usecount\n"
                out_string += "------ ---- --------\n"
            else:
                out_string += "socket unit usecount\n"
                out_string += "------ ---- --------\n"
        while unsigned(kcb) != 0:
            out_string += kcb_format_string.format(kcb.so, kcb.unit, kcb.usecount)
            kcb = kcb.next.tqe_next
        out_string += "\n"
        kctl = kctl.next.tqe_next
    return out_string
@lldb_command('show_kern_control_pcbinfo')
def ShowKernControlPcbInfo(cmd_args=None):
    """ Display the list of Kernel Control protocol control block information
        Usage: show_kern_control_pcbinfo
    """
    # Walk the global ctl_head controller list.
    print GetKernControlPcbInfo(addressof(kern.globals.ctl_head))
# EndMacro: show_kern_control_pcbinfo
# Macro: show_tcp_pcbinfo
@lldb_command('show_tcp_pcbinfo')
def ShowTcpPcbInfo(cmd_args=None):
    """ Display the list of TCP protocol control block information
        Usage: show_tcp_pcbinfo
    """
    # Dump the global TCP inpcbinfo hash (tcbinfo).
    print GetPcbInfo(addressof(kern.globals.tcbinfo), IPPROTO_TCP)
# EndMacro: show_tcp_pcbinfo
# Macro: show_udp_pcbinfo
@lldb_command('show_udp_pcbinfo')
def ShowUdpPcbInfo(cmd_args=None):
    """ Display the list of UDP protocol control block information
        Usage: show_udp_pcbinfo
    """
    # Dump the global UDP inpcbinfo hash (udbinfo).
    print GetPcbInfo(addressof(kern.globals.udbinfo), IPPROTO_UDP)
# EndMacro: show_udp_pcbinfo
# Macro: show_tcp_timewaitslots
@lldb_command('show_tcp_timewaitslots')
def ShowTcpTimeWaitSlots(cmd_args=None):
    """ Display the list of the TCP protocol control blocks in TIMEWAIT
        Usage: show_tcp_timewaitslots [slot | -1]
            no argument  - print only the per-slot counts
            slot number  - print counts plus the pcbs in that one slot
            -1           - print counts plus the pcbs in every slot
    """
    out_string = ""
    slot = -1
    _all = 0
    if len(cmd_args) > 0:
        # "-1" selects all slots; any other number selects a single slot.
        if (int(cmd_args[0]) == -1):
            _all = 1
        else:
            slot = int(cmd_args[0])
    out_string += "time wait slot size " + str(N_TIME_WAIT_SLOTS) + " cur_tw_slot " + str(int(kern.globals.cur_tw_slot)) + "\n"
    i = 0
    while (i < N_TIME_WAIT_SLOTS):
        perslot = 0
        head = kern.globals.time_wait_slots[i]
        # First pass: count the pcbs chained off this slot.
        if (i == slot or slot == -1):
            pcb0 = cast(head.lh_first, 'inpcb *')
            while (pcb0 != 0):
                perslot += 1
                pcb0 = pcb0.inp_list.le_next
            out_string += " slot " + str(i) + " count " + str(perslot) + "\n"
        # Second pass: print each pcb when this slot was requested.
        if (_all or i == slot):
            pcb0 = cast(head.lh_first, 'inpcb *')
            while (pcb0 != 0):
                out_string += "\t"
                out_string += GetInPcb(pcb0, IPPROTO_TCP)
                out_string += "\n"
                pcb0 = pcb0.inp_list.le_next
        i += 1
    print out_string
# EndMacro: show_tcp_timewaitslots
# Macro: show_domains
@lldb_command('show_domains')
def ShowDomains(cmd_args=None):
    """ Display the list of the domains
        For each registered protocol domain, print its name, refcount,
        family, flags and callbacks; then, for every protosw attached to
        the domain, print its type/protocol, flags, protocol callbacks
        and the full pr_usrreqs user-request vector. Function pointers
        are resolved to source locations where symbols are available.
    """
    out_string = ""
    domains = kern.globals.domains
    dp = Cast(domains.tqh_first, 'domain *')
    # NOTE(review): the two locals below are never used in this function —
    # they look like copy/paste leftovers from the ifma_trash macro.
    ifma_trash_format_string = "{0:4d}: {1:x} {2:3d} {3:6d} {4:6d}"
    cnt = 0
    while (dp != 0):
        out_string += "\"" + str(dp.dom_name) + "\"" + "[" + str(int(dp.dom_refs)) + " refs] domain " + hex(dp) + "\n"
        out_string += " family:\t" + str(int(dp.dom_family)) + "\n"
        out_string += " flags:0x\t" + str(int(dp.dom_flags)) + "\n"
        out_string += " rtparams:\toff=" + str(int(dp.dom_rtoffset)) + ", maxrtkey=" + str(int(dp.dom_maxrtkey)) + "\n"
        # Optional domain-level callbacks, printed only when non-NULL.
        if (dp.dom_init):
            out_string += " init:\t"
            out_string += GetSourceInformationForAddress(dp.dom_init) + "\n"
        if (dp.dom_externalize):
            out_string += " externalize:\t"
            out_string += GetSourceInformationForAddress(dp.dom_externalize) + "\n"
        if (dp.dom_dispose):
            out_string += " dispose:\t"
            out_string += GetSourceInformationForAddress(dp.dom_dispose) + "\n"
        if (dp.dom_rtattach):
            out_string += " rtattach:\t"
            out_string += GetSourceInformationForAddress(dp.dom_rtattach) + "\n"
        if (dp.dom_old):
            out_string += " old:\t"
            out_string += GetSourceInformationForAddress(dp.dom_old) + "\n"
        # Walk every protosw registered under this domain.
        pr = Cast(dp.dom_protosw.tqh_first, 'protosw *')
        while pr != 0:
            pru = pr.pr_usrreqs
            out_string += "\ttype " + str(int(pr.pr_type)) + ", protocol " + str(int(pr.pr_protocol)) + ", protosw " + hex(pr) + "\n"
            out_string += "\t flags:0x\t" + hex(pr.pr_flags) + "\n"
            # Optional protocol-level callbacks, printed only when non-NULL.
            if (pr.pr_input):
                out_string += "\t input:\t"
                out_string += GetSourceInformationForAddress(pr.pr_input) + "\n"
            if (pr.pr_output):
                out_string += "\t output:\t"
                out_string += GetSourceInformationForAddress(pr.pr_output) + "\n"
            if (pr.pr_ctlinput):
                out_string += "\t ctlinput:\t"
                out_string += GetSourceInformationForAddress(pr.pr_ctlinput) + "\n"
            if (pr.pr_ctloutput):
                out_string += "\t ctloutput:\t"
                out_string += GetSourceInformationForAddress(pr.pr_ctloutput) + "\n"
            if (pr.pr_init):
                out_string += "\t init:\t"
                out_string += GetSourceInformationForAddress(pr.pr_init) + "\n"
            if (pr.pr_drain):
                out_string += "\t drain:\t"
                out_string += GetSourceInformationForAddress(pr.pr_drain) + "\n"
            if (pr.pr_sysctl):
                out_string += "\t sysctl:\t"
                out_string += GetSourceInformationForAddress(pr.pr_sysctl) + "\n"
            if (pr.pr_lock):
                out_string += "\t lock:\t"
                out_string += GetSourceInformationForAddress(pr.pr_lock) + "\n"
            if (pr.pr_unlock):
                out_string += "\t unlock:\t"
                out_string += GetSourceInformationForAddress(pr.pr_unlock) + "\n"
            if (pr.pr_getlock):
                out_string += "\t getlock:\t"
                out_string += GetSourceInformationForAddress(pr.pr_getlock) + "\n"
            if (pr.pr_old):
                out_string += "\t old:\t"
                out_string += GetSourceInformationForAddress(pr.pr_old) + "\n"
            # The pr_usrreqs vector is printed unconditionally.
            out_string += "\t pru_flags:0x\t" + hex(pru.pru_flags) + "\n"
            out_string += "\t abort:\t"
            out_string += GetSourceInformationForAddress(pru.pru_abort) + "\n"
            out_string += "\t accept:\t"
            out_string += GetSourceInformationForAddress(pru.pru_accept) + "\n"
            out_string += "\t attach:\t"
            out_string += GetSourceInformationForAddress(pru.pru_attach) + "\n"
            out_string += "\t bind:\t"
            out_string += GetSourceInformationForAddress(pru.pru_bind) + "\n"
            out_string += "\t connect:\t"
            out_string += GetSourceInformationForAddress(pru.pru_connect) + "\n"
            out_string += "\t connect2:\t"
            out_string += GetSourceInformationForAddress(pru.pru_connect2) + "\n"
            out_string += "\t connectx:\t"
            out_string += GetSourceInformationForAddress(pru.pru_connectx) + "\n"
            out_string += "\t control:\t"
            out_string += GetSourceInformationForAddress(pru.pru_control) + "\n"
            out_string += "\t detach:\t"
            out_string += GetSourceInformationForAddress(pru.pru_detach) + "\n"
            out_string += "\t disconnect:\t"
            out_string += GetSourceInformationForAddress(pru.pru_disconnect) + "\n"
            out_string += "\t listen:\t"
            out_string += GetSourceInformationForAddress(pru.pru_listen) + "\n"
            out_string += "\t peeraddr:\t"
            out_string += GetSourceInformationForAddress(pru.pru_peeraddr) + "\n"
            out_string += "\t rcvd:\t"
            out_string += GetSourceInformationForAddress(pru.pru_rcvd) + "\n"
            out_string += "\t rcvoob:\t"
            out_string += GetSourceInformationForAddress(pru.pru_rcvoob) + "\n"
            out_string += "\t send:\t"
            out_string += GetSourceInformationForAddress(pru.pru_send) + "\n"
            out_string += "\t sense:\t"
            out_string += GetSourceInformationForAddress(pru.pru_sense) + "\n"
            out_string += "\t shutdown:\t"
            out_string += GetSourceInformationForAddress(pru.pru_shutdown) + "\n"
            out_string += "\t sockaddr:\t"
            out_string += GetSourceInformationForAddress(pru.pru_sockaddr) + "\n"
            out_string += "\t sopoll:\t"
            out_string += GetSourceInformationForAddress(pru.pru_sopoll) + "\n"
            out_string += "\t soreceive:\t"
            out_string += GetSourceInformationForAddress(pru.pru_soreceive) + "\n"
            out_string += "\t sosend:\t"
            out_string += GetSourceInformationForAddress(pru.pru_sosend) + "\n"
            pr = pr.pr_entry.tqe_next
        dp = dp.dom_entry.tqe_next
    print out_string
# EndMacro: show_domains
| 39.530709
| 353
| 0.56609
|
9a5c0085a0dbcedb9406d4dd24af668ed735b1f1
| 7,724
|
py
|
Python
|
parse_args.py
|
headacheboy/IGSQL
|
5cdbdbf5e530be14ff6441d2a4a514f3546d76ba
|
[
"Apache-2.0"
] | 18
|
2020-11-13T03:18:05.000Z
|
2022-01-11T02:38:06.000Z
|
parse_args.py
|
kiminh/IGSQL
|
5cdbdbf5e530be14ff6441d2a4a514f3546d76ba
|
[
"Apache-2.0"
] | 2
|
2020-12-07T10:06:15.000Z
|
2021-03-10T07:13:45.000Z
|
parse_args.py
|
kiminh/IGSQL
|
5cdbdbf5e530be14ff6441d2a4a514f3546d76ba
|
[
"Apache-2.0"
] | 4
|
2020-11-17T06:42:34.000Z
|
2021-12-29T05:18:30.000Z
|
import sys
args = sys.argv
import os
import argparse
def _str2bool(value):
    """ Parse a textual boolean for argparse.

    Fixes the classic `type=bool` pitfall: bool("False") is True, so any
    non-empty command-line value (including the literal string "False")
    used to enable a flag. This parser interprets the usual spellings.

    raises: argparse.ArgumentTypeError for unrecognized values.
    """
    if isinstance(value, bool):
        return value
    lowered = str(value).strip().lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0", ""):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected, got %r" % (value,))


def interpret_args(arg_list=None):
    """ Interprets the command line arguments, and returns a dictionary.

    params:
        arg_list - optional list of argument strings; when None (the
            default, backward compatible with the zero-argument call),
            argparse reads sys.argv.
    returns: argparse.Namespace with all parsed options.
    raises: ValueError if no mode (train/evaluate/interactive/attention)
        is selected, if testing is enabled without evaluation, or if an
        args.log already exists in the log directory when training.

    Side effects: creates the log directory if missing; when training,
    writes the parsed arguments to <logdir>/args.log.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument("--no_gpus", type=_str2bool, default=1)

    ### Data parameters
    parser.add_argument(
        '--raw_train_filename',
        type=str,
        default='../atis_data/data/resplit/processed/train_with_tables.pkl')
    parser.add_argument(
        '--raw_dev_filename',
        type=str,
        default='../atis_data/data/resplit/processed/dev_with_tables.pkl')
    parser.add_argument(
        '--raw_validation_filename',
        type=str,
        default='../atis_data/data/resplit/processed/valid_with_tables.pkl')
    parser.add_argument(
        '--raw_test_filename',
        type=str,
        default='../atis_data/data/resplit/processed/test_with_tables.pkl')

    parser.add_argument('--data_directory', type=str, default='processed_data')

    parser.add_argument('--processed_train_filename', type=str, default='train.pkl')
    parser.add_argument('--processed_dev_filename', type=str, default='dev.pkl')
    parser.add_argument('--processed_validation_filename', type=str, default='validation.pkl')
    parser.add_argument('--processed_test_filename', type=str, default='test.pkl')

    parser.add_argument('--database_schema_filename', type=str, default=None)
    parser.add_argument('--embedding_filename', type=str, default=None)

    parser.add_argument('--input_vocabulary_filename', type=str, default='input_vocabulary.pkl')
    parser.add_argument('--output_vocabulary_filename',
                        type=str,
                        default='output_vocabulary.pkl')

    parser.add_argument('--input_key', type=str, default='nl_with_dates')

    parser.add_argument('--anonymize', type=_str2bool, default=False)
    parser.add_argument('--anonymization_scoring', type=_str2bool, default=False)
    parser.add_argument('--use_snippets', type=_str2bool, default=False)

    parser.add_argument('--use_previous_query', type=_str2bool, default=False)
    parser.add_argument('--maximum_queries', type=int, default=1)
    parser.add_argument('--use_copy_switch', type=_str2bool, default=False)
    parser.add_argument('--use_query_attention', type=_str2bool, default=False)

    parser.add_argument('--use_utterance_attention', type=_str2bool, default=False)

    parser.add_argument('--freeze', type=_str2bool, default=False)
    parser.add_argument('--scheduler', type=_str2bool, default=False)

    parser.add_argument('--use_bert', type=_str2bool, default=False)
    parser.add_argument("--bert_type_abb", type=str, help="Type of BERT model to load. e.g.) uS, uL, cS, cL, and mcS")
    parser.add_argument("--bert_input_version", type=str, default='v1')
    parser.add_argument('--fine_tune_bert', type=_str2bool, default=False)
    parser.add_argument('--lr_bert', default=1e-5, type=float, help='BERT model learning rate.')

    ### Debugging/logging parameters
    parser.add_argument('--reload_embedding', type=int, default=0)
    parser.add_argument('--logdir', type=str, default='logs')
    parser.add_argument('--deterministic', type=_str2bool, default=False)
    parser.add_argument('--num_train', type=int, default=-1)

    parser.add_argument('--logfile', type=str, default='log.txt')
    parser.add_argument('--results_file', type=str, default='results.txt')

    ### Model architecture
    parser.add_argument('--input_embedding_size', type=int, default=300)
    parser.add_argument('--output_embedding_size', type=int, default=300)

    parser.add_argument('--encoder_state_size', type=int, default=300)
    parser.add_argument('--decoder_state_size', type=int, default=300)

    parser.add_argument('--encoder_num_layers', type=int, default=1)
    parser.add_argument('--decoder_num_layers', type=int, default=1)
    parser.add_argument('--snippet_num_layers', type=int, default=1)

    parser.add_argument('--maximum_utterances', type=int, default=5)
    parser.add_argument('--state_positional_embeddings', type=_str2bool, default=False)
    parser.add_argument('--positional_embedding_size', type=int, default=50)

    parser.add_argument('--snippet_age_embedding', type=_str2bool, default=False)
    parser.add_argument('--snippet_age_embedding_size', type=int, default=64)
    parser.add_argument('--max_snippet_age_embedding', type=int, default=4)
    parser.add_argument('--previous_decoder_snippet_encoding', type=_str2bool, default=False)

    parser.add_argument('--discourse_level_lstm', type=_str2bool, default=False)

    parser.add_argument('--use_schema_attention', type=_str2bool, default=False)
    parser.add_argument('--use_encoder_attention', type=_str2bool, default=False)

    parser.add_argument('--use_schema_encoder', type=_str2bool, default=False)
    parser.add_argument('--use_schema_self_attention', type=_str2bool, default=False)
    parser.add_argument('--use_schema_encoder_2', type=_str2bool, default=False)

    ### Training parameters
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--train_maximum_sql_length', type=int, default=400)  # 200
    parser.add_argument('--train_evaluation_size', type=int, default=100)

    parser.add_argument('--dropout_amount', type=float, default=0.5)

    parser.add_argument('--initial_patience', type=float, default=10.)
    parser.add_argument('--patience_ratio', type=float, default=1.01)

    parser.add_argument('--initial_learning_rate', type=float, default=1e-3)
    parser.add_argument('--learning_rate_ratio', type=float, default=0.8)

    parser.add_argument('--interaction_level', type=_str2bool, default=False)
    parser.add_argument('--reweight_batch', type=_str2bool, default=False)

    parser.add_argument('--gnn_layer_number', type=int, default=1)
    parser.add_argument('--clip', type=float, default=5.0)
    parser.add_argument('--warmup_step', type=int, default=1000)

    ### Setting
    parser.add_argument('--train', type=_str2bool, default=False)
    parser.add_argument('--debug', type=_str2bool, default=False)

    parser.add_argument('--evaluate', type=_str2bool, default=False)
    parser.add_argument('--attention', type=_str2bool, default=False)
    parser.add_argument('--save_file', type=str, default="")

    parser.add_argument('--enable_testing', type=_str2bool, default=False)
    parser.add_argument('--use_predicted_queries', type=_str2bool, default=False)
    parser.add_argument('--evaluate_split', type=str, default='dev')
    parser.add_argument('--evaluate_with_gold_forcing', type=_str2bool, default=False)
    parser.add_argument('--eval_maximum_sql_length', type=int, default=400)
    parser.add_argument('--results_note', type=str, default='')

    parser.add_argument('--compute_metrics', type=_str2bool, default=False)
    parser.add_argument('--reference_results', type=str, default='')

    parser.add_argument('--interactive', type=_str2bool, default=False)

    parser.add_argument('--database_username', type=str, default="aviarmy")
    parser.add_argument('--database_password', type=str, default="aviarmy")
    parser.add_argument('--database_timeout', type=int, default=2)

    args = parser.parse_args(arg_list)

    if not os.path.exists(args.logdir):
        os.makedirs(args.logdir)

    # Exactly one "mode" must be requested.
    if not (args.train or args.evaluate or args.interactive or args.attention):
        raise ValueError('You need to be training or evaluating')
    if args.enable_testing and not args.evaluate:
        raise ValueError('You should evaluate the model if enabling testing')

    # Record the run configuration; refuse to clobber a previous run's args.
    if args.train:
        args_file = args.logdir + '/args.log'
        if os.path.exists(args_file):
            raise ValueError('Warning: arguments already exist in ' + str(args_file))
        with open(args_file, 'w') as infile:
            infile.write(str(args))

    return args
| 46.53012
| 118
| 0.714267
|
e0a32547fcb15c8068fee150f7654fe501a68512
| 38,465
|
py
|
Python
|
tests/nlu/test_evaluation.py
|
Goxiaoy/rasa
|
93d9983e465f7a426930a746fef1252cbb2f8f35
|
[
"Apache-2.0"
] | null | null | null |
tests/nlu/test_evaluation.py
|
Goxiaoy/rasa
|
93d9983e465f7a426930a746fef1252cbb2f8f35
|
[
"Apache-2.0"
] | null | null | null |
tests/nlu/test_evaluation.py
|
Goxiaoy/rasa
|
93d9983e465f7a426930a746fef1252cbb2f8f35
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import json
import os
import sys
from pathlib import Path
from typing import Text, List, Dict, Any, Set, Optional
from tests.conftest import AsyncMock
import pytest
from _pytest.monkeypatch import MonkeyPatch
from unittest.mock import Mock
import rasa.nlu.test
import rasa.shared.nlu.training_data.loading
import rasa.shared.utils.io
import rasa.utils.io
import rasa.model
from rasa.nlu.classifiers.diet_classifier import DIETClassifier
from rasa.nlu.classifiers.fallback_classifier import FallbackClassifier
from rasa.nlu.components import ComponentBuilder, Component
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.extractors.crf_entity_extractor import CRFEntityExtractor
from rasa.nlu.extractors.extractor import EntityExtractor
from rasa.nlu.extractors.mitie_entity_extractor import MitieEntityExtractor
from rasa.nlu.extractors.spacy_entity_extractor import SpacyEntityExtractor
from rasa.nlu.model import Interpreter, Trainer
from rasa.core.interpreter import RasaNLUInterpreter
from rasa.nlu.selectors.response_selector import ResponseSelector
from rasa.nlu.test import (
is_token_within_entity,
do_entities_overlap,
merge_labels,
remove_empty_intent_examples,
remove_empty_response_examples,
get_entity_extractors,
remove_pretrained_extractors,
drop_intents_below_freq,
cross_validate,
run_evaluation,
substitute_labels,
IntentEvaluationResult,
EntityEvaluationResult,
ResponseSelectionEvaluationResult,
evaluate_intents,
evaluate_entities,
evaluate_response_selections,
NO_ENTITY,
collect_successful_entity_predictions,
collect_incorrect_entity_predictions,
merge_confidences,
_get_entity_confidences,
is_response_selector_present,
get_eval_data,
does_token_cross_borders,
align_entity_predictions,
determine_intersection,
determine_token_labels,
is_entity_extractor_present,
)
from rasa.nlu.tokenizers.tokenizer import Token
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from rasa.shared.constants import DEFAULT_NLU_FALLBACK_INTENT_NAME
from rasa.shared.importers.importer import TrainingDataImporter
from rasa.shared.nlu.constants import (
NO_ENTITY_TAG,
INTENT,
INTENT_RANKING_KEY,
INTENT_NAME_KEY,
PREDICTED_CONFIDENCE_KEY,
)
from rasa.shared.nlu.training_data.message import Message
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.model_testing import compare_nlu_models
from rasa.utils.tensorflow.constants import EPOCHS, ENTITY_RECOGNITION
# https://github.com/pytest-dev/pytest-asyncio/issues/68
# this event_loop is used by pytest-asyncio, and redefining it
# is currently the only way of changing the scope of this fixture
from tests.nlu.utilities import write_file_config
# Chinese Example
# "对面食过敏" -> To be allergic to wheat-based food
# Two tokenizations of the same sentence: one that splits at the wrong
# character boundary and one that splits correctly.
CH_wrong_segmentation = [
    Token("对面", 0),
    Token("食", 2),
    Token("过敏", 3), # opposite, food, allergy
]
CH_correct_segmentation = [
    Token("对", 0),
    Token("面食", 1),
    Token("过敏", 3), # towards, wheat-based food, allergy
]
# Entity spans matching the two segmentations above; the "wrong" entity
# overlaps the "correct" one (chars 1-2 are covered by both).
CH_wrong_entity = {"start": 0, "end": 2, "value": "对面", "entity": "direction"}
CH_correct_entity = {"start": 1, "end": 3, "value": "面食", "entity": "food_type"}

# EN example
# "Hey Robot, I would like to eat pizza near Alexanderplatz tonight"
# Character offsets of each token in the sentence above.
EN_indices = [0, 4, 9, 11, 13, 19, 24, 27, 31, 37, 42, 57]
EN_tokens = [
    "Hey",
    "Robot",
    ",",
    "I",
    "would",
    "like",
    "to",
    "eat",
    "pizza",
    "near",
    "Alexanderplatz",
    "tonight",
]
EN_tokens = [Token(t, i) for t, i in zip(EN_tokens, EN_indices)]
# Gold (non-overlapping) entity annotations for the EN sentence.
EN_targets = [
    {"start": 31, "end": 36, "value": "pizza", "entity": "food"},
    {"start": 37, "end": 56, "value": "near Alexanderplatz", "entity": "location"},
    {"start": 57, "end": 64, "value": "tonight", "entity": "datetime"},
]
# Predictions from two extractors: one spurious ("Robot"/person), one exact
# match ("pizza"), one partial span, and one cross-extractor overlap.
EN_predicted = [
    {
        "start": 4,
        "end": 9,
        "value": "Robot",
        "entity": "person",
        "extractor": "EntityExtractorA",
    },
    {
        "start": 31,
        "end": 36,
        "value": "pizza",
        "entity": "food",
        "extractor": "EntityExtractorA",
    },
    {
        "start": 42,
        "end": 56,
        "value": "Alexanderplatz",
        "entity": "location",
        "extractor": "EntityExtractorA",
    },
    {
        "start": 42,
        "end": 64,
        "value": "Alexanderplatz tonight",
        "entity": "movie",
        "extractor": "EntityExtractorB",
    },
]
# Pre-built evaluation results reused across tests, with and without tokens.
EN_entity_result = EntityEvaluationResult(
    EN_targets, EN_predicted, EN_tokens, " ".join([t.text for t in EN_tokens])
)
EN_entity_result_no_tokens = EntityEvaluationResult(EN_targets, EN_predicted, [], "")
def test_token_entity_intersection():
    """Check determine_intersection for contained, disjoint, and crossing tokens."""
    # token fully contained in the entity span
    contained = determine_intersection(CH_correct_segmentation[1], CH_correct_entity)
    assert contained == len(CH_correct_segmentation[1].text)

    # token completely outside the entity span
    disjoint = determine_intersection(CH_correct_segmentation[2], CH_correct_entity)
    assert disjoint == 0

    # token crossing the entity boundary overlaps by a single character
    crossing = determine_intersection(CH_correct_segmentation[1], CH_wrong_entity)
    assert crossing == 1
def test_token_entity_boundaries():
    """Exercise is_token_within_entity / does_token_cross_borders on the CH fixtures."""
    cases = [
        # (token, entity, within, crosses)
        (CH_wrong_segmentation[1], CH_correct_entity, True, False),    # smaller and included
        (CH_correct_segmentation[1], CH_correct_entity, True, False),  # exact match
        (CH_correct_segmentation[0], CH_correct_entity, False, False), # completely outside
        (CH_wrong_segmentation[0], CH_correct_entity, False, True),    # border crossing
    ]
    for token, entity, within, crosses in cases:
        assert bool(is_token_within_entity(token, entity)) == within
        assert bool(does_token_cross_borders(token, entity)) == crosses
def test_entity_overlap():
    """Overlap is detected for the clashing CH entities but not the disjoint EN targets."""
    clashing = [CH_correct_entity, CH_wrong_entity]
    assert do_entities_overlap(clashing)
    assert not do_entities_overlap(EN_targets)
def test_determine_token_labels_throws_error():
    """Overlapping entities with a CRF extractor must raise ValueError."""
    token = CH_correct_segmentation[0]
    overlapping_entities = [CH_correct_entity, CH_wrong_entity]
    with pytest.raises(ValueError):
        determine_token_labels(token, overlapping_entities, [CRFEntityExtractor.name])
def test_determine_token_labels_no_extractors():
    """Overlapping entities without any extractor must raise ValueError."""
    token = CH_correct_segmentation[0]
    overlapping_entities = [CH_correct_entity, CH_wrong_entity]
    with pytest.raises(ValueError):
        determine_token_labels(token, overlapping_entities, None)
def test_determine_token_labels_no_extractors_no_overlap():
    """Without extractors, a token outside all entities maps to the no-entity tag."""
    result = determine_token_labels(CH_correct_segmentation[0], EN_targets, None)
    assert result == NO_ENTITY_TAG
def test_determine_token_labels_with_extractors():
    """Overlapping entities are tolerated when simple (non-CRF) extractors are given."""
    extractors = [SpacyEntityExtractor.name, MitieEntityExtractor.name]
    overlapping_entities = [CH_correct_entity, CH_wrong_entity]
    result = determine_token_labels(
        CH_correct_segmentation[0], overlapping_entities, extractors
    )
    assert result == "direction"
# Cases: extractor without confidences -> 0.0; no entities -> 0.0;
# CRF / DIET extractors expose "confidence_entity" -> that value is returned.
@pytest.mark.parametrize(
    "token, entities, extractors, expected_confidence",
    [
        (
            Token("pizza", 4),
            [
                {
                    "start": 4,
                    "end": 9,
                    "value": "pizza",
                    "entity": "food",
                    "extractor": "EntityExtractorA",
                }
            ],
            ["EntityExtractorA"],
            0.0,
        ),
        (Token("pizza", 4), [], ["EntityExtractorA"], 0.0),
        (
            Token("pizza", 4),
            [
                {
                    "start": 4,
                    "end": 9,
                    "value": "pizza",
                    "entity": "food",
                    "confidence_entity": 0.87,
                    "extractor": "CRFEntityExtractor",
                }
            ],
            ["CRFEntityExtractor"],
            0.87,
        ),
        (
            Token("pizza", 4),
            [
                {
                    "start": 4,
                    "end": 9,
                    "value": "pizza",
                    "entity": "food",
                    "confidence_entity": 0.87,
                    "extractor": "DIETClassifier",
                }
            ],
            ["DIETClassifier"],
            0.87,
        ),
    ],
)
def test_get_entity_confidences(
    token: Token,
    entities: List[Dict[Text, Any]],
    extractors: List[Text],
    expected_confidence: float,
):
    """_get_entity_confidences returns the expected per-token confidence."""
    confidence = _get_entity_confidences(token, entities, extractors)
    assert confidence == expected_confidence
def test_label_merging():
    """merge_labels concatenates per-message label sequences in order.

    Without an extractor name the gold targets are merged; with one, that
    extractor's predictions are merged instead.
    """
    import numpy as np
    aligned_predictions = [
        {
            "target_labels": ["O", "O"],
            "extractor_labels": {"EntityExtractorA": ["O", "O"]},
        },
        {
            "target_labels": ["LOC", "O", "O"],
            "extractor_labels": {"EntityExtractorA": ["O", "O", "O"]},
        },
    ]
    assert np.all(merge_labels(aligned_predictions) == ["O", "O", "LOC", "O", "O"])
    assert np.all(
        merge_labels(aligned_predictions, "EntityExtractorA")
        == ["O", "O", "O", "O", "O"]
    )
def test_confidence_merging():
    """merge_confidences flattens one extractor's per-token confidences across messages."""
    import numpy as np
    aligned_predictions = [
        {
            "target_labels": ["O", "O"],
            "extractor_labels": {"EntityExtractorA": ["O", "O"]},
            "confidences": {"EntityExtractorA": [0.0, 0.0]},
        },
        {
            "target_labels": ["LOC", "O", "O"],
            "extractor_labels": {"EntityExtractorA": ["O", "O", "O"]},
            "confidences": {"EntityExtractorA": [0.98, 0.0, 0.0]},
        },
    ]
    assert np.all(
        merge_confidences(aligned_predictions, "EntityExtractorA")
        == [0.0, 0.0, 0.98, 0.0, 0.0]
    )
def test_drop_intents_below_freq():
    """Intents below the example-count threshold are dropped; lookup tables survive."""
    td = rasa.shared.nlu.training_data.loading.load_data(
        "data/examples/rasa/demo-rasa.json"
    )
    # include some lookup tables and make sure new td has them
    td = td.merge(TrainingData(lookup_tables=[{"lookup_table": "lookup_entry"}]))
    # threshold 0 keeps every intent present in the demo data
    clean_td = drop_intents_below_freq(td, 0)
    assert clean_td.intents == {
        "affirm",
        "goodbye",
        "greet",
        "restaurant_search",
        "chitchat",
    }
    # threshold 10 keeps only the two intents with >= 10 examples
    clean_td = drop_intents_below_freq(td, 10)
    assert clean_td.intents == {"affirm", "restaurant_search"}
    assert clean_td.lookup_tables == td.lookup_tables
@pytest.mark.timeout(
300, func_only=True
) # these can take a longer time than the default timeout
def test_run_evaluation(unpacked_trained_moodbot_path: Text, nlu_as_json_path: Text):
result = run_evaluation(
nlu_as_json_path,
os.path.join(unpacked_trained_moodbot_path, "nlu"),
errors=False,
successes=False,
disable_plotting=True,
)
assert result.get("intent_evaluation")
def test_eval_data(
component_builder: ComponentBuilder,
tmp_path: Path,
project: Text,
unpacked_trained_rasa_model: Text,
):
config_path = os.path.join(project, "config.yml")
data_importer = TrainingDataImporter.load_nlu_importer_from_config(
config_path,
training_data_paths=[
"data/examples/rasa/demo-rasa.yml",
"data/examples/rasa/demo-rasa-responses.yml",
],
)
_, nlu_model_directory = rasa.model.get_model_subdirectories(
unpacked_trained_rasa_model
)
interpreter = Interpreter.load(nlu_model_directory, component_builder)
data = data_importer.get_nlu_data()
(intent_results, response_selection_results, entity_results) = get_eval_data(
interpreter, data
)
assert len(intent_results) == 46
assert len(response_selection_results) == 0
assert len(entity_results) == 46
@pytest.mark.timeout(
240, func_only=True
) # these can take a longer time than the default timeout
def test_run_cv_evaluation(
pretrained_embeddings_spacy_config: RasaNLUModelConfig, monkeypatch: MonkeyPatch
):
td = rasa.shared.nlu.training_data.loading.load_data(
"data/examples/rasa/demo-rasa.json"
)
nlu_config = RasaNLUModelConfig(
{
"language": "en",
"pipeline": [
{"name": "WhitespaceTokenizer"},
{"name": "CountVectorsFeaturizer"},
{"name": "DIETClassifier", EPOCHS: 2},
],
}
)
# mock training
trainer = Trainer(nlu_config)
trainer.pipeline = remove_pretrained_extractors(trainer.pipeline)
mock = Mock(return_value=Interpreter(trainer.pipeline, None))
monkeypatch.setattr(Trainer, "train", mock)
n_folds = 2
intent_results, entity_results, response_selection_results = cross_validate(
td,
n_folds,
nlu_config,
successes=False,
errors=False,
disable_plotting=True,
report_as_dict=True,
)
assert len(intent_results.train["Accuracy"]) == n_folds
assert len(intent_results.train["Precision"]) == n_folds
assert len(intent_results.train["F1-score"]) == n_folds
assert len(intent_results.test["Accuracy"]) == n_folds
assert len(intent_results.test["Precision"]) == n_folds
assert len(intent_results.test["F1-score"]) == n_folds
assert all(key in intent_results.evaluation for key in ["errors", "report"])
assert any(
isinstance(intent_report, dict)
and intent_report.get("confused_with") is not None
for intent_report in intent_results.evaluation["report"].values()
)
for extractor_evaluation in entity_results.evaluation.values():
assert all(key in extractor_evaluation for key in ["errors", "report"])
def test_run_cv_evaluation_with_response_selector(monkeypatch: MonkeyPatch):
training_data_obj = rasa.shared.nlu.training_data.loading.load_data(
"data/examples/rasa/demo-rasa.yml"
)
training_data_responses_obj = rasa.shared.nlu.training_data.loading.load_data(
"data/examples/rasa/demo-rasa-responses.yml"
)
training_data_obj = training_data_obj.merge(training_data_responses_obj)
nlu_config = RasaNLUModelConfig(
{
"language": "en",
"pipeline": [
{"name": "WhitespaceTokenizer"},
{"name": "CountVectorsFeaturizer"},
{"name": "DIETClassifier", EPOCHS: 2},
{"name": "ResponseSelector", EPOCHS: 2},
],
}
)
# mock training
trainer = Trainer(nlu_config)
trainer.pipeline = remove_pretrained_extractors(trainer.pipeline)
mock = Mock(return_value=Interpreter(trainer.pipeline, None))
monkeypatch.setattr(Trainer, "train", mock)
n_folds = 2
intent_results, entity_results, response_selection_results = cross_validate(
training_data_obj,
n_folds,
nlu_config,
successes=False,
errors=False,
disable_plotting=True,
report_as_dict=True,
)
assert len(intent_results.train["Accuracy"]) == n_folds
assert len(intent_results.train["Precision"]) == n_folds
assert len(intent_results.train["F1-score"]) == n_folds
assert len(intent_results.test["Accuracy"]) == n_folds
assert len(intent_results.test["Precision"]) == n_folds
assert len(intent_results.test["F1-score"]) == n_folds
assert all(key in intent_results.evaluation for key in ["errors", "report"])
assert any(
isinstance(intent_report, dict)
and intent_report.get("confused_with") is not None
for intent_report in intent_results.evaluation["report"].values()
)
assert len(response_selection_results.train["Accuracy"]) == n_folds
assert len(response_selection_results.train["Precision"]) == n_folds
assert len(response_selection_results.train["F1-score"]) == n_folds
assert len(response_selection_results.test["Accuracy"]) == n_folds
assert len(response_selection_results.test["Precision"]) == n_folds
assert len(response_selection_results.test["F1-score"]) == n_folds
assert all(
key in response_selection_results.evaluation for key in ["errors", "report"]
)
assert any(
isinstance(intent_report, dict)
and intent_report.get("confused_with") is not None
for intent_report in response_selection_results.evaluation["report"].values()
)
assert len(entity_results.train["DIETClassifier"]["Accuracy"]) == n_folds
assert len(entity_results.train["DIETClassifier"]["Precision"]) == n_folds
assert len(entity_results.train["DIETClassifier"]["F1-score"]) == n_folds
assert len(entity_results.test["DIETClassifier"]["Accuracy"]) == n_folds
assert len(entity_results.test["DIETClassifier"]["Precision"]) == n_folds
assert len(entity_results.test["DIETClassifier"]["F1-score"]) == n_folds
for extractor_evaluation in entity_results.evaluation.values():
assert all(key in extractor_evaluation for key in ["errors", "report"])
def test_response_selector_present():
    """is_response_selector_present detects a ResponseSelector in the pipeline."""
    with_selector = Interpreter([ResponseSelector()], context=None)
    without_selector = Interpreter([], context=None)
    assert is_response_selector_present(with_selector)
    assert not is_response_selector_present(without_selector)
def test_intent_evaluation_report(tmp_path: Path):
path = tmp_path / "evaluation"
path.mkdir()
report_folder = str(path / "reports")
report_filename = os.path.join(report_folder, "intent_report.json")
rasa.shared.utils.io.create_directory(report_folder)
intent_results = [
IntentEvaluationResult("", "restaurant_search", "I am hungry", 0.12345),
IntentEvaluationResult("greet", "greet", "hello", 0.98765),
]
result = evaluate_intents(
intent_results,
report_folder,
successes=True,
errors=True,
disable_plotting=False,
)
report = json.loads(rasa.shared.utils.io.read_file(report_filename))
greet_results = {
"precision": 1.0,
"recall": 1.0,
"f1-score": 1.0,
"support": 1,
"confused_with": {},
}
prediction = {
"text": "hello",
"intent": "greet",
"predicted": "greet",
"confidence": 0.98765,
}
assert len(report.keys()) == 4
assert report["greet"] == greet_results
assert result["predictions"][0] == prediction
assert os.path.exists(os.path.join(report_folder, "intent_confusion_matrix.png"))
assert os.path.exists(os.path.join(report_folder, "intent_histogram.png"))
assert not os.path.exists(os.path.join(report_folder, "intent_errors.json"))
assert os.path.exists(os.path.join(report_folder, "intent_successes.json"))
def test_intent_evaluation_report_large(tmp_path: Path):
path = tmp_path / "evaluation"
path.mkdir()
report_folder = path / "reports"
report_filename = report_folder / "intent_report.json"
rasa.shared.utils.io.create_directory(str(report_folder))
def correct(label: Text) -> IntentEvaluationResult:
return IntentEvaluationResult(label, label, "", 1.0)
def incorrect(label: Text, _label: Text) -> IntentEvaluationResult:
return IntentEvaluationResult(label, _label, "", 1.0)
a_results = [correct("A")] * 10
b_results = [correct("B")] * 7 + [incorrect("B", "C")] * 3
c_results = [correct("C")] * 3 + [incorrect("C", "D")] + [incorrect("C", "E")]
d_results = [correct("D")] * 29 + [incorrect("D", "B")] * 3
e_results = [incorrect("E", "C")] * 5 + [incorrect("E", "")] * 5
intent_results = a_results + b_results + c_results + d_results + e_results
evaluate_intents(
intent_results,
str(report_folder),
successes=False,
errors=False,
disable_plotting=True,
)
report = json.loads(rasa.shared.utils.io.read_file(str(report_filename)))
a_results = {
"precision": 1.0,
"recall": 1.0,
"f1-score": 1.0,
"support": 10,
"confused_with": {},
}
e_results = {
"precision": 0.0,
"recall": 0.0,
"f1-score": 0.0,
"support": 10,
"confused_with": {"C": 5, "": 5},
}
c_confused_with = {"D": 1, "E": 1}
assert len(report.keys()) == 8
assert report["A"] == a_results
assert report["E"] == e_results
assert report["C"]["confused_with"] == c_confused_with
def test_response_evaluation_report(tmp_path: Path):
path = tmp_path / "evaluation"
path.mkdir()
report_folder = str(path / "reports")
report_filename = os.path.join(report_folder, "response_selection_report.json")
rasa.shared.utils.io.create_directory(report_folder)
response_results = [
ResponseSelectionEvaluationResult(
"chitchat/ask_weather",
"chitchat/ask_weather",
"What's the weather",
0.65432,
),
ResponseSelectionEvaluationResult(
"chitchat/ask_name", "chitchat/ask_name", "What's your name?", 0.98765
),
]
result = evaluate_response_selections(
response_results,
report_folder,
successes=True,
errors=True,
disable_plotting=False,
)
report = json.loads(rasa.shared.utils.io.read_file(report_filename))
name_query_results = {
"precision": 1.0,
"recall": 1.0,
"f1-score": 1.0,
"support": 1,
"confused_with": {},
}
prediction = {
"text": "What's your name?",
"intent_response_key_target": "chitchat/ask_name",
"intent_response_key_prediction": "chitchat/ask_name",
"confidence": 0.98765,
}
assert len(report.keys()) == 5
assert report["chitchat/ask_name"] == name_query_results
assert result["predictions"][1] == prediction
assert os.path.exists(
os.path.join(report_folder, "response_selection_confusion_matrix.png")
)
assert os.path.exists(
os.path.join(report_folder, "response_selection_histogram.png")
)
assert not os.path.exists(
os.path.join(report_folder, "response_selection_errors.json")
)
assert os.path.exists(
os.path.join(report_folder, "response_selection_successes.json")
)
# each case: pipeline components -> names of the entity extractors among them
@pytest.mark.parametrize(
    "components, expected_extractors",
    [
        ([DIETClassifier({ENTITY_RECOGNITION: False})], set()),
        ([DIETClassifier({ENTITY_RECOGNITION: True})], {"DIETClassifier"}),
        ([CRFEntityExtractor()], {"CRFEntityExtractor"}),
        (
            [SpacyEntityExtractor(), CRFEntityExtractor()],
            {"SpacyEntityExtractor", "CRFEntityExtractor"},
        ),
        ([ResponseSelector()], set()),
    ],
)
def test_get_entity_extractors(
    components: List[Component], expected_extractors: Set[Text]
):
    """get_entity_extractors reports only components that actually extract entities."""
    mock_interpreter = Interpreter(components, None)
    extractors = get_entity_extractors(mock_interpreter)
    assert extractors == expected_extractors
def test_entity_evaluation_report(tmp_path: Path):
class EntityExtractorA(EntityExtractor):
provides = ["entities"]
def __init__(self, component_config=None) -> None:
super().__init__(component_config)
class EntityExtractorB(EntityExtractor):
provides = ["entities"]
def __init__(self, component_config=None) -> None:
super().__init__(component_config)
path = tmp_path / "evaluation"
path.mkdir()
report_folder = str(path / "reports")
report_filename_a = os.path.join(report_folder, "EntityExtractorA_report.json")
report_filename_b = os.path.join(report_folder, "EntityExtractorB_report.json")
rasa.shared.utils.io.create_directory(report_folder)
mock_interpreter = Interpreter(
[
EntityExtractorA({"provides": ["entities"]}),
EntityExtractorB({"provides": ["entities"]}),
],
None,
)
extractors = get_entity_extractors(mock_interpreter)
result = evaluate_entities(
[EN_entity_result],
extractors,
report_folder,
errors=True,
successes=True,
disable_plotting=False,
)
report_a = json.loads(rasa.shared.utils.io.read_file(report_filename_a))
report_b = json.loads(rasa.shared.utils.io.read_file(report_filename_b))
assert len(report_a) == 6
assert report_a["datetime"]["support"] == 1.0
assert report_b["macro avg"]["recall"] == 0.0
assert report_a["macro avg"]["recall"] == 0.5
assert result["EntityExtractorA"]["accuracy"] == 0.75
assert os.path.exists(
os.path.join(report_folder, "EntityExtractorA_confusion_matrix.png")
)
assert os.path.exists(os.path.join(report_folder, "EntityExtractorA_errors.json"))
assert os.path.exists(
os.path.join(report_folder, "EntityExtractorA_successes.json")
)
assert not os.path.exists(
os.path.join(report_folder, "EntityExtractorA_histogram.png")
)
def test_empty_intent_removal():
    """Examples whose intent target is empty are dropped; the rest stay intact."""
    raw_results = [
        IntentEvaluationResult("", "restaurant_search", "I am hungry", 0.12345),
        IntentEvaluationResult("greet", "greet", "hello", 0.98765),
    ]
    kept = remove_empty_intent_examples(raw_results)
    assert len(kept) == 1
    survivor = kept[0]
    assert survivor.intent_target == "greet"
    assert survivor.intent_prediction == "greet"
    assert survivor.confidence == 0.98765
    assert survivor.message == "hello"
def test_empty_response_removal():
    """Response-selection results without a target key are filtered out."""
    raw_results = [
        ResponseSelectionEvaluationResult(None, None, "What's the weather", 0.65432),
        ResponseSelectionEvaluationResult(
            "chitchat/ask_name", "chitchat/ask_name", "What's your name?", 0.98765
        ),
    ]
    kept = remove_empty_response_examples(raw_results)
    assert len(kept) == 1
    survivor = kept[0]
    assert survivor.intent_response_key_target == "chitchat/ask_name"
    assert survivor.intent_response_key_prediction == "chitchat/ask_name"
    assert survivor.confidence == 0.98765
    assert survivor.message == "What's your name?"
def test_evaluate_entities_cv_empty_tokens():
    """A message with no tokens aligns to empty label/confidence sequences."""
    mock_extractors = ["EntityExtractorA", "EntityExtractorB"]
    result = align_entity_predictions(EN_entity_result_no_tokens, mock_extractors)
    assert result == {
        "target_labels": [],
        "extractor_labels": {"EntityExtractorA": [], "EntityExtractorB": []},
        "confidences": {"EntityExtractorA": [], "EntityExtractorB": []},
    }, "Wrong entity prediction alignment"
def test_evaluate_entities_cv():
mock_extractors = ["EntityExtractorA", "EntityExtractorB"]
result = align_entity_predictions(EN_entity_result, mock_extractors)
assert result == {
"target_labels": [
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"food",
"location",
"location",
"datetime",
],
"extractor_labels": {
"EntityExtractorA": [
"O",
"person",
"O",
"O",
"O",
"O",
"O",
"O",
"food",
"O",
"location",
"O",
],
"EntityExtractorB": [
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"movie",
"movie",
],
},
"confidences": {
"EntityExtractorA": [
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
],
"EntityExtractorB": [
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
],
},
}, "Wrong entity prediction alignment"
def test_remove_pretrained_extractors(component_builder: ComponentBuilder):
    """Pretrained extractors are stripped from the pipeline; other components remain."""
    _config = RasaNLUModelConfig(
        {
            "pipeline": [
                {"name": "SpacyNLP", "model": "en_core_web_md"},
                {"name": "SpacyEntityExtractor"},
                {"name": "DucklingEntityExtractor"},
            ]
        }
    )
    trainer = Trainer(_config, component_builder)
    # only SpacyNLP should survive -- both extractors are pretrained
    target_components_names = ["SpacyNLP"]
    filtered_pipeline = remove_pretrained_extractors(trainer.pipeline)
    filtered_components_names = [c.name for c in filtered_pipeline]
    assert filtered_components_names == target_components_names
def test_label_replacement():
    """substitute_labels swaps each occurrence of one label for another."""
    assert substitute_labels(["O", "location"], "O", "no_entity") == [
        "no_entity",
        "location",
    ]
async def test_nlu_comparison(
tmp_path: Path, monkeypatch: MonkeyPatch, nlu_as_json_path: Text
):
config = {
"language": "en",
"pipeline": [
{"name": "WhitespaceTokenizer"},
{"name": "KeywordIntentClassifier"},
{"name": "RegexEntityExtractor"},
],
}
# the configs need to be at a different path, otherwise the results are
# combined on the same dictionary key and cannot be plotted properly
configs = [write_file_config(config).name, write_file_config(config).name]
# mock training
monkeypatch.setattr(Interpreter, "load", Mock(spec=RasaNLUInterpreter))
monkeypatch.setattr(sys.modules["rasa.nlu"], "train", AsyncMock())
monkeypatch.setattr(
sys.modules["rasa.nlu.test"],
"remove_pretrained_extractors",
Mock(return_value=None),
)
monkeypatch.setattr(
sys.modules["rasa.nlu.test"],
"get_eval_data",
Mock(return_value=(1, None, (None,),)),
)
monkeypatch.setattr(
sys.modules["rasa.nlu.test"],
"evaluate_intents",
Mock(return_value={"f1_score": 1}),
)
output = str(tmp_path)
test_data_importer = TrainingDataImporter.load_from_dict(
training_data_paths=[nlu_as_json_path]
)
test_data = test_data_importer.get_nlu_data()
await compare_nlu_models(
configs, test_data, output, runs=2, exclusion_percentages=[50, 80]
)
assert set(os.listdir(output)) == {
"run_1",
"run_2",
"results.json",
"nlu_model_comparison_graph.pdf",
}
run_1_path = os.path.join(output, "run_1")
assert set(os.listdir(run_1_path)) == {"50%_exclusion", "80%_exclusion", "test.md"}
exclude_50_path = os.path.join(run_1_path, "50%_exclusion")
modelnames = [os.path.splitext(os.path.basename(config))[0] for config in configs]
modeloutputs = set(
["train"]
+ [f"{m}_report" for m in modelnames]
+ [f"{m}.tar.gz" for m in modelnames]
)
assert set(os.listdir(exclude_50_path)) == modeloutputs
@pytest.mark.parametrize(
"entity_results,targets,predictions,successes,errors",
[
(
[
EntityEvaluationResult(
entity_targets=[
{
"start": 17,
"end": 24,
"value": "Italian",
"entity": "cuisine",
}
],
entity_predictions=[
{
"start": 17,
"end": 24,
"value": "Italian",
"entity": "cuisine",
}
],
tokens=[
"I",
"want",
"to",
"book",
"an",
"Italian",
"restaurant",
".",
],
message="I want to book an Italian restaurant.",
),
EntityEvaluationResult(
entity_targets=[
{
"start": 8,
"end": 15,
"value": "Mexican",
"entity": "cuisine",
},
{
"start": 31,
"end": 32,
"value": "4",
"entity": "number_people",
},
],
entity_predictions=[],
tokens=[
"Book",
"an",
"Mexican",
"restaurant",
"for",
"4",
"people",
".",
],
message="Book an Mexican restaurant for 4 people.",
),
],
[
NO_ENTITY,
NO_ENTITY,
NO_ENTITY,
NO_ENTITY,
NO_ENTITY,
"cuisine",
NO_ENTITY,
NO_ENTITY,
NO_ENTITY,
NO_ENTITY,
"cuisine",
NO_ENTITY,
NO_ENTITY,
"number_people",
NO_ENTITY,
NO_ENTITY,
],
[
NO_ENTITY,
NO_ENTITY,
NO_ENTITY,
NO_ENTITY,
NO_ENTITY,
"cuisine",
NO_ENTITY,
NO_ENTITY,
NO_ENTITY,
NO_ENTITY,
NO_ENTITY,
NO_ENTITY,
NO_ENTITY,
NO_ENTITY,
NO_ENTITY,
NO_ENTITY,
],
[
{
"text": "I want to book an Italian restaurant.",
"entities": [
{
"start": 17,
"end": 24,
"value": "Italian",
"entity": "cuisine",
}
],
"predicted_entities": [
{
"start": 17,
"end": 24,
"value": "Italian",
"entity": "cuisine",
}
],
}
],
[
{
"text": "Book an Mexican restaurant for 4 people.",
"entities": [
{
"start": 8,
"end": 15,
"value": "Mexican",
"entity": "cuisine",
},
{
"start": 31,
"end": 32,
"value": "4",
"entity": "number_people",
},
],
"predicted_entities": [],
}
],
)
],
)
def test_collect_entity_predictions(
entity_results: List[EntityEvaluationResult],
targets: List[Text],
predictions: List[Text],
successes: List[Dict[Text, Any]],
errors: List[Dict[Text, Any]],
):
actual = collect_successful_entity_predictions(entity_results, targets, predictions)
assert len(successes) == len(actual)
assert successes == actual
actual = collect_incorrect_entity_predictions(entity_results, targets, predictions)
assert len(errors) == len(actual)
assert errors == actual
class ConstantInterpreter(Interpreter):
    """Test double that returns one fixed parse result for every message."""
    def __init__(self, prediction_to_return: Dict[Text, Any]) -> None:
        # add intent classifier to make sure intents are evaluated
        super().__init__([FallbackClassifier()], None)
        self.prediction = prediction_to_return
    def parse(
        self,
        text: Text,
        time: Optional[datetime.datetime] = None,
        only_output_properties: bool = True,
    ) -> Dict[Text, Any]:
        # ignore the input entirely and return the canned prediction
        return self.prediction
def test_replacing_fallback_intent():
    """Fallback predictions are replaced by the next-best non-fallback intent.

    The interpreter always answers with nlu_fallback on top; evaluation should
    substitute the second-ranked intent and its confidence.
    """
    expected_intent = "greet"
    expected_confidence = 0.345
    fallback_prediction = {
        INTENT: {
            INTENT_NAME_KEY: DEFAULT_NLU_FALLBACK_INTENT_NAME,
            PREDICTED_CONFIDENCE_KEY: 1,
        },
        INTENT_RANKING_KEY: [
            {
                INTENT_NAME_KEY: DEFAULT_NLU_FALLBACK_INTENT_NAME,
                PREDICTED_CONFIDENCE_KEY: 1,
            },
            {
                INTENT_NAME_KEY: expected_intent,
                PREDICTED_CONFIDENCE_KEY: expected_confidence,
            },
            {INTENT_NAME_KEY: "some", PREDICTED_CONFIDENCE_KEY: 0.1},
        ],
    }
    interpreter = ConstantInterpreter(fallback_prediction)
    training_data = TrainingData(
        [Message.build("hi", "greet"), Message.build("bye", "bye")]
    )
    intent_evaluations, _, _ = get_eval_data(interpreter, training_data)
    assert all(
        prediction.intent_prediction == expected_intent
        and prediction.confidence == expected_confidence
        for prediction in intent_evaluations
    )
# an entity extractor in the pipeline -> True; a plain tokenizer -> False
@pytest.mark.parametrize(
    "components, expected_result",
    [([CRFEntityExtractor()], True), ([WhitespaceTokenizer()], False)],
)
def test_is_entity_extractor_present(components, expected_result):
    """is_entity_extractor_present reflects whether any component extracts entities."""
    interpreter = Interpreter(components, context=None)
    assert is_entity_extractor_present(interpreter) == expected_result
| 31.528689
| 88
| 0.586585
|
1c14b6cf25a43c1c8af9aa94b5243285b1576d68
| 3,707
|
py
|
Python
|
old/gene.cluster.py
|
orionzhou/biolib
|
940fb66f1b2608d34a2d00ebdf41dc84c6381f42
|
[
"BSD-2-Clause"
] | 3
|
2019-02-22T20:35:23.000Z
|
2021-11-25T10:01:50.000Z
|
old/gene.cluster.py
|
orionzhou/biolib
|
940fb66f1b2608d34a2d00ebdf41dc84c6381f42
|
[
"BSD-2-Clause"
] | null | null | null |
old/gene.cluster.py
|
orionzhou/biolib
|
940fb66f1b2608d34a2d00ebdf41dc84c6381f42
|
[
"BSD-2-Clause"
] | 1
|
2021-02-19T03:10:14.000Z
|
2021-02-19T03:10:14.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import math
import os.path as op
import numpy as np
import argparse
orgs = """HM101
HM058 HM125 HM056 HM129 HM060
HM095 HM185 HM034 HM004 HM050
HM023 HM010 HM022 HM324 HM340
HM056.AC HM034.AC HM340.AC""".split()
dirw = "/home/youngn/zhoup/Data/misc2/gene.cluster"
if not op.exists(dirw): os.makedirs(dirw)
def make_blastp_db(orgs):
    """Build a protein BLAST database for each organism's 51.fas under $data/db/blastp."""
    genome_dir = os.environ['genome']
    db_dir = op.join(os.environ['data'], "db", "blastp")
    if not op.exists(db_dir):
        os.makedirs(db_dir)
    for org in orgs:
        fasta = op.join(genome_dir, org, "51.fas")
        db_prefix = op.join(db_dir, org)
        # makeblastdb writes <db_prefix>.p* index files next to the prefix
        os.system("makeblastdb -dbtype prot -in %s -out %s" % (fasta, db_prefix))
    return
def run_blast(org, fo):
    """Print the blastp command that would align an organism's proteins to its own DB.

    Note: the command is only printed, not executed (mirrors the commented-out
    call in the __main__ section).  Fixed: `print cmd` is Python-2-only syntax
    and a SyntaxError under Python 3.
    """
    dirg = op.join(os.environ['genome'], org)
    ff = op.join(dirg, "51.fas")
    fdb = op.join(os.environ['data'], 'db', 'blastp', org)
    cmd = "blastp -db %s -query %s -out %s -num_threads %d -evalue 1e-5 -outfmt '7 qseqid qlen sseqid slen length bitscore evalue'" % (fdb, ff, fo, 24)
    print(cmd)
def blast2tbl(fi, fo):
    """Convert tabular blastp output into an MCL abc file (qid, tid, -log10(evalue)).

    Two passes over *fi*: first find the smallest non-zero e-value (used as a
    stand-in for exact-zero e-values, whose log is undefined), then emit hits
    that are non-self, cover >= 50% of the shorter sequence and have e < 1e-10.

    Fixed: Python-2-only `print >>fho` replaced with a write; file handles are
    now closed via context managers even on error.
    """
    e_min = 1
    # pass 1: smallest non-zero e-value in the file
    with open(fi, "r") as fhi:
        for line in fhi:
            if line[0] == "#":
                continue
            (qid, qlen, tid, tlen, alen, bit, e) = line.strip("\n").split("\t")
            e = float(e)
            if e != 0 and e < e_min:
                e_min = e
    # pass 2: filter and score
    with open(fi, "r") as fhi, open(fo, "w") as fho:
        for line in fhi:
            if line[0] == "#":
                continue
            (qid, qlen, tid, tlen, alen, bit, e) = line.strip("\n").split("\t")
            (qlen, tlen, alen) = (float(qlen), float(tlen), float(alen))
            (e, bit) = (float(e), float(bit))
            if e == 0:
                e = e_min  # avoid log10(0)
            rlen = alen / min(qlen, tlen)  # coverage of the shorter sequence
            if qid != tid and rlen >= 0.5 and e < 1e-10:
                score = -math.log10(e)
                fho.write("%s\t%s\t%g\n" % (qid, tid, score))
def run_blat(dirw, org):
    """Self-align an organism's protein FASTA with blat, writing blast8 output.

    Creates <dirw>/01_pro_blat if needed and runs blat via the shell.
    Fixed: Python-2-only `print cmd` (SyntaxError under Python 3).
    """
    dirg = os.environ['genome']
    diro = op.join(dirw, "01_pro_blat")
    if not op.exists(diro):
        os.makedirs(diro)
    ff = op.join(dirg, org, "51.fas")
    fo = op.join(diro, org + ".tbl")
    cmd = "blat -prot -out=blast8 %s %s %s" % (ff, ff, fo)
    print(cmd)
    os.system(cmd)
    return
def blat_filter(dirw, org):
    """Filter self-blat blast8 hits: drop self-matches and hits with e >= 1e-5.

    Reads <dirw>/01_pro_blat/<org>.tbl and writes surviving lines unchanged to
    <dirw>/02_filtered/<org>.tbl.

    Fixed: Python-2-only `print >>fho`; handles now closed via `with`.
    """
    diri = op.join(dirw, "01_pro_blat")
    diro = op.join(dirw, "02_filtered")
    if not op.exists(diro):
        os.makedirs(diro)
    fi = op.join(diri, org + ".tbl")
    fo = op.join(diro, org + ".tbl")
    with open(fi, "r") as fhi, open(fo, "w") as fho:
        for line in fhi:
            line = line.strip("\n")
            (qid, tid, idt, alen, mis, gap, qbeg, qend, tbeg, tend, e, bit) = line.split("\t")
            (e, bit) = (float(e), float(bit))
            # NOTE(review): `alen` is still a string, so the `and alen` test is true for
            # any non-empty field -- an alignment-length threshold was presumably
            # intended; behavior kept as-is pending confirmation.
            if qid != tid and e < 1e-5 and alen:
                fho.write(line + "\n")
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='JJJ')
    parser.add_argument('--version', action="version", version="%(prog)s 1.0")
    args = parser.parse_args()
    # per-stage working directories under the module-level `dirw`
    d01 = op.join(dirw, "01.blast")
    d03 = op.join(dirw, "03.mcl.in")
    d04 = op.join(dirw, "04.mcl")
    d08 = op.join(dirw, "08.grp")
    # fixed: the loop variable previously shadowed the module-level `dirw`
    for subdir in [d01, d03, d04, d08]:
        if not op.exists(subdir):
            os.makedirs(subdir)
    for org in orgs:
        # fixed: Python-2-only `print` statements converted to print() calls
        print("working on " + org)
        f01 = op.join(d01, org + ".tbl")
        #run_blast(org, f01)
        f03 = op.join(d03, org + ".tbl")
        blast2tbl(f01, f03)
        f04 = op.join(d04, org + ".mcl")
        cmd = "$soft/mcl/bin/mcl %s -te 4 -I 2.0 --abc -o %s" % (f03, f04)
        os.system(cmd)
        f08 = op.join(d08, org + ".tbl")
| 30.891667
| 151
| 0.541678
|
30619bf96f2b7a67321a63a5b0e9c74356f75de3
| 7,574
|
py
|
Python
|
analysis/delphi_download.py
|
neuroelf/dermodelphi
|
c8dcc52310efcbe8f314f0cd01ffafed2199f56a
|
[
"MIT"
] | null | null | null |
analysis/delphi_download.py
|
neuroelf/dermodelphi
|
c8dcc52310efcbe8f314f0cd01ffafed2199f56a
|
[
"MIT"
] | 1
|
2022-02-18T06:13:10.000Z
|
2022-02-18T06:13:10.000Z
|
analysis/delphi_download.py
|
neuroelf/dermodelphi
|
c8dcc52310efcbe8f314f0cd01ffafed2199f56a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# imports
import csv
import os
import warnings
import bs4
import getpass
import json
import netrc
import pandas
import requests
# backend URL
DELPHI_BACKEND = 'https://delphi.diagnosismapper.com/'
# location to JSON file
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
PACKAGE_DIR = THIS_DIR.rpartition(os.sep)[0]
print(PACKAGE_DIR)
JSON_FILE = os.path.join(PACKAGE_DIR, 'mern-app', 'src', 'json', 'dm_diagnoses.json')
if not os.path.exists(JSON_FILE):
    raise RuntimeError('Necessary JSON file not found.')
# DM_BASIS: three-level diagnosis tree (A -> B -> C nodes), loaded at import time
with open(JSON_FILE, 'r') as json_file:
    DM_BASIS = json.loads(json_file.read())
# get list of all children (cnodes) in original basis
# original_anodes/bnodes: id -> name maps per level; original_bfull: id -> "A - B" path
original_anodes = dict()
original_bnodes = dict()
original_bfull = dict()
original_cnodes = dict()
for a in DM_BASIS['children']:
    original_anodes[a['id']] = a['name']
    for b in a['children']:
        original_bnodes[b['id']] = b['name']
        original_bfull[b['id']] = a['name'] + ' - ' + b['name']
        for c in b['children']:
            # annotate each leaf with its full path for later CSV rows
            c['levelA'] = a['name']
            c['levelB'] = b['name']
            c['levelC'] = c['name']
            original_cnodes[c['id']] = c
original_cnodes_list = sorted(list(original_cnodes.keys()))
original_cnodes_set = set(original_cnodes_list)
# requests
def endpoint(e, p=None):
    """GET backend endpoint *e* (optional query params *p*).

    Returns parsed JSON when the response is JSON, otherwise the stripped
    visible text of an HTML response, otherwise the raw body.

    Fixed: a pointless ``try/except: raise`` around the GET was removed and
    the bare ``except:`` clauses (which would swallow KeyboardInterrupt/
    SystemExit) were narrowed.
    """
    r = requests.get(DELPHI_BACKEND + e, params=p)
    try:
        # requests raises a ValueError subclass when the body is not JSON
        return r.json()
    except ValueError:
        pass
    try:
        return bs4.BeautifulSoup(r.text, features='lxml').get_text().strip()
    except Exception:
        # bs4/lxml unavailable or parse failure: fall back to the raw body
        return r.text
def check_user(u, s):
    """Validate username *u* / session id *s* against the backend."""
    return endpoint('checkEmail/' + u + '/' + s)
def get_sessions(u, t):
    """List all sessions, authenticated by session id *u* and admin token *t*."""
    return endpoint('admin/' + u + '/token/' + t + '/sessions')
def get_session_blocks(s):
    """Fetch the review blocks recorded for session *s*."""
    return endpoint('session/' + s + '/blocks')
# Row layout produced below:
#   [sessionId, levelCNodeId, levelA, levelB, levelC, correct, corrSelection,
#    newName, correctSpelling, combineWith, reassignCategory, editMods, newMods,
#    editSyns, newSyns, deleteTerm, deleteMods, deleteSyns, otherCorrection]
def append_row(rows, n, s, sid):
    """Append one CSV row describing node *n*'s review state *s* for session *sid*."""
    is_correct = 'TRUE' if s['correct'] else 'FALSE'
    if s['correction'] == 'CORRECTION_NONE':
        correction = ''
    else:
        correction = s['correction'].replace('CORRECTION_', '')
    combine_id = s['corrcombine']
    if combine_id == 0:
        combine = ''
    elif combine_id in original_cnodes_set:
        # known original node: show its human-readable name
        combine = original_cnodes[combine_id]['name']
    else:
        combine = str(combine_id)
    move_id = s['corrmoveto']
    if move_id == 0:
        moveto = ''
    elif move_id in original_bfull:
        moveto = original_bfull[move_id]
    else:
        moveto = str(move_id)
    delete_term = 'TRUE' if correction == 'DELETE' else 'FALSE'
    delete_mods = 'TRUE' if correction in ('DELBOTH', 'DELMODS') else 'FALSE'
    delete_syns = 'TRUE' if correction in ('DELBOTH', 'DELSYNS') else 'FALSE'
    rows.append([sid, n['id'], n['levelA'], n['levelB'], n['levelC'], is_correct,
        correction, s['corrnewname'], s['corrspelling'], combine, moveto,
        s['correditmods'], s['corrnewmods'], s['correditsyns'], s['corrnewsyns'],
        delete_term, delete_mods, delete_syns, s['corrother']])
# main function
def main():
    """Download all Delphi review sessions and write them to analysis.csv.

    Credentials come from ~/.netrc when available, otherwise interactively;
    an admin token is always prompted for.  Every original C-node gets one
    row per session (with a default "untouched" state when it never appeared
    in a block), plus rows for session-specific new nodes.
    """
    # username in netrc?
    netrc_o = netrc.netrc()
    hostname = DELPHI_BACKEND[8:-1]
    netrc_tokens = netrc_o.authenticators(hostname)
    if not netrc_tokens is None:
        username = netrc_tokens[0]
        session_id = netrc_tokens[2]
    else:
        username = input('Username for ' + DELPHI_BACKEND + ': ')
        session_id = getpass.getpass('Session ID for ' + username + ': ')
    try:
        check = check_user(username, session_id)
        if not '_id' in check:
            raise RuntimeError('Invalid username or Session ID.')
    except Exception as e:
        warnings.warn(str(e))
        return
    # check admin token, and get sessions
    token = getpass.getpass('Admin token for ' + username + ': ')
    sessions = dict()
    new_as = dict()
    new_bs = dict()
    new_cs = dict()
    try:
        s = get_sessions(session_id, token)
        if not isinstance(s, list):
            raise RuntimeError('Invalid admin token for ' + username + '.')
        # per-session id -> name maps for nodes created during that session
        for so in s:
            sid = so['sessionId']
            new_as[sid] = dict()
            new_bs[sid] = dict()
            new_cs[sid] = dict()
            sessions[sid] = so
            for a in so['newAs']:
                new_as[sid][a['id']] = a['name']
            for b in so['newBs']:
                new_bs[sid][b['id']] = b['name']
            for c in so['newCs']:
                new_cs[sid][c['id']] = c['name']
    except Exception as e:
        warnings.warn(str(e))
        return
    # header
    csv_rows = [['sessionId', 'levelCNodeId', 'levelA', 'levelB', 'levelC',
        'correct', 'corrSelection', 'newName', 'correctSpelling', 'combineWith',
        'reassignCategory', 'editMods', 'newMods', 'editSyns', 'newSyns',
        'deleteTerm', 'deleteMods', 'deleteSyns', 'otherCorrection']]
    # grab all blocks for all sessions
    session_blocks = dict()
    cnodes = dict()
    new_cnodes = dict()
    for s in sessions.keys():
        blocks = get_session_blocks(s)
        session_blocks[s] = blocks
        cnodes[s] = dict()
        new_cnodes[s] = dict()
        for b in blocks:
            block = b['block']
            for (cid, state) in block.items():
                if isinstance(cid, str) and cid == 'locked':
                    continue
                cid = int(cid)
                cnodes[s][cid] = state
                # node ids encode the hierarchy: cid // 10000 -> B id, B id // 100 -> A id
                if not cid in original_cnodes_set:
                    bid = cid // 10000
                    aid = bid // 100
                    if aid in original_anodes:
                        aname = original_anodes[aid]
                    else:
                        aname = new_as[s][aid]
                    if bid in original_bnodes:
                        bname = original_bnodes[bid]
                    else:
                        bname = new_bs[s][bid]
                    cname = new_cs[s][cid]
                    new_cnodes[s][cid] = {
                        'id': cid,
                        'levelA': aname,
                        'levelB': bname,
                        'levelC': cname,
                    }
        # one row per original node; unseen nodes get a default untouched state
        for c in original_cnodes_list:
            if c in cnodes[s]:
                state = cnodes[s][c]
            else:
                state = {
                    'correct': False,
                    'correction': 'CORRECTION_NONE',
                    'corrcombine': 0,
                    'correditmods': '',
                    'correditsyns': '',
                    'corrmoveto': 0,
                    'corrnewmods': '',
                    'corrnewname': '',
                    'corrnewsyns': '',
                    'corrother': '',
                    'corrspelling': '',
                    'byuser': False,
                }
            cnode = original_cnodes[c]
            append_row(csv_rows, cnode, state, s)
        # rows for nodes the session itself created
        for (c, cnode) in new_cnodes[s].items():
            append_row(csv_rows, cnode, cnodes[s][c], s)
    with open(THIS_DIR + os.sep + "analysis.csv", 'w', newline='') as csv_file:
        cw = csv.writer(csv_file, delimiter=',')
        for row in csv_rows:
            cw.writerow(row)
if __name__ == '__main__':
main()
| 33.219298
| 85
| 0.537893
|
1942b2520de2623e26424852255d25e71cd1fb45
| 523
|
py
|
Python
|
source/GUI/panelThree.py
|
gwiedeman/eadmachine
|
f6c0c0f92fc20ab6dcf4962fda827b7adb4749d4
|
[
"Unlicense"
] | 5
|
2016-01-25T15:27:12.000Z
|
2021-08-17T22:31:48.000Z
|
source/GUI/panelThree.py
|
gwiedeman/eadmachine
|
f6c0c0f92fc20ab6dcf4962fda827b7adb4749d4
|
[
"Unlicense"
] | null | null | null |
source/GUI/panelThree.py
|
gwiedeman/eadmachine
|
f6c0c0f92fc20ab6dcf4962fda827b7adb4749d4
|
[
"Unlicense"
] | null | null | null |
import wx
class TabPanel(wx.Panel):
    """
    This will be the first notebook tab
    """
    #----------------------------------------------------------------------
    def __init__(self, parent):
        """Build the tab contents: the logo bitmap and the batch-migrate caption."""
        wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY)
        # Layout uses fixed pixel positions/sizes rather than sizers.
        titleImage = wx.StaticBitmap(self, pos=(110,0), size=(360,100))
        # NOTE(review): path is relative to the process working directory —
        # confirm the app is always launched from the project root.
        titleImage.SetBitmap(wx.Bitmap('images/logotitle.gif'))
        instructtxt = wx.StaticText(self, id=-1, pos=(130,100), label="Batch Migrate EAD2002 to EAD3", name="")
| 34.866667
| 112
| 0.539197
|
b4e6bda10fd1a7196c8e7e2aef32645cc84a68ec
| 4,045
|
py
|
Python
|
utils/prioritize/az300_to_bed.py
|
kokyriakidis/cloudbiolinux
|
a318ecbade2b27e23c275601571b1b19c8842d7a
|
[
"MIT"
] | 122
|
2015-01-04T13:23:27.000Z
|
2022-01-18T22:52:12.000Z
|
utils/prioritize/az300_to_bed.py
|
kokyriakidis/cloudbiolinux
|
a318ecbade2b27e23c275601571b1b19c8842d7a
|
[
"MIT"
] | 170
|
2015-02-09T18:03:49.000Z
|
2021-11-14T02:32:09.000Z
|
utils/prioritize/az300_to_bed.py
|
kokyriakidis/cloudbiolinux
|
a318ecbade2b27e23c275601571b1b19c8842d7a
|
[
"MIT"
] | 107
|
2015-01-06T06:10:04.000Z
|
2022-02-10T17:25:34.000Z
|
#!/usr/bin/env python
"""Map AZ300 gene name list to transcript regions.
This requires a 3 pass approach to get gene names to coordinates:
- it first tries against ensemble BED coordinates in a transcript file with
the supplied name
- it then remaps the name with gene.info to alternative symbols and tries to
find those in the transcript file.
- Finally, it uses coordinates from gene.info if those exist.
"""
import os
import sys
import requests
# Root directory holding per-genome reference data
# (expects <ref_dir>/<genome>/rnaseq/ref-transcripts.bed underneath).
ref_dir = "/human"
# Input gene-name list (one symbol per line), given as the first CLI argument.
in_file = sys.argv[1]
def read_targets(in_file):
    """Read a gene-symbol list (one symbol per line) into a set."""
    with open(in_file) as in_handle:
        return {line.strip() for line in in_handle}
def get_gene_info(cur_symbol):
    """Query mygene.info for *cur_symbol*; return (symbols, positions).

    symbols is the list of matched gene symbols from the service; positions
    is the list of hg19 genomic-position dicts, restricted to the primary
    chromosomes 1-22, X and Y.
    """
    valid_chroms = set(str(n) for n in range(1, 23)) | {"X", "Y"}
    fields = "symbol,genomic_pos_hg19"
    url = "http://mygene.info/v2/query?q=%s&species=human&fields=%s" % (cur_symbol, fields)
    hits = requests.get(url).json()["hits"]
    symbols = [hit["symbol"] for hit in hits]
    positions = []
    for entry in (hit["genomic_pos_hg19"] for hit in hits if "genomic_pos_hg19" in hit):
        # A hit may carry a single position dict or a list of them.
        candidates = entry if isinstance(entry, (list, tuple)) else [entry]
        positions.extend(p for p in candidates if p["chr"] in valid_chroms)
    return symbols, positions
def find_missing_targets(missing, in_file, genome):
    """Remap symbols not found in the transcript BED via mygene.info.

    Results are cached in ``<in_file>-<genome>-missingsymbols.txt`` so the
    web service is only queried on the first run.
    """
    cache_file = "%s-%s-missingsymbols.txt" % (os.path.splitext(in_file)[0], genome)
    if os.path.exists(cache_file):
        # Cached run: re-read the previously remapped symbols.
        with open(cache_file) as in_handle:
            return {line.strip() for line in in_handle}
    remapped = set()
    with open(cache_file, "w") as out_handle:
        for cur_symbol in missing:
            symbols, _ = get_gene_info(cur_symbol)
            # Prefer the service's canonical symbol when one is reported.
            if symbols and cur_symbol not in symbols:
                cur_symbol = symbols[0]
            out_handle.write("%s\n" % cur_symbol)
            remapped.add(cur_symbol)
    return remapped
def write_from_transcript_file(targets, ref_dir, genome, out_handle):
    """Copy BED lines whose name column (4th field) is in *targets*.

    Matching lines go to *out_handle*; returns the subset of *targets*
    with no matching transcript line.
    """
    ref_file = os.path.join(ref_dir, genome, "rnaseq", "ref-transcripts.bed")
    matched = set()
    with open(ref_file) as in_handle:
        for bed_line in in_handle:
            gene = bed_line.split()[3]
            if gene in targets:
                matched.add(gene)
                out_handle.write(bed_line)
    return targets - matched
def write_from_remap_names(targets, ref_dir, genome, out_handle, in_file):
    """Second pass: remap unmatched symbols via mygene.info, then retry
    the transcript BED lookup with the remapped names.

    Returns the remapped symbols still not found in the transcript file.
    """
    targets = find_missing_targets(targets, in_file, genome)
    # The scan is identical to the first pass, so delegate instead of
    # duplicating the ref-transcripts.bed reading loop (the original body
    # was a copy of write_from_transcript_file).
    return write_from_transcript_file(targets, ref_dir, genome, out_handle)
def write_from_gene_info(targets, genome, out_handle):
    """Final pass: write BED lines from mygene.info coordinates.

    Returns the symbols for which no genomic position could be found.
    """
    unresolved = []
    chrom_prefix = "chr" if genome == "hg19" else ""
    for target in sorted(targets):
        symbols, pos = get_gene_info(target)
        if not pos:
            unresolved.append(target)
            continue
        assert isinstance(pos, (list, tuple))
        # Report the service's canonical symbol when one is available.
        name = symbols[0] if symbols else target
        for p in pos:
            chrom = "%s%s" % (chrom_prefix, p["chr"])
            out_handle.write("\t".join([chrom, str(p["start"]), str(p["end"]), name]) + "\n")
    return unresolved
# Produce one BED file per supported genome build, logging how many targets
# remain unresolved after each of the three lookup passes.
for genome in ["hg19", "GRCh37", "hg38"]:
    out_file = "%s-%s.bed" % (os.path.splitext(in_file)[0], genome)
    with open(out_file, "w") as out_handle:
        targets = read_targets(in_file)
        # Fixed: this was a Python 2 `print` statement — a SyntaxError under
        # Python 3, and inconsistent with the print() calls below.
        print("total", len(targets))
        targets = write_from_transcript_file(targets, ref_dir, genome, out_handle)
        print("after first name pass", len(targets))
        targets = write_from_remap_names(targets, ref_dir, genome, out_handle, in_file)
        print("after rename name pass", len(targets))
        targets = write_from_gene_info(targets, genome, out_handle)
        print("after coordinate retrieval", targets)
| 36.772727
| 99
| 0.614586
|
65450bbda9c4d9643aee9e790dd3a9fdb3e4f563
| 937
|
py
|
Python
|
test/test_rerun_zos_parameters.py
|
dcompane/controlm_py
|
c521208be2f00303383bb32ca5eb2b7ff91999d3
|
[
"MIT"
] | 2
|
2020-03-20T18:24:23.000Z
|
2021-03-05T22:05:04.000Z
|
test/test_rerun_zos_parameters.py
|
dcompane/controlm_py
|
c521208be2f00303383bb32ca5eb2b7ff91999d3
|
[
"MIT"
] | null | null | null |
test/test_rerun_zos_parameters.py
|
dcompane/controlm_py
|
c521208be2f00303383bb32ca5eb2b7ff91999d3
|
[
"MIT"
] | 1
|
2021-05-27T15:54:37.000Z
|
2021-05-27T15:54:37.000Z
|
# coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.220
Contact: customer_support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import controlm_py
from controlm_py.models.rerun_zos_parameters import RerunZosParameters # noqa: E501
from controlm_py.rest import ApiException
class TestRerunZosParameters(unittest.TestCase):
    """RerunZosParameters unit test stubs.

    Auto-generated by swagger-codegen; the bodies are placeholders until the
    model can be constructed with example values (see FIXME below).
    """

    def setUp(self):
        # No fixtures needed yet; kept for codegen symmetry.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testRerunZosParameters(self):
        """Test RerunZosParameters"""
        # FIXME: construct object with mandatory attributes with example values
        # model = controlm_py.models.rerun_zos_parameters.RerunZosParameters()  # noqa: E501
        pass
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 23.425
| 92
| 0.717182
|
f8ce388d5462683f462ab0e8f2aa128181a7e5b3
| 1,522
|
py
|
Python
|
profiles_api/serializers.py
|
Algernonagon/rest-api
|
35fd73fb8f4524c8f9082f21707491411acf3e50
|
[
"MIT"
] | null | null | null |
profiles_api/serializers.py
|
Algernonagon/rest-api
|
35fd73fb8f4524c8f9082f21707491411acf3e50
|
[
"MIT"
] | 7
|
2020-06-06T01:54:36.000Z
|
2022-02-10T10:30:34.000Z
|
profiles_api/serializers.py
|
Algernonagon/rest-api
|
35fd73fb8f4524c8f9082f21707491411acf3e50
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from profiles_api import models
class HelloSerializer(serializers.Serializer):
    """Serializes a name field for testing our APIView"""

    # Short free-text name; DRF validates the 10-character limit on input.
    name = serializers.CharField(max_length=10)
class UserProfileSerializer(serializers.ModelSerializer):
    """Serializes a user profile object"""

    class Meta:
        model = models.UserProfile
        fields = ('id', 'email', 'name', 'password')
        # Never expose the password hash in responses; render the browsable
        # API input as a password widget.
        extra_kwargs = {
            'password': {
                'write_only': True,
                'style': {'input_type':'password'}
            }
        }

    def create(self, validated_data):
        """Create and return a new user"""
        # Use create_user (not objects.create) so the password is hashed
        # rather than stored as plain text.
        user = models.UserProfile.objects.create_user(
            email=validated_data['email'],
            name=validated_data['name'],
            password=validated_data['password']
        )
        return user

    def update(self, instance, validated_data):
        """Handle updating user account"""
        # Pop the password so the default update() doesn't write it raw;
        # set_password() hashes it before the save performed by super().
        if 'password' in validated_data:
            password = validated_data.pop('password')
            instance.set_password(password)
        return super().update(instance, validated_data)
class ProfileFeedItemSerializer(serializers.ModelSerializer):
    """Serializes profile feed items"""

    class Meta:
        model = models.ProfileFeedItem
        fields = ('id', 'user_profile', 'status_text', 'created_on')
        # The owning profile is set server-side from the request user,
        # never from client input.
        extra_kwargs = {'user_profile':{'read_only':True}}
| 30.44
| 69
| 0.610381
|
44f6a91fa13949d563a870cd5ef247e7869f85bb
| 2,234
|
py
|
Python
|
db/food_database/models.py
|
IBPA/FoodAtlas
|
0a431f0a391adaa8984b380f3f6f7189f27b9311
|
[
"Apache-2.0"
] | 1
|
2022-02-07T10:04:35.000Z
|
2022-02-07T10:04:35.000Z
|
db/food_database/models.py
|
IBPA/FoodAtlas
|
0a431f0a391adaa8984b380f3f6f7189f27b9311
|
[
"Apache-2.0"
] | null | null | null |
db/food_database/models.py
|
IBPA/FoodAtlas
|
0a431f0a391adaa8984b380f3f6f7189f27b9311
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
class Food(models.Model):
    # External-ontology / FooDB identifiers used for cross-referencing.
    foodon_id = models.CharField(max_length=100)
    foodb_id = models.CharField(max_length=100)
    name = models.CharField(max_length=100)
    # NOTE(review): looks like a delimited list squeezed into one 100-char
    # field — confirm whether a related table was intended.
    synonyms = models.CharField(max_length=100)
# Create your models here.
class Chemical(models.Model):
    # FooDB and ChEBI identifiers for cross-referencing.
    foodb_id = models.CharField(max_length=100)
    chebi_id = models.CharField(max_length=100)
    name = models.CharField(max_length=100)
    # NOTE(review): same single-field synonym list as Food — confirm schema.
    synonyms = models.CharField(max_length=100)
class FoodPart(models.Model):
    # Anatomical plant part (e.g. leaf, root), keyed to the Plant Ontology.
    name = models.CharField(max_length=100)
    plant_ontology_id = models.CharField(max_length=100)
class Units(models.Model):
    # Measurement unit for concentrations (ontology id not yet modeled).
    name = models.CharField(max_length=100)
    # units_ontology_id
class Article(models.Model):
    # Source publication identifiers; TextField since either may be absent.
    pmid = models.TextField()
    doi = models.TextField()
class FoodChemicalRelationshipEvidence(models.Model):
    # Supporting sentence extracted from the source article.
    text = models.TextField() # premise
    article = models.ForeignKey(Article, on_delete=models.DO_NOTHING)
class FoodChemicalRelationship(models.Model):
    # Links a food to a chemical it contains; related part/units/evidence
    # fields are stubbed out below pending schema decisions.
    food = models.ForeignKey(Food, on_delete=models.DO_NOTHING)
    chemical = models.ForeignKey(Chemical, on_delete=models.DO_NOTHING)
    # food_part = models.ForeignKey(FoodPart, on_delete=models.DO_NOTHING)
    # Nullable: a relationship may be asserted without a measured amount.
    concentration = models.FloatField(null=True)
    # units = models.ForeignKey(Units, on_delete=models.DO_NOTHING)
    # evidences = models.ManyToManyField(FoodChemicalRelationshipEvidence)
# label = (
# models.TextField()
# ) # human annotated label, entailed or not entailed
# prediction = (
# models.FloatField()
# ) # model predicted probability of entailment
# class FoodChemicalRelationshipPrediction(models.Model):
# proba_entails = models.FloatField()
# relationship = models.ForeignKey(FoodChemicalRelationship, on_delete=models.DO_NOTHING)
# evidence = models.ForeignKey(FoodChemicalRelationshipEvidence, on_delete=models.DO_NOTHING)
# class FoodChemicalRelationshipAnnotation(models.Model):
# label = models.CharField(max_length=100)
# relationship = models.ForeignKey(FoodChemicalRelationship, on_delete=models.DO_NOTHING)
# evidence = models.ForeignKey(FoodChemicalRelationshipEvidence, on_delete=models.DO_NOTHING)
| 33.848485
| 97
| 0.753805
|
46009a30ac9f914f751741fa83ea82c1a0a6bb7f
| 20,858
|
py
|
Python
|
tensorflow/python/layers/core_test.py
|
Zwysilence/tensorflow
|
b55001be83da044bb21d539d433dec6231eaec55
|
[
"Apache-2.0"
] | 3
|
2018-09-25T00:35:34.000Z
|
2018-09-25T00:38:06.000Z
|
tensorflow/python/layers/core_test.py
|
Zwysilence/tensorflow
|
b55001be83da044bb21d539d433dec6231eaec55
|
[
"Apache-2.0"
] | 1
|
2019-08-22T20:29:33.000Z
|
2019-12-19T10:16:21.000Z
|
tensorflow/python/layers/core_test.py
|
Zwysilence/tensorflow
|
b55001be83da044bb21d539d433dec6231eaec55
|
[
"Apache-2.0"
] | 6
|
2018-12-20T01:35:20.000Z
|
2020-07-10T17:29:57.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class DenseTest(test.TestCase):
  """Tests for tf.layers.Dense (layer class) and tf.layers.dense (functional)."""

  @test_util.run_in_graph_and_eager_modes
  def testDenseProperties(self):
    # Constructor arguments are exposed as attributes, with None defaults
    # for the regularizers.
    dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
    self.assertEqual(dense.units, 2)
    self.assertEqual(dense.activation, nn_ops.relu)
    self.assertEqual(dense.kernel_regularizer, None)
    self.assertEqual(dense.bias_regularizer, None)
    self.assertEqual(dense.activity_regularizer, None)
    self.assertEqual(dense.use_bias, True)
    # Test auto-naming
    dense = core_layers.Dense(2, activation=nn_ops.relu)
    dense.apply(random_ops.random_uniform((5, 2)))
    self.assertEqual(dense.name, 'dense_1')
    dense = core_layers.Dense(2, activation=nn_ops.relu)
    dense.apply(random_ops.random_uniform((5, 2)))
    self.assertEqual(dense.name, 'dense_2')

  def testVariableInput(self):
    # A Variable (not just a Tensor) is accepted as layer input.
    with self.cached_session():
      v = variable_scope.get_variable(
          'X', initializer=init_ops.zeros_initializer(), shape=(1, 1))
      x = core_layers.Dense(1)(v)
      variables.global_variables_initializer().run()
      self.assertAllEqual(x.eval(), [[0.0]])

  @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
  def testCall(self):
    # Calling the layer builds kernel+bias, tracks them as trainable, and
    # (in graph mode) registers them in the TRAINABLE_VARIABLES collection.
    dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
    inputs = random_ops.random_uniform((5, 4), seed=1)
    outputs = dense(inputs)
    self.assertListEqual([5, 2], outputs.get_shape().as_list())
    self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
    self.assertListEqual(dense.trainable_variables,
                         [dense.kernel, dense.bias])
    self.assertListEqual(dense.non_trainable_variables, [])
    if not context.executing_eagerly():
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
    self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
    self.assertEqual(dense.bias.name, 'my_dense/bias:0')

  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testNoEagerLeak(self):
    # Tests that repeatedly constructing and building a Layer does not leak
    # Python objects.
    inputs = random_ops.random_uniform((5, 4), seed=1)
    core_layers.Dense(5)(inputs)
    core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')(inputs)

  @test_util.run_in_graph_and_eager_modes
  def testCallTensorDot(self):
    # Rank-3 input: the kernel is applied to the last axis via tensordot.
    dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
    inputs = random_ops.random_uniform((5, 4, 3), seed=1)
    outputs = dense(inputs)
    self.assertListEqual([5, 4, 2], outputs.get_shape().as_list())

  @test_util.run_in_graph_and_eager_modes
  def testNoBias(self):
    # use_bias=False: only the kernel is created and tracked.
    dense = core_layers.Dense(2, use_bias=False, name='my_dense')
    inputs = random_ops.random_uniform((5, 2), seed=1)
    _ = dense(inputs)
    self.assertListEqual(dense.variables, [dense.kernel])
    self.assertListEqual(dense.trainable_variables, [dense.kernel])
    self.assertListEqual(dense.non_trainable_variables, [])
    if not context.executing_eagerly():
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)
    self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
    self.assertEqual(dense.bias, None)

  @test_util.run_in_graph_and_eager_modes
  def testNonTrainable(self):
    # trainable=False: both variables exist but are tracked as non-trainable.
    dense = core_layers.Dense(2, trainable=False, name='my_dense')
    inputs = random_ops.random_uniform((5, 2), seed=1)
    _ = dense(inputs)
    self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
    self.assertListEqual(dense.non_trainable_variables,
                         [dense.kernel, dense.bias])
    self.assertListEqual(dense.trainable_variables, [])
    if not context.executing_eagerly():
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 0)

  @test_util.run_in_graph_and_eager_modes
  def testOutputShape(self):
    # Only the last axis changes to `units`; leading axes pass through.
    dense = core_layers.Dense(7, activation=nn_ops.relu, name='my_dense')
    inputs = random_ops.random_uniform((5, 3), seed=1)
    outputs = dense.apply(inputs)
    self.assertEqual(outputs.get_shape().as_list(), [5, 7])
    inputs = random_ops.random_uniform((5, 2, 3), seed=1)
    outputs = dense(inputs)
    self.assertEqual(outputs.get_shape().as_list(), [5, 2, 7])
    inputs = random_ops.random_uniform((1, 2, 4, 3), seed=1)
    outputs = dense.apply(inputs)
    self.assertEqual(outputs.get_shape().as_list(), [1, 2, 4, 7])

  def testCallOnPlaceHolder(self):
    # Building requires a statically-known last dimension; unknown inner
    # dims raise ValueError, while a known last dim is accepted.
    inputs = array_ops.placeholder(dtype=dtypes.float32)
    dense = core_layers.Dense(4, name='my_dense')
    with self.assertRaises(ValueError):
      dense(inputs)
    inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
    dense = core_layers.Dense(4, name='my_dense')
    with self.assertRaises(ValueError):
      dense(inputs)
    inputs = array_ops.placeholder(
        dtype=dtypes.float32, shape=[None, None, None])
    dense = core_layers.Dense(4, name='my_dense')
    with self.assertRaises(ValueError):
      dense(inputs)
    inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3])
    dense = core_layers.Dense(4, name='my_dense')
    dense(inputs)
    inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None, 3])
    dense = core_layers.Dense(4, name='my_dense')
    dense(inputs)

  @test_util.run_in_graph_and_eager_modes
  def testActivation(self):
    # Graph-mode op naming: last op is the activation, or BiasAdd without one.
    dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
    inputs = random_ops.random_uniform((5, 3), seed=1)
    outputs = dense(inputs)
    if not context.executing_eagerly():
      self.assertEqual(outputs.op.name, 'dense1/Relu')
    dense = core_layers.Dense(2, name='dense2')
    inputs = random_ops.random_uniform((5, 3), seed=1)
    outputs = dense(inputs)
    if not context.executing_eagerly():
      self.assertEqual(outputs.op.name, 'dense2/BiasAdd')

  def testActivityRegularizer(self):
    # Regularizer losses land in both dense.losses and the graph collection.
    regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
    dense = core_layers.Dense(
        2, name='my_dense', activity_regularizer=regularizer)
    inputs = random_ops.random_uniform((5, 3), seed=1)
    _ = dense(inputs)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(dense.losses, loss_keys)

  def testKernelRegularizer(self):
    regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
    dense = core_layers.Dense(
        2, name='my_dense', kernel_regularizer=regularizer)
    inputs = random_ops.random_uniform((5, 3), seed=1)
    _ = dense(inputs)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(dense.losses, loss_keys)

  def testKernelRegularizerWithReuse(self):
    # Reusing the layer must not register the regularization loss twice.
    regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
    inputs = random_ops.random_uniform((5, 3), seed=1)
    _ = core_layers.dense(
        inputs, 2, name='my_dense', kernel_regularizer=regularizer)
    self.assertEqual(
        len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
    _ = core_layers.dense(
        inputs, 2, name='my_dense', kernel_regularizer=regularizer, reuse=True)
    self.assertEqual(
        len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)

  def testBiasRegularizer(self):
    regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
    dense = core_layers.Dense(2, name='my_dense', bias_regularizer=regularizer)
    inputs = random_ops.random_uniform((5, 3), seed=1)
    _ = dense(inputs)
    loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(loss_keys), 1)
    self.assertListEqual(dense.losses, loss_keys)

  def testFunctionalDense(self):
    with self.cached_session():
      inputs = random_ops.random_uniform((5, 3), seed=1)
      outputs = core_layers.dense(
          inputs, 2, activation=nn_ops.relu, name='my_dense')
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
      self.assertEqual(outputs.op.name, 'my_dense/Relu')

  def testFunctionalDenseTwice(self):
    # Without reuse, each functional call creates a fresh variable pair.
    inputs = random_ops.random_uniform((5, 3), seed=1)
    core_layers.dense(inputs, 2)
    vars1 = _get_variable_dict_from_varstore().values()
    core_layers.dense(inputs, 2)
    vars2 = _get_variable_dict_from_varstore().values()
    self.assertEqual(len(vars1), 2)
    self.assertEqual(len(vars2), 4)

  # TODO(alive): get this to work in eager mode.
  def testFunctionalDenseTwiceReuse(self):
    with self.cached_session():
      inputs = random_ops.random_uniform((5, 3), seed=1)
      core_layers.dense(inputs, 2, name='my_dense')
      vars1 = variables.trainable_variables()
      core_layers.dense(inputs, 2, name='my_dense', reuse=True)
      vars2 = variables.trainable_variables()
      self.assertEqual(vars1, vars2)

  # TODO(alive): get this to work in eager mode.
  def testFunctionalDenseTwiceReuseFromScope(self):
    # reuse via an enclosing variable_scope rather than the reuse kwarg.
    with self.cached_session():
      with variable_scope.variable_scope('scope'):
        inputs = random_ops.random_uniform((5, 3), seed=1)
        core_layers.dense(inputs, 2, name='my_dense')
        vars1 = variables.trainable_variables()
      with variable_scope.variable_scope('scope', reuse=True):
        core_layers.dense(inputs, 2, name='my_dense')
        vars2 = variables.trainable_variables()
      self.assertEqual(vars1, vars2)

  def testFunctionalDenseInitializerFromScope(self):
    # The scope's default initializer applies to the kernel but not the bias.
    with variable_scope.variable_scope(
        'scope',
        initializer=init_ops.ones_initializer()), self.cached_session():
      inputs = random_ops.random_uniform((5, 3), seed=1)
      core_layers.dense(inputs, 2)
      variables.global_variables_initializer().run()
      weights = _get_variable_dict_from_varstore()
      self.assertEqual(len(weights), 2)
      # Check that the matrix weights got initialized to ones (from scope).
      self.assertAllClose(weights['scope/dense/kernel'].read_value().eval(),
                          np.ones((3, 2)))
      # Check that the bias still got initialized to zeros.
      self.assertAllClose(weights['scope/dense/bias'].read_value().eval(),
                          np.zeros((2)))

  def testEagerExecution(self):
    # EagerVariableStore gives functional-layer reuse semantics in eager mode.
    with context.eager_mode():
      container = variable_scope.EagerVariableStore()
      x = constant_op.constant([[2.0]])
      with container.as_default():
        y = core_layers.dense(
            x, 1, name='my_dense',
            kernel_initializer=init_ops.ones_initializer())
      self.assertAllEqual(y, [[2.0]])
      self.assertEqual(len(container.variables()), 2)
      # Recreate the layer to test reuse.
      with container.as_default():
        core_layers.dense(
            x, 1, name='my_dense',
            kernel_initializer=init_ops.ones_initializer())
      self.assertEqual(len(container.variables()), 2)

  def testFunctionalDenseWithCustomGetter(self):
    # The scope's custom getter is invoked once per created variable.
    called = [0]
    def custom_getter(getter, *args, **kwargs):
      called[0] += 1
      return getter(*args, **kwargs)
    with variable_scope.variable_scope('test', custom_getter=custom_getter):
      inputs = random_ops.random_uniform((5, 3), seed=1)
      core_layers.dense(inputs, 2)
    self.assertEqual(called[0], 2)

  def testFunctionalDenseInScope(self):
    # Variable names compose from scope name + layer name (or scope alone
    # when the scope object itself is passed as the name).
    with self.cached_session():
      with variable_scope.variable_scope('test'):
        inputs = random_ops.random_uniform((5, 3), seed=1)
        core_layers.dense(inputs, 2, name='my_dense')
        var_dict = _get_variable_dict_from_varstore()
        var_key = 'test/my_dense/kernel'
        self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
      with variable_scope.variable_scope('test1') as scope:
        inputs = random_ops.random_uniform((5, 3), seed=1)
        core_layers.dense(inputs, 2, name=scope)
        var_dict = _get_variable_dict_from_varstore()
        var_key = 'test1/kernel'
        self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)
      with variable_scope.variable_scope('test2'):
        inputs = random_ops.random_uniform((5, 3), seed=1)
        core_layers.dense(inputs, 2)
        var_dict = _get_variable_dict_from_varstore()
        var_key = 'test2/dense/kernel'
        self.assertEqual(var_dict[var_key].name, '%s:0' % var_key)

  @test_util.run_in_graph_and_eager_modes
  def testComputeOutputShape(self):
    # Rank < 2 is rejected; otherwise only the last dim becomes `units`.
    dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
    ts = tensor_shape.TensorShape
    # pylint: disable=protected-access
    with self.assertRaises(ValueError):
      dense.compute_output_shape(ts(None))
    with self.assertRaises(ValueError):
      dense.compute_output_shape(ts([]))
    with self.assertRaises(ValueError):
      dense.compute_output_shape(ts([1]))
    self.assertEqual(
        [None, 2],
        dense.compute_output_shape((None, 3)).as_list())
    self.assertEqual(
        [None, 2],
        dense.compute_output_shape(ts([None, 3])).as_list())
    self.assertEqual(
        [None, 4, 2],
        dense.compute_output_shape(ts([None, 4, 3])).as_list())
    # pylint: enable=protected-access

  @test_util.run_in_graph_and_eager_modes
  def testConstraints(self):
    # Constraint callables are stored on the layer as-is.
    k_constraint = lambda x: x / math_ops.reduce_sum(x)
    b_constraint = lambda x: x / math_ops.reduce_max(x)
    dense = core_layers.Dense(2,
                              kernel_constraint=k_constraint,
                              bias_constraint=b_constraint)
    inputs = random_ops.random_uniform((5, 3), seed=1)
    dense(inputs)
    self.assertEqual(dense.kernel_constraint, k_constraint)
    self.assertEqual(dense.bias_constraint, b_constraint)
def _get_variable_dict_from_varstore():
  """Return the default variable store's variables as a name-sorted dict."""
  raw = variable_scope._get_default_variable_store()._vars  # pylint: disable=protected-access
  return collections.OrderedDict(sorted(raw.items(), key=lambda kv: kv[0]))
class DropoutTest(test.TestCase):
  """Tests for tf.layers.Dropout / tf.layers.dropout."""

  @test_util.run_in_graph_and_eager_modes
  def testDropoutProperties(self):
    dp = core_layers.Dropout(0.5, name='dropout')
    self.assertEqual(dp.rate, 0.5)
    self.assertEqual(dp.noise_shape, None)
    dp.apply(array_ops.ones(()))
    self.assertEqual(dp.name, 'dropout')

  @test_util.run_in_graph_and_eager_modes
  def testBooleanLearningPhase(self):
    # training=True zeroes some units; training=False is the identity.
    dp = core_layers.Dropout(0.5)
    inputs = array_ops.ones((5, 3))
    dropped = dp.apply(inputs, training=True)
    if not context.executing_eagerly():
      self.evaluate(variables.global_variables_initializer())
    np_output = self.evaluate(dropped)
    self.assertAlmostEqual(0., np_output.min())
    dropped = dp.apply(inputs, training=False)
    np_output = self.evaluate(dropped)
    self.assertAllClose(np.ones((5, 3)), np_output)

  def testDynamicLearningPhase(self):
    # The training flag may be a bool placeholder fed at run time.
    with self.cached_session() as sess:
      dp = core_layers.Dropout(0.5, seed=1)
      inputs = array_ops.ones((5, 5))
      training = array_ops.placeholder(dtype='bool')
      dropped = dp.apply(inputs, training=training)
      self.evaluate(variables.global_variables_initializer())
      np_output = sess.run(dropped, feed_dict={training: True})
      self.assertAlmostEqual(0., np_output.min())
      np_output = sess.run(dropped, feed_dict={training: False})
      self.assertAllClose(np.ones((5, 5)), np_output)

  @test_util.run_in_graph_and_eager_modes
  def testDynamicNoiseShape(self):
    # noise_shape of 1 on axis 1 broadcasts the mask: rows 0 and 1 match.
    inputs = array_ops.ones((5, 3, 2))
    noise_shape = [None, 1, None]
    dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1)
    dropped = dp.apply(inputs, training=True)
    self.evaluate(variables.global_variables_initializer())
    np_output = self.evaluate(dropped)
    self.assertAlmostEqual(0., np_output.min())
    self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :])

  def testCustomNoiseShape(self):
    # Fully-specified noise_shape behaves the same as the dynamic one above.
    inputs = array_ops.ones((5, 3, 2))
    noise_shape = [5, 1, 2]
    dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1)
    dropped = dp.apply(inputs, training=True)
    self.evaluate(variables.global_variables_initializer())
    np_output = self.evaluate(dropped)
    self.assertAlmostEqual(0., np_output.min())
    self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :])

  def testFunctionalDropout(self):
    with self.cached_session():
      inputs = array_ops.ones((5, 5))
      dropped = core_layers.dropout(inputs, 0.5, training=True, seed=1)
      variables.global_variables_initializer().run()
      np_output = self.evaluate(dropped)
      self.assertAlmostEqual(0., np_output.min())
      dropped = core_layers.dropout(inputs, 0.5, training=False, seed=1)
      np_output = self.evaluate(dropped)
      self.assertAllClose(np.ones((5, 5)), np_output)

  def testDynamicRate(self):
    # The dropout rate itself may be a placeholder; rate 0.0 is the identity.
    with self.cached_session() as sess:
      rate = array_ops.placeholder(dtype='float32', name='rate')
      dp = core_layers.Dropout(rate, name='dropout')
      inputs = array_ops.ones((5, 5))
      dropped = dp.apply(inputs, training=True)
      sess.run(variables.global_variables_initializer())
      np_output = sess.run(dropped, feed_dict={rate: 0.5})
      self.assertAlmostEqual(0., np_output.min())
      np_output = sess.run(dropped, feed_dict={rate: 0.0})
      self.assertAllClose(np.ones((5, 5)), np_output)
class FlattenTest(test.TestCase):
  """Tests for tf.layers.Flatten / tf.layers.flatten."""

  def testCreateFlatten(self):
    # All non-batch dims collapse into one; static shape is preserved when
    # fully known, else the flattened dim is None.
    with self.cached_session() as sess:
      x = array_ops.placeholder(shape=(None, 2, 3), dtype='float32')
      y = core_layers.Flatten()(x)
      np_output = sess.run(y, feed_dict={x: np.zeros((3, 2, 3))})
      self.assertEqual(list(np_output.shape), [3, 6])
      self.assertEqual(y.get_shape().as_list(), [None, 6])
      x = array_ops.placeholder(shape=(1, 2, 3, 2), dtype='float32')
      y = core_layers.Flatten()(x)
      np_output = sess.run(y, feed_dict={x: np.zeros((1, 2, 3, 2))})
      self.assertEqual(list(np_output.shape), [1, 12])
      self.assertEqual(y.get_shape().as_list(), [1, 12])

  def testComputeShape(self):
    shape = core_layers.Flatten().compute_output_shape((1, 2, 3, 2))
    self.assertEqual(shape.as_list(), [1, 12])
    shape = core_layers.Flatten().compute_output_shape((None, 3, 2))
    self.assertEqual(shape.as_list(), [None, 6])
    shape = core_layers.Flatten().compute_output_shape((None, 3, None))
    self.assertEqual(shape.as_list(), [None, None])

  def testFunctionalFlatten(self):
    x = array_ops.placeholder(shape=(None, 2, 3), dtype='float32')
    y = core_layers.flatten(x, name='flatten')
    self.assertEqual(y.get_shape().as_list(), [None, 6])

  def testFlattenValueError(self):
    # Rank-1 input (nothing beyond the batch dim) is rejected.
    x = array_ops.placeholder(shape=(None,), dtype='float32')
    with self.assertRaises(ValueError):
      core_layers.Flatten()(x)

  def testFlattenUnknownAxes(self):
    # Unknown inner dims are allowed; the static flattened dim becomes None.
    with self.cached_session() as sess:
      x = array_ops.placeholder(shape=(5, None, None), dtype='float32')
      y = core_layers.Flatten()(x)
      np_output = sess.run(y, feed_dict={x: np.zeros((5, 2, 3))})
      self.assertEqual(list(np_output.shape), [5, 6])
      self.assertEqual(y.get_shape().as_list(), [5, None])
      x = array_ops.placeholder(shape=(5, None, 2), dtype='float32')
      y = core_layers.Flatten()(x)
      np_output = sess.run(y, feed_dict={x: np.zeros((5, 3, 2))})
      self.assertEqual(list(np_output.shape), [5, 6])
      self.assertEqual(y.get_shape().as_list(), [5, None])
# Allow running this test module directly via the TensorFlow test runner.
if __name__ == '__main__':
  test.main()
| 41.384921
| 99
| 0.699204
|
3f59e3431fca5b35c2016d9e0390026de6cdce3f
| 1,418
|
py
|
Python
|
cgi-bin/change_password.py
|
bengood362/csci4140-asg1
|
caa8aa61b2f6daefe4cde69a6b2d8fa23b2d4b1f
|
[
"ImageMagick"
] | null | null | null |
cgi-bin/change_password.py
|
bengood362/csci4140-asg1
|
caa8aa61b2f6daefe4cde69a6b2d8fa23b2d4b1f
|
[
"ImageMagick"
] | null | null | null |
cgi-bin/change_password.py
|
bengood362/csci4140-asg1
|
caa8aa61b2f6daefe4cde69a6b2d8fa23b2d4b1f
|
[
"ImageMagick"
] | null | null | null |
#!/usr/bin/python
# try_change_password.cgi
import cgi
import utils
def htmlTop():
    """Emit the CGI Content-type header and the opening HTML of the page."""
    print("""Content-type:text/html\n\n
    <!DOCTYPE html>
    <html lang='en'>
    <head>
        <meta charset='utf-8'/>
        <title>Change password</title>
    </head>
    <body>""")
def htmlMid(username, message=''):
    """Emit the change-password form.

    *username* is echoed into a hidden field so the follow-up request knows
    whose password to change; a non-empty *message* (e.g. a validation error
    from a previous attempt) is appended below the form.

    SECURITY NOTE(review): *username* is interpolated into HTML without
    escaping — sanitize upstream or escape here (cgi.escape) to avoid XSS.
    """
    print('''<h1>Change password</h1>
    <form action="try_change_password.py" method="post" id="change">
        <input type="hidden" name="username" value="{0}"/>
        original password: <input type="password" name="password_o" required><br>
        new password: <input type="password" name="password_n" required><br>
        retype new password: <input type="password" name="password_n2" required><br><br>
        <button type="submit" form="change" value="Change password">Change password</button>
    </form>
    <form action="login_index.py" method="post">
        <input type="submit" value="Discard change"/>
    </form>'''.format(username))
    if message != '':
        # Fixed: was a Python 2 `print` statement, inconsistent with the
        # print() calls elsewhere in the file and a SyntaxError in Python 3.
        print("<br>" + message)
def htmlTail():
    """Emit the closing HTML tags for the page."""
    print('''</body>
</html>''')
# CGI entry point: read the form fields and render the page, showing a
# traceback page on failure instead of a blank 500 response.
if __name__ == '__main__':
    try:
        formData = cgi.FieldStorage()
        message = formData.getvalue('message','')
        username = formData.getvalue('username','ERROR')
        htmlTop()
        htmlMid(username,message)
        htmlTail()
    except Exception:
        # Fixed: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; keep the CGI-friendly traceback for real errors.
        cgi.print_exception()
| 31.511111
| 92
| 0.588152
|
bea9bb6dffa02beecabaa2133c9ad2cc6f03bf36
| 20,637
|
py
|
Python
|
tensorflow/python/training/warm_starting_util.py
|
hsm207/tensorflow
|
8ab4678ba216c3ec8fa32f417cb667b056689939
|
[
"Apache-2.0"
] | 4
|
2021-06-15T17:26:07.000Z
|
2021-11-17T10:58:08.000Z
|
tensorflow/python/training/warm_starting_util.py
|
hsm207/tensorflow
|
8ab4678ba216c3ec8fa32f417cb667b056689939
|
[
"Apache-2.0"
] | 4
|
2020-09-26T00:55:50.000Z
|
2022-02-10T01:53:06.000Z
|
tensorflow/python/training/warm_starting_util.py
|
hsm207/tensorflow
|
8ab4678ba216c3ec8fa32f417cb667b056689939
|
[
"Apache-2.0"
] | 6
|
2018-12-20T01:35:20.000Z
|
2020-07-10T17:29:57.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to warm-start TF.Learn Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.python.framework import ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_ops
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import saver
from tensorflow.python.util.tf_export import tf_export
@tf_export("train.VocabInfo")
class VocabInfo(
    collections.namedtuple("VocabInfo", [
        "new_vocab",
        "new_vocab_size",
        "num_oov_buckets",
        "old_vocab",
        "old_vocab_size",
        "backup_initializer",
        "axis",
    ])):
  """Vocabulary information for warm-starting.
  See `tf.estimator.WarmStartSettings` for examples of using
  VocabInfo to warm-start.
  Attributes:
    new_vocab: [Required] A path to the new vocabulary file (used with the
      model to be trained).
    new_vocab_size: [Required] An integer indicating how many entries of the new
      vocabulary will used in training.
    num_oov_buckets: [Required] An integer indicating how many OOV buckets are
      associated with the vocabulary.
    old_vocab: [Required] A path to the old vocabulary file (used with the
      checkpoint to be warm-started from).
    old_vocab_size: [Optional] An integer indicating how many entries of the old
      vocabulary were used in the creation of the checkpoint. If not provided,
      the entire old vocabulary will be used.
    backup_initializer: [Optional] A variable initializer used for variables
      corresponding to new vocabulary entries and OOV. If not provided, these
      entries will be zero-initialized.
    axis: [Optional] Denotes what axis the vocabulary corresponds to. The
      default, 0, corresponds to the most common use case (embeddings or
      linear weights for binary classification / regression). An axis of 1
      could be used for warm-starting output layers with class vocabularies.
  For example:
  embeddings_vocab_info = tf.VocabInfo(
      new_vocab='embeddings_vocab',
      new_vocab_size=100,
      num_oov_buckets=1,
      old_vocab='pretrained_embeddings_vocab',
      old_vocab_size=10000,
      backup_initializer=tf.truncated_normal_initializer(
          mean=0.0, stddev=(1 / math.sqrt(embedding_dim))),
      axis=0)
  softmax_output_layer_kernel_vocab_info = tf.VocabInfo(
      new_vocab='class_vocab',
      new_vocab_size=5,
      num_oov_buckets=0,  # No OOV for classes.
      old_vocab='old_class_vocab',
      old_vocab_size=8,
      backup_initializer=tf.glorot_uniform_initializer(),
      axis=1)
  softmax_output_layer_bias_vocab_info = tf.VocabInfo(
      new_vocab='class_vocab',
      new_vocab_size=5,
      num_oov_buckets=0,  # No OOV for classes.
      old_vocab='old_class_vocab',
      old_vocab_size=8,
      backup_initializer=tf.zeros_initializer(),
      axis=0)
  Currently, only axis=0 and axis=1 are supported.
  """
  def __new__(cls,
              new_vocab,
              new_vocab_size,
              num_oov_buckets,
              old_vocab,
              old_vocab_size=-1,
              backup_initializer=None,
              axis=0):
    # Only row remapping (axis=0) and column remapping (axis=1) are
    # implemented by _warm_start_var_with_vocab, so reject anything else
    # up front rather than failing later at remap time.
    if axis != 0 and axis != 1:
      raise ValueError("The only supported values for the axis argument are 0 "
                       "and 1. Provided axis: {}".format(axis))
    # Delegate to the namedtuple constructor once validation passed.
    return super(VocabInfo, cls).__new__(
        cls,
        new_vocab,
        new_vocab_size,
        num_oov_buckets,
        old_vocab,
        old_vocab_size,
        backup_initializer,
        axis,
    )
def _infer_var_name(var):
  """Infers the single full variable name backing `var`.
  Args:
    var: A list containing one of: a single `Variable`, a single
      `ResourceVariable`, several `Variable` slices of the same larger
      variable, or a single `PartitionedVariable`.
  Returns:
    The inferred name of `var`.
  Raises:
    TypeError: If the entries of `var` map to more than one variable name.
  """
  mapping = saver.BaseSaverBuilder.OpListToDict(var)
  if len(mapping) > 1:
    raise TypeError("`var` = %s passed as arg violates the constraints. "
                    "name_to_var_dict = %s" % (var, mapping))
  # Exactly one entry remains; its key is the variable name.
  return list(mapping)[0]
def _warm_start_var(var, prev_ckpt, prev_tensor_name=None):
  """Initializes `var` from a tensor stored in the checkpoint `prev_ckpt`.
  Args:
    var: The variable(s) to warm-start. One of: a `Variable`, a
      `ResourceVariable`, a list of `Variable` slices of one larger variable,
      or a `PartitionedVariable`.
    prev_ckpt: Checkpoint directory or checkpoint path. Must contain a tensor
      named `prev_tensor_name` (or named like `var` when `prev_tensor_name`
      is None).
    prev_tensor_name: Optional name of the tensor to read from `prev_ckpt`;
      defaults to the current name of `var`.
  """
  # The branch order matters: the plain-variable and slice-list cases are
  # checked before the PartitionedVariable case, matching _is_variable's view.
  if checkpoint_utils._is_variable(var):  # pylint: disable=protected-access
    name = _infer_var_name([var])
  elif (isinstance(var, list) and
        all(checkpoint_utils._is_variable(v) for v in var)):  # pylint: disable=protected-access
    name = _infer_var_name(var)
  elif isinstance(var, variables_lib.PartitionedVariable):
    name = _infer_var_name([var])
    # init_from_checkpoint wants the underlying slices, not the wrapper.
    var = var._get_variable_list()  # pylint: disable=protected-access
  else:
    raise TypeError(
        "var MUST be one of the following: a Variable, list of Variable or "
        "PartitionedVariable, but is {}".format(type(var)))
  # Without an explicit previous name, assume the name did not change.
  prev_tensor_name = prev_tensor_name or name
  checkpoint_utils.init_from_checkpoint(prev_ckpt, {prev_tensor_name: var})
# pylint: disable=protected-access
# Accesses protected members of tf.Variable to reset the variable's internal
# state.
def _warm_start_var_with_vocab(var,
                               current_vocab_path,
                               current_vocab_size,
                               prev_ckpt,
                               prev_vocab_path,
                               previous_vocab_size=-1,
                               current_oov_buckets=0,
                               prev_tensor_name=None,
                               initializer=None,
                               axis=0):
  """Warm-starts given variable from `prev_tensor_name` tensor in `prev_ckpt`.
  Use this method when the `var` is backed by vocabulary. This method stitches
  the given `var` such that values corresponding to individual features in the
  vocabulary remain consistent irrespective of changing order of the features
  between old and new vocabularies.
  Args:
    var: Current graph's variable that needs to be warm-started (initialized).
      Can be either of the following:
      (i) `Variable`
      (ii) `ResourceVariable`
      (iii) list of `Variable`: The list must contain slices of the same larger
        variable.
      (iv) `PartitionedVariable`
    current_vocab_path: Path to the vocab file used for the given `var`.
    current_vocab_size: An `int` specifying the number of entries in the current
      vocab.
    prev_ckpt: A string specifying the directory with checkpoint file(s) or path
      to checkpoint. The given checkpoint must have tensor with name
      `prev_tensor_name` (if not None) or tensor with name same as given `var`.
    prev_vocab_path: Path to the vocab file used for the tensor in `prev_ckpt`.
    previous_vocab_size: If provided, will constrain previous vocab to the first
      `previous_vocab_size` entries. -1 means use the entire previous vocab.
    current_oov_buckets: An `int` specifying the number of out-of-vocabulary
      buckets used for given `var`.
    prev_tensor_name: Name of the tensor to lookup in provided `prev_ckpt`. If
      None, we lookup tensor with same name as given `var`.
    initializer: Variable initializer to be used for missing entries. If None,
      missing entries will be zero-initialized.
    axis: Axis of the variable that the provided vocabulary corresponds to.
  Raises:
    ValueError: If required args are not provided.
  """
  if not (current_vocab_path and current_vocab_size and prev_ckpt and
          prev_vocab_path):
    # NOTE(review): the "}" in this message looks like a typo for "]";
    # left unchanged here since it is a runtime string.
    raise ValueError("Invalid args: Must provide all of [current_vocab_path, "
                     "current_vocab_size, prev_ckpt, prev_vocab_path}.")
  # Normalize every accepted input form to a flat list of variable slices.
  if checkpoint_utils._is_variable(var):
    var = [var]
  elif (isinstance(var, list) and
        all(checkpoint_utils._is_variable(v) for v in var)):
    var = var
  elif isinstance(var, variables_lib.PartitionedVariable):
    var = var._get_variable_list()
  else:
    raise TypeError(
        "var MUST be one of the following: a Variable, list of Variable or "
        "PartitionedVariable, but is {}".format(type(var)))
  if not prev_tensor_name:
    # Assume tensor name remains the same.
    prev_tensor_name = _infer_var_name(var)
  # TODO(eddz): Fix functionality for rank-1 Variables (like FC biases).
  # Summed first-axis extent across all slices; needed for axis=1 remapping
  # where partitioning along axis 0 is still allowed.
  total_v_first_axis = sum([v.get_shape().as_list()[0] for v in var])
  for v in var:
    v_shape = v.get_shape().as_list()
    slice_info = v._get_save_slice_info()
    partition_info = None
    if slice_info:
      # Reconstruct where this slice sits inside the full variable so the
      # remap initializer produces the matching sub-block.
      partition_info = variable_scope._PartitionInfo(
          full_shape=slice_info.full_shape,
          var_offset=slice_info.var_offset)
    if axis == 0:
      new_row_vocab_size = current_vocab_size
      new_col_vocab_size = v_shape[1]
      old_row_vocab_size = previous_vocab_size
      old_row_vocab_file = prev_vocab_path
      new_row_vocab_file = current_vocab_path
      old_col_vocab_file = None
      new_col_vocab_file = None
      num_row_oov_buckets = current_oov_buckets
      num_col_oov_buckets = 0
    elif axis == 1:
      # Note that we must compute this value across all partitions, whereas
      # in the axis = 0 case, we can simply use v_shape[1] because we don't
      # allow partitioning across axis = 1.
      new_row_vocab_size = total_v_first_axis
      new_col_vocab_size = current_vocab_size
      old_row_vocab_size = -1
      old_row_vocab_file = None
      new_row_vocab_file = None
      old_col_vocab_file = prev_vocab_path
      new_col_vocab_file = current_vocab_path
      num_row_oov_buckets = 0
      num_col_oov_buckets = current_oov_buckets
    else:
      raise ValueError("The only supported values for the axis argument are 0 "
                       "and 1. Provided axis: {}".format(axis))
    init = checkpoint_ops._load_and_remap_matrix_initializer(
        ckpt_path=checkpoint_utils._get_checkpoint_filename(prev_ckpt),
        old_tensor_name=prev_tensor_name,
        new_row_vocab_size=new_row_vocab_size,
        new_col_vocab_size=new_col_vocab_size,
        old_row_vocab_size=old_row_vocab_size,
        old_row_vocab_file=old_row_vocab_file,
        new_row_vocab_file=new_row_vocab_file,
        old_col_vocab_file=old_col_vocab_file,
        new_col_vocab_file=new_col_vocab_file,
        num_row_oov_buckets=num_row_oov_buckets,
        num_col_oov_buckets=num_col_oov_buckets,
        initializer=initializer)
    new_init_val = ops.convert_to_tensor(
        init(shape=v_shape, partition_info=partition_info))
    # Replace the variable's initializer op so it warm-starts when run.
    v._initializer_op = state_ops.assign(v, new_init_val)
# pylint: enable=protected-access
def _get_grouped_variables(vars_to_warm_start):
  """Groups the variables selected for warm-starting by their full name.
  The selection can be given explicitly through `vars_to_warm_start`, or
  retrieved from graph collections (see below).
  Args:
    vars_to_warm_start: One of the following:
      - A regular expression (string) matched against TRAINABLE_VARIABLES.
      - `None`, which also matches everything in TRAINABLE_VARIABLES.
      - A list of Variables to warm-start.
      - A list of full variable-name strings, matched against
        GLOBAL_VARIABLES.
  Returns:
    A dictionary mapping variable names (strings) to lists of Variables.
  Raises:
    ValueError: If `vars_to_warm_start` is not a string, `None`, a list of
      `Variables`, or a list of strings.
  """
  if isinstance(vars_to_warm_start, str) or vars_to_warm_start is None:
    # A regex (or None, which matches everything) selects from the
    # TRAINABLE_VARIABLES collection.
    selected = ops.get_collection(
        ops.GraphKeys.TRAINABLE_VARIABLES,
        scope=vars_to_warm_start)
  elif isinstance(vars_to_warm_start, list):
    if all(isinstance(v, str) for v in vars_to_warm_start):
      selected = []
      for name in vars_to_warm_start:
        selected.extend(
            ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope=name))
    elif all(checkpoint_utils._is_variable(v) for v in vars_to_warm_start):  # pylint: disable=protected-access
      selected = vars_to_warm_start
    else:
      raise ValueError("If `vars_to_warm_start` is a list, it must be all "
                       "`Variable` or all `str`. Given types are {}".format(
                           [type(v) for v in vars_to_warm_start]))
  else:
    raise ValueError("`vars_to_warm_start must be a `list` or `str`. Given "
                     "type is {}".format(type(vars_to_warm_start)))
  # get_collection flattens partitioned variables, so regroup every slice
  # under the name of the full variable it belongs to.
  grouped = {}
  for v in selected:
    name = _infer_var_name(v) if isinstance(v, list) else _infer_var_name([v])
    grouped.setdefault(name, []).append(v)
  return grouped
@tf_export("train.warm_start")
def warm_start(ckpt_to_initialize_from,
               vars_to_warm_start=".*",
               var_name_to_vocab_info=None,
               var_name_to_prev_var_name=None):
  """Warm-starts a model using the given settings.
  If you are using a tf.estimator.Estimator, this will automatically be called
  during training.
  Args:
    ckpt_to_initialize_from: [Required] A string specifying the directory with
      checkpoint file(s) or path to checkpoint from which to warm-start the
      model parameters.
    vars_to_warm_start: [Optional] One of the following:
      - A regular expression (string) that captures which variables to
        warm-start (see tf.get_collection). This expression will only consider
        variables in the TRAINABLE_VARIABLES collection.
      - A list of Variables to warm-start.
      - A list of strings, each representing a full variable name to warm-start.
      - `None`, in which case only variables specified in
        `var_name_to_vocab_info` will be warm-started.
      Defaults to `'.*'`, which warm-starts all variables in the
      TRAINABLE_VARIABLES collection. Note that this excludes variables such as
      accumulators and moving statistics from batch norm.
    var_name_to_vocab_info: [Optional] Dict of variable names (strings) to
      VocabInfo. The variable names should be "full" variables, not the names
      of the partitions. If not explicitly provided, the variable is assumed to
      have no vocabulary.
    var_name_to_prev_var_name: [Optional] Dict of variable names (strings) to
      name of the previously-trained variable in `ckpt_to_initialize_from`. If
      not explicitly provided, the name of the variable is assumed to be same
      between previous checkpoint and current model.
  Raises:
    ValueError: If the WarmStartSettings contains prev_var_name or VocabInfo
      configuration for variable names that are not used. This is to ensure
      a stronger check for variable configuration than relying on users to
      examine the logs.
  """
  if var_name_to_vocab_info is None:
    var_name_to_vocab_info = {}
  if var_name_to_prev_var_name is None:
    var_name_to_prev_var_name = {}
  logging.info("Warm-starting from: %s", (ckpt_to_initialize_from,))
  # Worst-case, this is a mapping of each full variable name to itself.
  grouped_variables = _get_grouped_variables(vars_to_warm_start)
  # Keep track of which var_names in var_name_to_prev_var_name and
  # var_name_to_vocab_info have been used. Err on the safer side by throwing an
  # exception if any are unused by the end of the loop. It is easy to misname
  # a variable during this configuration, in which case without this check, we
  # would fail to warm-start silently.
  prev_var_name_used = set()
  vocab_info_used = set()
  for var_name, variable in six.iteritems(grouped_variables):
    prev_var_name = var_name_to_prev_var_name.get(var_name)
    if prev_var_name:
      prev_var_name_used.add(var_name)
    vocab_info = var_name_to_vocab_info.get(var_name)
    if vocab_info:
      vocab_info_used.add(var_name)
      logging.info(
          "Warm-starting variable: {}; current_vocab: {} current_vocab_size: {}"
          " prev_vocab: {} prev_vocab_size: {} current_oov: {} prev_tensor: {}"
          " initializer: {}".format(
              var_name,
              vocab_info.new_vocab,
              vocab_info.new_vocab_size,
              vocab_info.old_vocab,
              (vocab_info.old_vocab_size if vocab_info.old_vocab_size > 0
               else "All"),
              vocab_info.num_oov_buckets,
              prev_var_name or "Unchanged",
              vocab_info.backup_initializer or "zero-initialized"))
      # Vocabulary-backed variables go through the stitching remap path;
      # `variable` is the (possibly multi-slice) list from grouping.
      _warm_start_var_with_vocab(
          variable,
          current_vocab_path=vocab_info.new_vocab,
          current_vocab_size=vocab_info.new_vocab_size,
          prev_ckpt=ckpt_to_initialize_from,
          prev_vocab_path=vocab_info.old_vocab,
          previous_vocab_size=vocab_info.old_vocab_size,
          current_oov_buckets=vocab_info.num_oov_buckets,
          prev_tensor_name=prev_var_name,
          initializer=vocab_info.backup_initializer,
          axis=vocab_info.axis)
    else:
      # For the special value of vars_to_warm_start = None,
      # we only warm-start variables with explicitly specified vocabularies.
      if vars_to_warm_start:
        logging.info("Warm-starting variable: {}; prev_var_name: {}".format(
            var_name, prev_var_name or "Unchanged"))
        # Because we use a default empty list in grouped_variables, single
        # unpartitioned variables will be lists here, which we rectify in order
        # for init_from_checkpoint logic to work correctly.
        if len(variable) == 1:
          variable = variable[0]
        _warm_start_var(variable, ckpt_to_initialize_from, prev_var_name)
  # Fail loudly on any configuration entry that never matched a variable.
  prev_var_name_not_used = set(
      var_name_to_prev_var_name.keys()) - prev_var_name_used
  vocab_info_not_used = set(var_name_to_vocab_info.keys()) - vocab_info_used
  if prev_var_name_not_used:
    raise ValueError(
        "You provided the following variables in "
        "var_name_to_prev_var_name that were not used: "
        "{0}. Perhaps you misspelled them? Here is the list of viable "
        "variable names: {1}".format(prev_var_name_not_used,
                                     grouped_variables.keys()))
  if vocab_info_not_used:
    raise ValueError(
        "You provided the following variables in "
        "var_name_to_vocab_info that were not used: {0}. "
        " Perhaps you misspelled them? Here is the list of viable variable "
        "names: {1}".format(vocab_info_not_used, grouped_variables.keys()))
| 42.815353
| 113
| 0.693172
|
d8e15a25f908c0b8924d4756aac5f9aefd910a0d
| 2,722
|
py
|
Python
|
prescience_client/commands/delete_command.py
|
ovh/prescience-client
|
a447d19f11c6739c4a4828b6072e80b39dc4cf13
|
[
"BSD-3-Clause"
] | 15
|
2019-01-16T14:16:00.000Z
|
2021-12-08T09:09:24.000Z
|
prescience_client/commands/delete_command.py
|
ovh/prescience-client
|
a447d19f11c6739c4a4828b6072e80b39dc4cf13
|
[
"BSD-3-Clause"
] | 4
|
2019-03-28T15:12:46.000Z
|
2019-11-20T10:47:37.000Z
|
prescience_client/commands/delete_command.py
|
ovh/prescience-client
|
a447d19f11c6739c4a4828b6072e80b39dc4cf13
|
[
"BSD-3-Clause"
] | 1
|
2019-01-25T07:22:12.000Z
|
2019-01-25T07:22:12.000Z
|
from prescience_client.commands import prompt_for_source_id_if_needed, prompt_for_dataset_id_if_needed, \
prompt_for_model_id_if_needed
from prescience_client.commands.command import Command
class DeleteCommand(Command):
    """Parent `delete` command that groups the per-object delete sub-commands."""

    def __init__(self, prescience_client):
        children = [
            DeleteSourceCommand(prescience_client),
            DeleteDatasetCommand(prescience_client),
            DeleteModelCommand(prescience_client),
        ]
        super().__init__(
            name='delete',
            help_message='Delete a prescience object',
            prescience_client=prescience_client,
            sub_commands=children
        )
class DeleteSourceCommand(Command):
    """`delete source` sub-command: deletes a prescience source object."""

    def __init__(self, prescience_client):
        super().__init__(
            name='source',
            help_message='Delete a prescience source object',
            prescience_client=prescience_client,
            sub_commands=[]
        )

    def init_from_subparser(self, subparsers, selector):
        """Registers the optional positional `id` argument on the parser."""
        super().init_from_subparser(subparsers, selector)
        self.cmd_parser.add_argument(
            'id', nargs='?', type=str,
            help='Identifier of the source object to delete')

    def exec(self, args: dict):
        """Prompts for the source id when missing, then deletes the source."""
        wanted_id = prompt_for_source_id_if_needed(args, self.prescience_client)
        self.prescience_client.delete_source(source_id=wanted_id)
class DeleteDatasetCommand(Command):
    """`delete dataset` sub-command: deletes a prescience dataset object."""

    def __init__(self, prescience_client):
        super().__init__(
            name='dataset',
            help_message='Delete a prescience dataset object',
            prescience_client=prescience_client,
            sub_commands=[]
        )

    def init_from_subparser(self, subparsers, selector):
        """Registers the optional positional `id` argument on the parser."""
        super().init_from_subparser(subparsers, selector)
        self.cmd_parser.add_argument(
            'id', nargs='?', type=str,
            help='Identifier of the dataset object to delete')

    def exec(self, args: dict):
        """Prompts for the dataset id when missing, then deletes the dataset."""
        wanted_id = prompt_for_dataset_id_if_needed(args, self.prescience_client)
        self.prescience_client.delete_dataset(dataset_id=wanted_id)
class DeleteModelCommand(Command):
    """`delete model` sub-command: deletes a prescience model object."""

    def __init__(self, prescience_client):
        super().__init__(
            name='model',
            help_message='Delete a prescience model object',
            prescience_client=prescience_client,
            sub_commands=[]
        )

    def init_from_subparser(self, subparsers, selector):
        """Registers the optional positional `id` argument on the parser."""
        super().init_from_subparser(subparsers, selector)
        self.cmd_parser.add_argument(
            'id', nargs='?', type=str,
            help='Identifier of the model object to delete')

    def exec(self, args: dict):
        """Prompts for the model id when missing, then deletes the model."""
        wanted_id = prompt_for_model_id_if_needed(args, self.prescience_client)
        self.prescience_client.delete_model(model_id=wanted_id)
| 38.885714
| 114
| 0.684056
|
12259ae863fbeb3aa008eaca07b40386b6cf457b
| 301
|
py
|
Python
|
data/multilingual/Mong.KHK/Mono_8/pdf_to_json_test_Mong.KHK_Mono_8.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | 1
|
2021-09-19T19:47:35.000Z
|
2021-09-19T19:47:35.000Z
|
data/multilingual/Mong.KHK/Mono_8/pdf_to_json_test_Mong.KHK_Mono_8.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
data/multilingual/Mong.KHK/Mono_8/pdf_to_json_test_Mong.KHK_Mono_8.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
import pdf_to_json as p2j
import json
# Input: the UDHR sample document for Mong.KHK rendered with the Mono_8 font,
# addressed with a file: URL relative to the repository root.
url = "file:data/multilingual/Mong.KHK/Mono_8/udhr_Mong.KHK_Mono_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
# mImageHashOnly: presumably makes the converter record only a hash of each
# embedded image instead of its full content -- confirm in pdf_to_json docs.
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
# Emit the conversion result on stdout as pretty-printed, stably-ordered JSON.
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.1
| 71
| 0.810631
|
113201340c81a6e21229078ae0ba458c3747ab04
| 5,941
|
py
|
Python
|
kinow_client/models/product_attribute_create_request_1.py
|
kinow-io/kinow-python-sdk
|
4c1699a3c78048b84287bd049a669651a5b4e2d5
|
[
"Apache-2.0"
] | 1
|
2019-06-26T14:24:54.000Z
|
2019-06-26T14:24:54.000Z
|
kinow_client/models/product_attribute_create_request_1.py
|
kinow-io/kinow-python-sdk
|
4c1699a3c78048b84287bd049a669651a5b4e2d5
|
[
"Apache-2.0"
] | null | null | null |
kinow_client/models/product_attribute_create_request_1.py
|
kinow-io/kinow-python-sdk
|
4c1699a3c78048b84287bd049a669651a5b4e2d5
|
[
"Apache-2.0"
] | 1
|
2018-02-01T10:08:40.000Z
|
2018-02-01T10:08:40.000Z
|
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 1.4.58
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ProductAttributeCreateRequest1(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Request body describing a customer Device (fingerprint, type, OS and
    application) to attach to a customer account.
    """
    def __init__(self, id_customer=None, fingerprint=None, type=None, os=None, application=None):
        """
        ProductAttributeCreateRequest1 - a model defined in Swagger

        :param id_customer: Customer ID to attach this Device.
        :type id_customer: int
        :param fingerprint: Uniq fingerprint to identify Device.
        :type fingerprint: str
        :param type: Device type (eg. Desktop, mobile, STB).
        :type type: str
        :param os: Device OS (eg. Windows 10, iOS, Android).
        :type os: str
        :param application: Device application (eg. Chrome, Firefox).
        :type application: str
        """
        # The key is the attribute name and the value is the attribute type.
        self.swagger_types = {
            'id_customer': 'int',
            'fingerprint': 'str',
            'type': 'str',
            'os': 'str',
            'application': 'str'
        }
        # The key is the attribute name and the value is the json key in the
        # API definition.
        self.attribute_map = {
            'id_customer': 'id_customer',
            'fingerprint': 'fingerprint',
            'type': 'type',
            'os': 'os',
            'application': 'application'
        }
        self._id_customer = id_customer
        self._fingerprint = fingerprint
        self._type = type
        self._os = os
        self._application = application

    @property
    def id_customer(self):
        """
        Gets the id_customer of this ProductAttributeCreateRequest1.
        Customer ID to attach this Device

        :return: The id_customer of this ProductAttributeCreateRequest1.
        :rtype: int
        """
        return self._id_customer

    @id_customer.setter
    def id_customer(self, id_customer):
        """
        Sets the id_customer of this ProductAttributeCreateRequest1.
        Customer ID to attach this Device

        :param id_customer: The id_customer of this ProductAttributeCreateRequest1.
        :type: int
        """
        self._id_customer = id_customer

    @property
    def fingerprint(self):
        """
        Gets the fingerprint of this ProductAttributeCreateRequest1.
        Uniq fingerprint to identify Device

        :return: The fingerprint of this ProductAttributeCreateRequest1.
        :rtype: str
        """
        return self._fingerprint

    @fingerprint.setter
    def fingerprint(self, fingerprint):
        """
        Sets the fingerprint of this ProductAttributeCreateRequest1.
        Uniq fingerprint to identify Device

        :param fingerprint: The fingerprint of this ProductAttributeCreateRequest1.
        :type: str
        """
        self._fingerprint = fingerprint

    @property
    def type(self):
        """
        Gets the type of this ProductAttributeCreateRequest1.
        Device type (eg. Desktop, mobile, STB)

        :return: The type of this ProductAttributeCreateRequest1.
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """
        Sets the type of this ProductAttributeCreateRequest1.
        Device type (eg. Desktop, mobile, STB)

        :param type: The type of this ProductAttributeCreateRequest1.
        :type: str
        """
        self._type = type

    @property
    def os(self):
        """
        Gets the os of this ProductAttributeCreateRequest1.
        Device OS (eg. Windows 10, iOS, Android)

        :return: The os of this ProductAttributeCreateRequest1.
        :rtype: str
        """
        return self._os

    @os.setter
    def os(self, os):
        """
        Sets the os of this ProductAttributeCreateRequest1.
        Device OS (eg. Windows 10, iOS, Android)

        :param os: The os of this ProductAttributeCreateRequest1.
        :type: str
        """
        self._os = os

    @property
    def application(self):
        """
        Gets the application of this ProductAttributeCreateRequest1.
        Device application (eg. Chrome, Firefox)

        :return: The application of this ProductAttributeCreateRequest1.
        :rtype: str
        """
        return self._application

    @application.setter
    def application(self, application):
        """
        Sets the application of this ProductAttributeCreateRequest1.
        Device application (eg. Chrome, Firefox)

        :param application: The application of this ProductAttributeCreateRequest1.
        :type: str
        """
        self._application = application

    def to_dict(self):
        """
        Returns the model properties as a dict, converting values that are
        themselves swagger models (anything exposing `to_dict`) and one level
        of lists/dicts containing such models.
        """
        result = {}
        # .items()/plain iteration replaces six.iteritems -- no behavior change.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x
                                for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Bug fix: the generated code compared `self.__dict__` against
        `other.__dict__` unconditionally, which raised AttributeError for
        objects without a `__dict__` and could report unrelated types with
        coincidentally identical state as equal.
        """
        if not isinstance(other, ProductAttributeCreateRequest1):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 27.127854
| 97
| 0.575492
|
377a6f04f9656f158c0b2fa5f71e372fef0e4dd0
| 2,459
|
py
|
Python
|
src/python/marbles/ie/ccg/utils.py
|
marbles-ai/ie
|
b3fef462d3418580c827c94bc206bd2991500c1f
|
[
"MIT"
] | null | null | null |
src/python/marbles/ie/ccg/utils.py
|
marbles-ai/ie
|
b3fef462d3418580c827c94bc206bd2991500c1f
|
[
"MIT"
] | null | null | null |
src/python/marbles/ie/ccg/utils.py
|
marbles-ai/ie
|
b3fef462d3418580c827c94bc206bd2991500c1f
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals, print_function
from marbles import future_string
from marbles.ie.ccg import Category, CAT_CONJ
## @ingroup gfn
def extract_predarg_categories_from_pt(pt, lst=None):
    """Extract the predicate-argument categories from a CCG parse tree.

    Args:
        pt: The parse tree returned from marbles.ie.ccg.parse_ccg_derivation2().
        lst: An optional list of existing predicate categories.

    Returns:
        A list of Category instances.
    """
    # Fix: removed the dead `global _PredArgIdx` declaration -- no such
    # global exists in this module and the function never assigned it.
    if future_string != unicode:
        pt = pt_to_utf8(pt)
    if lst is None:
        lst = []
    # Iterative depth-first traversal of the tree.
    stk = [pt]
    while len(stk) != 0:
        pt = stk.pop()
        if pt[-1] == 'T':
            # Interior node: children are the elements between the category
            # and the trailing 'T' marker.
            stk.extend(pt[1:-1])
        else:
            # Leaf nodes contains six fields:
            # <L CCGcat mod_POS-tag orig_POS-tag word PredArgCat>
            # PredArgCat example: (S[dcl]\NP_3)/(S[pt]_4\NP_3:B)_4>
            catkey = Category(pt[0])
            # Ignore atoms and conj rules.
            if not catkey.isfunctor or catkey.result_category() == CAT_CONJ or catkey.argument_category() == CAT_CONJ:
                continue
            predarg = Category(pt[4])
            # Sanity check: the predarg category must clean down to the
            # node's plain category.
            assert catkey == predarg.clean(True)
            lst.append(predarg)
    return lst
## @ingroup gfn
def pt_to_utf8(pt, force=False):
    """Convert a parse tree to utf-8. The conversion is done in-place.

    Python 2 only: relies on the `unicode` builtin.

    Args:
        pt: The parse tree returned from marbles.ie.drt.parse.parse_ccg_derivation().
        force: When True, convert unconditionally instead of probing pt[0][0].

    Returns:
        A utf-8 parse tree
    """
    # NOTE(review): when neither `force` nor the unicode probe holds, the
    # function falls through and implicitly returns None -- the trailing
    # `return pt` sits inside the `if`. Looks unintended; confirm with
    # callers before dedenting it.
    if force or isinstance(pt[0][0], unicode): # isinstance(pt[-1], unicode)
        # Convert to utf-8
        stk = [pt]
        while len(stk) != 0:
            lst = stk.pop()
            for i in range(len(lst)):
                x = lst[i]
                if isinstance(x, list):
                    # Nested node: convert its elements too.
                    stk.append(x)
                elif isinstance(x, unicode):
                    # In-place replacement of the unicode string with bytes.
                    lst[i] = x.encode('utf-8')
        return pt
## @ingroup gfn
def sentence_from_pt(pt):
    """Reassemble the sentence encoded in a CCG parse tree.

    Args:
        pt: The parse tree returned from marbles.ie.drt.parse.parse_ccg_derivation().

    Returns:
        The sentence as a single string, with the space before commas and
        periods removed.
    """
    words = []
    pending = [pt]
    while pending:
        node = pending.pop()
        if node[-1] == 'T':
            # Interior node: children are node[1:-1]; push them reversed so
            # the leftmost child is visited first.
            pending.extend(reversed(node[1:-1]))
        else:
            # Leaf node: the token text is the second field.
            words.append(node[1])
    return ' '.join(words).replace(' ,', ',').replace(' .', '.')
| 28.929412
| 118
| 0.56161
|
3c5845fe92dd67afd72ab53a73bac76035171c92
| 35,208
|
py
|
Python
|
cengal/code_flow_control/chained_flow/versions/v_1/chained_flow.py
|
FI-Mihej/Cengal
|
516b9780da6ccc9168f8f89d7ba13dc29e24bc0b
|
[
"Apache-2.0"
] | 3
|
2018-07-23T18:48:58.000Z
|
2021-07-18T14:17:20.000Z
|
cengal/code_flow_control/chained_flow/versions/v_1/chained_flow.py
|
FI-Mihej/Cengal
|
516b9780da6ccc9168f8f89d7ba13dc29e24bc0b
|
[
"Apache-2.0"
] | null | null | null |
cengal/code_flow_control/chained_flow/versions/v_1/chained_flow.py
|
FI-Mihej/Cengal
|
516b9780da6ccc9168f8f89d7ba13dc29e24bc0b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
# Copyright © 2016 ButenkoMS. All rights reserved. Contacts: <gtalk@butenkoms.space>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import copy
import traceback
from enum import Enum
from contextlib import contextmanager
import cengal.IDGenerator as IDGenerator
from cengal.code_flow_control.smart_values.versions.v_1.smart_values import *
"""
Module Docstring
Docstrings: http://www.python.org/dev/peps/pep-0257/
"""
__author__ = "ButenkoMS <gtalk@butenkoms.space>"
__copyright__ = "Copyright © 2016 ButenkoMS. All rights reserved. Contacts: <gtalk@butenkoms.space>"
__credits__ = ["ButenkoMS <gtalk@butenkoms.space>", ]
__license__ = "Apache License, Version 2.0"
__version__ = "1.0.0"
__maintainer__ = "ButenkoMS <gtalk@butenkoms.space>"
__email__ = "gtalk@butenkoms.space"
# __status__ = "Prototype"
__status__ = "Development"
# __status__ = "Production"
class ChainLinkFailed(Exception):
    """Signals that a single link of a chain has failed."""
class ChainClosed(Exception):
    """Raised when a link runs against an already-closed chain.

    A link considers this an ordinary external exception.

    Attributes:
        chain: the closed chain object.
        link_id: identifier of the link that hit the closed chain.
        link_info: extra information about that link.
    """

    def __init__(self, chain, link_id, link_info):
        details = ('Content Holder is closed. '
                   'Content Holder ID: {}; '
                   'Content Holder Info: {}; '
                   'Link ID: {}; '
                   'Link Info {}').format(str(chain._chain_id),
                                          str(chain._chain_info),
                                          str(link_id),
                                          str(link_info))
        super(ChainClosed, self).__init__(details)
        self.chain = chain
        self.link_id = link_id
        self.link_info = link_info
class ChainFunctionParameterNeeded(Exception):
    """Signals that a chain function requires a parameter it did not get."""
class CriteriaType(Enum):
    """Kinds of link-result criteria a chain can be evaluated against."""
    # class CriteriaType():  # much more efficient than Enum inheritance
    needed = 0  # only set of this links is needed (should be already successfully done)
    optional = 1  # all links are needed except of this set of links
    any = 2  # any result will fit criteria
    forbidden = 3  # set of this links should be already failed
    not_successful = 4  # set of this links should not be successfully done (also may not start) at the check time
class IgnoreLinkResultCriteriaType(Enum):
    """Controls when a link's result is ignored during criteria checks."""
    do_not_ignore = 0  # always take the link result into account
    ignore_if_failed = 1  # ignore the link's result when it failed
    ignore_if_successful = 2  # ignore the link's result when it succeeded
class ChainHistoryExport(Exception):
    """Carries a chain's history out of the normal control flow.

    Attributes:
        history: the exported history records.
        process_error_result: whether an error result should still be
            processed by the receiver.
    """

    def __init__(self, history, process_error_result=True):
        # The exception message is intentionally empty; the payload travels
        # entirely in the attributes below.
        super(ChainHistoryExport, self).__init__('')
        self.history = history
        self.process_error_result = process_error_result
class ChainInternalResult:
    """Plain holder for a result produced internally by the chain machinery."""

    def __init__(self, type_id, str_data, data):
        self.type_id = type_id    # category of the result (see ChainInternalResultType)
        self.str_data = str_data  # human-readable description
        self.data = data          # arbitrary payload attached to the result

    def __str__(self):
        # The textual form of the result is exactly its description string.
        return self.str_data
class ChainInternalResultType(Enum):
# class CriteriaType(): # much more efficient than Enum inheritance
built_in_exception__chain_link_failed = 0
built_in_exception__bad_history_import = 1
external_exception = 2
link_did_not_returned_an_answer = 3
class Chain:
def __init__(self, chain_id=None, chain_info=None, global_link_results_criteria=None,
raise_exceptions=False, save_debug_trace=False, closeable=True):
"""
:param chain_id:
:param chain_info:
:param global_link_results_criteria: will be set to ValueType(CriteriaType.optional, set()) if None;
in this case all links are required.
:param raise_exceptions:
:param save_debug_trace:
:param closeable:
:return:
"""
# Use only ValueType(CriteriaType.optional, ...) or ValueType(CriteriaType.needed, set()).
# Other will be ignored here.
# You may use global_link_results_criteria=ValueType(CriteriaType.optional, set()) to create criteria
# "no fails in any link"
self._chain_id = chain_id
self._chain_info = chain_info
self._internal_links_index = IDGenerator.IDGenerator()
self._reserve_link_id_generator = IDGenerator.IDGenerator(IDGenerator.TypeOfGenerator.GUID_STRING)
self._criteria_list = list()
global_link_results_criteria = global_link_results_criteria or ValueType(CriteriaType.optional, set())
if global_link_results_criteria is not None:
self._criteria_list.append(global_link_results_criteria)
self._raise_exceptions = raise_exceptions
self._save_debug_trace = save_debug_trace
self._closeable = closeable
self._full_history = list()
self._links_library = dict()
self._all_made_links = set()
self._good_links = set()
self._bad_links = set()
self._current_link_id = None
self._current_link_info = None
self._current_link_result = None
self._closed = False
self._bool_result = ValueCache()
# def _push_criteria(self, set_of_needed_links=None, set_of_optional_links=None):
def _push_criteria(self, link_results_criteria):
# Do not use!
self._bool_result()
self._criteria_list.append(link_results_criteria)
def _pop_criteria(self):
# Do not use!
# May raise exception if len(self.criteria_list)==0, but this is OK.
self._bool_result()
return self._criteria_list.pop()
def read_criteria(self):
criteria = None
if self._criteria_list:
criteria = self._criteria_list[-1]
return criteria
def _push_link_info(self, link_id, link_info=None):
self._current_link_id = link_id
self._current_link_info = link_info
self._current_link_result = None
def _pop_link_info(self):
self._current_link_id = None
self._current_link_info = None
self._current_link_result = None
def push_result(self, bool_result, info_or_data=None):
self._current_link_result = (bool_result, info_or_data)
def push_result_c(self, result):
# "class" version: to use when result = ValueExistence()
self._current_link_result = (result.existence, result.result)
def read_link_result_link(self, link_id):
# result is NOT protected from changing!
original_result_data = self._links_library[link_id][3]
result = ValueExistence(original_result_data[0], original_result_data[1])
# result = self._links_library[link_id][3]
return result
def read_link_result_copy(self, link_id):
original_result_data = self._links_library[link_id][3]
result = ValueExistence(original_result_data[0], copy.copy(original_result_data[1]))
return result
def read_link_result_deepcopy(self, link_id):
original_result_data = self._links_library[link_id][3]
result = ValueExistence(original_result_data[0], copy.deepcopy(original_result_data[1]))
return result
def _save_link_result(self, ignore_link_result_criteria=None):
# ignore_link_result_criteria = ignore_link_result_criteria or IgnoreLinkResultCriteriaType.do_not_ignore
if ((IgnoreLinkResultCriteriaType.ignore_if_failed == ignore_link_result_criteria) and
(not self._current_link_result[0])) \
or ((IgnoreLinkResultCriteriaType.ignore_if_successful == ignore_link_result_criteria) and
self._current_link_result[0]):
return
self._bool_result()
import_depth = 0
full_link_info = (self._internal_links_index.get_new_ID(), self._current_link_id, self._current_link_info,
self._current_link_result, import_depth)
self._full_history.append(full_link_info)
self._links_library[self._current_link_id] = full_link_info
self._all_made_links.add(self._current_link_id)
if self._current_link_result[0]:
self._good_links.add(self._current_link_id)
else:
self._bad_links.add(self._current_link_id)
def __bool__(self):
if self._bool_result:
return self._bool_result.get()
else:
current_criteria = self.read_criteria()
result = True
if CriteriaType.needed == current_criteria:
if len(current_criteria.result) != len(current_criteria.result & self._good_links):
result = False
elif CriteriaType.optional == current_criteria:
if len(self._bad_links - current_criteria.result) != 0:
result = False
elif CriteriaType.any == current_criteria:
result = True
elif CriteriaType.forbidden == current_criteria:
if len(current_criteria.result) != len(current_criteria.result & self._bad_links):
result = False
elif CriteriaType.not_successful == current_criteria:
if len(current_criteria.result & self._good_links) != 0:
result = False
self._bool_result.set(result)
return result
def __nonzero__(self):
return self.__bool__()
@staticmethod
def _link_list_to_str(link_list):
links_str = ',\n'.join('(index({}), depth({}), ID({}), INFO({}), RESULT({}))'.format(str(another_link[0]),
str(another_link[4]),
str(another_link[1]),
str(another_link[2]),
'({}, {})'.format(
str(another_link[3][0]),
str(another_link[3][1])))
for another_link in link_list)
return links_str
def _link_str_to_chain_str(self, links_str):
full_string = '{{{{CONTEXT_HOLDER_ID({}): CONTEXT_HOLDER_INFO({})}}:[\n{}\n]}}'.format(
self._chain_id, self._chain_info, links_str)
return full_string
def get_bad_links(self):
result = list()
for another_link in self._full_history:
if not another_link[3][0]:
result.append(another_link)
return result
def get_bad_links_str(self):
bad_links = self.get_bad_links()
full_history_str = self._link_list_to_str(bad_links)
full_string = self._link_str_to_chain_str(full_history_str)
return full_string
def raise_bad_links(self):
raise ChainHistoryExport(self.get_bad_links())
def raise_full_history(self):
raise ChainHistoryExport(self._full_history)
def process_history_import(self, his_ex):
history = his_ex.history
for another_link in history:
full_link_info = (self._internal_links_index.get_new_ID(), another_link[1], another_link[2],
another_link[3], another_link[4] + 1)
self._full_history.append(full_link_info)
def close(self):
self._closed = True
def _reopen(self):
self._closed = False
def __str__(self):
full_history_str = self._link_list_to_str(self._full_history)
full_string = self._link_str_to_chain_str(full_history_str)
return full_string
def __call__(self, *args, **kwargs):
return link(self, *args, **kwargs)
def chain(self, *args, **kwargs):
return self.__call__(*args, **kwargs)
@contextmanager
def link(chain, link_id, link_info=None, link_results_criteria=None, ignore_link_result_criteria=None):
if link_id is None:
link_id = (chain._reserve_link_id_generator.counter,
chain._reserve_link_id_generator.get_new_ID())
if chain._closeable and chain._closed:
raise ChainClosed(chain, link_id, link_info)
chain._push_link_info(link_id, link_info)
if link_results_criteria is not None:
chain._push_criteria(link_results_criteria)
need_to_save_link_result = True
try:
yield chain
except ChainLinkFailed as exc:
result_info = None
if exc.args:
result_info = exc.args[0]
chain.push_result(False, ChainInternalResult(
ChainInternalResultType.built_in_exception__chain_link_failed,
'CHAIN INTERNAL RESULT. BUILT-IN EXCEPTION: ChainLinkFailed ({})'.format(result_info), result_info))
except ChainHistoryExport as export:
chain.process_history_import(export)
if export.process_error_result:
chain.push_result(False, ChainInternalResult(
ChainInternalResultType.built_in_exception__bad_history_import,
'CHAIN INTERNAL RESULT. BUILT-IN EXCEPTION: BAD HISTORY IMPORT EXCEPTION', None))
except:
exc = sys.exc_info()
exc_type, exc_obj, exc_tb = exc
tb_full_file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
tb_line_number = exc_tb.tb_lineno
tb_function_name = str()
tb_text = str()
tb_list = traceback.extract_tb(exc_tb, 2)
if len(tb_list) >= 2:
actual_tb = tb_list[1]
tb_full_file_name, tb_line_number, tb_function_name, tb_text = actual_tb
exception = exc
error_str = '{} {}'.format(str(exception[0]), str(exception[1].args[0]))
# print('+++', error_str)
formatted_traceback = traceback.format_exception(exception[0], exception[1], exception[2])
exception = exception[:2] + (formatted_traceback,)
trace_str = ''.join(exception[2])
if chain._save_debug_trace:
result_string = 'CHAIN INTERNAL RESULT. CODE EXCEPTION "{}" AT "{}":{} in {} WITH TRACE: \n' \
'{}\n' \
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' \
''.format(error_str, tb_full_file_name, tb_line_number, tb_function_name, trace_str)
else:
result_string = 'CHAIN INTERNAL RESULT. CODE EXCEPTION "{}" AT "{}":{} in {}'.format(
error_str, tb_full_file_name, tb_line_number, tb_function_name)
chain.push_result(False, ChainInternalResult(
ChainInternalResultType.external_exception, result_string, exc))
# print(result_string)
# _chain_reader_runner__chain.push_result(False, sys.exc_info()[1])
if chain._raise_exceptions:
need_to_save_link_result = False
chain._save_link_result()
chain.raise_bad_links()
# raise
else:
if chain._current_link_result is None:
# _chain_reader_runner__chain.push_result(True)
chain.push_result(False, ChainInternalResult(
ChainInternalResultType.link_did_not_returned_an_answer,
'CHAIN INTERNAL RESULT. Link DID NOT RETURN RESULT', None))
finally:
if need_to_save_link_result:
chain._save_link_result(ignore_link_result_criteria)
if link_results_criteria is not None:
chain._pop_criteria()
chain._pop_link_info()
def link__function(target_function):
"""
Parameters: chain__chain= (required)
, chain__link_id= (required)
, chain__link_info= (optional)
, chain__link_results_criteria= (optional)
, chain__ignore_link_result_criteria= (optional).
Parameters passed to the target_function: chain__chain (after local link configuration).
:param target_function: function
:return:
"""
def new_target_function(*args, **kwargs):
chain = None
if 'chain__chain' in kwargs:
chain = kwargs['chain__chain']
del kwargs['chain__chain']
else:
raise ChainFunctionParameterNeeded('chain__chain')
link_id = None
if 'chain__link_id' in kwargs:
link_id = kwargs['chain__link_id']
del kwargs['chain__link_id']
else:
raise ChainFunctionParameterNeeded('chain__link_id')
link_info = None
if 'chain__link_info' in kwargs:
link_info = kwargs['chain__link_info']
del kwargs['chain__link_info']
link_results_criteria = None
if 'chain__link_results_criteria' in kwargs:
link_results_criteria = kwargs['chain__link_results_criteria']
del kwargs['chain__link_results_criteria']
ignore_link_result_criteria = None
if 'chain__ignore_link_result_criteria' in kwargs:
ignore_link_result_criteria = kwargs['chain__ignore_link_result_criteria']
del kwargs['chain__ignore_link_result_criteria']
target_function_result = None
with link(chain, link_id, link_info, link_results_criteria, ignore_link_result_criteria) as \
context:
kwargs['chain__chain'] = context
target_function_result = target_function(*args, **kwargs)
return target_function_result
return new_target_function
class _ChainRunner:
def __init__(self, current_globals, chain, link_id=None, link_info=None, link_results_criteria=None,
ignore_link_result_criteria=None, function_result_criteria=None):
self._chain_runner__current_globals = current_globals
self._chain_runner__chain = chain
self._chain_runner__link_id = link_id
self._chain_runner__link_info = link_info
self._chain_runner__link_results_criteria = link_results_criteria
self._chain_runner__ignore_link_result_criteria = ignore_link_result_criteria
def __getattr__(self, name):
target_functor = None
if name in self._chain_runner__current_globals:
target_functor = self._chain_runner__current_globals[name]
else:
raise AttributeError(name)
def new_target_function(*args, **kwargs):
target_function_result = None
link_id = self._chain_runner__link_id or str(target_functor)
with link(self._chain_runner__chain, link_id, self._chain_runner__link_info,
self._chain_runner__link_results_criteria,
self._chain_runner__ignore_link_result_criteria) as context:
kwargs['chain__chain'] = context
target_function_result = target_functor(*args, **kwargs)
return target_function_result
return new_target_function
class ChainRunner:
def __init__(self, current_globals, chain):
self.current_globals = current_globals
self.chain = chain
def __call__(self, link_id=None, link_info=None, link_results_criteria=None,
ignore_link_result_criteria=None):
result = _ChainRunner(self.current_globals, self.chain, link_id, link_info, link_results_criteria,
ignore_link_result_criteria)
return result
class _ChainCallRunner:
def __init__(self, chain, link_id=None, link_info=None, link_results_criteria=None,
ignore_link_result_criteria=None, function_result_criteria=None):
self._chain_runner__chain = chain
self._chain_runner__link_id = link_id
self._chain_runner__link_info = link_info
self._chain_runner__link_results_criteria = link_results_criteria
self._chain_runner__ignore_link_result_criteria = ignore_link_result_criteria
def __call__(self, target_functor, *args, **kwargs):
target_function_result = None
link_id = self._chain_runner__link_id or str(target_functor)
with link(self._chain_runner__chain, link_id, self._chain_runner__link_info,
self._chain_runner__link_results_criteria,
self._chain_runner__ignore_link_result_criteria) as context:
kwargs['chain__chain'] = context
target_function_result = target_functor(*args, **kwargs)
return target_function_result
class ChainCallRunner:
def __init__(self, chain):
self.chain = chain
def __call__(self, link_id=None, link_info=None, link_results_criteria=None,
ignore_link_result_criteria=None):
result = _ChainCallRunner(self.chain, link_id, link_info, link_results_criteria,
ignore_link_result_criteria)
return result
def link__function__simple(target_function):
"""
Parameters: chain__chain= (required)
, chain__link_id= (optional) (default value == str(target_function))
, chain__link_results_criteria= (optional)
, chain__ignore_link_result_criteria= (optional).
Parameters passed to the target_function: .
:param target_function: function
:return:
"""
def new_target_function(*args, **kwargs):
chain = None
if 'chain__chain' in kwargs:
chain = kwargs['chain__chain']
del kwargs['chain__chain']
else:
raise ChainFunctionParameterNeeded('chain__chain')
# link_id = '__UNNAMED_FUNCTION_SIMPLE_LINK__'
link_id = str(target_function)
if 'chain__link_id' in kwargs:
link_id = kwargs['chain__link_id']
del kwargs['chain__link_id']
link_results_criteria = None
if 'chain__link_results_criteria' in kwargs:
link_results_criteria = kwargs['chain__link_results_criteria']
del kwargs['chain__link_results_criteria']
ignore_link_result_criteria = None
if 'chain__ignore_link_result_criteria' in kwargs:
ignore_link_result_criteria = kwargs['chain__ignore_link_result_criteria']
del kwargs['chain__ignore_link_result_criteria']
target_function_result = None
with link(chain, link_id, None, link_results_criteria, ignore_link_result_criteria) as context:
if context:
target_function_result = target_function(*args, **kwargs)
context.push_result(True, target_function_result)
return target_function_result
return new_target_function
class _ChainRunnerSimple:
def __init__(self, current_globals, chain, link_id=None, link_info=None, link_results_criteria=None,
ignore_link_result_criteria=None, function_result_criteria=None):
self._chain_runner_simple__current_globals = current_globals
self._chain_runner_simple__chain = chain
self._chain_runner_simple__link_id = link_id
self._chain_runner_simple__link_info = link_info
self._chain_runner_simple__link_results_criteria = link_results_criteria
self._chain_runner_simple__ignore_link_result_criteria = ignore_link_result_criteria
self._chain_runner_simple__function_result_criteria = function_result_criteria or (lambda result: True)
def __getattr__(self, name):
target_functor = None
if name in self._chain_runner_simple__current_globals:
target_functor = self._chain_runner_simple__current_globals[name]
else:
raise AttributeError(name)
def new_target_function(*args, **kwargs):
target_function_result = None
link_id = self._chain_runner_simple__link_id or str(target_functor)
with link(self._chain_runner_simple__chain, link_id, self._chain_runner_simple__link_info,
self._chain_runner_simple__link_results_criteria,
self._chain_runner_simple__ignore_link_result_criteria) as context:
if context:
target_function_result = target_functor(*args, **kwargs)
is_good_result = self._chain_runner_simple__function_result_criteria(target_function_result)
context.push_result(is_good_result, target_function_result)
return target_function_result
return new_target_function
class ChainUniRunner:
def __init__(self, current_globals, chain, simple_mode=False, function_result_criteria=None):
self.current_globals = current_globals
self.chain = chain
self.simple_mode = simple_mode
self.default_function_result_criteria = function_result_criteria or (lambda result: True)
self.function_result_criteria = self.default_function_result_criteria
self.runner_class = _ChainRunner
if self.simple_mode:
self.runner_class = _ChainRunnerSimple
def set_function_result_criteria(self, result_criteria_computer):
self.function_result_criteria = result_criteria_computer
def reset_function_result_criteria(self):
self.function_result_criteria = self.default_function_result_criteria
def __call__(self, link_id=None, link_info=None, link_results_criteria=None,
ignore_link_result_criteria=None):
result = self.runner_class(self.current_globals, self.chain, link_id, link_info,
link_results_criteria, ignore_link_result_criteria, self.function_result_criteria)
return result
class _ChainCallRunnerSimple:
def __init__(self, chain, link_id=None, link_info=None, link_results_criteria=None,
ignore_link_result_criteria=None, function_result_criteria=None):
self._chain_runner_simple__chain = chain
self._chain_runner_simple__link_id = link_id
self._chain_runner_simple__link_info = link_info
self._chain_runner_simple__link_results_criteria = link_results_criteria
self._chain_runner_simple__ignore_link_result_criteria = ignore_link_result_criteria
self._chain_runner_simple__function_result_criteria = function_result_criteria or (lambda result: True)
def __call__(self, target_functor, *args, **kwargs):
target_function_result = None
link_id = self._chain_runner_simple__link_id or str(target_functor)
with link(self._chain_runner_simple__chain, link_id, self._chain_runner_simple__link_info,
self._chain_runner_simple__link_results_criteria,
self._chain_runner_simple__ignore_link_result_criteria) as context:
if context:
target_function_result = target_functor(*args, **kwargs)
is_good_result = self._chain_runner_simple__function_result_criteria(target_function_result)
context.push_result(is_good_result, target_function_result)
return target_function_result
class ChainUniCallRunner:
def __init__(self, chain, simple_mode=False, function_result_criteria=None):
self.chain = chain
self.simple_mode = simple_mode
self.default_function_result_criteria = function_result_criteria or (lambda result: True)
self.function_result_criteria = self.default_function_result_criteria
self.runner_class = _ChainCallRunner
if self.simple_mode:
self.runner_class = _ChainCallRunnerSimple
def set_function_result_criteria(self, result_criteria_computer):
self.function_result_criteria = result_criteria_computer
def reset_function_result_criteria(self):
self.function_result_criteria = self.default_function_result_criteria
def __call__(self, link_id=None, link_info=None, link_results_criteria=None,
ignore_link_result_criteria=None):
result = self.runner_class(self.chain, link_id, link_info,
link_results_criteria, ignore_link_result_criteria, self.function_result_criteria)
return result
class _ChainValRunner:
def __init__(self, chain, link_id=None, link_info=None, link_results_criteria=None,
ignore_link_result_criteria=None, function_result_criteria=None, reaction_to_the_result=None):
self._chain_runner_simple__chain = chain
self._chain_runner_simple__link_id = link_id
self._chain_runner_simple__link_info = link_info
self._chain_runner_simple__link_results_criteria = link_results_criteria
self._chain_runner_simple__ignore_link_result_criteria = ignore_link_result_criteria
self._chain_runner_simple__function_result_criteria = function_result_criteria or (lambda result: True)
self._chain_runner_simple__reaction_to_the_result = reaction_to_the_result
def __call__(self, functor_result):
target_function_result = functor_result
link_id = self._chain_runner_simple__link_id
with link(self._chain_runner_simple__chain, link_id, self._chain_runner_simple__link_info,
self._chain_runner_simple__link_results_criteria,
self._chain_runner_simple__ignore_link_result_criteria) as context:
if context:
is_good_result = self._chain_runner_simple__function_result_criteria(target_function_result)
if self._chain_runner_simple__reaction_to_the_result is not None:
verdict = self._chain_runner_simple__reaction_to_the_result(is_good_result, target_function_result)
target_function_result = (target_function_result, verdict)
context.push_result(is_good_result, target_function_result)
return functor_result
class ChainValRunner:
def __init__(self, chain, function_result_criteria=None, reaction_to_the_result=None):
self.chain = chain
self.default_function_result_criteria = function_result_criteria or (lambda result: True)
self.function_result_criteria = self.default_function_result_criteria
self.reaction_to_the_result = reaction_to_the_result
def set_function_result_criteria(self, result_criteria_computer):
self.function_result_criteria = result_criteria_computer
def reset_function_result_criteria(self):
self.function_result_criteria = self.default_function_result_criteria
def __call__(self, link_id=None, link_info=None, link_results_criteria=None,
ignore_link_result_criteria=None):
result = _ChainValRunner(self.chain, link_id, link_info,
link_results_criteria, ignore_link_result_criteria,
self.function_result_criteria, self.reaction_to_the_result)
return result
@contextmanager
def chain_reader(chain, link_results_criteria=None, close=False):
if link_results_criteria is not None:
chain._push_criteria(link_results_criteria)
try:
yield chain
except:
raise
finally:
if link_results_criteria is not None:
chain._pop_criteria()
if close:
chain.close()
def chain_reader__function(target_function):
"""
Parameters: chain__chain= (required), chain__link_results_criteria= (optional), chain__close= (optional).
Parameters passed to the target_function: chain__chain (after local link configuration).
:param target_function: function
:return:
"""
def new_target_function(*args, **kwargs):
chain = None
if 'chain__chain' in kwargs:
chain = kwargs['chain__chain']
del kwargs['chain__chain']
else:
raise ChainFunctionParameterNeeded('chain__chain')
link_results_criteria = None
if 'chain__link_results_criteria' in kwargs:
link_results_criteria = kwargs['chain__link_results_criteria']
del kwargs['chain__link_results_criteria']
close = None
if 'chain__close' in kwargs:
close = kwargs['chain__close']
del kwargs['chain__close']
target_function_result = None
with chain_reader(chain, link_results_criteria, close) as context:
kwargs['chain__chain'] = context
target_function_result = target_function(*args, **kwargs)
return target_function_result
return new_target_function
class _ChainReaderRunner:
def __init__(self, current_globals, chain, link_results_criteria=None, close=False):
self._chain_reader_runner__current_globals = current_globals
self._chain_reader_runner__chain = chain
self._chain_reader_runner__link_results_criteria = link_results_criteria
self._chain_reader_runner__close = close
def __getattr__(self, name):
target_functor = None
if name in self._chain_reader_runner__current_globals:
target_functor = self._chain_reader_runner__current_globals[name]
else:
raise AttributeError(name)
def new_target_function(*args, **kwargs):
target_function_result = None
with chain_reader(self._chain_reader_runner__chain,
self._chain_reader_runner__link_results_criteria,
self._chain_reader_runner__close) as context:
kwargs['chain__chain'] = context
target_function_result = target_functor(*args, **kwargs)
return target_function_result
return new_target_function
class ChainReaderRunner:
def __init__(self, current_globals, chain):
self.current_globals = current_globals
self.chain = chain
def __call__(self, link_results_criteria=None, close=False):
result = _ChainReaderRunner(self.current_globals, self.chain, link_results_criteria, close)
return result
class _ChainReaderCallRunner:
def __init__(self, chain, link_results_criteria=None, close=False):
self._chain_reader_runner__chain = chain
self._chain_reader_runner__link_results_criteria = link_results_criteria
self._chain_reader_runner__close = close
def __call__(self, target_functor, *args, **kwargs):
target_function_result = None
with chain_reader(self._chain_reader_runner__chain,
self._chain_reader_runner__link_results_criteria,
self._chain_reader_runner__close) as context:
kwargs['chain__chain'] = context
target_function_result = target_functor(*args, **kwargs)
return target_function_result
class ChainReaderCallRunner:
def __init__(self, chain):
self.chain = chain
def __call__(self, link_results_criteria=None, close=False):
result = _ChainReaderCallRunner(self.chain, link_results_criteria, close)
return result
| 42.368231
| 123
| 0.673171
|
4056322a83e015c7dcf0404b7d749bd06aa4f198
| 1,435
|
py
|
Python
|
iot/mqttsn/mqttsn_messages/WillMsgUpd.py
|
mobius-software-ltd/iotbroker.cloud-python-client
|
48242f5c2a7ffeadb9e03e7aa4ab4a961e09af63
|
[
"RSA-MD"
] | 2
|
2021-06-25T10:32:32.000Z
|
2022-01-20T02:04:21.000Z
|
iot/mqttsn/mqttsn_messages/WillMsgUpd.py
|
mobius-software-ltd/iotbroker.cloud-python-client
|
48242f5c2a7ffeadb9e03e7aa4ab4a961e09af63
|
[
"RSA-MD"
] | 2
|
2022-01-07T09:12:32.000Z
|
2022-03-27T12:57:37.000Z
|
iot/mqttsn/mqttsn_messages/WillMsgUpd.py
|
mobius-software-ltd/iotbroker.cloud-python-client
|
48242f5c2a7ffeadb9e03e7aa4ab4a961e09af63
|
[
"RSA-MD"
] | 1
|
2019-02-28T21:22:16.000Z
|
2019-02-28T21:22:16.000Z
|
"""
# Mobius Software LTD
# Copyright 2015-2018, Mobius Software LTD
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this software; if not, write to the Free
# Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA, or see the FSF site: http://www.fsf.org.
"""
from iot.mqttsn.mqttsn_classes.MQTTSN_messageType import *
class WillMsgUpd(object):
def __init__(self, content):
self.content = content
def getLength(self):
length = 2
if self.content is not None and len(self.content) > 0:
length += len(self.content)
if len(self.content) > 253:
length += 2
return length
def getType(self):
return MQTTSN_messageType.SN_WILL_MSG_UPD.value[0]
def getContent(self):
return self.content
def setContent(self, content):
self.content = content
| 35
| 69
| 0.696864
|
b7249450d404aab3115fd39b23b33cf45cac19bc
| 4,320
|
py
|
Python
|
Bio/Wise/dnal.py
|
ntamas/biopython
|
ff12c3dd533274678113ecdbd88b0136fb77e565
|
[
"PostgreSQL"
] | 1
|
2022-01-18T22:33:06.000Z
|
2022-01-18T22:33:06.000Z
|
Bio/Wise/dnal.py
|
ntamas/biopython
|
ff12c3dd533274678113ecdbd88b0136fb77e565
|
[
"PostgreSQL"
] | null | null | null |
Bio/Wise/dnal.py
|
ntamas/biopython
|
ff12c3dd533274678113ecdbd88b0136fb77e565
|
[
"PostgreSQL"
] | null | null | null |
#!/usr/bin/env python
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Bio.Wise contains modules for running and processing the output of
# some of the models in the Wise2 package by Ewan Birney available from:
# ftp://ftp.ebi.ac.uk/pub/software/unix/wise2/
# http://www.ebi.ac.uk/Wise2/
#
# Bio.Wise.psw is for protein Smith-Waterman alignments
# Bio.Wise.dnal is for Smith-Waterman DNA alignments
from __future__ import print_function
import commands
import itertools
import re
from Bio import Wise
_SCORE_MATCH = 4
_SCORE_MISMATCH = -1
_SCORE_GAP_START = -5
_SCORE_GAP_EXTENSION = -1
_CMDLINE_DNAL = ["dnal", "-alb", "-nopretty"]
def _build_dnal_cmdline(match, mismatch, gap, extension):
res = _CMDLINE_DNAL[:]
res.extend(["-match", str(match)])
res.extend(["-mis", str(mismatch)])
res.extend(["-gap", str(-gap)]) # negative: convert score to penalty
res.extend(["-ext", str(-extension)]) # negative: convert score to penalty
return res
_CMDLINE_FGREP_COUNT = "fgrep -c '%s' %s"
def _fgrep_count(pattern, file):
return int(commands.getoutput(_CMDLINE_FGREP_COUNT % (pattern, file)))
_re_alb_line2coords = re.compile(r"^\[([^:]+):[^\[]+\[([^:]+):")
def _alb_line2coords(line):
return tuple([int(coord)+1 # one-based -> zero-based
for coord
in _re_alb_line2coords.match(line).groups()])
def _get_coords(filename):
alb = file(filename)
start_line = None
end_line = None
for line in alb:
if line.startswith("["):
if not start_line:
start_line = line # rstrip not needed
else:
end_line = line
if end_line is None: # sequence is too short
return [(0, 0), (0, 0)]
return zip(*map(_alb_line2coords, [start_line, end_line])) # returns [(start0, end0), (start1, end1)]
def _any(seq, pred=bool):
"Returns True if pred(x) is True at least one element in the iterable"
return True in itertools.imap(pred, seq)
class Statistics(object):
"""
Calculate statistics from an ALB report
"""
def __init__(self, filename, match, mismatch, gap, extension):
self.matches = _fgrep_count('"SEQUENCE" %s' % match, filename)
self.mismatches = _fgrep_count('"SEQUENCE" %s' % mismatch, filename)
self.gaps = _fgrep_count('"INSERT" %s' % gap, filename)
if gap == extension:
self.extensions = 0
else:
self.extensions = _fgrep_count('"INSERT" %s' % extension, filename)
self.score = (match*self.matches +
mismatch*self.mismatches +
gap*self.gaps +
extension*self.extensions)
if _any([self.matches, self.mismatches, self.gaps, self.extensions]):
self.coords = _get_coords(filename)
else:
self.coords = [(0, 0), (0,0)]
def identity_fraction(self):
return self.matches/(self.matches+self.mismatches)
header = "identity_fraction\tmatches\tmismatches\tgaps\textensions"
def __str__(self):
return "\t".join([str(x) for x in (self.identity_fraction(), self.matches, self.mismatches, self.gaps, self.extensions)])
def align(pair, match=_SCORE_MATCH, mismatch=_SCORE_MISMATCH, gap=_SCORE_GAP_START, extension=_SCORE_GAP_EXTENSION, **keywds):
cmdline = _build_dnal_cmdline(match, mismatch, gap, extension)
temp_file = Wise.align(cmdline, pair, **keywds)
try:
return Statistics(temp_file.name, match, mismatch, gap, extension)
except AttributeError:
try:
keywds['dry_run']
return None
except KeyError:
raise
def main():
import sys
stats = align(sys.argv[1:3])
print("\n".join(["%s: %s" % (attr, getattr(stats, attr))
for attr in
("matches", "mismatches", "gaps", "extensions")]))
print("identity_fraction: %s" % stats.identity_fraction())
print("coords: %s" % stats.coords)
def _test(*args, **keywds):
    """Run this module's doctests; arguments are forwarded to doctest.testmod."""
    import doctest
    import sys
    doctest.testmod(sys.modules[__name__], *args, **keywds)
if __name__ == "__main__":
    # Run the doctests first unless python was started with -O
    # (which sets __debug__ to False), then do the alignment.
    if __debug__:
        _test()
    main()
| 30.20979
| 129
| 0.636806
|
05ed3a9614636fd8226bfba8c25d91e8ba18c5a3
| 643
|
py
|
Python
|
controle_financeiro/controle_financeiro/settings/test.py
|
douglaspands/controle-financeiro
|
1f8f44dca6132b730e92ccf62447dede4119b28e
|
[
"MIT"
] | null | null | null |
controle_financeiro/controle_financeiro/settings/test.py
|
douglaspands/controle-financeiro
|
1f8f44dca6132b730e92ccf62447dede4119b28e
|
[
"MIT"
] | null | null | null |
controle_financeiro/controle_financeiro/settings/test.py
|
douglaspands/controle-financeiro
|
1f8f44dca6132b730e92ccf62447dede4119b28e
|
[
"MIT"
] | 1
|
2021-06-15T22:14:22.000Z
|
2021-06-15T22:14:22.000Z
|
"""Django settings overrides used when running the test suite."""
import os
from decouple import config
from .base import *

SETTING_NAME = "test"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Tests run against a separate on-disk SQLite database.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        # BASE_DIR is presumably defined in .base (star import) -- verify.
        "NAME": BASE_DIR / "db_test.sqlite3",
    }
}
DEBUG = True
ALLOWED_HOSTS = []
INTERNAL_IPS = [
    "127.0.0.1",
]
# Static files; MEDIA_* can be overridden via environment variables.
STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = (os.path.join(BASE_DIR, "static"),)
MEDIA_URL = config("MEDIA_URL", default="/media/", cast=str)
MEDIA_ROOT = config("MEDIA_ROOT", default=os.path.join(BASE_DIR, "media"), cast=str)
| 19.484848
| 84
| 0.671851
|
70730cc03695fe860dd95be030451c28849506a9
| 41
|
py
|
Python
|
installer/py2exe/test/test_noZipFile.py
|
ifwe/digsby
|
f5fe00244744aa131e07f09348d10563f3d8fa99
|
[
"Python-2.0"
] | 35
|
2015-08-15T14:32:38.000Z
|
2021-12-09T16:21:26.000Z
|
installer/py2exe/test/test_noZipFile.py
|
niterain/digsby
|
16a62c7df1018a49eaa8151c0f8b881c7e252949
|
[
"Python-2.0"
] | 4
|
2015-09-12T10:42:57.000Z
|
2017-02-27T04:05:51.000Z
|
installer/py2exe/test/test_noZipFile.py
|
niterain/digsby
|
16a62c7df1018a49eaa8151c0f8b881c7e252949
|
[
"Python-2.0"
] | 15
|
2015-07-10T23:58:07.000Z
|
2022-01-23T22:16:33.000Z
|
if __name__ == "__main__":
    # The original used the Python 2 print statement ("print 2+2"), which is
    # a SyntaxError on Python 3; the parenthesized call works on both.
    print(2 + 2)
| 13.666667
| 26
| 0.609756
|
6035672698bd808db2de34233726f4f5536fb283
| 18,673
|
py
|
Python
|
synapse/metrics/__init__.py
|
ThiefMaster/synapse
|
f2af3e4fc550e7e93be1b0f425c3e9c484b96293
|
[
"Apache-2.0"
] | 1
|
2020-07-21T17:51:02.000Z
|
2020-07-21T17:51:02.000Z
|
synapse/metrics/__init__.py
|
mjvaldez/synapse
|
de119063f248981510e961e83f1515a3add19a21
|
[
"Apache-2.0"
] | null | null | null |
synapse/metrics/__init__.py
|
mjvaldez/synapse
|
de119063f248981510e961e83f1515a3add19a21
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import gc
import logging
import os
import platform
import threading
import time
from typing import Callable, Dict, Iterable, Optional, Tuple, Union
import attr
from prometheus_client import Counter, Gauge, Histogram
from prometheus_client.core import (
REGISTRY,
CounterMetricFamily,
GaugeMetricFamily,
HistogramMetricFamily,
)
from twisted.internet import reactor
import synapse
from synapse.metrics._exposition import (
MetricsResource,
generate_latest,
start_http_server,
)
from synapse.util.versionstring import get_version_string
logger = logging.getLogger(__name__)

# URL path under which the metrics resource is mounted.
METRICS_PREFIX = "/_synapse/metrics"

# Used below to choose between CPython- and PyPy-specific GC collectors
# and to skip the manual GC in the reactor wrapper.
running_on_pypy = platform.python_implementation() == "PyPy"

# Registry of custom collectors keyed by metric name; supports
# re-registration (see the _register* methods below).
all_gauges = {}  # type: Dict[str, Union[LaterGauge, InFlightGauge, BucketCollector]]

# Per-process CPU metrics are only available where /proc exists.
HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat")
class RegistryProxy(object):
    """Expose the global prometheus registry, hiding double-underscore metrics."""

    @staticmethod
    def collect():
        """Yield every registry metric whose name does not start with "__"."""
        for family in REGISTRY.collect():
            if family.name.startswith("__"):
                continue
            yield family
@attr.s(hash=True)
class LaterGauge(object):
    """A gauge whose value is produced by a callback at collection time."""

    name = attr.ib(type=str)
    desc = attr.ib(type=str)
    labels = attr.ib(hash=False, type=Optional[Iterable[str]])
    # callback: should either return a value (if there are no labels for this metric),
    # or dict mapping from a label tuple to a value
    caller = attr.ib(type=Callable[[], Union[Dict[Tuple[str, ...], float], float]])

    def collect(self):
        """Called by prometheus_client; yields one GaugeMetricFamily."""
        g = GaugeMetricFamily(self.name, self.desc, labels=self.labels)
        try:
            calls = self.caller()
        except Exception:
            # A failing callback must not break the whole scrape:
            # log it and emit an empty gauge instead.
            logger.exception("Exception running callback for LaterGauge(%s)", self.name)
            yield g
            return
        if isinstance(calls, dict):
            for k, v in calls.items():
                g.add_metric(k, v)
        else:
            g.add_metric([], calls)
        yield g

    def __attrs_post_init__(self):
        self._register()

    def _register(self):
        # Replacing an existing collector with the same name keeps
        # re-registration from raising in prometheus_client.
        if self.name in all_gauges.keys():
            logger.warning("%s already registered, reregistering" % (self.name,))
            REGISTRY.unregister(all_gauges.pop(self.name))
        REGISTRY.register(self)
        all_gauges[self.name] = self
class InFlightGauge(object):
    """Tracks number of things (e.g. requests, Measure blocks, etc) in flight
    at any given time.

    Each InFlightGauge will create a metric called `<name>_total` that counts
    the number of in flight blocks, as well as a metrics for each item in the
    given `sub_metrics` as `<name>_<sub_metric>` which will get updated by the
    callbacks.

    Args:
        name (str)
        desc (str)
        labels (list[str])
        sub_metrics (list[str]): A list of sub metrics that the callbacks
            will update.
    """
    def __init__(self, name, desc, labels, sub_metrics):
        self.name = name
        self.desc = desc
        self.labels = labels
        self.sub_metrics = sub_metrics
        # Create a class which have the sub_metrics values as attributes, which
        # default to 0 on initialization. Used to pass to registered callbacks.
        self._metrics_class = attr.make_class(
            "_MetricsEntry", attrs={x: attr.ib(0) for x in sub_metrics}, slots=True
        )
        # Counts number of in flight blocks for a given set of label values
        self._registrations = {}  # type: Dict
        # Protects access to _registrations
        self._lock = threading.Lock()
        self._register_with_collector()

    def register(self, key, callback):
        """Registers that we've entered a new block with labels `key`.

        `callback` gets called each time the metrics are collected. The same
        value must also be given to `unregister`.

        `callback` gets called with an object that has an attribute per
        sub_metric, which should be updated with the necessary values. Note that
        the metrics object is shared between all callbacks registered with the
        same key.

        Note that `callback` may be called on a separate thread.
        """
        with self._lock:
            self._registrations.setdefault(key, set()).add(callback)

    def unregister(self, key, callback):
        """Registers that we've exited a block with labels `key`.
        """
        with self._lock:
            self._registrations.setdefault(key, set()).discard(callback)

    def collect(self):
        """Called by prometheus client when it reads metrics.

        Note: may be called by a separate thread.
        """
        in_flight = GaugeMetricFamily(
            self.name + "_total", self.desc, labels=self.labels
        )
        metrics_by_key = {}
        # We copy so that we don't mutate the list while iterating
        with self._lock:
            keys = list(self._registrations)
        for key in keys:
            # Re-acquire the lock per key so callbacks run unlocked.
            with self._lock:
                callbacks = set(self._registrations[key])
            in_flight.add_metric(key, len(callbacks))
            metrics = self._metrics_class()
            metrics_by_key[key] = metrics
            for callback in callbacks:
                callback(metrics)
        yield in_flight
        # Emit one gauge family per sub-metric, one sample per label set.
        for name in self.sub_metrics:
            gauge = GaugeMetricFamily(
                "_".join([self.name, name]), "", labels=self.labels
            )
            for key, metrics in metrics_by_key.items():
                gauge.add_metric(key, getattr(metrics, name))
            yield gauge

    def _register_with_collector(self):
        # Replace any previously-registered collector of the same name.
        if self.name in all_gauges.keys():
            logger.warning("%s already registered, reregistering" % (self.name,))
            REGISTRY.unregister(all_gauges.pop(self.name))
        REGISTRY.register(self)
        all_gauges[self.name] = self
@attr.s(hash=True)
class BucketCollector(object):
    """
    Like a Histogram, but allows buckets to be point-in-time instead of
    incrementally added to.

    Args:
        name (str): Base name of metric to be exported to Prometheus.
        data_collector (callable -> dict): A synchronous callable that
            returns a dict mapping bucket to number of items in the
            bucket. If these buckets are not the same as the buckets
            given to this class, they will be remapped into them.
        buckets (list[float]): List of floats/ints of the buckets to
            give to Prometheus. +Inf is ignored, if given.
    """

    name = attr.ib()
    data_collector = attr.ib()
    buckets = attr.ib()

    def collect(self):
        # Fetch the data -- this must be synchronous!
        data = self.data_collector()
        buckets = {}  # type: Dict[float, int]
        res = []
        # Each value is added to every bucket bound >= its key, producing
        # the cumulative counts Prometheus histograms expect.
        for x in data.keys():
            for i, bound in enumerate(self.buckets):
                if x <= bound:
                    buckets[bound] = buckets.get(bound, 0) + data[x]
        for i in self.buckets:
            res.append([str(i), buckets.get(i, 0)])
        res.append(["+Inf", sum(data.values())])
        metric = HistogramMetricFamily(
            self.name, "", buckets=res, sum_value=sum(x * y for x, y in data.items())
        )
        yield metric

    def __attrs_post_init__(self):
        # Normalise bucket bounds to a sorted tuple of floats, dropping +Inf
        # (it is appended explicitly in collect()).
        self.buckets = [float(x) for x in self.buckets if x != "+Inf"]
        if self.buckets != sorted(self.buckets):
            raise ValueError("Buckets not sorted")
        self.buckets = tuple(self.buckets)
        # Replace any previously-registered collector of the same name.
        if self.name in all_gauges.keys():
            logger.warning("%s already registered, reregistering" % (self.name,))
            REGISTRY.unregister(all_gauges.pop(self.name))
        REGISTRY.register(self)
        all_gauges[self.name] = self
#
# Detailed CPU metrics
#

class CPUMetrics(object):
    """Export per-process user/system CPU time parsed from /proc/self/stat."""

    def __init__(self):
        # Fallback clock-tick rate if the sysconf lookup fails below.
        ticks_per_sec = 100
        try:
            # Try and get the system config
            ticks_per_sec = os.sysconf("SC_CLK_TCK")
        except (ValueError, TypeError, AttributeError):
            pass
        self.ticks_per_sec = ticks_per_sec

    def collect(self):
        # No-op on platforms without /proc (e.g. macOS, Windows).
        if not HAVE_PROC_SELF_STAT:
            return
        with open("/proc/self/stat") as s:
            line = s.read()
            # Split after the ") " that ends the comm field, so raw_stats[11]
            # and [12] are stat fields 14/15: utime and stime in clock ticks
            # (see proc(5)).
            raw_stats = line.split(") ", 1)[1].split(" ")
            user = GaugeMetricFamily("process_cpu_user_seconds_total", "")
            user.add_metric([], float(raw_stats[11]) / self.ticks_per_sec)
            yield user
            sys = GaugeMetricFamily("process_cpu_system_seconds_total", "")
            sys.add_metric([], float(raw_stats[12]) / self.ticks_per_sec)
            yield sys

REGISTRY.register(CPUMetrics())
#
# Python GC metrics
#

gc_unreachable = Gauge("python_gc_unreachable_total", "Unreachable GC objects", ["gen"])
# Bucket bounds span 2.5 ms up to 60 s; both metrics are labelled by
# GC generation and fed by the manual collection in runUntilCurrentTimer.
gc_time = Histogram(
    "python_gc_time",
    "Time taken to GC (sec)",
    ["gen"],
    buckets=[
        0.0025,
        0.005,
        0.01,
        0.025,
        0.05,
        0.10,
        0.25,
        0.50,
        1.00,
        2.50,
        5.00,
        7.50,
        15.00,
        30.00,
        45.00,
        60.00,
    ],
)
class GCCounts(object):
    """Export the CPython garbage collector's per-generation object counts."""

    def collect(self):
        family = GaugeMetricFamily("python_gc_counts", "GC object counts", labels=["gen"])
        for generation, count in enumerate(gc.get_count()):
            family.add_metric([str(generation)], count)
        yield family


# gc.get_count() has different semantics on PyPy, which gets its own
# collector below.
if not running_on_pypy:
    REGISTRY.register(GCCounts())
#
# PyPy GC / memory metrics
#

class PyPyGCStats(object):
    """Export PyPy's gc.get_stats() memory/time numbers as Prometheus metrics."""

    def collect(self):

        # @stats is a pretty-printer object with __str__() returning a nice table,
        # plus some fields that contain data from that table.
        # unfortunately, fields are pretty-printed themselves (i. e. '4.5MB').
        stats = gc.get_stats(memory_pressure=False)  # type: ignore

        # @s contains same fields as @stats, but as actual integers.
        s = stats._s  # type: ignore

        # also note that field naming is completely braindead
        # and only vaguely correlates with the pretty-printed table.
        # >>>> gc.get_stats(False)
        # Total memory consumed:
        #     GC used:            8.7MB (peak: 39.0MB)        # s.total_gc_memory, s.peak_memory
        #        in arenas:            3.0MB                  # s.total_arena_memory
        #        rawmalloced:          1.7MB                  # s.total_rawmalloced_memory
        #        nursery:              4.0MB                  # s.nursery_size
        #     raw assembler used: 31.0kB                      # s.jit_backend_used
        #     -----------------------------
        #     Total:              8.8MB                       # stats.memory_used_sum
        #
        # Total memory allocated:
        #     GC allocated:            38.7MB (peak: 41.1MB)  # s.total_allocated_memory, s.peak_allocated_memory
        #        in arenas:            30.9MB                 # s.peak_arena_memory
        #        rawmalloced:          4.1MB                  # s.peak_rawmalloced_memory
        #        nursery:              4.0MB                  # s.nursery_size
        #     raw assembler allocated: 1.0MB                  # s.jit_backend_allocated
        #     -----------------------------
        #     Total:                   39.7MB                 # stats.memory_allocated_sum
        #
        #     Total time spent in GC:  0.073                  # s.total_gc_time

        # NOTE(review): the /1000 implies total_gc_time is in milliseconds --
        # confirm against the PyPy gc module documentation.
        pypy_gc_time = CounterMetricFamily(
            "pypy_gc_time_seconds_total", "Total time spent in PyPy GC", labels=[],
        )
        pypy_gc_time.add_metric([], s.total_gc_time / 1000)
        yield pypy_gc_time

        pypy_mem = GaugeMetricFamily(
            "pypy_memory_bytes",
            "Memory tracked by PyPy allocator",
            labels=["state", "class", "kind"],
        )
        # memory used by JIT assembler
        pypy_mem.add_metric(["used", "", "jit"], s.jit_backend_used)
        pypy_mem.add_metric(["allocated", "", "jit"], s.jit_backend_allocated)
        # memory used by GCed objects
        pypy_mem.add_metric(["used", "", "arenas"], s.total_arena_memory)
        pypy_mem.add_metric(["allocated", "", "arenas"], s.peak_arena_memory)
        pypy_mem.add_metric(["used", "", "rawmalloced"], s.total_rawmalloced_memory)
        pypy_mem.add_metric(["allocated", "", "rawmalloced"], s.peak_rawmalloced_memory)
        pypy_mem.add_metric(["used", "", "nursery"], s.nursery_size)
        pypy_mem.add_metric(["allocated", "", "nursery"], s.nursery_size)
        # totals
        pypy_mem.add_metric(["used", "totals", "gc"], s.total_gc_memory)
        pypy_mem.add_metric(["allocated", "totals", "gc"], s.total_allocated_memory)
        pypy_mem.add_metric(["used", "totals", "gc_peak"], s.peak_memory)
        pypy_mem.add_metric(["allocated", "totals", "gc_peak"], s.peak_allocated_memory)
        yield pypy_mem

if running_on_pypy:
    REGISTRY.register(PyPyGCStats())
#
# Twisted reactor metrics
#

# Both histograms are fed by runUntilCurrentTimer below.
tick_time = Histogram(
    "python_twisted_reactor_tick_time",
    "Tick time of the Twisted reactor (sec)",
    buckets=[0.001, 0.002, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.5, 1, 2, 5],
)
pending_calls_metric = Histogram(
    "python_twisted_reactor_pending_calls",
    "Pending calls",
    buckets=[1, 2, 5, 10, 25, 50, 100, 250, 500, 1000],
)

#
# Federation Metrics
#

sent_transactions_counter = Counter("synapse_federation_client_sent_transactions", "")

events_processed_counter = Counter("synapse_federation_client_events_processed", "")

event_processing_loop_counter = Counter(
    "synapse_event_processing_loop_count", "Event processing loop iterations", ["name"]
)

event_processing_loop_room_count = Counter(
    "synapse_event_processing_loop_room_count",
    "Rooms seen per event processing loop iteration",
    ["name"],
)

# Used to track where various components have processed in the event stream,
# e.g. federation sending, appservice sending, etc.
event_processing_positions = Gauge("synapse_event_processing_positions", "", ["name"])

# Used to track the current max events stream position
event_persisted_position = Gauge("synapse_event_persisted_position", "")

# Used to track the received_ts of the last event processed by various
# components
event_processing_last_ts = Gauge("synapse_event_processing_last_ts", "", ["name"])

# Used to track the lag processing events. This is the time difference
# between the last processed event's received_ts and the time it was
# finished being processed.
event_processing_lag = Gauge("synapse_event_processing_lag", "", ["name"])

event_processing_lag_by_event = Histogram(
    "synapse_event_processing_lag_by_event",
    "Time between an event being persisted and it being queued up to be sent to the relevant remote servers",
    ["name"],
)

# Build info of the running server.
build_info = Gauge(
    "synapse_build_info", "Build information", ["pythonversion", "version", "osversion"]
)
build_info.labels(
    " ".join([platform.python_implementation(), platform.python_version()]),
    get_version_string(synapse),
    " ".join([platform.system(), platform.release()]),
).set(1)

# Timestamp of the last observed reactor tick; updated in
# runUntilCurrentTimer and exported by ReactorLastSeenMetric.
last_ticked = time.time()
class ReactorLastSeenMetric(object):
    """Export how long ago the Twisted reactor last completed a tick."""

    def collect(self):
        gauge = GaugeMetricFamily(
            "python_twisted_reactor_last_seen",
            "Seconds since the Twisted reactor was last seen",
        )
        gauge.add_metric([], time.time() - last_ticked)
        yield gauge


REGISTRY.register(ReactorLastSeenMetric())
def runUntilCurrentTimer(func):
    """Wrap reactor.runUntilCurrent to record tick-time/pending-call metrics
    and to drive manual garbage collection (GC is disabled below)."""
    @functools.wraps(func)
    def f(*args, **kwargs):
        now = reactor.seconds()
        num_pending = 0

        # _newTimedCalls is one long list of *all* pending calls. Below loop
        # is based off of impl of reactor.runUntilCurrent
        for delayed_call in reactor._newTimedCalls:
            if delayed_call.time > now:
                break
            if delayed_call.delayed_time > 0:
                continue
            num_pending += 1

        num_pending += len(reactor.threadCallQueue)
        start = time.time()
        ret = func(*args, **kwargs)
        end = time.time()

        # record the amount of wallclock time spent running pending calls.
        # This is a proxy for the actual amount of time between reactor polls,
        # since about 25% of time is actually spent running things triggered by
        # I/O events, but that is harder to capture without rewriting half the
        # reactor.
        tick_time.observe(end - start)
        pending_calls_metric.observe(num_pending)

        # Update the time we last ticked, for the metric to test whether
        # Synapse's reactor has frozen
        global last_ticked
        last_ticked = end

        # PyPy's GC is left alone; only record the tick metrics there.
        if running_on_pypy:
            return ret

        # Check if we need to do a manual GC (since its been disabled), and do
        # one if necessary.  Oldest generation first, so a gen-2 collection
        # also resets the younger counts.
        threshold = gc.get_threshold()
        counts = gc.get_count()
        for i in (2, 1, 0):
            if threshold[i] < counts[i]:
                if i == 0:
                    logger.debug("Collecting gc %d", i)
                else:
                    logger.info("Collecting gc %d", i)
                start = time.time()
                unreachable = gc.collect(i)
                end = time.time()
                gc_time.labels(i).observe(end - start)
                gc_unreachable.labels(i).set(unreachable)

        return ret

    return f
try:
    # Ensure the reactor has all the attributes we expect; alternative
    # reactor implementations without them are simply not instrumented.
    reactor.runUntilCurrent
    reactor._newTimedCalls
    reactor.threadCallQueue

    # runUntilCurrent is called when we have pending calls. It is called once
    # per iteration after fd polling.
    reactor.runUntilCurrent = runUntilCurrentTimer(reactor.runUntilCurrent)

    # We manually run the GC each reactor tick so that we can get some metrics
    # about time spent doing GC,
    if not running_on_pypy:
        gc.disable()
except AttributeError:
    pass

# Public API of this package.
__all__ = [
    "MetricsResource",
    "generate_latest",
    "start_http_server",
    "LaterGauge",
    "InFlightGauge",
    "BucketCollector",
]
| 31.974315
| 113
| 0.619825
|
594c92a85130dcc3de3de1b414510b7973b0524a
| 4,176
|
py
|
Python
|
troposphere_mate/canned/iam/awslambda.py
|
tsuttsu305/troposphere_mate-project
|
15ee94cc913efb32bc991979efcad943c992074c
|
[
"MIT"
] | 10
|
2019-07-08T14:52:16.000Z
|
2021-10-15T22:18:22.000Z
|
troposphere_mate/canned/iam/awslambda.py
|
tsuttsu305/troposphere_mate-project
|
15ee94cc913efb32bc991979efcad943c992074c
|
[
"MIT"
] | 1
|
2019-07-08T00:36:50.000Z
|
2019-07-08T00:36:50.000Z
|
troposphere_mate/canned/iam/awslambda.py
|
tsuttsu305/troposphere_mate-project
|
15ee94cc913efb32bc991979efcad943c992074c
|
[
"MIT"
] | 2
|
2020-03-22T14:44:54.000Z
|
2020-08-05T02:08:01.000Z
|
# -*- coding: utf-8 -*-
from troposphere_mate import Template, iam
from ...core.canned import MultiEnvBasicConfig, Constant
from ...core.mate import DEFAULT_LABELS_FIELD
from .const_aws_service_name import create_assume_role_policy_document, AWSServiceName
from .const_aws_managed_policy_arn import AWSManagedPolicyArn
class Metadata:
    """Label strings used to tag the generated IAM role resources."""
    iam_role_lbd_basic_exec = "iam_role_lbd_basic_exec"
    iam_role_lbd_s3_read_and_write = "iam_role_lbd_s3_read_and_write"
    iam_role_lbd_s3_restricted_bucket_read_and_write = "iam_role_lbd_s3_restricted_bucket_read_and_write"
class CannedCommonLambdaFunctionIamRole(MultiEnvBasicConfig):
    """Canned CloudFormation template defining common Lambda execution IAM roles.

    Creates a basic-execution role, an S3 read/write role and, when
    S3_RESTRICTED_BUCKETS is set (comma-separated bucket names), a role
    restricted to those buckets.
    """
    template = None  # type: Template
    iam_role_lbd_basic_exec = None  # type: iam.Role
    iam_role_lbd_s3_read_and_write = None  # type: iam.Role
    iam_role_lbd_s3_restricted_bucket_read_and_write = None  # type: iam.Role

    # Comma-separated list of bucket names; empty disables the restricted role.
    S3_RESTRICTED_BUCKETS = Constant(default="")

    def create_template(self):
        """Build and return the Template containing the IAM role resources."""
        self.template = Template()
        self.iam_role_lbd_basic_exec = iam.Role(
            "IamRoleLambdaBasicExecution",
            template=self.template,
            Metadata={
                DEFAULT_LABELS_FIELD: [Metadata.iam_role_lbd_basic_exec, ]
            },
            RoleName="{}-lbd-basic-exec".format(
                self.ENVIRONMENT_NAME.get_value()),
            AssumeRolePolicyDocument=create_assume_role_policy_document(
                [AWSServiceName.aws_Lambda]),
            ManagedPolicyArns=[AWSManagedPolicyArn.awsLambdaBasicExecutionRole]
        )
        self.iam_role_lbd_s3_read_and_write = iam.Role(
            "IamRoleLambdaS3Execution",
            template=self.template,
            Metadata={
                DEFAULT_LABELS_FIELD: [Metadata.iam_role_lbd_s3_read_and_write, ]
            },
            RoleName="{}-lbd-s3-exec".format(
                self.ENVIRONMENT_NAME.get_value()),
            AssumeRolePolicyDocument=create_assume_role_policy_document([
                AWSServiceName.aws_Lambda,
            ]),
            ManagedPolicyArns=[
                AWSManagedPolicyArn.awsLambdaExecute
            ],
        )
        if self.S3_RESTRICTED_BUCKETS.get_value():
            bucket_name_list = [
                bucket_name.strip()
                for bucket_name in self.S3_RESTRICTED_BUCKETS.get_value().split(",")
            ]
            self.iam_role_lbd_s3_restricted_bucket_read_and_write = iam.Role(
                "IamRoleLambdaS3RestrictedBucketExecution",
                template=self.template,
                Metadata={
                    DEFAULT_LABELS_FIELD: [Metadata.iam_role_lbd_s3_restricted_bucket_read_and_write, ]
                },
                # NOTE(review): this RoleName is identical to the one used by
                # IamRoleLambdaS3Execution above ("{}-lbd-s3-exec"); deploying
                # both roles would collide -- confirm whether this should be
                # e.g. "{}-lbd-s3-restricted-exec".
                RoleName="{}-lbd-s3-exec".format(
                    self.ENVIRONMENT_NAME.get_value()),
                AssumeRolePolicyDocument=create_assume_role_policy_document(
                    [AWSServiceName.aws_Lambda]),
                Policies=[
                    iam.Policy(
                        # NOTE(review): empty PolicyName -- verify this is
                        # accepted by CloudFormation or give it a real name.
                        PolicyName="",
                        PolicyDocument={
                            "Version": "2012-10-17",
                            "Statement": [
                                {
                                    "Effect": "Allow",
                                    "Action": [
                                        "logs:*"
                                    ],
                                    "Resource": "arn:aws:logs:*:*:*"
                                },
                                {
                                    "Effect": "Allow",
                                    "Action": [
                                        "s3:GetObject",
                                        "s3:PutObject"
                                    ],
                                    "Resource": [
                                        "arn:aws:s3:::{}*".format(bucket_name)
                                        for bucket_name in bucket_name_list
                                    ]
                                }
                            ]
                        }
                    )
                ]
            )
        return self.template
| 40.543689
| 105
| 0.515326
|
4faea2b3068977dfd3dd9cbefcc0c912506a8b1c
| 54,556
|
py
|
Python
|
amber/convert_amber.py
|
hannahbrucemacdonald/openmm-forcefields
|
11a9ed2edf0745e77d3e362f27a6c1d5b77abfc5
|
[
"MIT"
] | null | null | null |
amber/convert_amber.py
|
hannahbrucemacdonald/openmm-forcefields
|
11a9ed2edf0745e77d3e362f27a6c1d5b77abfc5
|
[
"MIT"
] | null | null | null |
amber/convert_amber.py
|
hannahbrucemacdonald/openmm-forcefields
|
11a9ed2edf0745e77d3e362f27a6c1d5b77abfc5
|
[
"MIT"
] | null | null | null |
# AMBER --> OpenMM force-field conversion script
# Author: Rafal P. Wiewiora, ChoderaLab
from __future__ import print_function, division
import parmed
from parmed.utils.six import iteritems
from parmed.utils.six.moves import StringIO, zip
import simtk.openmm.app as app
import simtk.unit as u
import simtk
import os
import sys
import re
import tempfile
import yaml
from distutils.spawn import find_executable
import hashlib
from collections import OrderedDict
import glob
import argparse
from lxml import etree as et
import csv
import logging
import warnings
import xml.etree.ElementTree as etree
from copy import deepcopy
from parmed.exceptions import ParameterWarning
# Escalate ParameterWarning to an error so conversion problems are not
# silently ignored (individual converters relax this where appropriate).
warnings.filterwarnings('error', category=ParameterWarning)

# Extract the file argument of LEaP "loadoff"/"source" commands.
_loadoffre = re.compile(r'loadoff (\S*)', re.I)
_sourcere = re.compile(r'source (\S*)', re.I)

# check for AMBERHOME, find from tleap location if not set, exception if can't
if os.getenv('AMBERHOME'):
    AMBERHOME = os.getenv('AMBERHOME')
else:
    if not find_executable('tleap'):
        raise Exception('AMBERHOME not set and tleap not available from PATH')
    tleap_path = find_executable('tleap')
    AMBERHOME = os.path.split(tleap_path)[0]
    AMBERHOME = os.path.join(AMBERHOME, '../')
parmed.amber.AMBERHOME = AMBERHOME

# set global defaults for verbose and log
verbose = False
no_log = False

# set files that are ignored in leaprc's
# solvents and ions converted separately; leaprc.ff10 calls phosphoaa10.lib
# which does not exist anymore, LeAP skips it on error so we do too
ignore = {'solvents.lib', 'atomic_ions.lib', 'ions94.lib', 'ions91.lib',
          'phosphoaa10.lib'}

# define NEARLYZERO to replace numerical comparisons to zero
NEARLYZERO = 1e-10
class LeapException(Exception):
    """Raised when processing a LEaP input file fails.

    The exception message embeds the full contents of the offending file
    so the failure can be diagnosed from the traceback alone.
    """

    def __init__(self, leaprc_filename):
        msg = 'Something went wrong in processing this LEaP input file:\n'
        msg += '\n'
        # Use a context manager so the file handle is always closed
        # (the original leaked the handle).
        with open(leaprc_filename, 'rt') as infile:
            msg += infile.read()
        msg += '\n'
        super(LeapException, self).__init__(msg)
def main():
    """Command-line entry point: parse arguments, convert, optionally validate."""
    global verbose
    global no_log
    global logger
    # argparse
    parser = argparse.ArgumentParser(description='AMBER --> OpenMM forcefield '
                                     'conversion script')
    parser.add_argument('--input', '-i', default='master.yaml',
                        help='path of the input file. Default: "master.yaml"')
    parser.add_argument('--input-format', '-if', default='yaml',
                        help='format of the input file: "yaml" or "leaprc". Default: "yaml"')
    parser.add_argument('--output-dir', '-od', help='path of the output directory. '
                        'Default: "ffxml/" for yaml, "./" for leaprc')
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='turns verbosity on')
    parser.add_argument('--log', action='store', dest='log_filename', default=None,
                        help='log energies for tests to specified CSV file')
    parser.add_argument('--protein-test', action='store_true',
                        help='validate resulting XML through protein tests')
    parser.add_argument('--nucleic-test', action='store_true',
                        help='validate resulting XML through nucleic acid tests')
    parser.add_argument('--protein-ua-test', action='store_true',
                        help='validate resulting XML through united-atom protein tests')
    parser.add_argument('--phospho-protein-test', action='store_true',
                        help='validate resulting XML through phosphorylated protein tests')
    parser.add_argument('--gaff-test', action='store_true',
                        help='validate resulting XML through small-molecule (GAFF) test')
    parser.add_argument('--lipids-test', action='store_true',
                        help='validate resulting XML through lipids tests')
    args = parser.parse_args()
    verbose = args.verbose
    # Energy comparisons are logged to CSV when --log is given,
    # otherwise the logger is a no-op.
    if args.log_filename:
        logger = Logger(args.log_filename) # log to file
    else:
        logger = Logger() # be silent
    # input is either a YAML or a leaprc - default is leaprc
    # output directory hardcoded here for ffxml/
    if args.input_format == 'yaml':
        if args.output_dir is None:
            convert_yaml(args.input, ffxml_dir='ffxml/')
        else:
            convert_yaml(args.input, ffxml_dir=args.output_dir)
    # if leaprc converted - output to the same dir
    elif args.input_format == 'leaprc':
        if args.output_dir is None:
            ffxml_name = convert_leaprc(args.input, ffxml_dir='./')
        else:
            ffxml_name = convert_leaprc(args.input, ffxml_dir=args.output_dir)
        # Validation tests only apply to single-leaprc conversions.
        if args.protein_test:
            validate_protein(ffxml_name, args.input)
        if args.nucleic_test:
            validate_nucleic(ffxml_name, args.input)
        if args.protein_ua_test:
            validate_protein(ffxml_name, args.input, united_atom=True)
        if args.phospho_protein_test:
            validate_phospho_protein(ffxml_name, args.input)
        if args.gaff_test:
            validate_gaff(ffxml_name, args.input)
        if args.lipids_test:
            validate_lipids(ffxml_name, args.input)
    else:
        sys.exit('Wrong input_format chosen.')
    logger.close()
def read_lines(filename):
    """Return the lines of *filename* with ``#`` comments removed.

    Everything from the first ``#`` to the end of each line is dropped
    (including the line's newline); lines without a ``#`` are returned
    unchanged, newline included.
    """
    with open(filename, 'rt') as handle:
        cleaned = []
        for raw in handle:
            hash_pos = raw.find('#')
            cleaned.append(raw if hash_pos == -1 else raw[:hash_pos])
    return cleaned
def write_file(file, contents):
    """Write text to file.

    Parameters
    ----------
    file : str or int
        Path of the file to write, or an OS-level file descriptor
        (e.g. as returned by ``tempfile.mkstemp``).
    contents : str
        Text contents to be written to file
    """
    # isinstance instead of the original `type(file) == str`; the `with`
    # guarantees the handle is closed even if the write raises.
    if isinstance(file, str):
        outfile = open(file, 'w')
    else:
        outfile = os.fdopen(file, 'w')
    with outfile:
        outfile.write(contents)
def convert_leaprc(files, split_filename=False, ffxml_dir='./', ignore=ignore,
                   provenance=None, write_unused=False, filter_warnings='error'):
    """Convert one or more LEaP input files to a single OpenMM ffxml file.

    ``source`` lines are expanded recursively and ``loadoff`` lines naming
    files in *ignore* are dropped before the merged text is handed to
    ParmEd.  Returns the path of the written ffxml file.
    """
    if verbose: print('\nConverting %s to ffxml...' % files)
    # allow for multiple source files - further code assuming list is passed
    if not isinstance(files, list):
        files = [files]
    # The output basename is built by joining the (optionally extension-
    # stripped) input basenames with underscores.
    basename = ''
    for f in files:
        f_basename = os.path.basename(f)
        if split_filename:
            f_basename = f_basename.split('.')[1:]
            f_basename = '.'.join(f_basename)
        if not basename:
            basename = f_basename
        else:
            basename += '_'
            basename += f_basename
    ffxml_name = os.path.join(ffxml_dir, (basename + '.xml'))
    if not os.path.exists(ffxml_dir):
        os.mkdir(ffxml_dir)
    if verbose: print('Preprocessing the leaprc for %s...' % basename)
    # do source processing
    new_files = []
    for fil in files:
        lines = read_lines(fil)
        for line in lines:
            if _sourcere.findall(line):
                replace_leaprc = _sourcere.findall(line)[0]
                replace_leaprc_path = os.path.join(os.path.join(AMBERHOME,
                                       'dat/leap/cmd', replace_leaprc))
                new_files.append(replace_leaprc_path)
        new_files.append(fil)
    # now do ignore processing and join multiple leaprc's
    files = new_files
    new_lines = []
    for fil in files:
        lines = read_lines(fil)
        fil_new_lines = []
        for line in lines:
            if (ignore is not None and _loadoffre.findall(line) and
                _loadoffre.findall(line)[0] in ignore):
                continue
            # Note: += on a list with a string extends it character by
            # character; the characters are re-joined into text below.
            fil_new_lines += line
        new_lines += fil_new_lines
    leaprc = StringIO(''.join(new_lines))
    if verbose: print('Converting to ffxml %s...' % ffxml_name)
    params = parmed.amber.AmberParameterSet.from_leaprc(leaprc)
    params = parmed.openmm.OpenMMParameterSet.from_parameterset(params, remediate_residues=(not write_unused))
    # With the default 'error' filter the module-level warning escalation
    # applies; any other value relaxes it just for the write.
    if filter_warnings != 'error':
        with warnings.catch_warnings():
            warnings.filterwarnings(filter_warnings, category=ParameterWarning)
            params.write(ffxml_name, provenance=provenance, write_unused=write_unused, improper_dihedrals_ordering='amber')
    else:
        params.write(ffxml_name, provenance=provenance, write_unused=write_unused, improper_dihedrals_ordering='amber')
    if verbose: print('%s successfully written!' % ffxml_name)
    return ffxml_name
def convert_gaff(files, ffxml_basename='', split_filename=False, ffxml_dir='./', ignore=ignore,
                 provenance=None, write_unused=False, filter_warnings='error'):
    """Convert GAFF parameter file(s) to an OpenMM ffxml file.

    Unlike convert_leaprc, the files are plain parameter sets (no LEaP
    preprocessing).  Returns the path of the written ffxml file.

    NOTE(review): the split_filename and ignore parameters are accepted
    but never used here -- confirm whether they can be removed.
    """
    if verbose: print('\nConverting %s to ffxml...' % files)
    # allow for multiple source files - further code assuming list is passed
    if not isinstance(files, list):
        files = [files]
    # Create ffxml
    ffxml_name = os.path.join(ffxml_dir, (ffxml_basename + '.xml'))
    if not os.path.exists(ffxml_dir):
        os.mkdir(ffxml_dir)
    # Process parameter file
    params = parmed.amber.AmberParameterSet(files)
    params = parmed.openmm.OpenMMParameterSet.from_parameterset(params, remediate_residues=(not write_unused))
    if filter_warnings != 'error':
        with warnings.catch_warnings():
            warnings.filterwarnings(filter_warnings, category=ParameterWarning)
            params.write(ffxml_name, provenance=provenance, write_unused=write_unused, improper_dihedrals_ordering='amber')
    else:
        params.write(ffxml_name, provenance=provenance, write_unused=write_unused, improper_dihedrals_ordering='amber')
    if verbose: print('%s successfully written!' % ffxml_name)
    return ffxml_name
def convert_recipe(files, solvent_file=None, ffxml_dir='./', provenance=None, ffxml_basename=None,
                   filter_warnings='always'):
    """Convert a solvent/ion "recipe" to ffxml, prefixing atom types.

    Atom types are renamed to "<ffxml_basename>-<type>" to avoid clashes
    with other force fields.  When *solvent_file* is given, its atom
    types, residues and nonbonded parameters are spliced into the output
    XML by hand; otherwise the file is treated as a 'multivalent' ion set.
    Returns the path of the written ffxml file.
    """
    if verbose: print('\nConverting %s to ffxml...' % files)
    ffxml_name = os.path.join(ffxml_dir, (ffxml_basename + '.xml'))
    ffxml_temp_stringio = StringIO()
    params = parmed.amber.AmberParameterSet(files)
    # NOTE(review): looks like leftover debug output -- consider removing.
    print(params.atom_types.keys())
    params = parmed.openmm.OpenMMParameterSet.from_parameterset(params)
    # Change atom type naming
    # atom_types
    new_atom_types = OrderedDict()
    for name, atom_type in iteritems(params.atom_types):
        new_name = ffxml_basename + '-' + name
        new_atom_types[new_name] = atom_type
    params.atom_types = new_atom_types
    # atoms in residues
    for name, residue in iteritems(params.residues):
        for atom in residue:
            new_type = ffxml_basename + '-' + atom.type
            atom.type = new_type
    if solvent_file is None:
        # this means this file does not include a water model - hard-coded assumption it is
        # then a 'multivalent' file - set overrideLevel to 1 for all residue templates
        for name, residue in iteritems(params.residues):
            residue.override_level = 1
        with warnings.catch_warnings():
            warnings.filterwarnings(filter_warnings, category=ParameterWarning)
            params.write(ffxml_name, provenance=provenance, write_unused=False, improper_dihedrals_ordering='amber')
    else:
        # Write to an in-memory buffer first, then merge with the solvent
        # XML section by section.
        with warnings.catch_warnings():
            warnings.filterwarnings(filter_warnings, category=ParameterWarning)
            params.write(ffxml_temp_stringio, provenance=provenance, write_unused=False, improper_dihedrals_ordering='amber')
        ffxml_temp_stringio.seek(0)
        if verbose: print('Modifying converted ffxml to append solvent parameters')
        tree_main = et.parse(ffxml_temp_stringio)
        tree_water = et.parse(solvent_file)
        root_main = tree_main.getroot()
        root_water = tree_water.getroot()
        with open(ffxml_name, 'wb') as f:
            # Hand-assembled XML: each section takes the ion entries from
            # the converted set followed by the solvent-file entries.
            f.write(b'<ForceField>\n ')
            f.write(et.tostring(root_main.findall('Info')[0]))
            f.write(b'<AtomTypes>\n ')
            for subelement in root_main.findall('AtomTypes')[0]:
                f.write(et.tostring(subelement))
            f.write(b' ')
            for subelement in root_water.findall('AtomTypes')[0]:
                f.write(et.tostring(subelement))
            f.write(b'</AtomTypes>\n <Residues>\n ')
            for subelement in root_main.findall('Residues')[0]:
                f.write(et.tostring(subelement))
            f.write(b' ')
            for subelement in root_water.findall('Residues')[0]:
                f.write(et.tostring(subelement))
            f.write(b'</Residues>\n <HarmonicBondForce>\n ')
            for subelement in root_water.findall('HarmonicBondForce')[0]:
                f.write(et.tostring(subelement))
            f.write(b'</HarmonicBondForce>\n <HarmonicAngleForce>\n ')
            for subelement in root_water.findall('HarmonicAngleForce')[0]:
                f.write(et.tostring(subelement))
            f.write(b'</HarmonicAngleForce>\n ')
            # The 1-4 scaling attributes are taken from the converted set.
            f.write(('<NonbondedForce coulomb14scale="%s" lj14scale="%s">\n ' %
                    (root_main.findall('NonbondedForce')[0].attrib['coulomb14scale'],
                     root_main.findall('NonbondedForce')[0].attrib['lj14scale'])
                    ).encode('utf-8'))
            for subelement in root_main.findall('NonbondedForce')[0]:
                f.write(et.tostring(subelement))
            f.write(b' ')
            for subelement in root_water.findall('NonbondedForce')[0]:
                if subelement.tag == 'UseAttributeFromResidue': continue
                f.write(et.tostring(subelement))
            f.write(b'</NonbondedForce>\n</ForceField>')
    if verbose: print('%s successfully written!' % ffxml_name)
    return ffxml_name
def convert_yaml(yaml_name, ffxml_dir, ignore=ignore):
    """Convert every force-field entry described in a YAML master file.

    Each YAML entry describes a set of AMBER source files to convert (as a
    leaprc, a water/ion recipe, or a GAFF release), the provenance metadata
    to embed in the output, optional post-processing (lipid merging, type
    prefixing), and which validation tests to run on the resulting ffxml.

    Parameters
    ----------
    yaml_name : str
        Path of the YAML file driving the conversion.
    ffxml_dir : str
        Output directory for converted ffxml files (individual entries may
        override it through the 'ffxml_dir' option).
    ignore : iterable of str
        Filenames to skip during conversion.

    Raises
    ------
    Exception
        On an unknown MODE value, an unknown conversion option, or when an
        entry declares no recognized validation test.
    """
    data = yaml.load(open(yaml_name), Loader=yaml.FullLoader)
    # TODO: Verify that the version that is installed via conda matches sourcePackageVersion
    ALLOWED_MODES = ('LEAPRC', 'RECIPE', 'GAFF')
    # Default yaml reading mode is leaprc.  Initialize it explicitly: the
    # previous code left MODE undefined until the first explicit 'MODE'
    # entry, so a YAML file relying on the documented default raised
    # NameError at the first `MODE ==` comparison below.
    MODE = 'LEAPRC'
    for entry in data:
        # Handle MODE switching
        if 'MODE' in entry:
            MODE = entry['MODE']
            if not MODE in ALLOWED_MODES:
                raise Exception(f'MODE definition must be one of {ALLOWED_MODES}')
            continue
        # Handle definition of source packages
        if 'sourcePackage' in entry:
            source_pack = entry['sourcePackage']
            source_pack_ver = entry['sourcePackageVersion']
            continue
        if 'sourcePackage2' in entry:
            # Secondary source package (used for RECIPE solvent files).
            source_pack2 = entry['sourcePackage2']
            source_pack_ver2 = entry['sourcePackageVersion2']
            continue
        # Extract source files, reference, and test files
        source_files = entry['Source']
        reference = entry['Reference']
        test_filename = entry['Test']
        # Make sure source_files is a list
        if isinstance(source_files, str):
            source_files = [source_files]
        # Recipes require extra definitions
        if MODE == 'RECIPE':
            recipe_name = entry['Name']
            solvent_name = entry['Solvent']
            if 'Solvent_source' in entry:
                recipe_source2 = entry['Solvent_source']
            else:
                recipe_source2 = None
            if 'Standard' in entry:
                standard_ffxml = os.path.join(ffxml_dir, (entry['Standard'] + '.xml'))
            else:
                standard_ffxml = None
        elif MODE == 'GAFF':
            recipe_name = entry['Name']
        # Create provenance object
        provenance = OrderedDict()
        files = []
        source = provenance['Source'] = []
        for source_file in source_files:
            # Resolve the on-disk location of each source file per mode.
            if MODE == 'LEAPRC':
                _filename = os.path.join(AMBERHOME, 'dat/leap/cmd', source_file)
            elif MODE == 'RECIPE':
                _filename = os.path.join(AMBERHOME, 'dat/leap/', source_file)
            elif MODE == 'GAFF':
                _filename = os.path.join('gaff', 'dat', source_file)
            files.append(_filename)
            source.append(OrderedDict())
            source[-1]['Source'] = source_file
            # Record an md5 hash of each source file in the provenance.
            md5 = hashlib.md5()
            with open(_filename, 'rb') as f:
                md5.update(f.read())
            md5 = md5.hexdigest()
            source[-1]['md5hash'] = md5
            source[-1]['sourcePackage'] = source_pack
            source[-1]['sourcePackageVersion'] = source_pack_ver
        # For recipes, add water file and source info for it
        if MODE == 'RECIPE' and recipe_source2 is not None:
            _filename = os.path.join('files', recipe_source2)
            solvent_file = _filename
            source.append(OrderedDict())
            source[-1]['Source'] = recipe_source2
            md5 = hashlib.md5()
            with open(_filename, 'rb') as f:
                md5.update(f.read())
            md5 = md5.hexdigest()
            source[-1]['md5hash'] = md5
            source[-1]['sourcePackage'] = source_pack2
            source[-1]['sourcePackageVersion'] = source_pack_ver2
        elif MODE == 'RECIPE' and recipe_source2 is None:
            solvent_file = None
        provenance['Reference'] = reference
        # set default conversion options
        write_unused = False
        filter_warnings = 'error'
        # set conversion options if present
        if 'Options' in entry:
            for option in entry['Options']:
                if option == 'write_unused':
                    write_unused = entry['Options'][option]
                elif option == 'filter_warnings':
                    filter_warnings = entry['Options'][option]
                elif option == 'ffxml_dir':
                    ffxml_dir = entry['Options'][option]
                else:
                    raise Exception("Wrong option used in Options for %s"
                                    % source_files)
        # Convert files
        if MODE == 'LEAPRC':
            ffxml_name = convert_leaprc(files, ffxml_dir=ffxml_dir, ignore=ignore,
                                        provenance=provenance, write_unused=write_unused,
                                        filter_warnings=filter_warnings, split_filename=True)
        elif MODE == 'RECIPE':
            ffxml_name = convert_recipe(files, solvent_file=solvent_file,
                                        ffxml_dir=ffxml_dir, provenance=provenance,
                                        ffxml_basename=recipe_name)
        elif MODE == 'GAFF':
            ffxml_name = convert_gaff(files, ffxml_basename=recipe_name, ffxml_dir=ffxml_dir, ignore=ignore,
                                      provenance=provenance, write_unused=write_unused,
                                      filter_warnings=filter_warnings, split_filename=True)
        # Optional post-processing: merge CHARMM-style lipid templates.
        if 'CharmmFFXMLFilename' in entry:
            charmm_ffxml_filename = entry['CharmmFFXMLFilename']
            charmm_lipid2amber_filename = entry['CharmmLipid2AmberFilename']
            if verbose: print('Merging lipid entries...')
            merge_lipids(ffxml_name, charmm_ffxml_filename, charmm_lipid2amber_filename)
        # Optional post-processing: prefix all atom types (e.g. 'gaff-').
        if 'Prefix' in entry:
            prefix = entry['Prefix']
            if verbose: print('Rewriting %s to append prefix "%s"...' % (ffxml_name, prefix))
            add_prefix_to_ffxml(ffxml_name, prefix)
        if verbose: print('Validating the conversion...')
        tested = False
        # Dispatch each declared validation test; at least one must match.
        for test in test_filename:
            if test == 'protein':
                validate_protein(ffxml_name, entry['Source'])
                tested = True
            elif test == 'nucleic':
                validate_dna(ffxml_name, entry['Source'])
                validate_rna(ffxml_name, entry['Source'])
                tested = True
            elif test == 'protein_ua':
                validate_protein(ffxml_name, entry['Source'], united_atom=True)
                tested = True
            elif test == 'protein_phospho':
                validate_phospho_protein(ffxml_name, entry['Source'])
                tested = True
            elif test == 'gaff':
                validate_gaff(ffxml_name, entry['leaprc'], entry['Source'])
                tested = True
            elif test == 'water_ion':
                validate_water_ion(ffxml_name, files, solvent_name, recipe_name,
                                   standard_ffxml=standard_ffxml)
                tested = True
            elif test == 'dna':
                validate_dna(ffxml_name, entry['Source'])
                tested = True
            elif test == 'rna':
                validate_rna(ffxml_name, entry['Source'])
                tested = True
            elif test == 'lipids':
                #validate_lipids(ffxml_name, source_files)
                validate_merged_lipids(ffxml_name, entry['Source'])
                tested = True
        if not tested:
            raise Exception('No validation tests have been run for %s' %
                            source_files)
def merge_lipids(ffxml_filename, charmm_ffxml_filename, charmm_lipid2amber_filename):
    """
    Merge lipid residue definitions in AMBER ffxml file according to entries in a CHARMM ffxml file.

    The AMBER residue templates are replaced in place by copies of the CHARMM
    residue templates whose atom attributes (everything except 'name') have
    been substituted with the corresponding AMBER values, using the
    charmmlipid2amber CSV file as the name translation table.

    Parameters
    ----------
    ffxml_filename : str
        AMBER lipids ffxml filename with AMBER split lipids.
        Overwritten in place with the merged residue templates.
    charmm_ffxml_filename : str
        CHARMM ffxml lipids
    charmm_lipid2amber_filename : str
        CHARMM CSV file containing translation from CHARMM -> AMBER
    """
    # Read the input files.
    charmmff = etree.parse(charmm_ffxml_filename)
    amberff = etree.parse(ffxml_filename)
    charmmResidues = charmmff.getroot().find('Residues').findall('Residue')
    amberResidues = amberff.getroot().find('Residues').findall('Residue')
    # Index AMBER atoms as {residue name: {atom name: <Atom> element}}.
    amberResMap = {}
    for res in amberResidues:
        atoms = dict((atom.attrib['name'], atom) for atom in res.findall('Atom'))
        amberResMap[res.attrib['name']] = atoms
    # Build (merged residue, merged atom) -> (original residue, original atom)
    # from the charmmlipid2amber CSV file.
    translations = {}
    with open(charmm_lipid2amber_filename) as input:
        # Skip the first two lines.
        input.readline()
        input.readline()
        for line in input:
            fields = line.split(',')
            mergedRes = fields[0]
            mergedAtom = fields[2].split()[0]
            originalAtom, originalRes = fields[3].split()
            translations[(mergedRes, mergedAtom)] = (originalRes, originalAtom)
    # Remove all residues from the Amber file.
    parentNode = amberff.getroot().find('Residues')
    for res in amberResidues:
        parentNode.remove(res)
    # Copy over the CHARMM residues, making appropriate replacements.
    def translateResidue(residue):
        # Return a copy of the CHARMM residue whose atom attributes (other
        # than 'name') carry the AMBER values, or None when any atom lacks a
        # translation (such residues are dropped from the merged file).
        newres = deepcopy(residue)
        # Translate atom properties
        for atom in newres.findall('Atom'):
            key = (residue.attrib['name'], atom.attrib['name'])
            if key not in translations:
                return None # We don't have a translation.
            amberResName, amberAtomName = translations[key]
            if amberResName not in amberResMap or amberAtomName not in amberResMap[amberResName]:
                return None # We don't have a translation.
            amberAtom = amberResMap[amberResName][amberAtomName]
            for attrib in amberAtom.attrib:
                if attrib != 'name':
                    atom.attrib[attrib] = amberAtom.attrib[attrib]
        # Remove Patches from CHARMM residues
        for patch in newres.findall('AllowPatch'):
            newres.remove(patch)
        return newres
    # Iterate over CHARMM lipid residue templates and replace components with AMBER parameters
    for residue in charmmResidues:
        copy = translateResidue(residue)
        if copy is not None:
            parentNode.append(copy)
    # Write merged lipid ffxml file (overwriting original file)
    amberff.write(ffxml_filename)
def add_prefix_to_ffxml(ffxml_filename, prefix):
    """
    Replace the contents of an ffxml file with a modified version in which every atom type is prefixed with `prefix`.

    Atom type names declared inside the <AtomTypes> section are renamed to
    '<prefix>-<name>', and every reference to them (type / type1..type4
    attributes in the rest of the file) is rewritten to match.

    Parameters
    ----------
    ffxml_filename : str
        OpenMM ffxml filename (will be overwritten)
    prefix : str
        Prefix
    """
    import re
    inTypes = False
    # Map of 'typeN="old"' -> 'typeN="prefix-old"' substitutions collected
    # while scanning <AtomTypes> and applied to every later line.
    replacements = {}
    # Collect rewritten lines and write once at the end: the previous
    # implementation grew a single string with += (quadratic on large files)
    # and also imported sys without using it.
    modified_lines = []
    with open(ffxml_filename, 'r') as infile:
        for line in infile:
            if '<AtomTypes>' in line:
                inTypes = True
            if '</AtomTypes>' in line:
                inTypes = False
            if inTypes:
                match = re.search('name="(.*?)"', line)
                if match is not None:
                    name = match.group(1)
                    newName = prefix + '-' + name
                    line = line.replace('name="%s"' % name, 'name="%s"' % newName)
                    # Bonded terms reference atom types via type and
                    # type1..type4 attributes; remember every variant.
                    for attr in ('type', 'type1', 'type2', 'type3', 'type4'):
                        replacements['%s="%s"' % (attr, name)] = '%s="%s"' % (attr, newName)
            else:
                for key in replacements:
                    if key in line:
                        line = line.replace(key, replacements[key])
            # Normalize line endings exactly as before: strip a trailing
            # newline if present, then always re-append one.
            if line.endswith('\n'):
                line = line[:-1]
            modified_lines.append(line + '\n')
    with open(ffxml_filename, 'w') as outfile:
        outfile.writelines(modified_lines)
def assert_energies(prmtop, inpcrd, ffxml, system_name='unknown', tolerance=2.5e-5,
    improper_tolerance=1e-2, units=u.kilojoules_per_mole, openmm_topology=None, openmm_positions=None):
    """Assert that AMBER and OpenMM energy decompositions agree.

    Builds the system twice -- from the AMBER prmtop/inpcrd pair and from the
    converted ffxml -- decomposes the potential energy force-by-force, and
    raises AssertionError when any relative difference exceeds its allowed
    tolerance.  Results are appended to the global CSV ``logger`` unless the
    global ``no_log`` flag is set.

    Parameters
    ----------
    prmtop : str
        AMBER topology (prmtop) filename.
    inpcrd : str
        AMBER coordinates (inpcrd) filename.
    ffxml : str or iterable of str
        Converted ffxml filename(s) to validate.
    system_name : str
        Label used in error messages and log entries.
    tolerance : float
        Allowed relative error for all terms except improper torsions.
    improper_tolerance : float
        Allowed relative error for the improper-torsion term.
    units : unit
        Units for the energy decomposition.
    openmm_topology, openmm_positions : optional
        Alternative topology/positions for building the OpenMM system (used
        when AMBER and OpenMM templates use different naming, e.g. lipids).

    Raises
    ------
    AssertionError
        If any relative energy difference exceeds its tolerance.
    """
    # AMBER reference energies.  splitDihedrals=True separates proper and
    # improper torsions so they can be tolerated independently below.
    parm_amber = parmed.load_file(prmtop, inpcrd)
    system_amber = parm_amber.createSystem(splitDihedrals=True)
    amber_energies = parmed.openmm.energy_decomposition_system(parm_amber,
        system_amber, nrg=units)
    # OpenMM-ffxml energies.
    if isinstance(ffxml, str):
        ff = app.ForceField(ffxml)
    else:
        ff = app.ForceField(*ffxml)
    if openmm_positions is None:
        openmm_positions = parm_amber.positions
    if openmm_topology is not None:
        system_omm = ff.createSystem(openmm_topology)
        parm_omm = parmed.openmm.load_topology(openmm_topology, system_omm,
            xyz=openmm_positions)
    else:
        system_omm = ff.createSystem(parm_amber.topology)
        parm_omm = parmed.openmm.load_topology(parm_amber.topology, system_omm,
            xyz=parm_amber.positions)
    system_omm = parm_omm.createSystem(splitDihedrals=True)
    omm_energies = parmed.openmm.energy_decomposition_system(parm_omm,
        system_omm, nrg=units, platform='Reference')
    # Compute relative energy differences per force term.
    # (Removed the unused local `energies = []` from the original.)
    rel_energies = []
    for i, j in zip(amber_energies, omm_energies):
        if i[0] != j[0]:
            raise Exception('Mismatch in energy tuples naming.')
        if abs(i[1]) > NEARLYZERO:
            rel_energies.append((i[0], abs((i[1]-j[1])/i[1])))
        else:
            # AMBER term is ~zero: the OpenMM term must be ~zero too,
            # otherwise the relative error is undefined.
            if abs(j[1]) > NEARLYZERO:
                raise AssertionError('One of AMBER %s energies (%s) for %s is zero, '
                                     'while the corresponding OpenMM energy is non-zero' %
                                     (system_name, i[0], ffxml))
            rel_energies.append((i[0], 0))
    # With splitDihedrals=True the first PeriodicTorsionForce entry holds the
    # proper dihedrals and the second the impropers; impropers get their own
    # (looser) tolerance.
    dihedrals_done = False
    for (i, amber_energy, openmm_energy) in zip(rel_energies, amber_energies, omm_energies):
        if i[0] != 'PeriodicTorsionForce':
            if i[1] > tolerance:
                raise AssertionError('%s relative energy error (%s, %f) outside of allowed tolerance (%f) for %s: AMBER %s OpenMM %s' %
                                     (system_name, i[0], i[1], tolerance, ffxml, amber_energy, openmm_energy))
        else:
            if not dihedrals_done:
                if i[1] > tolerance:
                    raise AssertionError('%s relative energy error (%s, %f) outside of allowed tolerance (%f) for %s: AMBER %s OpenMM %s' %
                                         (system_name, i[0], i[1], tolerance, ffxml, amber_energy, openmm_energy))
                dihedrals_done = True
            else: #impropers
                if i[1] > improper_tolerance:
                    raise AssertionError('%s relative energy error (%s-impropers, %f) outside of allowed tolerance (%f) for %s: AMBER %s OpenMM %s' %
                                         (system_name, i[0], i[1], improper_tolerance, ffxml, amber_energy, openmm_energy))
    # logging
    if not no_log:
        def _collect(items, log):
            # Fold an energy decomposition into flat CSV columns, splitting
            # the two PeriodicTorsionForce entries into dihedrals/impropers
            # and dropping CMMotionRemover.  Replaces three copies of this
            # identical loop in the original.
            dihedrals_done = False
            for item in items:
                if item[0] == 'PeriodicTorsionForce' and not dihedrals_done:
                    log['PeriodicTorsionForce_dihedrals'] = item[1]
                    dihedrals_done = True
                elif item[0] == 'PeriodicTorsionForce' and dihedrals_done:
                    log['PeriodicTorsionForce_impropers'] = item[1]
                elif item[0] == 'CMMotionRemover':
                    continue
                else:
                    log[item[0]] = item[1]
        amber_energies_log = {'ffxml_name': ffxml, 'test_system': system_name,
                              'data_type': 'AMBER', 'units': units}
        omm_energies_log = {'ffxml_name': ffxml, 'test_system': system_name,
                            'data_type': 'OpenMM', 'units': units}
        rel_energies_log = {'ffxml_name': ffxml, 'test_system': system_name,
                            'data_type': 'abs((AMBER-OpenMM)/AMBER)'}
        _collect(amber_energies, amber_energies_log)
        _collect(omm_energies, omm_energies_log)
        _collect(rel_energies, rel_energies_log)
        logger.log(amber_energies_log)
        logger.log(omm_energies_log)
        logger.log(rel_energies_log)
def validate_protein(ffxml_name, leaprc_name, united_atom=False):
    """Validate protein energies of a converted ffxml against AMBER/LEaP.

    Builds ala_ala_ala and villin headpiece topologies with tleap from
    ``leaprc_name`` and asserts energy agreement with ``ffxml_name``
    via assert_energies.  Temporary files are always cleaned up.

    Parameters
    ----------
    ffxml_name : str
        Converted ffxml file to validate.
    leaprc_name : str
        leaprc to source in LEaP when building the reference systems.
    united_atom : bool
        If True, use the united-atom variants of the test PDB files.
    """
    if verbose: print('Protein energy validation for %s' % ffxml_name)
    if verbose: print('Preparing temporary files for validation...')
    ala3_top = tempfile.mkstemp()
    ala3_crd = tempfile.mkstemp()
    villin_top = tempfile.mkstemp()
    villin_crd = tempfile.mkstemp()
    leap_script_ala3_file = tempfile.mkstemp()
    leap_script_villin_file = tempfile.mkstemp()
    if verbose: print('Preparing LeaP scripts...')
    if not united_atom:
        leap_script_ala3_string = """source %s
x = loadPdb files/ala3.pdb
saveAmberParm x %s %s
quit""" % (leaprc_name, ala3_top[1], ala3_crd[1])
        leap_script_villin_string = """source %s
x = loadPdb files/villin.pdb
saveAmberParm x %s %s
quit""" % (leaprc_name, villin_top[1], villin_crd[1])
    else:
        leap_script_ala3_string = """source %s
x = loadPdb files/ala3_ua.pdb
saveAmberParm x %s %s
quit""" % (leaprc_name, ala3_top[1], ala3_crd[1])
        leap_script_villin_string = """source %s
x = loadPdb files/villin_ua.pdb
saveAmberParm x %s %s
quit""" % (leaprc_name, villin_top[1], villin_crd[1])
    write_file(leap_script_ala3_file[0], leap_script_ala3_string)
    write_file(leap_script_villin_file[0], leap_script_villin_string)
    if verbose: print('Running LEaP...')
    os.system('tleap -f %s > %s' % (leap_script_ala3_file[1], os.devnull))
    # An empty prmtop/inpcrd means tleap failed; surface its script file.
    if os.path.getsize(ala3_top[1]) == 0 or os.path.getsize(ala3_crd[1]) == 0:
        raise LeapException(leap_script_ala3_file[1])
    os.system('tleap -f %s > %s' % (leap_script_villin_file[1], os.devnull))
    if os.path.getsize(villin_top[1]) == 0 or os.path.getsize(villin_crd[1]) == 0:
        raise LeapException(leap_script_villin_file[1])
    try:
        if verbose: print('Calculating and validating ala_ala_ala energies...')
        assert_energies(ala3_top[1], ala3_crd[1], ffxml_name,
                        system_name='protein-ala_ala_ala')
        if verbose: print('Ala_ala_ala energy validation successful!')
        if verbose: print('Calculating and validating villin headpiece energies...')
        assert_energies(villin_top[1], villin_crd[1], ffxml_name,
                        system_name='protein-villin headpiece')
        if verbose: print('Villin headpiece energy validation successful!')
    finally:
        if verbose: print('Deleting temp files...')
        for f in (ala3_top, ala3_crd, villin_top, villin_crd, leap_script_ala3_file,
                  leap_script_villin_file):
            os.unlink(f[1])
    if verbose: print('Protein energy validation for %s done!' % ffxml_name)
def validate_dna(ffxml_name, leaprc_name):
    """Validate DNA energies of a converted ffxml against AMBER/LEaP.

    Builds the files/4rzn_dna.pdb system with tleap from ``leaprc_name`` and
    asserts energy agreement with ``ffxml_name`` via assert_energies.
    """
    if verbose: print('DNA energy validation for %s' % ffxml_name)
    if verbose: print('Preparing temporary files for validation...')
    dna_top = tempfile.mkstemp()
    dna_crd = tempfile.mkstemp()
    leap_script_dna_file = tempfile.mkstemp()
    if verbose: print('Preparing LeaP scripts...')
    # addPdbAtomMap / addPdbResMap translate PDB v3 atom and terminal residue
    # names (e.g. H5'', OP1, 5'/3' DG) to the names the leaprc templates use.
    leap_script_dna_string = """addPdbAtomMap {
{ "H1'" "H1*" }
{ "H2'" "H2'1" }
{ "H2''" "H2'2" }
{ "H3'" "H3*" }
{ "H4'" "H4*" }
{ "H5'" "H5'1" }
{ "H5''" "H5'2" }
{ "HO2'" "HO'2" }
{ "HO5'" "H5T" }
{ "HO3'" "H3T" }
{ "OP1" "O1P" }
{ "OP2" "O2P" }
}
source %s
addPdbResMap {
{ 0 "DG" "DG5" } { 1 "DG" "DG3" }
{ 0 "DA" "DA5" } { 1 "DA" "DA3" }
{ 0 "DC" "DC5" } { 1 "DC" "DC3" }
{ 0 "DT" "DT5" } { 1 "DT" "DT3" }
}
x = loadPdb files/4rzn_dna.pdb
saveAmberParm x %s %s
quit""" % (leaprc_name, dna_top[1], dna_crd[1])
    write_file(leap_script_dna_file[0], leap_script_dna_string)
    if verbose: print('Running LEaP...')
    os.system('tleap -f %s > %s' % (leap_script_dna_file[1], os.devnull))
    # An empty prmtop/inpcrd means tleap failed; surface its script file.
    if os.path.getsize(dna_top[1]) == 0 or os.path.getsize(dna_crd[1]) == 0:
        raise LeapException(leap_script_dna_file[1])
    try:
        if verbose: print('Calculating and validating DNA energies...')
        assert_energies(dna_top[1], dna_crd[1], ffxml_name,
                        system_name='nucleic-DNA')
        if verbose: print('DNA energy validation successful!')
    finally:
        if verbose: print('Deleting temp files...')
        for f in (dna_top, dna_crd, leap_script_dna_file):
            os.unlink(f[1])
    if verbose: print('DNA energy validation for %s done!' % ffxml_name)
def validate_rna(ffxml_name, leaprc_name):
    """Validate RNA energies of a converted ffxml against AMBER/LEaP.

    Builds the files/5c5w_rna.pdb system with tleap from ``leaprc_name`` and
    asserts energy agreement with ``ffxml_name``.  Two LEaP scripts are
    prepared because different leaprc versions use different residue names
    (G5/G3/G vs RG5/RG3/RG); the alternative is tried if the first fails.
    """
    if verbose: print('RNA energy validation for %s' % ffxml_name)
    if verbose: print('Preparing temporary files for validation...')
    rna_top = tempfile.mkstemp()
    rna_crd = tempfile.mkstemp()
    leap_script_rna_file = tempfile.mkstemp()
    leap_script_rna_file_alt = tempfile.mkstemp()
    if verbose: print('Preparing LeaP scripts...')
    # addPdbAtomMap / addPdbResMap translate PDB v3 names to the AMBER
    # template names; this variant uses the G5/G3/G residue naming.
    leap_script_rna_string = """
addPdbAtomMap {
{ "H1'" "H1*" }
{ "H2'" "H2'1" }
{ "H2''" "H2'2" }
{ "H3'" "H3*" }
{ "H4'" "H4*" }
{ "H5'" "H5'1" }
{ "H5''" "H5'2" }
{ "HO2'" "HO'2" }
{ "HO5'" "H5T" }
{ "HO3'" "H3T" }
{ "OP1" "O1P" }
{ "OP2" "O2P" }
}
source %s
addPdbResMap {
{ 0 "G" "G5" } { 1 "G" "G3" } { "G" "G" }
{ 0 "A" "A5" } { 1 "A" "A3" } { "A" "A" }
{ 0 "C" "C5" } { 1 "C" "C3" } { "C" "C" }
{ 0 "U" "U5" } { 1 "U" "U3" } { "U" "U" }
}
x = loadPdb files/5c5w_rna.pdb
saveAmberParm x %s %s
quit""" % (leaprc_name, rna_top[1], rna_crd[1])
    # Alternative variant for leaprcs that use the RG5/RG3/RG naming.
    leap_script_rna_string_alt = """
addPdbAtomMap {
{ "H1'" "H1*" }
{ "H2'" "H2'1" }
{ "H2''" "H2'2" }
{ "H3'" "H3*" }
{ "H4'" "H4*" }
{ "H5'" "H5'1" }
{ "H5''" "H5'2" }
{ "HO2'" "HO'2" }
{ "HO5'" "H5T" }
{ "HO3'" "H3T" }
{ "OP1" "O1P" }
{ "OP2" "O2P" }
}
source %s
addPdbResMap {
{ 0 "G" "RG5" } { 1 "G" "RG3" } { "G" "RG" }
{ 0 "A" "RA5" } { 1 "A" "RA3" } { "A" "RA" }
{ 0 "C" "RC5" } { 1 "C" "RC3" } { "C" "RC" }
{ 0 "U" "RU5" } { 1 "U" "RU3" } { "U" "RU" }
}
x = loadPdb files/5c5w_rna.pdb
saveAmberParm x %s %s
quit""" % (leaprc_name, rna_top[1], rna_crd[1])
    write_file(leap_script_rna_file[0], leap_script_rna_string)
    write_file(leap_script_rna_file_alt[0], leap_script_rna_string_alt)
    if verbose: print('Running LEaP...')
    os.system('tleap -f %s > %s' % (leap_script_rna_file[1], os.devnull))
    if os.path.getsize(rna_top[1]) == 0 or os.path.getsize(rna_crd[1]) == 0:
        # try alternative name mappings
        os.system('tleap -f %s > %s' % (leap_script_rna_file_alt[1], os.devnull))
        if os.path.getsize(rna_top[1]) == 0 or os.path.getsize(rna_crd[1]) == 0:
            raise LeapException(leap_script_rna_file_alt[1])
    try:
        if verbose: print('Calculating and validating RNA energies...')
        # improper testing turned off pending solution to problems
        assert_energies(rna_top[1], rna_crd[1], ffxml_name,
                        system_name='nucleic-RNA')
        if verbose: print('RNA energy validation successful!')
    finally:
        if verbose: print('Deleting temp files...')
        for f in (rna_top, rna_crd, leap_script_rna_file, leap_script_rna_file_alt):
            os.unlink(f[1])
    if verbose: print('RNA energy validation for %s done!' % ffxml_name)
def validate_gaff(ffxml_name, leaprc_name, gaff_dat_name):
    """Validate GAFF energies of a converted ffxml against AMBER/LEaP.

    Parameterizes imatinib (files/imatinib.mol2 plus files/frcmod.imatinib)
    with tleap and asserts energy agreement with the converted GAFF ffxml
    combined with the pregenerated imatinib residue/frcmod ffxml files.

    Parameters
    ----------
    ffxml_name : str
        Converted GAFF ffxml file to validate.
    leaprc_name : str
        leaprc to source in LEaP.
    gaff_dat_name : str
        GAFF parameter file under gaff/dat/ to load in LEaP.
    """
    if verbose: print('GAFF energy validation for %s' % ffxml_name)
    if verbose: print('Preparing temporary files for validation...')
    imatinib_top = tempfile.mkstemp()
    imatinib_crd = tempfile.mkstemp()
    leap_script_imatinib_file = tempfile.mkstemp()
    if verbose: print('Preparing LeaP scripts...')
    leap_script_imatinib_string = """\
source %s
loadamberparams gaff/dat/%s
loadamberparams files/frcmod.imatinib
x = loadMol2 files/imatinib.mol2
saveAmberParm x %s %s
quit""" % (leaprc_name, gaff_dat_name, imatinib_top[1], imatinib_crd[1])
    write_file(leap_script_imatinib_file[0], leap_script_imatinib_string)
    if verbose: print('Running LEaP...')
    os.system('tleap -f %s > %s' % (leap_script_imatinib_file[1], os.devnull))
    # An empty prmtop/inpcrd means tleap failed; surface its script file.
    if os.path.getsize(imatinib_top[1]) == 0 or os.path.getsize(imatinib_crd[1]) == 0:
        raise LeapException(leap_script_imatinib_file[1])
    try:
        if verbose: print('Calculating and validating imatinib energies...')
        assert_energies(imatinib_top[1], imatinib_crd[1], (ffxml_name,
                        'files/imatinib.xml', 'files/imatinib_frcmod.xml'),
                        system_name='gaff-imatinib')
        if verbose: print('Imatinib energy validation successful!')
    finally:
        if verbose: print('Deleting temp files...')
        for f in (imatinib_top, imatinib_crd, leap_script_imatinib_file):
            os.unlink(f[1])
    if verbose: print('GAFF energy validation for %s done!' % ffxml_name)
def validate_phospho_protein(ffxml_name, leaprc_name,
                             supp_leaprc_name = 'oldff/leaprc.ff99SBildn',
                             supp_ffxml_name='ffxml/ff99SBildn.xml'):
    """Validate phosphorylated-protein energies for a converted ffxml.

    For every PDB under files/phospho/, builds the system with tleap
    (sourcing the supplementary protein leaprc first, then ``leaprc_name``)
    and asserts energy agreement of the supplementary protein ffxml plus
    ``ffxml_name`` against AMBER.
    """
    # this function assumes ffxml/ff14SB.xml already exists
    if verbose: print('Phosphorylated protein energy validation for %s' %
                      ffxml_name)
    for pdbname in glob.iglob('files/phospho/*.pdb'):
        if verbose: print('Now testing with pdb %s' % os.path.basename(pdbname))
        if verbose: print('Preparing temporary files for validation...')
        top = tempfile.mkstemp()
        crd = tempfile.mkstemp()
        leap_script_file = tempfile.mkstemp()
        if verbose: print('Preparing LeaP scripts...')
        leap_script_string = """source %s
source %s
x = loadPdb %s
saveAmberParm x %s %s
quit""" % (supp_leaprc_name, leaprc_name, pdbname, top[1], crd[1])
        write_file(leap_script_file[0], leap_script_string)
        if verbose: print('Running LEaP...')
        os.system('tleap -f %s > %s' % (leap_script_file[1], os.devnull))
        # An empty prmtop/inpcrd means tleap failed; surface its script file.
        if os.path.getsize(top[1]) == 0 or os.path.getsize(crd[1]) == 0:
            raise LeapException(leap_script_file[1])
        try:
            if verbose: print('Calculating and validating energies...')
            assert_energies(top[1], crd[1], (supp_ffxml_name, ffxml_name),
                            system_name='phospho_protein: %s'
                            % os.path.basename(pdbname))
            if verbose: print('Energy validation successful!')
        finally:
            if verbose: print('Deleting temp files...')
            for f in (top, crd, leap_script_file):
                os.unlink(f[1])
    if verbose: print('Phosphorylated protein energy validation for %s done!'
                      % ffxml_name)
def validate_water_ion(ffxml_name, source_recipe_files, solvent_name, recipe_name,
                       standard_ffxml=None):
    """Validate water/ion recipe energies for a converted ffxml.

    Builds the matching test box with tleap and compares only the
    NonbondedForce term between AMBER and OpenMM.

    Parameters
    ----------
    ffxml_name : str
        Converted ffxml file to validate.
    source_recipe_files : list of str
        Parameter files the recipe was built from (loaded into LEaP).
    solvent_name : str
        One of 'tip3p', 'tip4pew', 'spce', 'tip3pfb', 'tip4pfb'.
    recipe_name : str
        Basename of the test PDB under files/water_ion/.
    standard_ffxml : str, optional
        Additional ffxml needed to build the OpenMM system.

    Raises
    ------
    ValueError
        If solvent_name is not one of the known water models.
    AssertionError
        If the NonbondedForce energies disagree beyond tolerance.
    """
    if verbose: print('Water and ions energy validation for %s' %
                      ffxml_name)
    # Map the water model to its LEaP residue template and extra frcmod.
    if solvent_name == 'tip3p':
        HOH = 'TP3'
        solvent_frcmod = None
    elif solvent_name == 'tip4pew':
        HOH = 'T4E'
        solvent_frcmod = 'frcmod.tip4pew'
    elif solvent_name == 'spce':
        HOH = 'SPC'
        solvent_frcmod = 'frcmod.spce'
    elif solvent_name == 'tip3pfb':
        HOH = 'FB3'
        solvent_frcmod = 'frcmod.tip3pfb'
    elif solvent_name == 'tip4pfb':
        HOH = 'FB4'
        solvent_frcmod = 'frcmod.tip4pfb'
    else:
        # Previously an unknown solvent fell through the chain and raised a
        # confusing NameError on HOH below; fail fast with a clear message.
        raise ValueError('Unknown solvent_name: %s' % solvent_name)
    pdb_name = 'files/water_ion/' + recipe_name + '.pdb'
    if verbose: print('Preparing temporary files for validation...')
    top = tempfile.mkstemp()
    crd = tempfile.mkstemp()
    leap_script_file = tempfile.mkstemp()
    if verbose: print('Preparing LeaP scripts...')
    leap_script_string_part1 = """loadamberparams parm10.dat
loadamberparams %s
loadamberparams %s\n""" % (source_recipe_files[0], source_recipe_files[1])
    leap_script_string_part2 = """\nloadOff atomic_ions.lib
loadoff solvents.lib
HOH = %s
# for TIP4PEW
addPdbAtomMap {{ "M" "EPW" }}
x = loadPdb %s
saveAmberParm x %s %s
quit""" % (HOH, pdb_name, top[1], crd[1])
    # Splice in the water-model frcmod between the two script halves when
    # the model needs one.
    if solvent_frcmod:
        leap_script_string = (leap_script_string_part1 + ('loadamberparams %s'
                              % solvent_frcmod) + leap_script_string_part2)
    else:
        leap_script_string = leap_script_string_part1 + leap_script_string_part2
    write_file(leap_script_file[0], leap_script_string)
    # this test does its own energy assertion because of differences
    if verbose: print('Running LEaP...')
    os.system('tleap -f %s > %s' % (leap_script_file[1], os.devnull))
    if os.path.getsize(top[1]) == 0 or os.path.getsize(crd[1]) == 0:
        raise LeapException(leap_script_file[1])
    try:
        if verbose: print('Calculating and validating energies...')
        pdb = app.PDBFile(pdb_name, extraParticleIdentifier='')
        if standard_ffxml is None:
            ff = app.ForceField(ffxml_name)
        else:
            ff = app.ForceField(ffxml_name, standard_ffxml)
        system_omm = ff.createSystem(pdb.topology)
        parm_omm = parmed.openmm.load_topology(pdb.topology, xyz=pdb.positions)
        parm_amber = parmed.load_file(top[1], crd[1])
        system_amber = parm_amber.createSystem()
        omm_energies = parmed.openmm.energy_decomposition_system(parm_omm,
            system_omm, nrg=u.kilojoules_per_mole)
        for entry in omm_energies:
            if entry[0] == 'NonbondedForce':
                omm_nonbonded = entry[1]
        amber_energies = parmed.openmm.energy_decomposition_system(parm_amber,
            system_amber, nrg=u.kilojoules_per_mole)
        for entry in amber_energies:
            if entry[0] == 'NonbondedForce':
                amber_nonbonded = entry[1]
        # Only the nonbonded term is compared for water/ion boxes.
        rel_nonbonded = abs((amber_nonbonded-omm_nonbonded) / amber_nonbonded)
        if rel_nonbonded > 1e-5:
            raise AssertionError('NonbondedForce Water and ions energy (%f) outside of '
                                 'allowed tolerance (%f) for %s:' % (rel_nonbonded, 1e-5, ffxml_name))
        if verbose: print('Energy validation successful!')
    finally:
        if verbose: print('Deleting temp files...')
        for f in (top, crd, leap_script_file):
            os.unlink(f[1])
    # logging
    if not no_log:
        amber_energies_log = dict()
        omm_energies_log = dict()
        rel_energies_log = dict()
        amber_energies_log['ffxml_name'] = ffxml_name
        amber_energies_log['test_system'] = 'water_ion'
        amber_energies_log['data_type'] = 'AMBER'
        amber_energies_log['NonbondedForce'] = amber_nonbonded
        amber_energies_log['units'] = u.kilojoules_per_mole
        omm_energies_log['ffxml_name'] = ffxml_name
        omm_energies_log['test_system'] = 'water_ion'
        omm_energies_log['data_type'] = 'OpenMM'
        omm_energies_log['NonbondedForce'] = omm_nonbonded
        omm_energies_log['units'] = u.kilojoules_per_mole
        rel_energies_log['ffxml_name'] = ffxml_name
        rel_energies_log['test_system'] = 'water_ion'
        rel_energies_log['data_type'] = 'abs((AMBER-OpenMM)/AMBER)'
        rel_energies_log['NonbondedForce'] = rel_nonbonded
        logger.log(amber_energies_log)
        logger.log(omm_energies_log)
        logger.log(rel_energies_log)
    if verbose: print('Water and ions energy validation for %s done!'
                      % ffxml_name)
def validate_impropers(ffxml_name, leaprc_name):
    """Check that AMBER and the converted ffxml assign identical impropers.

    Builds villin, DNA and RNA test systems with tleap and with the OpenMM
    ForceField, then compares the atom-index quadruples of all improper
    torsions in both directions; any asymmetric difference fails.
    """
    if verbose: print('Impropers validation for %s' % ffxml_name)
    if verbose: print('Preparing temporary files for validation...')
    top_villin = tempfile.mkstemp()
    crd_villin = tempfile.mkstemp()
    top_dna = tempfile.mkstemp()
    crd_dna = tempfile.mkstemp()
    top_rna = tempfile.mkstemp()
    crd_rna = tempfile.mkstemp()
    leap_script_file = tempfile.mkstemp()
    if verbose: print('Preparing LeaP scripts...')
    leap_script_string = """source %s
x = loadPdb files/villin.pdb
y = loadPdb files/4rzn_dna.pdb
z = loadPdb files/5c5w_rna.pdb
saveAmberParm x %s %s
saveAmberParm y %s %s
saveAmberParm z %s %s
quit""" % (leaprc_name, top_villin[1], crd_villin[1], top_dna[1], crd_dna[1],
           top_rna[1], crd_rna[1])
    write_file(leap_script_file[0], leap_script_string)
    if verbose: print('Running LEaP...')
    os.system('tleap -f %s > %s' % (leap_script_file[1], os.devnull))
    # An empty prmtop/inpcrd means tleap failed; surface its script file.
    if os.path.getsize(top_villin[1]) == 0 or os.path.getsize(crd_villin[1]) == 0:
        raise LeapException(leap_script_file[1])
    if os.path.getsize(top_dna[1]) == 0 or os.path.getsize(crd_dna[1]) == 0:
        raise LeapException(leap_script_file[1])
    if os.path.getsize(top_rna[1]) == 0 or os.path.getsize(crd_rna[1]) == 0:
        raise LeapException(leap_script_file[1])
    # load into parmed
    parm_amber_villin = parmed.load_file(top_villin[1])
    parm_amber_dna = parmed.load_file(top_dna[1])
    parm_amber_rna = parmed.load_file(top_rna[1])
    # OpenMM
    ff = app.ForceField(ffxml_name)
    sys_omm_villin = ff.createSystem(parm_amber_villin.topology)
    sys_omm_dna = ff.createSystem(parm_amber_dna.topology)
    sys_omm_rna = ff.createSystem(parm_amber_rna.topology)
    parm_omm_villin = parmed.openmm.load_topology(parm_amber_villin.topology,
                                                  sys_omm_villin)
    parm_omm_dna = parmed.openmm.load_topology(parm_amber_dna.topology,
                                               sys_omm_dna)
    parm_omm_rna = parmed.openmm.load_topology(parm_amber_rna.topology,
                                               sys_omm_rna)
    # prepare sets of idxs
    # Each improper torsion is represented by its atom-index quadruple.
    set_amber_villin = set([(dih.atom1.idx, dih.atom2.idx, dih.atom3.idx,
                           dih.atom4.idx) for dih in parm_amber_villin.dihedrals if dih.improper])
    set_amber_dna = set([(dih.atom1.idx, dih.atom2.idx, dih.atom3.idx,
                        dih.atom4.idx) for dih in parm_amber_dna.dihedrals if dih.improper])
    set_amber_rna = set([(dih.atom1.idx, dih.atom2.idx, dih.atom3.idx,
                        dih.atom4.idx) for dih in parm_amber_rna.dihedrals if dih.improper])
    set_omm_villin = set([(dih.atom1.idx, dih.atom2.idx, dih.atom3.idx,
                         dih.atom4.idx) for dih in parm_omm_villin.dihedrals if dih.improper])
    set_omm_dna = set([(dih.atom1.idx, dih.atom2.idx, dih.atom3.idx,
                      dih.atom4.idx) for dih in parm_omm_dna.dihedrals if dih.improper])
    set_omm_rna = set([(dih.atom1.idx, dih.atom2.idx, dih.atom3.idx,
                      dih.atom4.idx) for dih in parm_omm_rna.dihedrals if dih.improper])
    try:
        # Compare both set differences so extra and missing impropers are
        # both reported.
        if (set_amber_villin - set_omm_villin != set() or
            set_omm_villin - set_amber_villin != set()):
            raise AssertionError("""Impropers validation fail for %s (villin)
set_amber - set_omm: %s
set_omm - set_amber: %s""" % (ffxml_name,
                              set_amber_villin-set_omm_villin,
                              set_omm_villin-set_amber_villin))
        if (set_amber_dna - set_omm_dna != set() or
            set_omm_dna - set_amber_dna != set()):
            raise AssertionError("""Impropers validation fail for %s (DNA)
set_amber - set_omm: %s
set_omm - set_amber: %s""" % (ffxml_name,
                              set_amber_dna-set_omm_dna,
                              set_omm_dna-set_amber_dna))
        if (set_amber_rna - set_omm_rna != set() or
            set_omm_rna - set_amber_rna != set()):
            raise AssertionError("""Impropers validation fail for %s (RNA)
set_amber - set_omm: %s
set_omm - set_amber: %s""" % (ffxml_name,
                              set_amber_rna-set_omm_rna,
                              set_omm_rna-set_amber_rna))
    finally:
        if verbose: print('Deleting temp files...')
        for f in (top_villin, crd_villin, top_dna, crd_dna, top_rna, crd_rna,
                  leap_script_file):
            os.unlink(f[1])
    if verbose: print('Improper validation for %s done!' % ffxml_name)
def validate_lipids(ffxml_name, leaprc_name):
    """Validate lipid energies of a converted ffxml against AMBER/LEaP.

    Builds the files/POPC-nowater-amber.pdb system with tleap from
    ``leaprc_name`` and asserts energy agreement with ``ffxml_name``
    via assert_energies.  Temporary files are always cleaned up.
    """
    if verbose:
        print('Lipids energy validation for %s' % ffxml_name)
        print('Preparing temporary files for validation...')
    prm_tmp = tempfile.mkstemp()
    crd_tmp = tempfile.mkstemp()
    script_tmp = tempfile.mkstemp()
    if verbose:
        print('Preparing LeaP scripts...')
    script_text = """source %s
x = loadPdb files/POPC-nowater-amber.pdb
saveAmberParm x %s %s
quit""" % (leaprc_name, prm_tmp[1], crd_tmp[1])
    write_file(script_tmp[0], script_text)
    if verbose:
        print('Running LEaP...')
    os.system('tleap -f %s > %s' % (script_tmp[1], os.devnull))
    # An empty prmtop/inpcrd means tleap failed; surface its script file.
    if os.path.getsize(prm_tmp[1]) == 0 or os.path.getsize(crd_tmp[1]) == 0:
        raise LeapException(script_tmp[1])
    try:
        if verbose:
            print('Calculating and validating lipids energies...')
        assert_energies(prm_tmp[1], crd_tmp[1], ffxml_name, system_name='lipids')
        if verbose:
            print('Lipids energy validation successful!')
    finally:
        if verbose:
            print('Deleting temp files...')
        for tmp in (prm_tmp, crd_tmp, script_tmp):
            os.unlink(tmp[1])
    if verbose:
        print('Lipids energy validation for %s done!' % ffxml_name)
def validate_merged_lipids(ffxml_name, leaprc_name):
    """Validate merged (CHARMM-named) lipid templates against AMBER/LEaP.

    The AMBER reference system is built from the AMBER-named PDB via tleap,
    while the OpenMM system is built from the CHARMM-named PDB so that the
    merged residue templates produced by merge_lipids are exercised.
    """
    if verbose: print('Lipids (merged) energy validation for %s' % ffxml_name)
    if verbose: print('Preparing temporary files for validation...')
    lipids_top = tempfile.mkstemp()
    lipids_crd = tempfile.mkstemp()
    leap_script_lipids_file = tempfile.mkstemp()
    # CHARMM-named topology/positions used for the OpenMM side below.
    pdbfile = app.PDBFile('files/POPC-nowater-charmm.pdb')
    if verbose: print('Preparing LeaP scripts...')
    leap_script_lipids_string = """source %s
x = loadPdb files/POPC-nowater-amber.pdb
saveAmberParm x %s %s
quit""" % (leaprc_name, lipids_top[1], lipids_crd[1])
    write_file(leap_script_lipids_file[0], leap_script_lipids_string)
    if verbose: print('Running LEaP...')
    os.system('tleap -f %s > %s' % (leap_script_lipids_file[1], os.devnull))
    # An empty prmtop/inpcrd means tleap failed; surface its script file.
    if os.path.getsize(lipids_top[1]) == 0 or os.path.getsize(lipids_crd[1]) == 0:
        raise LeapException(leap_script_lipids_file[1])
    try:
        if verbose: print('Calculating and validating lipids energies...')
        assert_energies(lipids_top[1], lipids_crd[1], ffxml_name,
                        system_name='lipids',
                        openmm_topology=pdbfile.topology, openmm_positions=pdbfile.positions)
        if verbose: print('Lipids energy validation successful!')
    finally:
        if verbose: print('Deleting temp files...')
        for f in (lipids_top, lipids_crd, leap_script_lipids_file):
            os.unlink(f[1])
    if verbose: print('Lipids energy validation for %s done!' % ffxml_name)
class Logger():
    """
    Log energy discrepancies to a CSV file.

    When `log_filename` is omitted the logger is a no-op: `log()` and
    `close()` do nothing, so callers never need to null-check.

    Parameters
    ----------
    log_filename : str, optional
        Name of CSV file to write to.
    """
    # logs testing energies into csv
    def __init__(self, log_filename=None):
        if log_filename:
            # newline='' is required by the csv module; without it the
            # writer emits blank rows between records on Windows.
            csvfile = open(log_filename, 'w', newline='')
            fieldnames = ['ffxml_name', 'data_type', 'test_system', 'units',
                          'HarmonicBondForce', 'HarmonicAngleForce',
                          'PeriodicTorsionForce_dihedrals',
                          'PeriodicTorsionForce_impropers', 'NonbondedForce']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            self.csvfile = csvfile
            self.writer = writer
        else:
            self.csvfile = None
            self.writer = None

    def close(self):
        """Close the underlying CSV file, if one was opened."""
        if self.csvfile:
            self.csvfile.close()

    def log(self, energies):
        """Write one row; `energies` maps the fieldnames above to values."""
        if self.writer:
            self.writer.writerow(energies)
# Script entry point: run the validation suite defined above.
if __name__ == '__main__':
    main()
| 42.688576
| 149
| 0.620518
|
f5023075ad24645c9d3c45169a5f3b8b057a6dc7
| 8,860
|
py
|
Python
|
FortniteAPI/Stats.py
|
tww0003/Fortnite-Tracker-Python-Wrapper
|
46ca061aace98b319937260881949301843fc8cb
|
[
"WTFPL"
] | 1
|
2020-12-30T03:32:40.000Z
|
2020-12-30T03:32:40.000Z
|
FortniteAPI/Stats.py
|
tww0003/Fortnite-Tracker-Python-Wrapper
|
46ca061aace98b319937260881949301843fc8cb
|
[
"WTFPL"
] | null | null | null |
FortniteAPI/Stats.py
|
tww0003/Fortnite-Tracker-Python-Wrapper
|
46ca061aace98b319937260881949301843fc8cb
|
[
"WTFPL"
] | null | null | null |
from enum import Enum
class FortnitePlaylist(Enum):
    """Stat-key identifiers for each Fortnite playlist.

    Values are the keys used under ``json["stats"]`` in the tracker payload;
    the ``curr_``-prefixed variants presumably refer to the current season,
    the bare variants to lifetime totals (per the naming — verify against
    the tracker API docs).
    """
    SOLO = "p2"
    DUO = "p10"
    SQUAD = "p9"
    CURRENT_SOLO = "curr_p2"
    CURRENT_DUO = "curr_p10"
    CURRENT_SQUAD = "curr_p9"
class FortniteStats:
    """Accessor over a Fortnite Tracker API JSON payload for one player.

    All getters return strings (the API's ``"value"`` fields) and fall back
    to ``"0"`` when a stat is absent, so callers never have to null-check.
    Passing ``json=None`` produces an empty shell with no attributes set
    (matching the original behavior).
    """

    # (attribute-name suffix, getter method name) applied per playlist.
    _STAT_FIELDS = (
        ("_SCORE", "get_score"),
        ("_WINS", "get_top_one"),
        ("_TOP_TWELVE", "get_top_twelve"),
        ("_TOP_TWENTY_FIVE", "get_top_twenty_five"),
        ("_KD", "get_kd"),
        ("_MATCHES", "get_matches"),
        ("_KILLS", "get_kills"),
        ("_KPG", "get_kpg"),
        ("_SCORE_PER_MATCH", "get_score_per_match"),
    )

    def __init__(self, json=None):
        if json is None:
            return
        self.json_data = json
        playlist_prefixes = (
            ("CURRENT_SOLO", FortnitePlaylist.CURRENT_SOLO),
            ("CURRENT_DUO", FortnitePlaylist.CURRENT_DUO),
            ("CURRENT_SQUAD", FortnitePlaylist.CURRENT_SQUAD),
            ("LIFETIME_SOLO", FortnitePlaylist.SOLO),
            ("LIFETIME_DUO", FortnitePlaylist.DUO),
            ("LIFETIME_SQUAD", FortnitePlaylist.SQUAD),
        )
        # Replaces ~54 hand-written assignments with one loop; attribute
        # names are identical to the original's.
        for prefix, playlist in playlist_prefixes:
            for suffix, getter_name in self._STAT_FIELDS:
                setattr(self, prefix + suffix, getattr(self, getter_name)(playlist))
        # The original exposed the squad top-25 stats under names missing
        # "TOP_"; keep those names for backward compatibility (the
        # consistently-named *_TOP_TWENTY_FIVE attributes now also exist).
        self.CURRENT_SQUAD_TWENTY_FIVE = self.CURRENT_SQUAD_TOP_TWENTY_FIVE
        self.LIFETIME_SQUAD_TWENTY_FIVE = self.LIFETIME_SQUAD_TOP_TWENTY_FIVE
        # Whole-account lifetime aggregates.
        self.LIFETIME_KD = self.get_lifetime_kd()
        self.LIFETIME_MATCHES = self.get_lifetime_matches_played()
        self.LIFETIME_KILLS = self.get_lifetime_kills()
        self.LIFETIME_WINS = self.get_lifetime_wins()
        self.LIFETIME_SCORE = self.get_lifetime_score()
        self.LIFETIME_WIN_PERCENTAGE = self.get_lifetime_win_percentage()
        self.EPIC_USER_HANDLE = self.get_epic_user_handle()

    def __str__(self):
        return 'Fortnite stats for: ' + self.EPIC_USER_HANDLE

    def _playlist_stat(self, playlist, key):
        """Return stats[playlist][key]["value"], or "0" if missing."""
        try:
            return self.json_data["stats"][playlist.value][key]["value"]
        except KeyError:
            return "0"

    def _lifetime_stat(self, index):
        """Return lifeTimeStats[index]["value"], or "0" if missing.

        Also catches IndexError: the original caught only KeyError, so a
        lifeTimeStats list shorter than expected crashed instead of
        degrading to "0".
        """
        try:
            return self.json_data["lifeTimeStats"][index]["value"]
        except (KeyError, IndexError):
            return "0"

    def get_score(self, playlist):
        # NOTE(review): reads the "top10" key, not a "score" key — looks
        # like a copy/paste slip in the original; kept to preserve behavior.
        return self._playlist_stat(playlist, "top10")

    def get_top_one(self, playlist):
        return self._playlist_stat(playlist, "top1")

    def get_top_twelve(self, playlist):
        return self._playlist_stat(playlist, "top12")

    def get_top_twenty_five(self, playlist):
        return self._playlist_stat(playlist, "top25")

    def get_kd(self, playlist):
        return self._playlist_stat(playlist, "kd")

    def get_matches(self, playlist):
        return self._playlist_stat(playlist, "matches")

    def get_kills(self, playlist):
        return self._playlist_stat(playlist, "kills")

    def get_kpg(self, playlist):
        return self._playlist_stat(playlist, "kpg")

    def get_score_per_match(self, playlist):
        return self._playlist_stat(playlist, "scorePerMatch")

    # lifeTimeStats is a positional list in the tracker payload; the
    # indices below mirror the original code.
    def get_lifetime_score(self):
        return self._lifetime_stat(6)

    def get_lifetime_matches_played(self):
        return self._lifetime_stat(7)

    def get_lifetime_wins(self):
        return self._lifetime_stat(8)

    def get_lifetime_win_percentage(self):
        return self._lifetime_stat(9)

    def get_lifetime_kills(self):
        return self._lifetime_stat(10)

    def get_lifetime_kd(self):
        return self._lifetime_stat(11)

    def get_epic_user_handle(self):
        """Return the player's Epic handle, or a placeholder if absent."""
        try:
            return self.json_data["epicUserHandle"]
        except KeyError:
            return "No Epic User Handle"
| 41.596244
| 105
| 0.675056
|
73ce5bd1653ec9af3c6bc45c483f489eb8bb088e
| 3,773
|
py
|
Python
|
atomate/vasp/fireworks/exchange.py
|
rkingsbury/atomate
|
d26b65d5c46882d9585f14188514d9a65276336c
|
[
"BSD-3-Clause-LBNL"
] | 167
|
2017-01-26T00:14:19.000Z
|
2022-03-18T20:47:58.000Z
|
atomate/vasp/fireworks/exchange.py
|
rkingsbury/atomate
|
d26b65d5c46882d9585f14188514d9a65276336c
|
[
"BSD-3-Clause-LBNL"
] | 422
|
2016-12-16T18:21:15.000Z
|
2022-03-23T22:13:19.000Z
|
atomate/vasp/fireworks/exchange.py
|
rkingsbury/atomate
|
d26b65d5c46882d9585f14188514d9a65276336c
|
[
"BSD-3-Clause-LBNL"
] | 158
|
2016-12-16T18:28:00.000Z
|
2022-03-28T11:40:03.000Z
|
from fireworks import Firework
from atomate.vasp.firetasks.exchange import (
HeisenbergModelMapping,
HeisenbergModelToDb,
VampireMC,
VampireToDb,
)
from atomate.vasp.config import DB_FILE
__author__ = "Nathan C. Frey"
__email__ = "ncfrey@lbl.gov"
class HeisenbergModelFW(Firework):
    def __init__(
        self,
        wf_uuid,
        parent_structure,
        parents,
        db_file=DB_FILE,
        heisenberg_settings=None,
        name="heisenberg model",
        structures=None,
        energies=None,
    ):
        """
        Takes a set of low-energy magnetic orderings and energies and maps
        them to a Heisenberg Model to compute exchange params.

        * heisenberg_settings:
            cutoff (float): Starting point for nearest neighbor search.
            tol (float): Tolerance for equivalent NN bonds.

        Args:
            wf_uuid (int): Unique id for record keeping.
            parent_structure (Structure): Magnetic ground state.
            parents (FireWorks): Parent FWs.
            db_file (str): Path to file containing db credentials.
            heisenberg_settings (dict): A config dict for Heisenberg model
                mapping, detailed above.
            name (str): Labels the FW.
            structures (list): Magnetic structures.
            energies (list): Total energies of magnetic structures.

        TODO:
            * Test a range of nn cutoffs and add convergence check.
        """
        heisenberg_settings = heisenberg_settings or {}

        fw_name = f"{parent_structure.composition.reduced_formula} {name}"

        # NOTE(review): the original built an `additional_fields` dict here
        # that was never passed to any task; removed as dead code.
        tasks = [
            HeisenbergModelMapping(
                structures=structures,
                energies=energies,
                heisenberg_settings=heisenberg_settings,
            ),
            # Persist the fitted exchange parameters.
            HeisenbergModelToDb(db_file=db_file, wf_uuid=wf_uuid),
        ]

        super().__init__(tasks=tasks, name=fw_name, parents=parents)
class VampireCallerFW(Firework):
    def __init__(
        self,
        wf_uuid,
        parent_structure,
        parents,
        db_file=DB_FILE,
        mc_settings=None,
        name="vampire caller",
    ):
        """Run Vampire Monte Carlo from a HeisenbergModel.

        * mc_settings:
            mc_box_size (float): MC simulation box size in nm.
            equil_timesteps (int): Number of MC equilibration moves.
            mc_timesteps (int): Number of MC moves for averaging.
            avg (bool): Compute only <J>.

        Args:
            wf_uuid (int): Unique id for record keeping.
            parent_structure (Structure): Magnetic ground state.
            parents (FireWorks): Parent FWs.
            db_file (str): Path to file containing db credentials.
            mc_settings (dict): A configuration dict for monte carlo.
            name (str): Labels the FW.
        """
        fw_name = f"{parent_structure.composition.reduced_formula} {name}"

        # NOTE(review): the original built an unused `additional_fields`
        # dict and carried a commented-out HeisenbergConvergence task;
        # both removed as dead code.
        tasks = [
            VampireMC(db_file=db_file, wf_uuid=wf_uuid, mc_settings=mc_settings),
            # Persist the Vampire MC output.
            VampireToDb(db_file=db_file, wf_uuid=wf_uuid),
        ]

        super().__init__(tasks=tasks, name=fw_name, parents=parents)
| 29.708661
| 88
| 0.593692
|
a5fe04d96b34d393824953631cb21e8a75a50b5c
| 2,903
|
py
|
Python
|
tests/validator_integration_test.py
|
drewjj/ode-output-validator-library
|
8d104ee85e60feb06f6b4c0f353478866f4badff
|
[
"Apache-2.0"
] | null | null | null |
tests/validator_integration_test.py
|
drewjj/ode-output-validator-library
|
8d104ee85e60feb06f6b4c0f353478866f4badff
|
[
"Apache-2.0"
] | null | null | null |
tests/validator_integration_test.py
|
drewjj/ode-output-validator-library
|
8d104ee85e60feb06f6b4c0f353478866f4badff
|
[
"Apache-2.0"
] | null | null | null |
from odevalidator import TestCase, ValidatorException
import unittest
import queue
from tests import assert_results
class ValidatorIntegrationTest(unittest.TestCase):
    """End-to-end checks: sample capture files are streamed through
    ``TestCase.validate_queue`` and the expected number of validation
    failures is asserted via ``assert_results``."""

    def test_good_file_does_good_things(self):
        outcome = self._validate_file('tests/testfiles/good.json')
        assert_results(self, outcome, 0)

    def test_good_bsmTx_file_passes_sequential_checks(self):
        outcome = self._validate_file('tests/testfiles/good_bsmTx.json',
                                      'odevalidator/configs/config_bsm.ini')
        assert_results(self, outcome, 0)

    def test_good_braodcast_tim(self):
        outcome = self._validate_file('tests/testfiles/good_broadcast_tim.json',
                                      'odevalidator/configs/config_tim.ini')
        assert_results(self, outcome, 0)

    def test_good_rxMsg_BSMonly(self):
        outcome = self._validate_file('tests/testfiles/good_rxMsg_BSMonly.json')
        assert_results(self, outcome, 0)

    def test_good_json_alt_value(self):
        outcome = self._validate_file('tests/testfiles/good_altValPercent.json',
                                      'odevalidator/configs/configAltPercent.ini')
        assert_results(self, outcome, 0)

    def test_csv_file(self):
        outcome = self._validate_file('tests/testfiles/good_vsl.csv',
                                      'odevalidator/configs/csvconfig.ini')
        assert_results(self, outcome, 0)

    def test_csv_timestamp_file(self):
        outcome = self._validate_file('tests/testfiles/good_vsl_timestamp.csv',
                                      'odevalidator/configs/csv_timestamp_config.ini')
        assert_results(self, outcome, 0)

    def test_bad_csv_file(self):
        outcome = self._validate_file('tests/testfiles/bad_vsl.csv',
                                      'odevalidator/configs/csvconfig.ini')
        assert_results(self, outcome, 4)

    def test_bad_file_does_bad_things(self):
        outcome = self._validate_file('tests/testfiles/bad.json')
        assert_results(self, outcome, 29)

    def _validate_file(self, data_file, config_file='odevalidator/configs/config.ini'):
        """Queue every non-blank, non-comment line of `data_file` and run
        it through the validator configured by `config_file`."""
        validator = TestCase(config_file)
        record_queue = queue.Queue()
        with open(data_file) as handle:
            for raw_line in handle:
                record = raw_line.strip()
                if record and not record.startswith('#'):
                    record_queue.put(record)
        return validator.validate_queue(record_queue)
| 38.706667
| 89
| 0.69032
|
13227c4242d5cc5aee61a9cf30813736e492ddaa
| 218
|
py
|
Python
|
kbsite/urls.py
|
PerpetuumCommunity/simplePostKB
|
f5afa9711bc9352d9032ede3e92d26bc088f1603
|
[
"MIT"
] | 1
|
2018-05-21T10:55:37.000Z
|
2018-05-21T10:55:37.000Z
|
kbsite/urls.py
|
PerpetuumCommunity/simplePostKB
|
f5afa9711bc9352d9032ede3e92d26bc088f1603
|
[
"MIT"
] | null | null | null |
kbsite/urls.py
|
PerpetuumCommunity/simplePostKB
|
f5afa9711bc9352d9032ede3e92d26bc088f1603
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
import views
# URL routes: home view (optionally paginated) and a kill-submission endpoint.
urlpatterns = [
    url(r'^$', views.home, name="home"),
    # Optional page number, e.g. /2 — passed to the view as `pagenum`.
    url(r'^(?P<pagenum>\d+)$', views.home, name="home"),
    url(r'^postkill', views.postkill, name="postkill"),
]
| 27.25
| 56
| 0.62844
|
5aeaa52c99d1563647e9b0620d586c5d96c05032
| 1,961
|
py
|
Python
|
src/FrontEnd/Elaborator/fe_types_elaborator.py
|
Typee-Language/Typee
|
adf61fee616bd798dae3bdc6710fd819e188e925
|
[
"MIT"
] | 9
|
2018-07-07T23:07:20.000Z
|
2021-06-26T19:37:54.000Z
|
src/FrontEnd/Elaborator/fe_types_elaborator.py
|
Typee-Language/Typee
|
adf61fee616bd798dae3bdc6710fd819e188e925
|
[
"MIT"
] | 147
|
2018-07-07T23:05:58.000Z
|
2021-07-26T14:02:45.000Z
|
src/FrontEnd/Elaborator/fe_types_elaborator.py
|
Typee-Language/Typee
|
adf61fee616bd798dae3bdc6710fd819e188e925
|
[
"MIT"
] | 2
|
2018-07-08T16:30:50.000Z
|
2018-07-08T22:32:03.000Z
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2018-2021 Philippe Schmouker, Typee project, http://www.typee.ovh
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
#=============================================================================
from FrontEnd.Elaborator.fe_elaborator import FEElaborator
#=============================================================================
class FETypesElaborator( FEElaborator ):
    """
    Front-end elaborator for Typee type constructs.

    NOTE(review): skeleton only — the constructor body is `pass` and no
    elaboration logic is implemented yet.
    """
    #-------------------------------------------------------------------------
    def __init__(self, params) -> None:
        '''
        Constructor.

        Args:
            params: elaboration parameters (unused in this stub — confirm
                the expected type against FEElaborator when implementing).
        '''
        pass
    #-------------------------------------------------------------------------

#===== end of FrontEnd.Elaborator.fe_types_elaborator =====#
| 36.314815
| 79
| 0.566548
|
f310ed88eea5316b8b9359a47c63d008485a0e8c
| 1,897
|
py
|
Python
|
fpga/utils/fanglobs.py
|
bugblat/fan
|
854b2c0c6b94270bd08a2cf58bfa1871d34bb8ef
|
[
"MIT"
] | 5
|
2016-01-21T21:22:04.000Z
|
2022-02-16T22:26:59.000Z
|
fpga/utils/fanglobs.py
|
bugblat/fan
|
854b2c0c6b94270bd08a2cf58bfa1871d34bb8ef
|
[
"MIT"
] | null | null | null |
fpga/utils/fanglobs.py
|
bugblat/fan
|
854b2c0c6b94270bd08a2cf58bfa1871d34bb8ef
|
[
"MIT"
] | 2
|
2015-12-29T15:32:52.000Z
|
2017-06-22T15:23:08.000Z
|
##---------------------------------------------------------
# Name:      fanglobs.py
# Purpose:   globals
#
# Author:    Tim
#
# Created:   08/07/2014
# Copyright: (c) Tim Eccles and Bugblat Ltd. 2014
# Licence:   see the LICENSE.txt file
##---------------------------------------------------------

##---------------------------------------------------------
# XO2 configuration memory: page sizes (bytes) and per-device page counts.
CFG_PAGE_SIZE = 16
UFM_PAGE_SIZE = 16

## XO2-1200
CFG_PAGE_COUNT_1200 = 2175
UFM_PAGE_COUNT_1200 = 512

## XO2-2000
CFG_PAGE_COUNT_2000 = 3198
UFM_PAGE_COUNT_2000 = 640

## XO2-4000
CFG_PAGE_COUNT_4000 = 5758
UFM_PAGE_COUNT_4000 = 768

## XO2-7000
CFG_PAGE_COUNT_7000 = 9212
UFM_PAGE_COUNT_7000 = 2048

# Sentinel returned/used when a device cannot be identified.
UNRECOGNIZED = 'unrecognized'

##---------------------------------------------------------
# register addresses.
# these _must_ match the addresses defined for the hardware in fandefs.vhd

# read-only ID register
R_ID = 0

# write-only Scratch and Misc registers
W_SCRATCH_REG = 1
W_MISC_REG = 2

# misc register LED control values
LED_ALTERNATING = 0
LED_SYNC = 1
LED_OFF = 2

##---------------------------------------------------------
# write command format
# the basic idea is to send an address, followed by a series of data values
# both address and data are six bit values
# the byte format is as follows; the two top bits signal address or data
#   bit number      76543210
#   address format  00aaaaaa    aaaaaa is the register address
#   data format     01dddddd    dddddd is the data for the register
ADDRESS_MASK = 0
DATA_MASK = 0x40

##---------------------------------------------------------
# human-readable LED mode names, matching the LED_* values above
STR_LEDS_ALT = 'alternating'
STR_LEDS_SYNC = 'synchronized'
STR_LEDS_OFF = 'off'

# EOF -----------------------------------------------------
| 27.492754
| 75
| 0.519241
|
0c402cdebb7ee06f0bdad0a203ef73a13d887c8a
| 646
|
py
|
Python
|
fixedwords/migrations/0002_auto_20180122_1234.py
|
notkrd/languagegames
|
d0d5a0a0673c039de4f699c1fe5cfd60b8eeaa70
|
[
"MIT"
] | null | null | null |
fixedwords/migrations/0002_auto_20180122_1234.py
|
notkrd/languagegames
|
d0d5a0a0673c039de4f699c1fe5cfd60b8eeaa70
|
[
"MIT"
] | null | null | null |
fixedwords/migrations/0002_auto_20180122_1234.py
|
notkrd/languagegames
|
d0d5a0a0673c039de4f699c1fe5cfd60b8eeaa70
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-01-22 20:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``author`` and ``title`` CharFields (max_length=200, default '')
    to the ``justtext`` model."""

    dependencies = [
        ('fixedwords', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='justtext',
            name='author',
            field=models.CharField(default='', max_length=200),
        ),
        migrations.AddField(
            model_name='justtext',
            name='title',
            field=models.CharField(default='', max_length=200),
        ),
    ]
| 24.846154
| 64
| 0.56192
|
372211223051bf0ef4dc391e8d4e247b505a3fd0
| 8,204
|
py
|
Python
|
scene_generation/scene_generation/vis.py
|
LUGUANSONG/i2g2i
|
ec532f2e128301472478c3d8fe4c72929e2967a4
|
[
"MIT"
] | null | null | null |
scene_generation/scene_generation/vis.py
|
LUGUANSONG/i2g2i
|
ec532f2e128301472478c3d8fe4c72929e2967a4
|
[
"MIT"
] | 3
|
2021-06-08T21:42:14.000Z
|
2022-01-13T02:48:20.000Z
|
scene_generation/scene_generation/vis.py
|
LUGUANSONG/i2g2i
|
ec532f2e128301472478c3d8fe4c72929e2967a4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import matplotlib.pyplot as plt
import numpy as np
import torch
from imageio import imread
from matplotlib.patches import Rectangle
"""
Utilities for making visualizations.
"""
def draw_layout(vocab, objs, boxes, masks=None, size=256,
                show_boxes=False, bgcolor=(0, 0, 0)):
    """Render a scene layout with pyplot: soft per-object masks (and
    optionally labelled boxes) drawn over a solid background.

    Args:
        vocab: dict with 'object_idx_to_name' mapping category index -> name.
        objs: LongTensor of object category indices.
        boxes: Tensor of [x0, y0, x1, y1] boxes in fractional coordinates
            (they are multiplied by `size` below).
        masks: optional Tensor of per-object soft masks; objects are skipped
            entirely when this is None.
        size: canvas size in pixels.
        show_boxes: when True, also draw labelled bounding boxes.
        bgcolor: RGB background triple, or the string 'white'.
    """
    if bgcolor == 'white':
        bgcolor = (255, 255, 255)

    cmap = plt.get_cmap('rainbow')
    colors = cmap(np.linspace(0, 1, len(objs)))

    with torch.no_grad():
        objs = objs.cpu().clone()
        boxes = boxes.cpu().clone()
        boxes *= size  # fractional coords -> pixels

        if masks is not None:
            masks = masks.cpu().clone()

        bgcolor = np.asarray(bgcolor)
        bg = np.ones((size, size, 1)) * bgcolor  # broadcasts to (size, size, 3)
        plt.imshow(bg.astype(np.uint8))

        plt.gca().set_xlim(0, size)
        plt.gca().set_ylim(size, 0)  # inverted y: origin at top-left
        plt.gca().set_aspect(1.0, adjustable='box')

        for i, obj in enumerate(objs):
            name = vocab['object_idx_to_name'][obj]
            if name == '__image__':
                continue
            box = boxes[i]

            if masks is None:
                continue
            mask = masks[i].numpy()
            peak = mask.max()
            # Guard all-zero masks: the original's unguarded `mask /= mask.max()`
            # divided by zero for empty masks.
            if peak > 0:
                mask = mask / peak
            # Scale the soft mask by the object's RGBA color.
            colored_mask = mask[:, :, None] * np.asarray(colors[i])
            x0, y0, x1, y1 = box
            plt.imshow(colored_mask, extent=(x0, x1, y1, y0),
                       interpolation='bicubic', alpha=1.0)

        if show_boxes:
            for i, obj in enumerate(objs):
                name = vocab['object_idx_to_name'][obj]
                if name == '__image__':
                    continue
                box = boxes[i]
                draw_box(box, colors[i], name)
def add_boxes_to_layout(img, objs, boxes, image_path, size=256, colors=None):
    """Overlay labelled bounding boxes on `img` and save the figure to
    `image_path` (axes hidden, tight bounding box)."""
    if colors is None:
        colors = plt.get_cmap('rainbow')(np.linspace(0, 1, len(objs)))

    plt.clf()
    with torch.no_grad():
        scaled_boxes = boxes.cpu().clone()
        scaled_boxes *= size

    plt.imshow(img)
    axes = plt.gca()
    axes.set_xlim(0, size)
    axes.set_ylim(size, 0)
    axes.set_aspect(1.0, adjustable='box')

    for idx, label in enumerate(objs):
        if label == '__image__':
            continue
        draw_box(scaled_boxes[idx], colors[idx], label, alpha=0.8)

    plt.axis('off')
    plt.savefig(image_path, bbox_inches='tight', pad_inches=0)
def draw_box(box, color, text=None, alpha=1.0):
    """
    Draw a bounding box using pyplot, optionally with a text box label.

    Inputs:
    - box: Tensor or list with 4 elements: [x0, y0, x1, y1] in [0, W] x [0, H]
      coordinate system.
    - color: pyplot color to use for the box.
    - text: (Optional) String; if provided then draw a label for this box.
    - alpha: opacity of the box outline.

    NOTE(review): the asserts below call box.size(0)/box.dim(), which only
    exist on tensors — despite the docstring, a plain list would fail here;
    confirm intended input type.
    """
    TEXT_BOX_HEIGHT = 10
    if torch.is_tensor(box) and box.dim() == 2:
        box = box.view(-1)  # flatten a (1, 4) box to (4,)
    assert box.size(0) == 4
    x0, y0, x1, y1 = box
    assert y1 > y0, box
    assert x1 > x0, box
    w, h = x1 - x0, y1 - y0
    rect = Rectangle((x0, y0), w, h, fc='none', lw=2, ec=color, alpha=alpha)
    plt.gca().add_patch(rect)
    if text is not None:
        # Filled strip along the top edge of the box, with centered label.
        text_rect = Rectangle((x0, y0), w, TEXT_BOX_HEIGHT, fc=color, alpha=0.5)
        plt.gca().add_patch(text_rect)
        tx = 0.5 * (x0 + x1)
        ty = y0 + TEXT_BOX_HEIGHT / 2.0
        plt.text(tx, ty, text, va='center', ha='center')
def draw_scene_graph(objs, triples, vocab=None, **kwargs):
    """
    Use GraphViz to draw a scene graph. If vocab is not passed then we assume
    that objs and triples are python lists containing strings for object and
    relationship names.

    Using this requires that GraphViz is installed. On Ubuntu 16.04 this is easy:
    sudo apt-get install graphviz

    Keyword args (all optional): output_filename, orientation ('V' or 'H'),
    edge_width, arrow_size, binary_edge_weight, ignore_dummies.

    Returns the rendered graph as an image array; the temporary .dot and
    image files are deleted before returning.
    """
    output_filename = kwargs.pop('output_filename', 'graph.png')
    orientation = kwargs.pop('orientation', 'V')
    edge_width = kwargs.pop('edge_width', 6)
    arrow_size = kwargs.pop('arrow_size', 1.5)
    binary_edge_weight = kwargs.pop('binary_edge_weight', 1.2)
    ignore_dummies = kwargs.pop('ignore_dummies', True)

    if orientation not in ['V', 'H']:
        raise ValueError('Invalid orientation "%s"' % orientation)
    # GraphViz layout direction: horizontal -> left-to-right, vertical -> top-down.
    rankdir = {'H': 'LR', 'V': 'TD'}[orientation]

    if vocab is not None:
        # Decode object and relationship names
        assert torch.is_tensor(objs)
        assert torch.is_tensor(triples)
        objs_list, triples_list = [], []
        # Index 0 is reserved for the dummy '__image__' node.
        idx_to_obj = ['__image__'] + vocab['my_idx_to_obj']
        for i in range(objs.size(0)):
            objs_list.append(idx_to_obj[objs[i].item()])
        for i in range(triples.size(0)):
            s = triples[i, 0].item()
            p = vocab['pred_idx_to_name'][triples[i, 1].item()]
            o = triples[i, 2].item()
            triples_list.append([s, p, o])
        objs, triples = objs_list, triples_list

    # General setup, and style for object nodes
    lines = [
        'digraph{',
        'graph [size="5,3",ratio="compress",dpi="300",bgcolor="transparent"]',
        'rankdir=%s' % rankdir,
        'nodesep="0.5"',
        'ranksep="0.5"',
        'node [shape="box",style="rounded,filled",fontsize="48",color="none"]',
        'node [fillcolor="lightpink1"]',
    ]
    # Output nodes for objects
    for i, obj in enumerate(objs):
        if ignore_dummies and obj == '__image__':
            continue
        lines.append('%d [label="%s"]' % (i, obj))

    # Output relationships: each predicate becomes its own node, linked
    # subject -> predicate -> object.
    next_node_id = len(objs)
    lines.append('node [fillcolor="lightblue1"]')
    for s, p, o in triples:
        if ignore_dummies and p == '__in_image__':
            continue
        lines += [
            '%d [label="%s"]' % (next_node_id, p),
            '%d->%d [penwidth=%f,arrowsize=%f,weight=%f]' % (
                s, next_node_id, edge_width, arrow_size, binary_edge_weight),
            '%d->%d [penwidth=%f,arrowsize=%f,weight=%f]' % (
                next_node_id, o, edge_width, arrow_size, binary_edge_weight)
        ]
        next_node_id += 1
    lines.append('}')

    # Now it gets slightly hacky. Write the graphviz spec to a temporary
    # text file
    ff, dot_filename = tempfile.mkstemp()
    with open(dot_filename, 'w') as f:
        for line in lines:
            f.write('%s\n' % line)
    os.close(ff)

    # Shell out to invoke graphviz; this will save the resulting image to disk,
    # so we read it, delete it, then return it.
    # NOTE(review): output_filename is interpolated into a shell command —
    # fine for trusted callers, but consider subprocess for untrusted paths.
    output_format = os.path.splitext(output_filename)[1][1:]
    os.system('dot -T%s %s > %s' % (output_format, dot_filename, output_filename))
    os.remove(dot_filename)
    img = imread(output_filename)
    os.remove(output_filename)
    return img
# Demo: build a tiny hand-made vocab/scene and render its scene graph.
# NOTE(review): draw_scene_graph reads vocab['my_idx_to_obj'] when a vocab is
# given, but this demo vocab defines 'object_idx_to_name' instead — verify
# the expected vocab schema before running this demo.
if __name__ == '__main__':
    o_idx_to_name = ['cat', 'dog', 'hat', 'skateboard']
    p_idx_to_name = ['riding', 'wearing', 'on', 'next to', 'above']
    o_name_to_idx = {s: i for i, s in enumerate(o_idx_to_name)}
    p_name_to_idx = {s: i for i, s in enumerate(p_idx_to_name)}
    vocab = {
        'object_idx_to_name': o_idx_to_name,
        'object_name_to_idx': o_name_to_idx,
        'pred_idx_to_name': p_idx_to_name,
        'pred_name_to_idx': p_name_to_idx,
    }
    objs = [
        'cat',
        'cat',
        'skateboard',
        'hat',
    ]
    objs = torch.LongTensor([o_name_to_idx[o] for o in objs])
    triples = [
        [0, 'next to', 1],
        [0, 'riding', 2],
        [1, 'wearing', 3],
        [3, 'above', 2],
    ]
    # Encode predicate names to indices; subject/object stay as positions.
    triples = [[s, p_name_to_idx[p], o] for s, p, o in triples]
    triples = torch.LongTensor(triples)
    draw_scene_graph(objs, triples, vocab, orientation='V')
| 32.947791
| 82
| 0.588615
|
422e7e184ee8bdbd700ab7431c79c3b2b0f3fbaf
| 16,704
|
py
|
Python
|
src/renamesensorsspl/renamesensorsspl.py
|
Shadoward/renamesensors-spl
|
7f3c61401cda44716d90d6b250c87b97e2d18d94
|
[
"MIT"
] | null | null | null |
src/renamesensorsspl/renamesensorsspl.py
|
Shadoward/renamesensors-spl
|
7f3c61401cda44716d90d6b250c87b97e2d18d94
|
[
"MIT"
] | null | null | null |
src/renamesensorsspl/renamesensorsspl.py
|
Shadoward/renamesensors-spl
|
7f3c61401cda44716d90d6b250c87b97e2d18d94
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
###############################################################
# Author: patrice.ponchant@furgo.com (Fugro Brasil) #
# Created: 10/12/2020 #
# Python : 3.x #
###############################################################
# The future package will provide support for running your code on Python 2.6, 2.7, and 3.3+ mostly unchanged.
# http://python-future.org/quickstart.html
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
##### Basic packages #####
import datetime
import sys, os
import pandas as pd
import math
import re
##### CMD packages #####
from tqdm import tqdm
#from tabulate import tabulate
##### GUI packages #####
from gooey import Gooey, GooeyParser
from colored import stylize, attr, fg
# 417574686f723a205061747269636520506f6e6368616e74
##########################################################
# Main code #
##########################################################
# this needs to be *before* the @Gooey decorator!
# (this code allows to only use Gooey when no arguments are passed to the script)
# Gooey shows its GUI only when the script is launched without CLI arguments
# (see the note above this block). When arguments are present, append
# '--ignore-gooey' so Gooey falls through to plain argparse; `cmd` records
# which mode is active.
if len(sys.argv) >= 2:
    if not '--ignore-gooey' in sys.argv:
        sys.argv.append('--ignore-gooey')
    cmd = True   # command-line mode
else:
    cmd = False  # GUI mode
# GUI Configuration
# Preparing your script for packaging https://chriskiehl.com/article/packaging-gooey-with-pyinstaller
# Prevent stdout buffering # https://github.com/chriskiehl/Gooey/issues/289
@Gooey(
    program_name='Rename tool for sensors using the spreadsheet generated by splsensors',
    # Gooey turns stdout lines matching this regex into a progress bar.
    progress_regex=r"^progress: (?P<current>\d+)/(?P<total>\d+)$",
    progress_expr="current / total * 100",
    hide_progress_msg=True,
    richtext_controls=True,
    #richtext_controls=True,
    terminal_font_family = 'Courier New', # for tabulate table nice formatation
    #dump_build_config=True,
    #load_build_config="gooey_config.json",
    default_size=(800, 750),
    timing_options={
        'show_time_remaining':True,
        'hide_time_remaining_on_complete':True
    },
    tabbed_groups=True,
    navigation='Tabbed',
    header_bg_color = '#95ACC8',
    #body_bg_color = '#95ACC8',
    menu=[{
        'name': 'File',
        'items': [{
            'type': 'AboutDialog',
            'menuTitle': 'About',
            'name': 'renamensensorsspl',
            'description': 'Rename tool for sensors using the spreadsheet generated by splsensors',
            'version': '0.2.0',
            'copyright': '2020',
            'website': 'https://github.com/Shadoward/renamesensors-spl',
            'developer': 'patrice.ponchant@fugro.com',
            'license': 'MIT'
        }]
    },{
        'name': 'Help',
        'items': [{
            'type': 'Link',
            'menuTitle': 'Documentation',
            'url': ''
        }]
    }]
)
def main():
    """Build the CLI/GUI argument parser and hand the parsed args to process()."""
    desc = "Rename tool for sensors using the spreadsheet generated by splsensors"
    parser = GooeyParser(description=desc)
    # Three tabs: full rename, line-name-only rename, and reverse (undo) mode.
    mainopt = parser.add_argument_group('Full Rename Options', gooey_options={'columns': 1})
    lnopt = parser.add_argument_group('LineName Rename Options', gooey_options={'columns': 1})
    revertopt = parser.add_argument_group('Reverse Renaming Options',
                                          description='This option is to be used in csae or you need to rename back the renamed files',
                                          gooey_options={'columns': 1})
    # Full Rename Arguments
    mainopt.add_argument(
        '-i',
        '--xlsxFile',
        dest='xlsxFile',
        metavar='sheets_combined.xlsx File Path',
        help='This is the merge file with all the Final spreadsheet generated by the splsensors tool.\n Please be sure that you have QC the spreadsheet!',
        widget='FileChooser',
        gooey_options={'wildcard': "Excel Files(.xlsx)|*.xlsx"})
    mainopt.add_argument(
        '-n', '--filename',
        dest='filename',
        metavar='Filename',
        widget='TextField',
        type=str,
        help='File name to be use to rename the file.\nYou can use the following wildcard to automate the linename:\n[V] = vessel;\n[LN] = Linename from SPL;\n[ST] = Sensor Type;\n[SD] = Start Date from the sensor (yyyymmdd_hhmmss);\n[N] = sequence number if the sensor have split.\ne.g: [V]_[LN]_[SD]_ASOW.')
    mainopt.add_argument(
        '-s', '--seqnumber',
        dest='seqnumber',
        metavar='Sequence Number Format',
        widget='TextField',
        #default='FugroBrasilis-CRP-Position',
        help='Sequence number format for split files. e.g.: 000 or 00')
    mainopt.add_argument(
        '-t', '--timeFormat',
        dest='timeFormat',
        metavar='Timestamp Format',
        widget='TextField',
        default='%Y%m%d_%H%M',
        help='Timestamp format to be use in the file name.\ne.g.: %Y%m%d_%H%M%S --> 20201224_152432')
    # LineName Rename Arguments
    lnopt.add_argument(
        '-I',
        '--xlsxFile2',
        dest='xlsxFile2',
        metavar='sheets_combined.xlsx File Path',
        help='This is the merge file with all the Final spreadsheet generated by the splsensors tool.\n Please be sure that you have QC the spreadsheet!',
        widget='FileChooser',
        gooey_options={'wildcard': "Excel Files(.xlsx)|*.xlsx"})
    # Additional Arguments
    revertopt.add_argument(
        '-r',
        '--reverseFile',
        dest='reverseFile',
        metavar='reverse_rename.csv File Path',
        help='This is the file generate by this tool after you have rename the files.\nThe file can be edited to remove what you do not need to reverse back the name.',
        widget='FileChooser',
        gooey_options={'wildcard': "Comma separated file (*.csv)|*reverse*.csv"})
    # Use to create help readme.md. TO BE COMMENT WHEN DONE
    # if len(sys.argv)==1:
    #    parser.print_help()
    #    sys.exit(1)
    args = parser.parse_args()
    # `cmd` is the module-level CLI/GUI flag set at import time.
    process(args, cmd)
def process(args, cmd):
    """
    Main driver: validate inputs, then run one of three modes.

    Modes (mutually exclusive, checked in this order):
      1. reverse  -- args.reverseFile set: undo renames listed in reverse_rename.csv, then exit.
      2. linename -- args.xlsxFile2 set: rename using the 'Rename_LN' sheet.
      3. full     -- args.xlsxFile set: rename using the 'Full_List' sheet and
                     the user filename template.

    `cmd` selects CLI (tqdm) vs GUI (printed progress) reporting.
    Exits via sys.exit() with a colored message on any validation failure.
    """
    xlsxFile = args.xlsxFile
    xlsxFile2 = args.xlsxFile2
    filename = args.filename
    reverseFile = args.reverseFile
    # Default "000" gives a 3-digit sequence format when none was supplied.
    seqnumber = args.seqnumber if args.seqnumber is not None else "000"
    timeFormat = args.timeFormat
    # Fseqnumber is the zero-pad width derived from the format string length.
    Fseqnumber = len(seqnumber)

    ##########################################################
    #                 Checking before continuing             #
    ##########################################################
    # Check if Final merge spreadsheet is selected
    if not xlsxFile and not xlsxFile2:
        print('')
        sys.exit(stylize('Final spreadsheet was not selected. Please select the Final spreadsheet created by splsensors tool, quitting', fg('red')))

    # Opening the workbook both validates readability (locked files raise
    # IOError) and exposes the sheet names for the check below.
    if xlsxFile:
        try:
            xl = pd.read_excel(xlsxFile, sheet_name=None, engine='openpyxl')
            sheets = xl.keys()
        except IOError:
            print('')
            sys.exit(stylize(f'The following file is lock ({xlsxFile}). Please close the files, quitting.', fg('red')))
    if xlsxFile2:
        try:
            xl = pd.read_excel(xlsxFile2, sheet_name=None, engine='openpyxl')
            sheets = xl.keys()
        except IOError:
            print('')
            # NOTE(review): message references xlsxFile, not xlsxFile2 — confirm intent.
            sys.exit(stylize(f'The following file is lock ({xlsxFile}). Please close the files, quitting.', fg('red')))
    # The workbook must carry at least one of the splsensors result sheets.
    if not any(key in list(sheets) for key in ['Full_List', 'Rename_LN']):
        print('')
        sys.exit(stylize('Correct Final spreadsheet was not selected. Please select a correct Final spreadsheet created by splsensors tool, quitting', fg('red')))
    # Check if filename is defined (only required for full rename mode)
    if xlsxFile and not filename:
        print('')
        sys.exit(stylize('Filename empty. Please define the new file name, quitting', fg('red')))

    ##########################################################
    #                    Reverse Naming                      #
    ##########################################################
    if args.reverseFile is not None:
        print('', flush = True)
        print('##################################################', flush = True)
        print('RENAMING BACK THE FILES. PLEASE WAIT....', flush = True)
        print('##################################################', flush = True)
        now = datetime.datetime.now() # record time of the subprocess
        dfreverse = pd.read_csv(reverseFile, usecols=["OldName","NewName"])
        pbar = tqdm(total=len(dfreverse)) if cmd else print(f"Renaming the files.\nNote: Output show file counting every {math.ceil(len(dfreverse)/10)}") #cmd vs GUI
        for index, row in dfreverse.iterrows():
            oldname = row['OldName']
            newname = row['NewName']
            # Swap back: the file currently carries `newname`; restore `oldname`.
            if os.path.exists(newname):
                os.rename(newname, oldname)
            progressBar(cmd, pbar, index, dfreverse)
        print('', flush = True)
        print('##################################################', flush = True)
        print('SUMMARY', flush = True)
        print('##################################################', flush = True)
        print('', flush = True)
        print(f'A total of {len(dfreverse)} files were renamed back.\n', flush = True)
        print("Subprocess Duration: ", (datetime.datetime.now() - now), flush = True)
        # Reverse mode is terminal: never falls through to renaming.
        sys.exit()

    # Remove old reverse log so the new run starts a fresh undo record.
    if xlsxFile:
        xlsxFilePath = os.path.dirname(os.path.abspath(xlsxFile))
    else:
        xlsxFilePath = os.path.dirname(os.path.abspath(xlsxFile2))
    if os.path.exists(xlsxFilePath + '\\reverse_rename.csv'):
        try:
            os.remove(xlsxFilePath + '\\reverse_rename.csv')
        except IOError:
            print('')
            sys.exit(stylize(f'The reverse_rename.csv file is lock. Please close the files, quitting.', fg('red')))

    ##########################################################
    #                  Listing the files                     #
    ##########################################################
    print('', flush = True)
    print('##################################################', flush = True)
    print('READING THE SPREADSHEET AND RENAMING THE FILES.', flush = True)
    print('PLEASE WAIT....', flush = True)
    print('##################################################', flush = True)
    # LineName mode takes priority when both spreadsheets are provided.
    if args.xlsxFile2 is not None:
        dfreverse = lnrename(xlsxFile2)
    else:
        dfreverse = fullrename(xlsxFile, timeFormat, Fseqnumber, filename)
    # Persist the undo log next to the input spreadsheet.
    dfreverse.to_csv(xlsxFilePath + '\\reverse_rename.csv', index=True)
    print('', flush = True)
    print('##################################################', flush = True)
    print('SUMMARY', flush = True)
    print('##################################################', flush = True)
    print('', flush = True)
    print(f'A total of {len(dfreverse)} files were renamed.\n', flush = True)
    print('')
    print(f'Reverse Log can be found in {xlsxFilePath}.\n', flush = True)
##########################################################
# Functions #
##########################################################
def lnrename(xlsxFile2):
    """
    Rename sensor files to their new SPL line names.

    Reads the 'Rename_LN' sheet of the splsensors spreadsheet; each row maps
    a 'FilePath' to a 'New LineName'. The file keeps its extension and folder.

    Parameters:
        xlsxFile2 (str): path to the sheets_combined.xlsx workbook.

    Returns:
        pandas.DataFrame with columns OldName/NewName listing every rename
        actually performed (consumed by process() to write reverse_rename.csv).
    """
    dfRename = pd.read_excel(xlsxFile2, sheet_name='Rename_LN', engine='openpyxl')
    pbar = tqdm(total=len(dfRename)) if cmd else print(f"Renaming the files.\nNote: Output show file counting every {math.ceil(len(dfRename)/10)}") #cmd vs GUI
    # Accumulate rows in a list: per-row DataFrame.append() was O(n^2) and was
    # removed in pandas 2.0 — build the frame once at the end instead.
    renamed = []
    for index, row in dfRename.iterrows():
        old_path = row['FilePath']
        folder = os.path.dirname(os.path.abspath(old_path))
        ext = os.path.splitext(os.path.basename(old_path))[1]
        new_path = os.path.join(folder, row['New LineName'] + ext)
        # Only rename (and log) files that actually exist; the caller reports
        # len(result) as the number of files renamed.
        if os.path.exists(old_path):
            os.rename(old_path, new_path)
            renamed.append({"OldName": old_path, "NewName": new_path})
        progressBar(cmd, pbar, index, dfRename)
    return pd.DataFrame(renamed, columns=["OldName", "NewName"])
def fullrename(xlsxFile, timeFormat, Fseqnumber, filename):
    """
    Rename sensor files using the 'Full_List' sheet and a filename template.

    Template wildcards: [V] vessel name, [LN] SPL line name, [ST] sensor type,
    [SD] sensor start time (formatted with `timeFormat`), [N] sequence number
    (zero-padded to `Fseqnumber` digits, assigned 1..n to rows sharing the
    same SPL line name, i.e. split files).

    Parameters:
        xlsxFile (str): path to the sheets_combined.xlsx workbook.
        timeFormat (str): strftime format for the [SD] wildcard.
        Fseqnumber (int): zero-pad width for the [N] wildcard.
        filename (str): template containing the wildcards above.

    Returns:
        pandas.DataFrame (OldName, NewName, Incremental, Sensor Type,
        Vessel Name) listing every rename actually performed.
    """
    dfRename = pd.read_excel(xlsxFile, sheet_name='Full_List', engine='openpyxl')
    coldrop = ['Summary', 'Difference Start [s]', 'Session Start', 'Session End',
               'Session Name', 'Session MaxGap', 'Sensor FileName']
    dfRename.drop(columns=coldrop, inplace=True)
    dfRename.dropna(subset=['SPL LineName'], inplace=True)

    # Number duplicated SPL line names 1..n so split files get unique names.
    # https://stackoverflow.com/questions/59875334/add-incremental-value-for-duplicates
    dfRename['Incremental'] = None
    dftmp = dfRename[dfRename.duplicated(subset='SPL LineName', keep=False)]
    dfRename.loc[dftmp.index, 'Incremental'] = dftmp.groupby(['SPL LineName']).cumcount() + 1
    dfRename.update(dftmp)

    pbar = tqdm(total=len(dfRename)) if cmd else print(f"Renaming the files.\nNote: Output show file counting every {math.ceil(len(dfRename)/10)}") #cmd vs GUI
    # Accumulate rows in a list: per-row DataFrame.append() was O(n^2) and was
    # removed in pandas 2.0 — build the frame once at the end instead.
    renamed = []
    for index, row in dfRename.iterrows():
        FilePath = row['FilePath']
        path = os.path.dirname(os.path.abspath(FilePath))
        ext = os.path.splitext(os.path.basename(FilePath))[1]
        SensorStart = datetime.datetime.strftime(row['Sensor Start'], timeFormat)
        Incremental = row['Incremental']
        # Empty sequence string for non-split files; padded number otherwise.
        seq = str(int(Incremental)).zfill(Fseqnumber) if Incremental is not None else ''

        # https://stackoverflow.com/questions/6116978/how-to-replace-multiple-substrings-of-a-string
        rep = {"[V]": row['Vessel Name'], "[LN]": row['SPL LineName'],
               "[ST]": row['Sensor Type'], "[SD]": SensorStart, "[N]": seq}
        rep = dict((re.escape(k), v) for k, v in rep.items())
        pattern = re.compile("|".join(rep.keys()))
        newname = pattern.sub(lambda m: rep[re.escape(m.group(0))], filename)
        if '[N]' not in filename and seq:
            # Template has no [N] placeholder: append the sequence number so
            # split files still get distinct names. (Previously hard-coded to
            # zfill(3); now honours the user's sequence-number format.)
            newname = newname + '_' + seq
        # Collapse double underscores left by an empty [N] substitution.
        newname = newname.replace('__', '_')

        if os.path.exists(FilePath):
            os.rename(FilePath, os.path.join(path, newname + ext))
            renamed.append({"OldName": FilePath,
                            "NewName": os.path.join(path, newname + ext),
                            "Incremental": Incremental,
                            "Sensor Type": row['Sensor Type'],
                            "Vessel Name": row['Vessel Name']})
        progressBar(cmd, pbar, index, dfRename)
    return pd.DataFrame(renamed, columns=["OldName", "NewName", "Incremental",
                                          "Sensor Type", "Vessel Name"])
# from https://www.pakstech.com/blog/python-gooey/
def print_progress(index, total):
    """Emit a Gooey-parseable progress line ("progress: i/total") on stdout."""
    message = "progress: {}/{}".format(index + 1, total)
    print(message, flush=True)
# Progrees bar GUI and CMD
def progressBar(cmd, pbar, index, ls):
    """
    Advance progress reporting for one processed item.

    CLI mode (cmd=True): step the tqdm bar. GUI mode: emit the Gooey progress
    line plus a decimated human-readable counter (roughly every 10% of items).
    """
    if not cmd:
        print_progress(index, len(ls))
        step = math.ceil(len(ls) / 10)
        if index % step == 0:  # decimate the human-readable output
            print(f"Files Process: {index+1}/{len(ls)}", flush=True)
    else:
        pbar.update(1)
##########################################################
# __main__ #
##########################################################
if __name__ == "__main__":
    now = datetime.datetime.now() # time the process
    main()
    print('')
    print("Process Duration: ", (datetime.datetime.now() - now)) # print the processing time. It is handy to keep an eye on processing performance.
| 45.024259
| 316
| 0.551066
|
c601a41cfd2b3f75247c64814f0824e640ffecd3
| 8,688
|
py
|
Python
|
ddi_search_engine/Bio/AlignAce/Motif.py
|
dbmi-pitt/DIKB-Evidence-analytics
|
9ffd629db30c41ced224ff2afdf132ce9276ae3f
|
[
"MIT"
] | 3
|
2015-06-08T17:58:54.000Z
|
2022-03-10T18:49:44.000Z
|
ddi_search_engine/Bio/AlignAce/Motif.py
|
dbmi-pitt/DIKB-Evidence-analytics
|
9ffd629db30c41ced224ff2afdf132ce9276ae3f
|
[
"MIT"
] | null | null | null |
ddi_search_engine/Bio/AlignAce/Motif.py
|
dbmi-pitt/DIKB-Evidence-analytics
|
9ffd629db30c41ced224ff2afdf132ce9276ae3f
|
[
"MIT"
] | null | null | null |
# Copyright 2003 by Bartek Wilczynski. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
Implementation of sequence motifs.
Changes:
9.2007 (BW) : added the to_faste() and .weblogo() methods allowing to use the Berkeley weblogo server at http://weblogo.berkeley.edu/
"""
from __future__ import generators
from Bio.SubsMat import FreqTable
class Motif(object):
    """
    A class representing sequence motifs.

    A motif is a set of equal-length instance sequences over one alphabet,
    with an optional column mask and a lazily-computed position weight matrix
    (PWM). NOTE: this module is Python 2 code (xrange, old raise syntax,
    Seq.tostring()).
    """
    def __init__(self):
        self.instances = []          # list of Seq objects, all the same length
        self.score = 0.0             # score assigned by the motif finder
        self.mask = []               # per-column 1 (significant) / 0 flags
        self._pwm_is_current = 0     # cache-valid flag for _pwm
        self._pwm = []               # cached list of per-column FreqTables
        self.alphabet=None           # fixed by the first instance added
        self.length=None             # fixed by the first instance/mask seen
    def _check_length(self, len):
        # First call fixes the motif length; later mismatches are errors.
        if self.length==None:
            self.length = len
        elif self.length != len:
            raise ValueError, "You can't change the length of the motif"
    def _check_alphabet(self,alphabet):
        # First call fixes the alphabet; later mismatches are errors.
        if self.alphabet==None:
            self.alphabet=alphabet
        elif self.alphabet != alphabet:
            raise ValueError, "Wrong Alphabet"
    def add_instance(self,instance):
        """
        adds new instance to the motif

        The instance must match the motif's alphabet and length (both are
        adopted from the first instance). Invalidates the cached PWM.
        """
        self._check_alphabet(instance.alphabet)
        self._check_length(len(instance))
        self.instances.append(instance)
        self._pwm_is_current = False
    def set_mask(self,mask):
        """
        sets the mask for the motif

        The mask should be a string containing asterisks in the position of significant columns and spaces in other columns
        """
        self._check_length(len(mask))
        self.mask=[]
        for char in mask:
            if char=="*":
                self.mask.append(1)
            elif char==" ":
                self.mask.append(0)
            else:
                raise ValueError("Mask should contain only '*' or ' ' and not a '%s'"%char)
    def pwm(self):
        """
        returns the PWM computed for the set of instances

        Each column is a FreqTable of raw letter counts (FreqTable.COUNT).
        The result is cached until add_instance() invalidates it.
        """
        if self._pwm_is_current:
            return self._pwm
        #we need to compute new pwm
        self._pwm = []
        for i in xrange(len(self.mask)):
            dict = {}   # NOTE(review): shadows the builtin `dict`
            #filling the dict with 0's
            for letter in self.alphabet.letters:
                dict[letter]=0
            #counting the occurences of letters in instances
            for seq in self.instances:
                dict[seq[i]]=dict[seq[i]]+1
            self._pwm.append(FreqTable.FreqTable(dict,FreqTable.COUNT,self.alphabet))
        self._pwm_is_current=True
        return self._pwm
    def search_instances(self,sequence):
        """
        a generator function, returning found positions of instances of the motif in a given sequence

        Yields (position, instance) pairs for exact string matches.
        """
        for pos in xrange(0,len(sequence)-self.length+1):
            for instance in self.instances:
                if instance.tostring()==sequence[pos:pos+self.length].tostring():
                    yield(pos,instance)
                    break # no other instance will fit (we don't want to return multiple hits)
    def score_hit(self,sequence,position,normalized=1,masked=0):
        """
        give the pwm score for a given position

        Sums the per-column counts for the letters found at `position`;
        with masked=1 only masked-in columns contribute, and normalization
        divides by the number of contributing columns.
        """
        score = 0.0
        for pos in xrange(self.length):
            if not masked or self.mask[pos]:
                score += self.pwm()[pos][sequence[position+pos]]
        if normalized:
            if not masked:
                score/=self.length
            else:
                score/=len(filter(lambda x: x, self.mask))
        return score
    def search_pwm(self,sequence,threshold=0.0,normalized=1,masked=1):
        """
        a generator function, returning found hits in a given sequence with the pwm score higher than the threshold

        Yields (position, score) pairs; scoring delegates to score_hit().
        """
        for pos in xrange(0,len(sequence)-self.length+1):
            score = self.score_hit(sequence,pos,normalized,masked)
            if score > threshold:
                yield (pos,score)
    def sim(self, motif, masked = 0):
        """
        return the similarity score for the given motif against self.

        We use the Pearson's correlation of the respective probabilities.

        If the motifs have different length or mask raise the ValueError.
        """
        from math import sqrt
        if self.alphabet != motif.alphabet:
            raise ValueError("Wrong alphabet")
        if self.length != motif.length:
            raise ValueError("Wrong length")
        if masked and self.mask!=motif.mask:
            raise ValueError("Wrong mask")
        sxx = 0 # \sum x^2
        sxy = 0 # \sum x \cdot y
        sx = 0  # \sum x
        sy = 0  # \sum y
        syy = 0 # \sum y^2
        for pos in xrange(self.length):
            # NOTE(review): `self.mask` here is likely meant to be
            # `self.mask[pos]` (compare score_hit); as written, a non-empty
            # mask means no column is ever skipped in masked mode — confirm.
            if not masked or self.mask:
                for l in self.alphabet.letters:
                    xi = self.pwm()[pos][l]
                    yi = motif.pwm()[pos][l]
                    sx = sx + xi
                    sy = sy + yi
                    sxx = sxx + xi * xi
                    syy = syy + yi * yi
                    sxy = sxy + xi * yi
        if masked:
            norm = len(filter(lambda x: x,self.mask))
        else:
            norm = self.length
        norm *= len(self.alphabet.letters)
        s1 = (sxy - sx*sy*1.0/norm)
        s2 = (sxx - sx*sx*1.0/norm)*(syy- sy*sy*1.0/norm)
        return s1/sqrt(s2)
    def read(self,stream):
        """
        reads the motif from the stream

        the self.alphabet variable must be set before

        Instance lines are read until the mask line (containing '*') is hit.
        """
        from Bio.Seq import Seq
        while 1:
            ln = stream.readline()
            if "*" in ln:
                # NOTE(review): "\n\c" is a literal backslash + 'c' (not an
                # escape); probably only "\n" was intended — confirm.
                self.set_mask(ln.strip("\n\c"))
                break
            self.add_instance(Seq(ln.strip(),self.alphabet))
    def __str__(self):
        """
        string representation of motif

        One instance per line, followed by the mask rendered as '*'/' '.
        """
        str = ""   # NOTE(review): shadows the builtin `str`
        for inst in self.instances:
            str = str + inst.tostring() + "\n"
        for i in xrange(self.length):
            if self.mask[i]:
                str = str + "*"
            else:
                str = str + " "
        str = str + "\n"
        return str
    def write(self,stream):
        """
        writes the motif to the stream
        """
        stream.write(self.__str__())
    def to_fasta(self):
        """
        FASTA representation of motif

        Instances are numbered "> instance 0", "> instance 1", ...
        """
        str = ""
        for i,inst in enumerate(self.instances):
            str = str + "> instance %d\n"%i + inst.tostring() + "\n"
        return str
    def weblogo(self,fname,format="PNG",**kwds):
        """
        uses the Berkeley weblogo service to download and save a weblogo of itself

        requires an internet connection.
        The parameters from **kwds are passed directly to the weblogo server.
        """
        import urllib
        import urllib2
        #import Image
        al= self.to_fasta()
        url = 'http://weblogo.berkeley.edu/logo.cgi'
        values = {'sequence' : al,
                  'format' : format,
                  'logowidth' : '18',
                  'logoheight' : '5',
                  'logounits' : 'cm',
                  'kind' : 'AUTO',
                  'firstnum' : "1",
                  'command' : 'Create Logo',
                  'smallsamplecorrection' : "on",
                  'symbolsperline' : 32,
                  'res' : '96',
                  'res_units' : 'ppi',
                  'antialias' : 'on',
                  'title' : '',
                  'barbits' : '',
                  'xaxis': 'on',
                  'xaxis_label' : '',
                  'yaxis': 'on',
                  'yaxis_label' : '',
                  'showends' : 'on',
                  'shrink' : '0.5',
                  'fineprint' : 'on',
                  'ticbits' : '1',
                  'colorscheme' : 'DEFAULT',
                  'color1' : 'green',
                  'color2' : 'blue',
                  'color3' : 'red',
                  'color4' : 'black',
                  'color5' : 'purple',
                  'color6' : 'orange',
                  # NOTE(review): duplicate 'color1' key — this later entry
                  # silently overrides 'green' above; confirm which was meant.
                  'color1' : 'black',
                  }
        for k,v in kwds.items():
            values[k]=str(v)
        data = urllib.urlencode(values)
        req = urllib2.Request(url, data)
        response = urllib2.urlopen(req)
        f=open(fname,"w")
        im=response.read()
        f.write(im)
        f.close()
| 31.478261
| 133
| 0.499194
|
ad4793fcbef8a253abc3135d750b32ed689ca542
| 5,313
|
py
|
Python
|
render_jinja2_templates.py
|
gcarling/bossylobster-blog
|
723ce3226d6c4cd09476cf688938a416b199720b
|
[
"Apache-2.0"
] | 1
|
2021-04-09T17:30:52.000Z
|
2021-04-09T17:30:52.000Z
|
render_jinja2_templates.py
|
gcarling/bossylobster-blog
|
723ce3226d6c4cd09476cf688938a416b199720b
|
[
"Apache-2.0"
] | 40
|
2015-01-07T00:49:33.000Z
|
2022-02-07T19:31:32.000Z
|
render_jinja2_templates.py
|
gcarling/bossylobster-blog
|
723ce3226d6c4cd09476cf688938a416b199720b
|
[
"Apache-2.0"
] | 2
|
2019-05-10T03:53:39.000Z
|
2020-12-03T20:24:33.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
import glob
import hashlib
import json
import os
import re
import subprocess
import tempfile
import jinja2
import py.path
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Presence of FORCE_RENDER in the environment (any value) re-renders all
# templates even when their cached MD5 matches.
FORCE_RENDER = "FORCE_RENDER" in os.environ
TEMPLATES_DIR = os.path.join(BASE_DIR, "templated_content")
ENV = jinja2.Environment(
    loader=jinja2.PackageLoader(__name__, "templated_content")
)
KATEX_PATH = os.path.join(BASE_DIR, "node_modules", "katex")
# Node scripts for KaTeX rendering: the %%s survives this first %-format as
# %s and is filled with the escaped LaTeX later, in get_katex().
NODE_SCRIPT_TEMPLATE_INLINE = u"""\
katex = require(%(katex_path)s);
value = katex.renderToString("%%s");
console.log(value);
""" % {
    "katex_path": json.dumps(KATEX_PATH)
}
NODE_SCRIPT_TEMPLATE_MATH_MODE = u"""\
katex = require(%(katex_path)s);
value = katex.renderToString("%%s", {displayMode: true});
console.log(value);
""" % {
    "katex_path": json.dumps(KATEX_PATH)
}
KATEX_BLOCK_TEMPLATE = u"""\
<div class="katex-elt"><blockquote>
%s
</blockquote></div>"""
TEMPLATE_HASHES_FILENAME = os.path.join(TEMPLATES_DIR, "template_hashes.json")
# Import-time side effect: load the template -> MD5 cache used to skip
# re-rendering unchanged templates (updated on disk by write_template()).
with open(TEMPLATE_HASHES_FILENAME, "r") as fh:
    TEMPLATE_HASHES = json.load(fh)
RENDERED_DIR = os.path.join(BASE_DIR, "content")
def escape_string(latex_str):
    """Double every backslash so the LaTeX survives embedding in a JS string."""
    doubled_backslash = "\\\\"
    return latex_str.replace("\\", doubled_backslash)
def utf8_to_html_entity(char_val):
    """Return char_val unchanged if ASCII (< 127), else a numeric HTML entity."""
    code_point = ord(char_val)
    return char_val if code_point < 127 else "&#%04d;" % (code_point,)
def utf8_to_html_entities(str_val):
    """Map every non-ASCII character of str_val to a numeric HTML entity."""
    return "".join(map(utf8_to_html_entity, str_val))
def get_katex(latex_str, blockquote=False):
    """
    Render ``latex_str`` to KaTeX HTML by shelling out to ``node``.

    The rendered HTML is converted to pure-ASCII numeric entities and wrapped
    in an ``<html-literal>`` hex envelope (decoded downstream); with
    ``blockquote=True`` it is rendered in display mode and additionally
    wrapped in KATEX_BLOCK_TEMPLATE.

    Raises:
        RuntimeError: if ``node`` is not on the PATH.
        subprocess.CalledProcessError: if the node script fails.
    """
    escaped = escape_string(latex_str)
    if blockquote:
        script_content = NODE_SCRIPT_TEMPLATE_MATH_MODE % (escaped,)
    else:
        script_content = NODE_SCRIPT_TEMPLATE_INLINE % (escaped,)

    # Fail fast, before touching the filesystem.
    if py.path.local.sysfind("node") is None:
        raise RuntimeError("`node` must be installed")

    # tempfile.mktemp() is race-prone and the old script file was never
    # deleted; use mkstemp and clean up deterministically.
    handle, temp_script = tempfile.mkstemp(suffix=".js")
    try:
        with os.fdopen(handle, "w") as fh:
            fh.write(script_content)
        node_result = subprocess.check_output(["node", temp_script])
    finally:
        os.remove(temp_script)

    result = node_result.strip().decode("utf-8")
    result = utf8_to_html_entities(result)
    as_hex = binascii.hexlify(result.encode("utf-8")).decode("ascii")
    wrapped_element = "<html-literal>{}</html-literal>".format(as_hex)
    if blockquote:
        return KATEX_BLOCK_TEMPLATE % (wrapped_element,)
    return wrapped_element
def verify_template(filename):
    """
    Reject templates whose placeholders sit where rendering would break.

    Raises ValueError if templated content touches a parenthesis boundary
    ("({{" or "}})") or is immediately followed by a comma ("}},").
    """
    with open(filename, "r") as file_obj:
        content = file_obj.read()
    for forbidden in ("({{", "}})", "}},"):
        if forbidden in content:
            raise ValueError("Invalid content", filename)
def get_templates():
    """Load and verify every ``*.template`` file found in TEMPLATES_DIR."""
    templates = []
    pattern = os.path.join(TEMPLATES_DIR, "*.template")
    for path in glob.glob(pattern):
        template = ENV.get_template(os.path.basename(path))
        verify_template(template.filename)
        templates.append(template)
    return templates
def get_md5_sum(filename):
    """Return the hex MD5 digest of the file's contents."""
    with open(filename, "rb") as fh:
        digest = hashlib.md5(fh.read()).hexdigest()
    return digest
def write_template(template):
    """
    Render one ``*.template`` to ``content/<name>.md``, skipping unchanged ones.

    A template is skipped only when its MD5 matches the cached hash in
    template_hashes.json AND the output file already exists (unless
    FORCE_RENDER is set). After writing, the module-level TEMPLATE_HASHES
    cache is updated and persisted to disk.

    Raises ValueError if the template filename lacks the .template extension.
    """
    name, ext = os.path.splitext(template.name)
    if ext != ".template":
        raise ValueError(template.name)
    # This assumes we are running in the root of the repository.
    new_filename = os.path.join(RENDERED_DIR, "{}.md".format(name))
    md5_sum = get_md5_sum(template.filename)
    relative_filename = os.path.relpath(template.filename, BASE_DIR)
    if not FORCE_RENDER and md5_sum == TEMPLATE_HASHES.get(relative_filename):
        # Hash matches, but still re-render if the output went missing.
        if os.path.exists(new_filename):
            print("Already up-to-date: {}".format(relative_filename))
            return
    print("Writing {}".format(new_filename))
    if not os.path.isdir(RENDERED_DIR):
        os.mkdir(RENDERED_DIR)
    with open(new_filename, "w") as fh:
        # get_katex is exposed to the template so it can render LaTeX inline.
        rendered_file = template.render(get_katex=get_katex)
        fh.write(rendered_file)
        # Make sure the file has a trailing newline.
        if rendered_file[-1] != "\n":
            fh.write("\n")
    # Persist the updated hash cache so the next run can skip this template.
    TEMPLATE_HASHES[relative_filename] = md5_sum
    with open(TEMPLATE_HASHES_FILENAME, "w") as fh:
        json.dump(
            TEMPLATE_HASHES,
            fh,
            indent=2,
            sort_keys=True,
            separators=(",", ": "),
        )
        fh.write("\n")
if __name__ == "__main__":
    # Render every discovered template; unchanged ones are skipped inside
    # write_template().
    for template in get_templates():
        write_template(template)
| 31.070175
| 78
| 0.685112
|
4a893471c9f9fd27c8c61351eff3bd8e4da503ed
| 21,189
|
py
|
Python
|
google/monitoring/v3/monitoring-v3-py/google/cloud/monitoring_v3/types/metric_service.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/monitoring/v3/monitoring-v3-py/google/cloud/monitoring_v3/types/metric_service.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/monitoring/v3/monitoring-v3-py/google/cloud/monitoring_v3/types/metric_service.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.api import metric_pb2 # type: ignore
from google.api import monitored_resource_pb2 # type: ignore
from google.cloud.monitoring_v3.types import common
from google.cloud.monitoring_v3.types import metric as gm_metric
from google.rpc import status_pb2 # type: ignore
# Generated proto-plus module manifest: registers every message type this
# module defines under the google.monitoring.v3 proto package.
__protobuf__ = proto.module(
    package='google.monitoring.v3',
    manifest={
        'ListMonitoredResourceDescriptorsRequest',
        'ListMonitoredResourceDescriptorsResponse',
        'GetMonitoredResourceDescriptorRequest',
        'ListMetricDescriptorsRequest',
        'ListMetricDescriptorsResponse',
        'GetMetricDescriptorRequest',
        'CreateMetricDescriptorRequest',
        'DeleteMetricDescriptorRequest',
        'ListTimeSeriesRequest',
        'ListTimeSeriesResponse',
        'CreateTimeSeriesRequest',
        'CreateTimeSeriesError',
        'CreateTimeSeriesSummary',
        'QueryTimeSeriesRequest',
        'QueryTimeSeriesResponse',
        'QueryErrorList',
    },
)
class ListMonitoredResourceDescriptorsRequest(proto.Message):
    r"""The ``ListMonitoredResourceDescriptors`` request.

    Attributes:
        name (str):
            Required. The
            `project <https://cloud.google.com/monitoring/api/v3#project_name>`__
            on which to execute the request. The format is:

            ::

                projects/[PROJECT_ID_OR_NUMBER]
        filter (str):
            An optional
            `filter <https://cloud.google.com/monitoring/api/v3/filters>`__
            describing the descriptors to be returned. The filter can
            reference the descriptor's type and labels. For example, the
            following filter returns only Google Compute Engine
            descriptors that have an ``id`` label:

            ::

                resource.type = starts_with("gce_") AND resource.label:id
        page_size (int):
            A positive number that is the maximum number
            of results to return.
        page_token (str):
            If this field is not empty then it must contain the
            ``nextPageToken`` value returned by a previous call to this
            method. Using this field causes the method to return
            additional results from the previous method call.
    """

    # Generated code: field numbers mirror metric_service.proto — do not edit.
    name = proto.Field(
        proto.STRING,
        number=5,
    )
    filter = proto.Field(
        proto.STRING,
        number=2,
    )
    page_size = proto.Field(
        proto.INT32,
        number=3,
    )
    page_token = proto.Field(
        proto.STRING,
        number=4,
    )
class ListMonitoredResourceDescriptorsResponse(proto.Message):
    r"""The ``ListMonitoredResourceDescriptors`` response.

    Attributes:
        resource_descriptors (Sequence[google.api.monitored_resource_pb2.MonitoredResourceDescriptor]):
            The monitored resource descriptors that are available to
            this project and that match ``filter``, if present.
        next_page_token (str):
            If there are more results than have been returned, then this
            field is set to a non-empty value. To see the additional
            results, use that value as ``page_token`` in the next call
            to this method.
    """

    @property
    def raw_page(self):
        # Exposed for the client library's paging machinery; returns the
        # message itself.
        return self

    # Generated code: field numbers mirror metric_service.proto — do not edit.
    resource_descriptors = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=monitored_resource_pb2.MonitoredResourceDescriptor,
    )
    next_page_token = proto.Field(
        proto.STRING,
        number=2,
    )
class GetMonitoredResourceDescriptorRequest(proto.Message):
    r"""The ``GetMonitoredResourceDescriptor`` request.

    Attributes:
        name (str):
            Required. The monitored resource descriptor to get. The
            format is:

            ::

                projects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOURCE_TYPE]

            The ``[RESOURCE_TYPE]`` is a predefined type, such as
            ``cloudsql_database``.
    """

    # Generated code: field numbers mirror metric_service.proto — do not edit.
    name = proto.Field(
        proto.STRING,
        number=3,
    )
class ListMetricDescriptorsRequest(proto.Message):
    r"""The ``ListMetricDescriptors`` request.

    Attributes:
        name (str):
            Required. The
            `project <https://cloud.google.com/monitoring/api/v3#project_name>`__
            on which to execute the request. The format is:

            ::

                projects/[PROJECT_ID_OR_NUMBER]
        filter (str):
            If this field is empty, all custom and system-defined metric
            descriptors are returned. Otherwise, the
            `filter <https://cloud.google.com/monitoring/api/v3/filters>`__
            specifies which metric descriptors are to be returned. For
            example, the following filter matches all `custom
            metrics <https://cloud.google.com/monitoring/custom-metrics>`__:

            ::

                metric.type = starts_with("custom.googleapis.com/")
        page_size (int):
            A positive number that is the maximum number
            of results to return.
        page_token (str):
            If this field is not empty then it must contain the
            ``nextPageToken`` value returned by a previous call to this
            method. Using this field causes the method to return
            additional results from the previous method call.
    """

    # Generated code: field numbers mirror metric_service.proto — do not edit.
    name = proto.Field(
        proto.STRING,
        number=5,
    )
    filter = proto.Field(
        proto.STRING,
        number=2,
    )
    page_size = proto.Field(
        proto.INT32,
        number=3,
    )
    page_token = proto.Field(
        proto.STRING,
        number=4,
    )
class ListMetricDescriptorsResponse(proto.Message):
    r"""The ``ListMetricDescriptors`` response.

    Attributes:
        metric_descriptors (Sequence[google.api.metric_pb2.MetricDescriptor]):
            The metric descriptors that are available to the project and
            that match the value of ``filter``, if present.
        next_page_token (str):
            If there are more results than have been returned, then this
            field is set to a non-empty value. To see the additional
            results, use that value as ``page_token`` in the next call
            to this method.
    """

    @property
    def raw_page(self):
        # Exposed for the client library's paging machinery; returns the
        # message itself.
        return self

    # Generated code: field numbers mirror metric_service.proto — do not edit.
    metric_descriptors = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=metric_pb2.MetricDescriptor,
    )
    next_page_token = proto.Field(
        proto.STRING,
        number=2,
    )
class GetMetricDescriptorRequest(proto.Message):
    r"""The ``GetMetricDescriptor`` request.

    Attributes:
        name (str):
            Required. The metric descriptor on which to execute the
            request. The format is:

            ::

                projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID]

            An example value of ``[METRIC_ID]`` is
            ``"compute.googleapis.com/instance/disk/read_bytes_count"``.
    """

    # Generated code: field numbers mirror metric_service.proto — do not edit.
    name = proto.Field(
        proto.STRING,
        number=3,
    )
class CreateMetricDescriptorRequest(proto.Message):
    r"""The ``CreateMetricDescriptor`` request.

    Attributes:
        name (str):
            Required. The
            `project <https://cloud.google.com/monitoring/api/v3#project_name>`__
            on which to execute the request. The format is:

            ::

                projects/[PROJECT_ID_OR_NUMBER]
        metric_descriptor (google.api.metric_pb2.MetricDescriptor):
            Required. The new `custom
            metric <https://cloud.google.com/monitoring/custom-metrics>`__
            descriptor.
    """

    # Generated code: field numbers mirror metric_service.proto — do not edit.
    name = proto.Field(
        proto.STRING,
        number=3,
    )
    metric_descriptor = proto.Field(
        proto.MESSAGE,
        number=2,
        message=metric_pb2.MetricDescriptor,
    )
class DeleteMetricDescriptorRequest(proto.Message):
    r"""The ``DeleteMetricDescriptor`` request.

    Attributes:
        name (str):
            Required. The metric descriptor on which to execute the
            request. The format is:

            ::

                projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID]

            An example of ``[METRIC_ID]`` is:
            ``"custom.googleapis.com/my_test_metric"``.
    """

    # Generated code: field numbers mirror metric_service.proto — do not edit.
    name = proto.Field(
        proto.STRING,
        number=3,
    )
class ListTimeSeriesRequest(proto.Message):
    r"""The ``ListTimeSeries`` request.
    Attributes:
        name (str):
            Required. The
            `project <https://cloud.google.com/monitoring/api/v3#project_name>`__,
            organization or folder on which to execute the request. The
            format is:
            ::
            projects/[PROJECT_ID_OR_NUMBER]
            organizations/[ORGANIZATION_ID]
            folders/[FOLDER_ID]
        filter (str):
            Required. A `monitoring
            filter <https://cloud.google.com/monitoring/api/v3/filters>`__
            that specifies which time series should be returned. The
            filter must specify a single metric type, and can
            additionally specify metric labels and other information.
            For example:
            ::
            metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND
            metric.labels.instance_name = "my-instance-name".
        interval (google.cloud.monitoring_v3.types.TimeInterval):
            Required. The time interval for which results
            should be returned. Only time series that
            contain data points in the specified interval
            are included in the response.
        aggregation (google.cloud.monitoring_v3.types.Aggregation):
            Specifies the alignment of data points in individual time
            series as well as how to combine the retrieved time series
            across specified labels.
            By default (if no ``aggregation`` is explicitly specified),
            the raw time series data is returned.
        secondary_aggregation (google.cloud.monitoring_v3.types.Aggregation):
            Apply a second aggregation after ``aggregation`` is applied.
            May only be specified if ``aggregation`` is specified.
        order_by (str):
            Unsupported: must be left blank. The points
            in each time series are currently returned in
            reverse time order (most recent to oldest).
        view (google.cloud.monitoring_v3.types.ListTimeSeriesRequest.TimeSeriesView):
            Required. Specifies which information is
            returned about the time series.
        page_size (int):
            A positive number that is the maximum number of results to
            return. If ``page_size`` is empty or more than 100,000
            results, the effective ``page_size`` is 100,000 results. If
            ``view`` is set to ``FULL``, this is the maximum number of
            ``Points`` returned. If ``view`` is set to ``HEADERS``, this
            is the maximum number of ``TimeSeries`` returned.
        page_token (str):
            If this field is not empty then it must contain the
            ``nextPageToken`` value returned by a previous call to this
            method. Using this field causes the method to return
            additional results from the previous method call.
    """
    class TimeSeriesView(proto.Enum):
        r"""Controls which fields are returned by ``ListTimeSeries``."""
        FULL = 0
        HEADERS = 1
    # Field numbers are fixed by the wire format of the monitoring v3 proto;
    # do not renumber.
    name = proto.Field(
        proto.STRING,
        number=10,
    )
    filter = proto.Field(
        proto.STRING,
        number=2,
    )
    interval = proto.Field(
        proto.MESSAGE,
        number=4,
        message=common.TimeInterval,
    )
    aggregation = proto.Field(
        proto.MESSAGE,
        number=5,
        message=common.Aggregation,
    )
    secondary_aggregation = proto.Field(
        proto.MESSAGE,
        number=11,
        message=common.Aggregation,
    )
    order_by = proto.Field(
        proto.STRING,
        number=6,
    )
    view = proto.Field(
        proto.ENUM,
        number=7,
        enum=TimeSeriesView,
    )
    page_size = proto.Field(
        proto.INT32,
        number=8,
    )
    page_token = proto.Field(
        proto.STRING,
        number=9,
    )
class ListTimeSeriesResponse(proto.Message):
    r"""The ``ListTimeSeries`` response.
    Attributes:
        time_series (Sequence[google.cloud.monitoring_v3.types.TimeSeries]):
            One or more time series that match the filter
            included in the request.
        next_page_token (str):
            If there are more results than have been returned, then this
            field is set to a non-empty value. To see the additional
            results, use that value as ``page_token`` in the next call
            to this method.
        execution_errors (Sequence[google.rpc.status_pb2.Status]):
            Query execution errors that may have caused
            the time series data returned to be incomplete.
        unit (str):
            The unit in which all ``time_series`` point values are
            reported. ``unit`` follows the UCUM format for units as seen
            in https://unitsofmeasure.org/ucum.html. If different
            ``time_series`` have different units (for example, because
            they come from different metric types, or a unit is absent),
            then ``unit`` will be "{not_a_unit}".
    """
    @property
    def raw_page(self):
        # Exposes the response itself as a page object; presumably consumed
        # by the client library's paging helpers -- confirm against the
        # generated service client.
        return self
    # Field numbers are fixed by the wire format of the monitoring v3 proto;
    # do not renumber.
    time_series = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=gm_metric.TimeSeries,
    )
    next_page_token = proto.Field(
        proto.STRING,
        number=2,
    )
    execution_errors = proto.RepeatedField(
        proto.MESSAGE,
        number=3,
        message=status_pb2.Status,
    )
    unit = proto.Field(
        proto.STRING,
        number=5,
    )
class CreateTimeSeriesRequest(proto.Message):
    r"""The ``CreateTimeSeries`` request.
    Attributes:
        name (str):
            Required. The
            `project <https://cloud.google.com/monitoring/api/v3#project_name>`__
            on which to execute the request. The format is:
            ::
            projects/[PROJECT_ID_OR_NUMBER]
        time_series (Sequence[google.cloud.monitoring_v3.types.TimeSeries]):
            Required. The new data to be added to a list of time series.
            Adds at most one data point to each of several time series.
            The new data point must be more recent than any other point
            in its time series. Each ``TimeSeries`` value must fully
            specify a unique time series by supplying all label values
            for the metric and the monitored resource.
            The maximum number of ``TimeSeries`` objects per ``Create``
            request is 200.
    """
    # Field numbers are fixed by the wire format of the monitoring v3 proto;
    # do not renumber.
    name = proto.Field(
        proto.STRING,
        number=3,
    )
    time_series = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message=gm_metric.TimeSeries,
    )
class CreateTimeSeriesError(proto.Message):
    r"""DEPRECATED. Used to hold per-time-series error status.
    Attributes:
        time_series (google.cloud.monitoring_v3.types.TimeSeries):
            DEPRECATED. Time series ID that resulted in the ``status``
            error.
        status (google.rpc.status_pb2.Status):
            DEPRECATED. The status of the requested write operation for
            ``time_series``.
    """
    # Kept (although deprecated) for wire compatibility; field numbers are
    # fixed by the monitoring v3 proto.
    time_series = proto.Field(
        proto.MESSAGE,
        number=1,
        message=gm_metric.TimeSeries,
    )
    status = proto.Field(
        proto.MESSAGE,
        number=2,
        message=status_pb2.Status,
    )
class CreateTimeSeriesSummary(proto.Message):
    r"""Summary of the result of a failed request to write data to a
    time series.
    Attributes:
        total_point_count (int):
            The number of points in the request.
        success_point_count (int):
            The number of points that were successfully
            written.
        errors (Sequence[google.cloud.monitoring_v3.types.CreateTimeSeriesSummary.Error]):
            The number of points that failed to be
            written. Order is not guaranteed.
    """
    class Error(proto.Message):
        r"""Detailed information about an error category.
        Attributes:
            status (google.rpc.status_pb2.Status):
                The status of the requested write operation.
            point_count (int):
                The number of points that couldn't be written because of
                ``status``.
        """
        status = proto.Field(
            proto.MESSAGE,
            number=1,
            message=status_pb2.Status,
        )
        point_count = proto.Field(
            proto.INT32,
            number=2,
        )
    # Field numbers are fixed by the wire format of the monitoring v3 proto;
    # do not renumber.
    total_point_count = proto.Field(
        proto.INT32,
        number=1,
    )
    success_point_count = proto.Field(
        proto.INT32,
        number=2,
    )
    errors = proto.RepeatedField(
        proto.MESSAGE,
        number=3,
        message=Error,
    )
class QueryTimeSeriesRequest(proto.Message):
    r"""The ``QueryTimeSeries`` request.
    Attributes:
        name (str):
            Required. The
            `project <https://cloud.google.com/monitoring/api/v3#project_name>`__
            on which to execute the request. The format is:
            ::
            projects/[PROJECT_ID_OR_NUMBER]
        query (str):
            Required. The query in the `Monitoring Query
            Language <https://cloud.google.com/monitoring/mql/reference>`__
            format. The default time zone is in UTC.
        page_size (int):
            A positive number that is the maximum number of
            time_series_data to return.
        page_token (str):
            If this field is not empty then it must contain the
            ``nextPageToken`` value returned by a previous call to this
            method. Using this field causes the method to return
            additional results from the previous method call.
    """
    # Field numbers are fixed by the wire format of the monitoring v3 proto;
    # do not renumber.
    name = proto.Field(
        proto.STRING,
        number=1,
    )
    query = proto.Field(
        proto.STRING,
        number=7,
    )
    page_size = proto.Field(
        proto.INT32,
        number=9,
    )
    page_token = proto.Field(
        proto.STRING,
        number=10,
    )
class QueryTimeSeriesResponse(proto.Message):
    r"""The ``QueryTimeSeries`` response.
    Attributes:
        time_series_descriptor (google.cloud.monitoring_v3.types.TimeSeriesDescriptor):
            The descriptor for the time series data.
        time_series_data (Sequence[google.cloud.monitoring_v3.types.TimeSeriesData]):
            The time series data.
        next_page_token (str):
            If there are more results than have been returned, then this
            field is set to a non-empty value. To see the additional
            results, use that value as ``page_token`` in the next call
            to this method.
        partial_errors (Sequence[google.rpc.status_pb2.Status]):
            Query execution errors that may have caused
            the time series data returned to be incomplete.
            The available data will be available in the
            response.
    """
    @property
    def raw_page(self):
        # Exposes the response itself as a page object; presumably consumed
        # by the client library's paging helpers -- confirm against the
        # generated service client.
        return self
    # Field numbers are fixed by the wire format of the monitoring v3 proto;
    # do not renumber.
    time_series_descriptor = proto.Field(
        proto.MESSAGE,
        number=8,
        message=gm_metric.TimeSeriesDescriptor,
    )
    time_series_data = proto.RepeatedField(
        proto.MESSAGE,
        number=9,
        message=gm_metric.TimeSeriesData,
    )
    next_page_token = proto.Field(
        proto.STRING,
        number=10,
    )
    partial_errors = proto.RepeatedField(
        proto.MESSAGE,
        number=11,
        message=status_pb2.Status,
    )
class QueryErrorList(proto.Message):
    r"""This is an error detail intended to be used with INVALID_ARGUMENT
    errors.
    Attributes:
        errors (Sequence[google.cloud.monitoring_v3.types.QueryError]):
            Errors in parsing the time series query
            language text. The number of errors in the
            response may be limited.
        error_summary (str):
            A summary of all the errors.
    """
    # Field numbers are fixed by the wire format of the monitoring v3 proto;
    # do not renumber.
    errors = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=gm_metric.QueryError,
    )
    error_summary = proto.Field(
        proto.STRING,
        number=2,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
| 31.114537
| 103
| 0.615083
|
92244e60e737818b7c8d50ead8375f1d52d628cb
| 6,524
|
py
|
Python
|
sdk/python/kfp/components/_airflow_op.py
|
kamalmemon/pipelines
|
7e68991a2a7bfa767f893facfe58190690ca89ed
|
[
"Apache-2.0"
] | 6
|
2020-05-19T02:35:11.000Z
|
2020-05-29T17:58:42.000Z
|
sdk/python/kfp/components/_airflow_op.py
|
kamalmemon/pipelines
|
7e68991a2a7bfa767f893facfe58190690ca89ed
|
[
"Apache-2.0"
] | 1,932
|
2021-01-25T11:23:37.000Z
|
2022-03-31T17:10:18.000Z
|
sdk/python/kfp/components/_airflow_op.py
|
kamalmemon/pipelines
|
7e68991a2a7bfa767f893facfe58190690ca89ed
|
[
"Apache-2.0"
] | 11
|
2020-05-19T22:26:41.000Z
|
2021-01-25T09:56:21.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Public API of this module.
__all__ = [
    'create_component_from_airflow_op',
]
from typing import List
from ._python_op import _func_to_component_spec, _create_task_factory_from_component_spec
_default_airflow_base_image = 'apache/airflow:master-python3.6-ci' #TODO: Update a production release image once they become available: https://cwiki.apache.org/confluence/display/AIRFLOW/AIP-10+Multi-layered+and+multi-stage+official+Airflow+CI+image#AIP-10Multi-layeredandmulti-stageofficialAirflowCIimage-ProposedsetupoftheDockerHubandTravisCI . See https://issues.apache.org/jira/browse/AIRFLOW-5093
def create_component_from_airflow_op(
    op_class: type,
    base_image: str = _default_airflow_base_image,
    variable_output_names: List[str] = None,
    xcom_output_names: List[str] = None,
    modules_to_capture: List[str] = None
):
    '''Creates component function from an Airflow operator class.

    The resulting component accepts the same inputs as the operator's
    constructor. By default it produces three outputs: "Result" (the
    operator's return value), plus "Variables" and "XComs", which are
    JSON-serialized maps of every variable and xcom the operator produced
    while executing. Individual variables/xcoms can additionally be exposed
    as separate outputs via ``variable_output_names`` / ``xcom_output_names``.

    Args:
        op_class: Reference to the Airflow operator class (e.g. EmailOperator or BashOperator) to convert to componenent.
        base_image: Optional. The container image to use for the component. Default is apache/airflow. The container image must have the same python version as the environment used to run create_component_from_airflow_op. The image should have python 3.5+ with airflow package installed.
        variable_output_names: Optional. A list of Airflow "variables" produced by the operator that should be returned as separate outputs.
        xcom_output_names: Optional. A list of Airflow "XComs" produced by the operator that should be returned as separate outputs.
        modules_to_capture: Optional. A list of names of additional modules that the operator depends on. By default only the module containing the operator class is captured. If the operator class uses the code from another module, the name of that module can be specified in this list.
    '''
    # Build the component spec, then wrap it in a callable task factory.
    spec = _create_component_spec_from_airflow_op(
        op_class=op_class,
        base_image=base_image,
        variables_to_output=variable_output_names,
        xcoms_to_output=xcom_output_names,
        modules_to_capture=modules_to_capture,
    )
    return _create_task_factory_from_component_spec(spec)
def _create_component_spec_from_airflow_op(
    op_class: type,
    base_image: str = _default_airflow_base_image,
    result_output_name: str = 'Result',
    variables_dict_output_name: str = 'Variables',
    xcoms_dict_output_name: str = 'XComs',
    variables_to_output: List[str] = None,
    xcoms_to_output: List[str] = None,
    modules_to_capture: List[str] = None,
):
    '''Builds a ComponentSpec whose implementation runs an Airflow operator.

    Args:
        op_class: The Airflow operator class to wrap.
        base_image: Container image the component runs in.
        result_output_name: Name of the output holding str(operator result);
            pass None to omit that output. Same for the two names below.
        variables_dict_output_name: Output holding all variables as JSON.
        xcoms_dict_output_name: Output holding all xcoms as JSON.
        variables_to_output: Variable names to expose as individual outputs.
        xcoms_to_output: XCom names to expose as individual outputs.
        modules_to_capture: Extra module names to code-pickle alongside the
            operator's own module.

    Returns:
        The ComponentSpec produced by ``_func_to_component_spec``.
    '''
    variables_output_names = variables_to_output or []
    xcoms_output_names = xcoms_to_output or []
    # Copy the caller's list (if any) so that the append below does not
    # mutate the caller's argument. An empty list falls back to the
    # operator's own module, matching the previous `or` semantics.
    modules_to_capture = list(modules_to_capture) if modules_to_capture else [op_class.__module__]
    modules_to_capture.append(_run_airflow_op.__module__)
    # Assemble the ordered output list; any of the three aggregate outputs
    # can be disabled by passing None for its name.
    output_names = []
    if result_output_name is not None:
        output_names.append(result_output_name)
    if variables_dict_output_name is not None:
        output_names.append(variables_dict_output_name)
    if xcoms_dict_output_name is not None:
        output_names.append(xcoms_dict_output_name)
    output_names.extend(variables_output_names)
    output_names.extend(xcoms_output_names)
    from collections import namedtuple
    returnType = namedtuple('AirflowOpOutputs', output_names)
    def _run_airflow_op_closure(*op_args, **op_kwargs) -> returnType:
        (result, variables, xcoms) = _run_airflow_op(op_class, *op_args, **op_kwargs)
        output_values = {}
        # Import inside the closure: this function is code-pickled
        # (use_code_pickling=True below) and executed remotely, so it must
        # be self-contained.
        import json
        if result_output_name is not None:
            output_values[result_output_name] = str(result)
        if variables_dict_output_name is not None:
            output_values[variables_dict_output_name] = json.dumps(variables)
        if xcoms_dict_output_name is not None:
            output_values[xcoms_dict_output_name] = json.dumps(xcoms)
        for name in variables_output_names:
            output_values[name] = variables[name]
        for name in xcoms_output_names:
            output_values[name] = xcoms[name]
        return returnType(**output_values)
    # Hacking the function signature so that correct component interface is generated
    import inspect
    parameters = inspect.signature(op_class).parameters.values()
    # Filtering out `*args` and `**kwargs` parameters that some operators have
    parameters = [param for param in parameters if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD]
    sig = inspect.Signature(
        parameters=parameters,
        return_annotation=returnType,
    )
    _run_airflow_op_closure.__signature__ = sig
    _run_airflow_op_closure.__name__ = op_class.__name__
    return _func_to_component_spec(_run_airflow_op_closure, base_image=base_image, use_code_pickling=True, modules_to_capture=modules_to_capture)
def _run_airflow_op(Op, *op_args, **op_kwargs):
    '''Executes a single Airflow operator in-process.

    Returns a ``(result, variables, xcoms)`` tuple where ``variables`` and
    ``xcoms`` are dicts of everything the operator stored while executing.
    '''
    # Airflow needs an initialized metadata DB before anything else runs.
    from airflow.utils import db
    db.initdb()
    from datetime import datetime
    from airflow import DAG, settings
    from airflow.models import TaskInstance, Variable, XCom
    # Operators can only run attached to a DAG, so create a throwaway one.
    host_dag = DAG(dag_id='anydag', start_date=datetime.now())
    operator = Op(*op_args, **op_kwargs, dag=host_dag, task_id='anytask')
    task_instance = TaskInstance(task=operator, execution_date=datetime.now())
    op_result = operator.execute(task_instance.get_template_context())
    # Collect everything the operator persisted during execution.
    collected_variables = {var.id: var.val for var in settings.Session().query(Variable).all()}
    collected_xcoms = {msg.key: msg.value for msg in settings.Session().query(XCom).all()}
    return (op_result, collected_variables, collected_xcoms)
| 47.275362
| 402
| 0.754905
|
39adb3e52bbd23c73cd9df37c0a45f7bdc56546d
| 8,085
|
py
|
Python
|
mergify_engine/tests/unit/actions/test_merge.py
|
ErwanSimonetti/mergify-engine
|
9b82ac2ff9e1de2707f19320e119a48a54af34c9
|
[
"Apache-2.0"
] | null | null | null |
mergify_engine/tests/unit/actions/test_merge.py
|
ErwanSimonetti/mergify-engine
|
9b82ac2ff9e1de2707f19320e119a48a54af34c9
|
[
"Apache-2.0"
] | 24
|
2022-02-14T08:36:56.000Z
|
2022-03-31T07:35:47.000Z
|
mergify_engine/tests/unit/actions/test_merge.py
|
ErwanSimonetti/mergify-engine
|
9b82ac2ff9e1de2707f19320e119a48a54af34c9
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
#
# Copyright © 2020 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import pytest
import voluptuous
from mergify_engine import constants
from mergify_engine import context
from mergify_engine import github_types
from mergify_engine import rules
from mergify_engine import subscription
from mergify_engine.actions import merge
from mergify_engine.actions import merge_base
# Minimal GitHub pull-request payload shared by the tests below; each test
# copies it and overrides fields (e.g. "body") as needed.
PR = {
    "number": 43,
    "state": "unknown",
    "mergeable_state": "ok",
    "merged_by": {"login": "me"},
    "merged": False,
    "merged_at": None,
    "title": "My PR title",
    "user": {"login": "jd"},
    "head": {"ref": "fork", "sha": "shasha"},
    "base": {
        "ref": "master",
        "user": {"login": "jd"},
        "repo": {"name": "repo", "private": False},
        "sha": "miaou",
    },
    "assignees": [],
    "locked": False,
    "labels": [],
    "requested_reviewers": [],
    "requested_teams": [],
    "milestone": None,
}
# Each case: PR body (with embedded "# Commit Message" section), expected
# commit title, expected commit message, and the commit-message mode.
@pytest.mark.parametrize(
    "body, title, message, mode",
    [
        (
            """Hello world
# Commit Message
my title
my body""",
            "my title",
            "my body",
            "default",
        ),
        (
            """Hello world
# Commit Message:
my title
my body
is longer""",
            "my title",
            "my body\nis longer",
            "default",
        ),
        (
            """Hello world
# Commit Message
{{title}}
Authored-By: {{author}}
on two lines""",
            "My PR title",
            "Authored-By: jd\non two lines",
            "default",
        ),
        (
            """Hello world
again!
## Commit Message
My Title
CI worked:
{% for ci in status_success %}
- {{ci}}
{% endfor %}
""",
            "My Title",
            "CI worked:\n\n- my CI\n",
            "default",
        ),
        (
            """Hello world
# Commit Message
my title
my body""",
            "my title",
            "my body",
            "default",
        ),
        (
            """Hello world
# Commit Message
my title
WATCHOUT ^^^ there is empty spaces above for testing ^^^^
my body""",  # noqa:W293,W291
            "my title",
            "WATCHOUT ^^^ there is empty spaces above for testing ^^^^\nmy body",
            "default",
        ),
        (
            # Should return an empty message
            """Hello world
# Commit Message
my title
""",
            "my title",
            "",
            "default",
        ),
        ("Here's my message", "My PR title (#43)", "Here's my message", "title+body"),
    ],
)
@pytest.mark.asyncio
async def test_merge_commit_message(body, title, message, mode):
    """Commit title/message extraction and templating from the PR body."""
    pull = PR.copy()
    pull["body"] = body
    client = mock.MagicMock()
    installation = context.Installation(123, "whatever", {}, client, None)
    repository = context.Repository(installation, "whatever", 123)
    # Pre-populate caches so no GitHub API calls are attempted.
    repository._cache["branches"] = {"master": {"protection": {"enabled": False}}}
    ctxt = await context.Context.create(repository=repository, pull=pull)
    ctxt._cache["pull_statuses"] = [
        github_types.GitHubStatus(
            {
                "target_url": "http://example.com",
                "context": "my CI",
                "state": "success",
                "description": "foobar",
                "avatar_url": "",
            }
        )
    ]
    ctxt._cache["pull_check_runs"] = []
    assert await ctxt.pull_request.get_commit_message(mode=mode) == (
        title,
        message,
    )
# PR bodies whose commit-message template references the undefined
# variable ``foobar``.
@pytest.mark.parametrize(
    "body",
    [
        (
            """Hello world
# Commit Message
{{title}}
here is my message {{foobar}}
on two lines"""
        ),
        (
            """Hello world
# Commit Message
{{foobar}}
here is my message
on two lines"""
        ),
    ],
)
@pytest.mark.asyncio
async def test_merge_commit_message_undefined(body):
    """An undefined template variable must raise RenderTemplateFailure."""
    pull = PR.copy()
    pull["body"] = body
    client = mock.MagicMock()
    installation = context.Installation(123, "whatever", {}, client, None)
    repository = context.Repository(installation, "whatever", 123)
    pr = await context.Context.create(repository=repository, pull=pull)
    with pytest.raises(context.RenderTemplateFailure) as x:
        await pr.pull_request.get_commit_message()
    # NOTE(review): `x` is pytest's ExceptionInfo wrapper; `str(x)` is not
    # the exception message, so this likely needs `str(x.value)` -- confirm.
    assert str(x) == "foobar"
# PR bodies whose commit-message template is syntactically broken, with the
# expected error text.
@pytest.mark.parametrize(
    "body,error",
    [
        (
            """Hello world
# Commit Message
{{title}}
here is my message {{ and broken template
""",
            "lol",
        ),
    ],
)
@pytest.mark.asyncio
async def test_merge_commit_message_syntax_error(body, error, redis_cache):
    """A broken template must raise RenderTemplateFailure."""
    pull = PR.copy()
    pull["body"] = body
    client = mock.MagicMock()
    installation = context.Installation(123, "whatever", {}, client, redis_cache)
    repository = context.Repository(installation, "whatever", 123)
    pr = await context.Context.create(repository=repository, pull=pull)
    with pytest.raises(context.RenderTemplateFailure) as rmf:
        await pr.pull_request.get_commit_message()
    # NOTE(review): `rmf` is pytest's ExceptionInfo wrapper; `str(rmf)` is
    # not the exception message, so this likely needs `str(rmf.value)` --
    # confirm (and the expected text "lol" looks like a placeholder).
    assert str(rmf) == error
def gen_config(priorities):
    """Build a queue-config dict ({"priority": value}) for each priority."""
    configs = []
    for level in priorities:
        configs.append({"priority": level})
    return configs
@pytest.mark.asyncio
async def test_queue_summary(redis_cache):
    """Markdown summary of a priority queue with nine queued pull requests."""
    # Every queued PR resolves to a context whose title is "foo".
    repository = mock.Mock(
        get_pull_request_context=mock.AsyncMock(
            return_value=mock.Mock(pull={"title": "foo"})
        )
    )
    ctxt = mock.Mock(
        repository=repository,
        subscription=subscription.Subscription(
            redis_cache,
            123,
            True,
            "We're just testing",
            frozenset({subscription.Features.PRIORITY_QUEUES}),
        ),
    )
    ctxt.missing_feature_reason = subscription.Subscription.missing_feature_reason
    ctxt.pull = {
        "base": {
            "repo": {
                "owner": {
                    "login": "Mergifyio",
                },
            },
        },
    }
    # Fake queue: PRs #1..#9 with mixed priorities; 4000 has no friendly
    # name while 3000/2000/1000 render as high/medium/low.
    q = mock.AsyncMock(installation_id=12345)
    q.get_pulls.return_value = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    q.get_config.side_effect = gen_config(
        [4000, 3000, 3000, 3000, 2000, 2000, 1000, 1000, 1000]
    )
    with mock.patch.object(merge.naive.Queue, "from_context", return_value=q):
        action = merge.MergeAction(voluptuous.Schema(merge.MergeAction.validator)({}))
        assert """**Required conditions for merge:**
**The following pull requests are queued:**
| | Pull request | Priority |
| ---: | :--- | :--- |
| 1 | foo #1 | 4000 |
| 2 | foo #2 | high |
| 3 | foo #3 | high |
| 4 | foo #4 | high |
| 5 | foo #5 | medium |
| 6 | foo #6 | medium |
| 7 | foo #7 | low |
| 8 | foo #8 | low |
| 9 | foo #9 | low |
---
""" + constants.MERGIFY_PULL_REQUEST_DOC == await action._get_queue_summary(
            ctxt, mock.Mock(conditions=rules.RuleConditions([])), q
        )
# Valid `strict` values and the StrictMergeParameter they must map to.
@pytest.mark.parametrize(
    "test_input, expected",
    [
        (True, merge_base.StrictMergeParameter.true),
        (False, merge_base.StrictMergeParameter.false),
        ("smart", merge_base.StrictMergeParameter.ordered),
        ("smart+ordered", merge_base.StrictMergeParameter.ordered),
        ("smart+fasttrack", merge_base.StrictMergeParameter.fasttrack),
        ("smart+fastpath", merge_base.StrictMergeParameter.fasttrack),
    ],
)
def test_strict_merge_parameter_ok(test_input, expected):
    """Every accepted `strict` spelling maps to the right enum member."""
    assert merge_base.strict_merge_parameter(test_input) == expected
def test_strict_merge_parameter_fail():
    """An unknown `strict` value must raise ValueError with a clear message."""
    with pytest.raises(
        ValueError,
        match="toto is an unknown strict merge parameter",
    ):
        merge_base.strict_merge_parameter("toto")
| 25.186916
| 86
| 0.586889
|
f3b8218078d970c6316fb597e98f40de60569335
| 1,644
|
py
|
Python
|
tests/test_utils/test_schema_as_dict.py
|
dalito/linkml-runtime
|
192a33962aed06f727ffad1a697003ac6ec85c2c
|
[
"CC0-1.0"
] | null | null | null |
tests/test_utils/test_schema_as_dict.py
|
dalito/linkml-runtime
|
192a33962aed06f727ffad1a697003ac6ec85c2c
|
[
"CC0-1.0"
] | null | null | null |
tests/test_utils/test_schema_as_dict.py
|
dalito/linkml-runtime
|
192a33962aed06f727ffad1a697003ac6ec85c2c
|
[
"CC0-1.0"
] | null | null | null |
import os
import unittest
import logging
import yaml
from linkml_runtime.linkml_model.meta import SchemaDefinition, ClassDefinition
from linkml_runtime.loaders.yaml_loader import YAMLLoader
from linkml_runtime.utils.schema_as_dict import schema_as_yaml_dump, schema_as_dict
from linkml_runtime.utils.schemaview import SchemaView
from tests.test_utils import INPUT_DIR, OUTPUT_DIR
# Test fixture schemas (with and without imports) and the output path for
# the round-tripped, cleaned schema dump.
SCHEMA_NO_IMPORTS = os.path.join(INPUT_DIR, 'kitchen_sink_noimports.yaml')
SCHEMA_WITH_IMPORTS = os.path.join(INPUT_DIR, 'kitchen_sink.yaml')
CLEAN_SCHEMA = os.path.join(OUTPUT_DIR, 'kitchen_sink.clean.yaml')
# NOTE(review): yaml_loader is unused in the visible tests; presumably kept
# for parity with sibling test modules -- confirm before removing.
yaml_loader = YAMLLoader()
class SchemaAsDictTestCase(unittest.TestCase):
    """Checks for ``schema_as_dict`` / ``schema_as_yaml_dump`` round-trips."""

    def test_as_dict(self):
        """
        tests schema_as_dict, see https://github.com/linkml/linkml/issues/100
        """
        sv = SchemaView(SCHEMA_NO_IMPORTS)
        logging.debug(sv.schema.id)
        yaml_text = schema_as_yaml_dump(sv.schema)
        with open(CLEAN_SCHEMA, 'w') as out_stream:
            out_stream.write(yaml_text)
        # Re-parsing the dumped YAML must succeed (round-trip sanity check).
        reparsed = SchemaView(yaml_text)
        schema_dict = schema_as_dict(sv.schema)
        # ensure that prefixes are compacted
        assert schema_dict['prefixes']['pav'] == 'http://purl.org/pav/'
        assert '@type' not in schema_dict
        for section in ['slots', 'classes', 'enums', 'subsets']:
            elements = schema_dict[section]
            # Element names are keys, so no redundant 'name' field inside.
            for element in elements.values():
                assert 'name' not in element
            if section == 'enums':
                for enum_def in elements.values():
                    for pv in enum_def.get('permissible_values', {}).values():
                        assert 'text' not in pv
# Allow running this test module directly with `python`.
if __name__ == '__main__':
    unittest.main()
| 34.978723
| 83
| 0.667275
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.