| Column | Type | Values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
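As a hedged sketch (the dataset identifier below is a placeholder, not this dataset's real path), records following the schema above can be streamed with the Hugging Face datasets library:

# Hedged sketch: stream a few records that follow the schema above.
# "user/python-source-dataset" is a placeholder id, not the real dataset path.
from itertools import islice
from datasets import load_dataset

ds = load_dataset("user/python-source-dataset", split="train", streaming=True)
for row in islice(ds, 3):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"], row["alphanum_fraction"])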
hexsha: 213a52f9011384675eda1dd484d504bf3cb6d2fe | size: 393 | ext: py | lang: Python
max_stars_repo: jihyunbak/rec_to_nwb @ 6e65f8bf0a4faa4d986483ec2442ba19d70c92a9 | path: rec_to_nwb/processing/nwb/components/position/time/valid/fl_pos_valid_time_builder.py | licenses: ["Apache-2.0"] | max_stars_count: 8 | stars events: 2020-05-29T13:48:35.000Z to 2021-11-19T04:24:48.000Z
max_issues_repo: jihyunbak/rec_to_nwb @ 6e65f8bf0a4faa4d986483ec2442ba19d70c92a9 | path: rec_to_nwb/processing/nwb/components/position/time/valid/fl_pos_valid_time_builder.py | licenses: ["Apache-2.0"] | max_issues_count: 12 | issues events: 2020-11-13T01:36:32.000Z to 2022-01-23T20:35:55.000Z
max_forks_repo: jihyunbak/rec_to_nwb @ 6e65f8bf0a4faa4d986483ec2442ba19d70c92a9 | path: rec_to_nwb/processing/nwb/components/position/time/valid/fl_pos_valid_time_builder.py | licenses: ["Apache-2.0"] | max_forks_count: 3 | forks events: 2020-10-20T06:52:45.000Z to 2021-07-06T23:00:53.000Z
content:
from rec_to_nwb.processing.nwb.components.position.time.valid.fl_pos_valid_time import FlPosValidTime
from rec_to_nwb.processing.tools.beartype.beartype import beartype
class FlPosValidTimeBuilder:
@staticmethod
@beartype
def build(start_time: float, stop_time: float):
return FlPosValidTime(
start_time=start_time,
stop_time=stop_time
)
avg_line_length: 28.071429 | max_line_length: 101 | alphanum_fraction: 0.740458
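A minimal usage sketch for the builder in this record (my own example, assuming the rec_to_nwb package is installed; the numeric values are arbitrary):

# Hypothetical usage: @beartype validates the float annotations at call time, and
# the builder returns an FlPosValidTime wrapping the two timestamps.
from rec_to_nwb.processing.nwb.components.position.time.valid.fl_pos_valid_time_builder import FlPosValidTimeBuilder

valid_time = FlPosValidTimeBuilder.build(start_time=0.0, stop_time=12.5)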
hexsha: 55e3e1d3f07a656f38ec328964f8ee4819293bea | size: 11,169 | ext: py | lang: Python
max_stars_repo: guireis1/testpos @ efe22724d4c3512dd5336dc9788704793793122d | path: TakeBlipPosTagger/model.py | licenses: ["MIT"] | max_stars_count: null | stars events: null
max_issues_repo: guireis1/testpos @ efe22724d4c3512dd5336dc9788704793793122d | path: TakeBlipPosTagger/model.py | licenses: ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: guireis1/testpos @ efe22724d4c3512dd5336dc9788704793793122d | path: TakeBlipPosTagger/model.py | licenses: ["MIT"] | max_forks_count: null | forks events: null
content:
import torch
import torch.nn as nn
from torch.autograd import Variable
def log_sum_exp(vec, dim=0):
    # numerically stable log-sum-exp along `dim` (avoids shadowing the builtin max)
    max_val, idx = torch.max(vec, dim)
    max_exp = max_val.unsqueeze(-1).expand_as(vec)
    return max_val + torch.log(torch.sum(torch.exp(vec - max_exp), dim))
class CRF(nn.Module):
def __init__(self, vocab_size, pad_idx, unk_idx, device):
super(CRF, self).__init__()
self.device = device
self.vocab_size = vocab_size
self.n_labels = n_labels = vocab_size + 2
self.start_idx = n_labels - 2
self.stop_idx = n_labels - 1
self.transitions = nn.Parameter(torch.randn(n_labels, n_labels).to(self.device))
self.pad_idx = pad_idx
self.unk_idx = unk_idx
def reset_parameters(self):
nn.init.constant_(self.transitions.data, 0)
nn.init.constant_(self.transitions.data[:, self.unk_idx], -3)
nn.init.constant_(self.transitions.data[:, self.pad_idx], -3)
nn.init.constant_(self.transitions.data[:, self.start_idx], -3)
nn.init.constant_(self.transitions.data[:, self.stop_idx], -3)
def forward(self, logits, lens):
'''
Arguments:
logits: [batch_size, seq_len, n_labels] FloatTensor
lens: [batch_size] LongTensor
'''
batch_size, seq_len, n_labels = logits.size()
alpha = logits.data.new(batch_size, self.n_labels).fill_(-10000)
alpha[:, self.start_idx] = 0
alpha = Variable(alpha)
cloned_lens = lens.clone()
logits_transposed = logits.transpose(1, 0)
for logit in logits_transposed:
logit_expanded = logit.unsqueeze(-1).expand(batch_size,
*self.transitions.size())
alpha_expanded = alpha.unsqueeze(1).expand(batch_size,
*self.transitions.size())
transition_expanded = self.transitions.unsqueeze(0).expand_as(alpha_expanded)
matrix = transition_expanded + alpha_expanded + logit_expanded
alpha_next = log_sum_exp(matrix, 2).squeeze(-1)
mask = (cloned_lens > 0).float().unsqueeze(-1).expand_as(alpha)
alpha = mask * alpha_next + (1 - mask) * alpha
cloned_lens = cloned_lens - 1
alpha = alpha + self.transitions[self.stop_idx].unsqueeze(0).expand_as(alpha)
norm = log_sum_exp(alpha, 1).squeeze(-1)
return norm
def viterbi_decode(self, logits, lens):
'''Borrowed from pytorch tutorial
Arguments:
logits: [batch_size, seq_len, n_labels] FloatTensor
lens: [batch_size] LongTensor
'''
batch_size, seq_len, n_labels = logits.size()
viterbi = logits.data.new(batch_size, self.n_labels).fill_(-10000)
viterbi[:, self.start_idx] = 0
viterbi = Variable(viterbi)
cloned_lens = lens.clone()
logits_transposed = logits.transpose(1, 0)
pointers = []
for logit in logits_transposed:
viterbi_expanded = viterbi.unsqueeze(1).expand(batch_size, n_labels, n_labels)
transition_expanded = self.transitions.unsqueeze(0).expand_as(viterbi_expanded)
viterbi_transition_sum = viterbi_expanded + transition_expanded
viterbi_max, viterbi_argmax = viterbi_transition_sum.max(2)
viterbi_max = viterbi_max.squeeze(-1)
viterbi_next = viterbi_max + logit
pointers.append(viterbi_argmax.squeeze(-1).unsqueeze(0))
mask = (cloned_lens > 0).float().unsqueeze(-1).expand_as(viterbi_next)
viterbi = mask * viterbi_next + (1 - mask) * viterbi
mask = (cloned_lens == 1).float().unsqueeze(-1).expand_as(viterbi_next)
viterbi += mask * self.transitions[self.stop_idx].unsqueeze(0).expand_as(viterbi_next)
cloned_lens = cloned_lens - 1
pointers = torch.cat(pointers)
scores, idx = viterbi.max(1)
paths = [idx.unsqueeze(1)]
for argmax in reversed(pointers):
idx_exp = idx.unsqueeze(-1)
idx = torch.gather(argmax, 1, idx_exp)
idx = idx.squeeze(-1)
paths.insert(0, idx.unsqueeze(1))
paths = torch.cat(paths[1:], 1)
scores = scores.squeeze(-1)
return scores, paths
def transition_score(self, labels, lens):
'''
Arguments:
labels: [batch_size, seq_len] LongTensor
lens: [batch_size] LongTensor
'''
batch_size, seq_len = labels.size()
# pad labels with <start> and <stop> indices
labels_ext = Variable(labels.data.new(batch_size, seq_len + 2))
labels_ext[:, 0] = self.start_idx
labels_ext[:, 1:-1] = labels
mask = sequence_mask(lens + 1, self.device, max_len=seq_len + 2).long()
pad_stop = Variable(labels.data.new(1).fill_(self.stop_idx))
pad_stop = pad_stop.unsqueeze(-1).expand(batch_size, seq_len + 2)
labels_ext = (1 - mask) * pad_stop + mask * labels_ext
labels = labels_ext
transitions = self.transitions
# obtain transition vector for each label in batch and timestep
# (except the last ones)
transitions_expanded = transitions.unsqueeze(0).expand(batch_size, *transitions.size())
labels_except_last = labels[:, 1:]
labels_except_last_expanded = labels_except_last.unsqueeze(-1)\
.expand(*labels_except_last.size(), transitions.size(0))
transitions_row = torch.gather(transitions_expanded, 1, labels_except_last_expanded)
# obtain transition score from the transition vector for each label
# in batch and timestep (except the first ones)
labels_except_first_expanded = labels[:, :-1].unsqueeze(-1)
transitions_score = torch.gather(transitions_row, 2, labels_except_first_expanded)
transitions_score = transitions_score.squeeze(-1)
mask = sequence_mask(lens + 1, self.device).float()
transitions_score = transitions_score * mask
score = transitions_score.sum(1).squeeze(-1)
return score
class LSTMCRF(nn.Module):
def __init__(self, crf, vocab_size, word_dim, hidden_dim, layers,
dropout_prob, device, alpha=0, bidirectional=False):
super(LSTMCRF, self).__init__()
self.device = device
self.word_dim = word_dim
self.hidden_dim = hidden_dim
self.lstm_layers = layers
self.dropout_prob = dropout_prob
self.alpha = alpha
self.crf = crf
self.bidirectional = bidirectional
self.n_labels = n_labels = self.crf.n_labels
self.embeddings = nn.ModuleList([nn.Embedding(vocab_size, word_dim).to(self.device)]).to(self.device)
self.output_hidden_dim = self.hidden_dim
if bidirectional:
self.output_hidden_dim *= 2
self.tanh = nn.Tanh()
self.input_layer = nn.Linear(self.word_dim, hidden_dim)
self.output_layer = nn.Linear(self.output_hidden_dim, n_labels)
self.lstm = nn.LSTM(input_size=hidden_dim,
hidden_size=hidden_dim,
num_layers=layers,
bidirectional=bidirectional,
dropout=dropout_prob,
batch_first=True)
def reset_parameters(self):
for emb in self.embeddings:
nn.init.xavier_normal_(emb.weight.data)
nn.init.xavier_normal_(self.input_layer.weight.data)
nn.init.xavier_normal_(self.output_layer.weight.data)
self.crf.reset_parameters()
self.lstm.reset_parameters()
def update_embedding(self, vocab_size):
self.embeddings = nn.ModuleList(
[nn.Embedding(vocab_size, self.word_dim).to(self.device)]
).to(self.device)
def _run_rnn_packed(self, cell, sequence_batch, sequence_batch_lens, h=None):
sequence_batch_packed = nn.utils.rnn.pack_padded_sequence(sequence_batch, sequence_batch_lens.data.tolist(),
batch_first=True)
if h is not None:
output, h = cell(sequence_batch_packed, h)
else:
output, h = cell(sequence_batch_packed)
output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
return output, h
def _embeddings(self, sequences_batch):
embedded_sequence_combination = self.embeddings[0].to(self.device)(sequences_batch[0].to(self.device))
return embedded_sequence_combination
def _forward_bilstm(self, sequences_batch, lens):
n_feats, batch_size, seq_len = sequences_batch.size()
embedded_sequences_batch = self._embeddings(sequences_batch)
embedded_sequences_batch = embedded_sequences_batch.view(-1, self.word_dim)
embedded_sequences_batch = self.tanh(self.input_layer(embedded_sequences_batch))
embedded_sequences_batch = embedded_sequences_batch.view(batch_size, seq_len, self.hidden_dim)
output, h = self._run_rnn_packed(self.lstm, embedded_sequences_batch, lens)
output = output.contiguous()
output = output.view(-1, self.output_hidden_dim)
output = self.tanh(self.output_layer(output))
output = output.view(batch_size, seq_len, self.n_labels)
return output
def _bilstm_score(self, logits, sequence_labels, lens):
sequence_labels_expanded = sequence_labels.unsqueeze(-1)
scores = torch.gather(logits, 2, sequence_labels_expanded).squeeze(-1)
mask = sequence_mask(lens, self.device).float()
scores = scores * mask
score = scores.sum(1).squeeze(-1)
return score
def score(self, sequences, sequence_labels, lens, logits=None):
if logits is None:
logits = self._forward_bilstm(sequences, lens)
transition_score = self.crf.transition_score(sequence_labels, lens)
bilstm_score = self._bilstm_score(logits, sequence_labels, lens)
score = transition_score + bilstm_score
return score
def predict(self, sequences, lens):
logits = self._forward_bilstm(sequences, lens)
scores, preds = self.crf.viterbi_decode(logits, lens)
return preds, scores, logits
def loglik(self, sequences, sequence_labels, lens, return_logits=False):
logits = self._forward_bilstm(sequences, lens)
norm_score = self.crf(logits, lens)
sequence_score = self.score(sequences, sequence_labels, lens, logits=logits)
loglik = sequence_score - norm_score - self.alpha*self.crf.transitions[2:-2, 2:-2].pow(2).sum()
if return_logits:
return loglik, logits
else:
return loglik
def sequence_mask(lens, device, max_len=None):
batch_size = lens.size(0)
if max_len is None:
max_len = lens.max().data
ranges = torch.arange(0, max_len).long().to(device)
ranges = ranges.unsqueeze(0).expand(batch_size, max_len)
ranges = Variable(ranges)
lens_expanded = lens.unsqueeze(1).expand_as(ranges)
mask = ranges < lens_expanded
return mask
avg_line_length: 39.606383 | max_line_length: 116 | alphanum_fraction: 0.640433
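The record above wires a (Bi)LSTM encoder into a CRF decoder. The following smoke-test sketch is my own example, not part of the repository; shapes are inferred from the code, where sequences is [n_feats, batch, seq_len], labels is [batch, seq_len] and lens holds the true sequence lengths:

# Smoke test for the CRF / LSTMCRF classes defined above, on random data.
import torch

device = torch.device("cpu")
n_word_types, n_label_types = 100, 10

crf = CRF(vocab_size=n_label_types, pad_idx=0, unk_idx=1, device=device)
model = LSTMCRF(crf, vocab_size=n_word_types, word_dim=16, hidden_dim=32,
                layers=1, dropout_prob=0.0, device=device, bidirectional=True)
model.reset_parameters()

batch, seq_len = 4, 7
sequences = torch.randint(0, n_word_types, (1, batch, seq_len))  # single feature channel
labels = torch.randint(0, n_label_types, (batch, seq_len))
lens = torch.full((batch,), seq_len, dtype=torch.long)

loss = -model.loglik(sequences, labels, lens).mean()  # negative log-likelihood
loss.backward()
preds, scores, logits = model.predict(sequences, lens)  # Viterbi decoding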
hexsha: abcba4fb0ab3e1ec4f96b6afc582f966847db034 | size: 1,224 | ext: py | lang: Python
max_stars_repo: Voided-Git/autotyper @ 12610d96c1b4b1babf46eb18c8d526c0a234657f | path: autotyper.py | licenses: ["MIT"] | max_stars_count: null | stars events: null
max_issues_repo: Voided-Git/autotyper @ 12610d96c1b4b1babf46eb18c8d526c0a234657f | path: autotyper.py | licenses: ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: Voided-Git/autotyper @ 12610d96c1b4b1babf46eb18c8d526c0a234657f | path: autotyper.py | licenses: ["MIT"] | max_forks_count: null | forks events: null
content:
# imports (keyboard writing, waiting, pressing with immediate releasing, sleeping and system)
from keyboard import write, wait, press_and_release
from os import system
from time import sleep
# set the window title
system("title AutoTyper")
# inputting integers (error checking)
def int_input(prompt: str):
while True:
try:
return int(input(prompt))
except ValueError:
print("Invalid value.")
# main loop
while True:
# asking for repeated message
message = input("Enter the message: ")
# asking for the amount of times to repeat the message
repeat = int_input("Enter the amount of times the message should be repeated: ")
# asking for the delay between each message
delay = int_input("Enter the delay between each message (0 can be used): ")
# waiting for 't' to send the messages
print("Press 't' to start sending messages.")
# waiting for 't' to be entered (removing the 't' once entered)
wait("t")
press_and_release("backspace")
    # sending loop (the configured delay is applied between messages, not between keystrokes)
    for _ in range(repeat):
        write(text=message)
        press_and_release("return")
        sleep(delay)
# when finished
input("All done! (press enter to continue)")
# cleanup
system("cls")
avg_line_length: 28.465116 | max_line_length: 84 | alphanum_fraction: 0.669935
hexsha: 9c8fb340782173b8bb0c4816cafdd918de3a2275 | size: 3,183 | ext: py | lang: Python
max_stars_repo: rcolomina/simple_flask_app @ 39bf21021f8669ba208d22b574468c9b1c147b63 | path: aux_functions.py | licenses: ["MIT"] | max_stars_count: null | stars events: null
max_issues_repo: rcolomina/simple_flask_app @ 39bf21021f8669ba208d22b574468c9b1c147b63 | path: aux_functions.py | licenses: ["MIT"] | max_issues_count: 1 | issues events: 2022-03-18T18:31:58.000Z to 2022-03-18T18:31:58.000Z
max_forks_repo: rcolomina/simple_flask_app @ 39bf21021f8669ba208d22b574468c9b1c147b63 | path: aux_functions.py | licenses: ["MIT"] | max_forks_count: null | forks events: null
content:
from config import Config
# Not Found message configured
def output_not_found(username):
print("debug: output_not_found")
message = Config.MSG_DATA_NOT_FOUND+" Username '"+username+ \
"' does not exist."
status = 404
data = {"status":status,"message":message}
return data,status
def output_bad_request(message):
print("debug: output_bad_request")
status = 400
message = Config.MSG_BAD_REQUEST+" "+message
data = {"status":status,"message":message}
return data,status
def check_email_reg_exp(email):
    ''' Check whether email follows a regular expression'''
    print("debug: checking email regular expression on " + email)
    import re
    # If email is passed check regular expression
    regexp = Config.REGEXP_EMAIL
    if email is not None and isinstance(email, str):
        if re.match(r"^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]*$", email):
            return True
    return False
def get_data_from_dict(mydict):
''' Return a tuple with the information required to build a contact '''
print('debug: get_data_from_dict')
    validKeys = ['username', 'email', 'firstname', 'surname']
    intersection = [value for value in validKeys if value in mydict.keys()]
    if intersection != validKeys:
        return Config.INVALID_KEYS
    username = mydict['username']
    if username is None:
        return Config.NULL_KEY_ID
# Here extract multiple emails (one or more)
list_emails = mydict['email']
# Check whether email content is a list
# e.g.: ['jonhsmith@gmail.com','jonhsmith@hotmail.com','jsmiths@ibm.com']
# Convert list into string "email1|email2|...|emailn"
# Check emails are correct accross them
# Check that emails come in a list
if not isinstance(list_emails,list):
#Check whether is a single string
if not isinstance(list_emails,str):
return Config.INVALID_EMAIL
else:
if not check_email_reg_exp(list_emails):
return Config.INVALID_EMAIL
else:
for email in list_emails:
if not isinstance(email,str):
return Config.INVALID_EMAIL
if not check_email_reg_exp(email):
return Config.INVALID_EMAIL
list_emails = "|".join(list_emails)
firstname = mydict['firstname']
surname = mydict['surname']
# Create tuple with the inputs to update or insert a new contact
return (username,list_emails,firstname,surname)
def string_on_extracted_data(extractData):
''' Determine whether extracted data is good or bad'''
print("debug: string_on_extracted_data")
if isinstance(extractData,str):
msg = Config.BAD_DATA
if extractData == Config.INVALID_USERNAME:
msg = Config.MSG_INVALID_USERNAME
if extractData == Config.INVALID_KEYS:
msg = Config.MSG_INVALID_KEYS
if extractData == Config.NULL_KEY_ID:
msg = Config.MSG_NULL_KEY_ID
if extractData == Config.INVALID_EMAIL:
msg = Config.MSG_INVALID_EMAIL
return (True,msg)
else:
return (False,"OK")
avg_line_length: 33.861702 | max_line_length: 79 | alphanum_fraction: 0.642476
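A hedged illustration of the happy path through get_data_from_dict above (my own example; the payload values are made up and the application's Config module must be importable):

# Hypothetical payload with all four required keys and a list of valid emails.
payload = {
    "username": "jsmith",
    "email": ["john.smith@example.com", "j.smith@example.org"],
    "firstname": "John",
    "surname": "Smith",
}
username, emails, firstname, surname = get_data_from_dict(payload)
# emails is the pipe-joined string "john.smith@example.com|j.smith@example.org"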
hexsha: 99eeafca934aac326b7d6b8f1d2ed31c5d0e456d | size: 1,611 | ext: py | lang: Python
max_stars_repo: bryngemark/aCT @ be6d0fec0bd1313cb112c1901f21b3e39ccca305 | path: src/act/client/actproxy.py | licenses: ["Apache-2.0"] | max_stars_count: null | stars events: null
max_issues_repo: bryngemark/aCT @ be6d0fec0bd1313cb112c1901f21b3e39ccca305 | path: src/act/client/actproxy.py | licenses: ["Apache-2.0"] | max_issues_count: null | issues events: null
max_forks_repo: bryngemark/aCT @ be6d0fec0bd1313cb112c1901f21b3e39ccca305 | path: src/act/client/actproxy.py | licenses: ["Apache-2.0"] | max_forks_count: null | forks events: null
content:
#!/usr/bin/env python3
"""
Insert proxy certificate into aCT.
Returns:
8: Error inserting or updating proxy.
"""
import argparse
import os
import sys
import logging
import act.client.proxymgr as proxymgr
from act.client.errors import NoProxyFileError
def printProxyInfo(proxyInfo):
"""Print proxy info from aCT table."""
for key, value in proxyInfo.items():
print('{:<12}: {}'.format(key, value))
def main():
# parse arguments
parser = argparse.ArgumentParser(description = 'aCT proxies utility')
parser.add_argument('-p', '--proxy', default = None, help = 'custom path to proxy')
parser.add_argument('-v', '--verbose', action='store_true',
help='show more information')
args = parser.parse_args()
# logging
logFormat = "[%(asctime)s] [%(filename)s:%(lineno)d] [%(levelname)s] - %(message)s"
if args.verbose:
logging.basicConfig(format=logFormat, level=logging.DEBUG, stream=sys.stdout)
else:
logging.basicConfig(format=logFormat, level=logging.DEBUG, filename=os.devnull)
# determine proxy file path from args
if not args.proxy: # default proxy path is /tmp/x509_u<user id>
proxyPath = '/tmp/x509up_u' + str(os.getuid())
else:
proxyPath = args.proxy
manager = proxymgr.ProxyManager()
try:
manager.updateProxy(proxyPath)
except NoProxyFileError as e:
print("error: path \"{}\" is not a proxy file; use arcproxy".format(e.path))
except Exception as e:
print('error: {}'.format(str(e)))
sys.exit(8)
if __name__ == '__main__':
main()
avg_line_length: 26.409836 | max_line_length: 87 | alphanum_fraction: 0.655493
hexsha: 4178ae97a31f925a5014aae944ec1ec68178bd26 | size: 1,983 | ext: py | lang: Python
max_stars_repo: hmartelb/speech-denoising @ 78424200d16215c100d9eb47e73dfc67845a0fbf | path: getmodel.py | licenses: ["MIT"] | max_stars_count: 4 | stars events: 2021-06-20T10:31:50.000Z to 2022-03-15T01:19:14.000Z
max_issues_repo: hmartelb/speech-denoising @ 78424200d16215c100d9eb47e73dfc67845a0fbf | path: getmodel.py | licenses: ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: hmartelb/speech-denoising @ 78424200d16215c100d9eb47e73dfc67845a0fbf | path: getmodel.py | licenses: ["MIT"] | max_forks_count: 4 | forks events: 2021-08-14T19:06:13.000Z to 2022-02-25T05:09:35.000Z
content:
import torch.nn.functional as F
from torchaudio.models import ConvTasNet
from losses import ScaleInvariantSDRLoss
from models import UNet, UNetDNP, TransUNet
default_params = {
"UNet": {
"n_channels": 1,
"n_class": 2,
"unet_depth": 6,
"unet_scale_factor": 16,
},
"UNetDNP": {
"n_channels": 1,
"n_class": 2,
"unet_depth": 6,
"n_filters": 16,
},
"ConvTasNet": {
"num_sources": 2,
"enc_kernel_size": 16,
"enc_num_feats": 128,
"msk_kernel_size": 3,
"msk_num_feats": 32,
"msk_num_hidden_feats": 128,
"msk_num_layers": 8,
"msk_num_stacks": 3,
},
"TransUNet": {
"img_dim": 256,
"in_channels": 1,
"classes": 2,
"vit_blocks": 6, # 12
"vit_heads": 4,
"vit_dim_linear_mhsa_block": 128, # 1024
"apply_masks": True
},
"SepFormer": {},
}
def get_model(name, parameters=None):
if not parameters:
parameters = default_params[name]
    if name == "UNet":
        model = UNet(**parameters)
        data_mode = "amplitude"
        loss_fn = F.mse_loss
        loss_mode = "min"
    elif name == "UNetDNP":
        model = UNetDNP(**parameters)
        data_mode = "time"
        loss_fn = ScaleInvariantSDRLoss
        loss_mode = "max"
    elif name == "ConvTasNet":
        model = ConvTasNet(**parameters)
        data_mode = "time"
        loss_fn = ScaleInvariantSDRLoss
        loss_mode = "max"
    elif name == "TransUNet":
        model = TransUNet(**parameters)
        data_mode = "amplitude"
        loss_fn = F.mse_loss
        loss_mode = "min"
    # elif name == "SepFormer":
    #     model = Sepformer(**parameters)
    #     data_mode = "time"
    #     loss_fn = ScaleInvariantSDRLoss
    #     loss_mode = "max"
    else:
        raise ValueError("Unknown model name: {}".format(name))
return {
"model": model,
"data_mode": data_mode,
"loss_fn": loss_fn,
"loss_mode": loss_mode,
}
avg_line_length: 24.182927 | max_line_length: 48 | alphanum_fraction: 0.544629
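A short usage sketch for the get_model factory above (my own example; it only unpacks the returned bundle and assumes the local models and losses modules are importable):

# Build the ConvTasNet bundle with its default hyperparameters from default_params.
bundle = get_model("ConvTasNet")
model = bundle["model"]          # torchaudio ConvTasNet configured for 2 sources
loss_fn = bundle["loss_fn"]      # ScaleInvariantSDRLoss, to be maximized (loss_mode == "max")
data_mode = bundle["data_mode"]  # "time": the model consumes raw waveforms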
hexsha: a86cf1a822ea9dba9ee77c15a29a38ba92e1fb43 | size: 17,931 | ext: py | lang: Python
max_stars_repo: jubrad/salt @ 7960334fb726cfde45e6409da79a65535c626685 | path: tests/integration/shell/test_call.py | licenses: ["Apache-2.0"] | max_stars_count: 1 | stars events: 2021-08-14T13:48:38.000Z to 2021-08-14T13:48:38.000Z
max_issues_repo: jubrad/salt @ 7960334fb726cfde45e6409da79a65535c626685 | path: tests/integration/shell/test_call.py | licenses: ["Apache-2.0"] | max_issues_count: 3 | issues events: 2015-03-31T14:44:05.000Z to 2015-06-18T19:02:24.000Z
max_forks_repo: jubrad/salt @ 7960334fb726cfde45e6409da79a65535c626685 | path: tests/integration/shell/test_call.py | licenses: ["Apache-2.0"] | max_forks_count: 1 | forks events: 2020-01-02T09:03:24.000Z to 2020-01-02T09:03:24.000Z
content:
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
tests.integration.shell.call
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
from __future__ import absolute_import
import getpass
import os
import sys
import re
import shutil
from datetime import datetime
import logging
# Import 3rd-party libs
import yaml
# Import Salt Testing libs
from tests.support.case import ShellCase
from tests.support.unit import skipIf
from tests.support.paths import FILES, TMP
from tests.support.mixins import ShellCaseCommonTestsMixin
from tests.support.helpers import destructiveTest
from tests.integration.utils import testprogram
# Import salt libs
import salt.utils.files
from salt.ext import six
log = logging.getLogger(__name__)
_PKG_TARGETS = {
'Arch': ['python2-django', 'libpng'],
'Debian': ['python-plist', 'apg'],
'RedHat': ['xz-devel', 'zsh-html'],
'FreeBSD': ['aalib', 'pth'],
'SUSE': ['aalib', 'python-pssh']
}
_PKGS_INSTALLED = set()
class CallTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMixin):
_call_binary_ = 'salt-call'
def test_default_output(self):
out = self.run_call('-l quiet test.fib 3')
expect = ['local:',
' - 2']
self.assertEqual(expect, out[:-1])
def test_text_output(self):
out = self.run_call('-l quiet --out txt test.fib 3')
expect = [
'local: (2'
]
self.assertEqual(''.join(expect), ''.join(out).rsplit(",", 1)[0])
def test_json_out_indent(self):
out = self.run_call('test.ping -l quiet --out=json --out-indent=-1')
self.assertIn('"local": true', ''.join(out))
out = self.run_call('test.ping -l quiet --out=json --out-indent=0')
self.assertIn('"local": true', ''.join(out))
out = self.run_call('test.ping -l quiet --out=json --out-indent=1')
self.assertIn('"local": true', ''.join(out))
def test_local_sls_call(self):
fileroot = os.path.join(FILES, 'file', 'base')
out = self.run_call('--file-root {0} --local state.sls saltcalllocal'.format(fileroot))
self.assertIn('Name: test.echo', ''.join(out))
self.assertIn('Result: True', ''.join(out))
self.assertIn('hello', ''.join(out))
self.assertIn('Succeeded: 1', ''.join(out))
@destructiveTest
@skipIf(True, 'Skipping due to off the wall failures and hangs on most os\'s. Will re-enable when fixed.')
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
@skipIf(getpass.getuser() == 'root', 'Requires root to test pkg.install')
def test_local_pkg_install(self):
'''
Test to ensure correct output when installing package
'''
get_os_family = self.run_call('--local grains.get os_family')
pkg_targets = _PKG_TARGETS.get(get_os_family[1].strip(), [])
check_pkg = self.run_call('--local pkg.list_pkgs')
for pkg in pkg_targets:
if pkg not in str(check_pkg):
out = self.run_call('--local pkg.install {0}'.format(pkg))
self.assertIn('local: ----------', ''.join(out))
self.assertIn('{0}: ----------'.format(pkg), ''.join(out))
self.assertIn('new:', ''.join(out))
self.assertIn('old:', ''.join(out))
_PKGS_INSTALLED.add(pkg)
else:
log.debug('The pkg: {0} is already installed on the machine'.format(pkg))
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
def test_user_delete_kw_output(self):
ret = self.run_call('-l quiet -d user.delete')
self.assertIn(
'salt \'*\' user.delete name remove=True force=True',
''.join(ret)
)
def test_salt_documentation_too_many_arguments(self):
'''
Test to see if passing additional arguments shows an error
'''
data = self.run_call('-d virtualenv.create /tmp/ve', catch_stderr=True)
self.assertIn('You can only get documentation for one method at one time', '\n'.join(data[1]))
def test_issue_6973_state_highstate_exit_code(self):
'''
If there is no tops/master_tops or state file matches
for this minion, salt-call should exit non-zero if invoked with
option --retcode-passthrough
'''
src = os.path.join(FILES, 'file/base/top.sls')
dst = os.path.join(FILES, 'file/base/top.sls.bak')
shutil.move(src, dst)
expected_comment = 'No states found for this minion'
try:
stdout, retcode = self.run_call(
'-l quiet --retcode-passthrough state.highstate',
with_retcode=True
)
finally:
shutil.move(dst, src)
self.assertIn(expected_comment, ''.join(stdout))
self.assertNotEqual(0, retcode)
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
@skipIf(True, 'to be re-enabled when #23623 is merged')
def test_return(self):
self.run_call('cmd.run "echo returnTOmaster"')
jobs = [a for a in self.run_run('jobs.list_jobs')]
self.assertTrue(True in ['returnTOmaster' in j for j in jobs])
# lookback jid
first_match = [(i, j)
for i, j in enumerate(jobs)
if 'returnTOmaster' in j][0]
jid, idx = None, first_match[0]
while idx > 0:
jid = re.match("([0-9]+):", jobs[idx])
if jid:
jid = jid.group(1)
break
idx -= 1
assert idx > 0
assert jid
master_out = [
a for a in self.run_run('jobs.lookup_jid {0}'.format(jid))
]
self.assertTrue(True in ['returnTOmaster' in a for a in master_out])
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
def test_issue_2731_masterless(self):
root_dir = os.path.join(TMP, 'issue-2731')
config_dir = os.path.join(root_dir, 'conf')
minion_config_file = os.path.join(config_dir, 'minion')
logfile = os.path.join(root_dir, 'minion_test_issue_2731')
if not os.path.isdir(config_dir):
os.makedirs(config_dir)
with salt.utils.files.fopen(self.get_config_file_path('master')) as fhr:
master_config = yaml.load(fhr.read())
master_root_dir = master_config['root_dir']
this_minion_key = os.path.join(
master_root_dir, 'pki', 'master', 'minions', 'minion_test_issue_2731'
)
minion_config = {
'id': 'minion_test_issue_2731',
'master': 'localhost',
'master_port': 64506,
'root_dir': master_root_dir,
'pki_dir': 'pki',
'cachedir': 'cachedir',
'sock_dir': 'minion_sock',
'open_mode': True,
'log_file': logfile,
'log_level': 'quiet',
'log_level_logfile': 'info',
'transport': self.master_opts['transport'],
}
try:
# Remove existing logfile
if os.path.isfile(logfile):
os.unlink(logfile)
start = datetime.now()
# Let's first test with a master running
with salt.utils.files.fopen(minion_config_file, 'w') as fh_:
fh_.write(
yaml.dump(minion_config, default_flow_style=False)
)
ret = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
)
)
try:
self.assertIn('local:', ret)
except AssertionError:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
raise
# Calculate the required timeout, since next will fail.
# I needed this because after many attempts, I was unable to catch:
# WARNING: Master hostname: salt not found. Retrying in 30 seconds
            elapsed = datetime.now() - start
            timeout = elapsed.seconds + 3
# Now let's remove the master configuration
minion_config.pop('master')
minion_config.pop('master_port')
with salt.utils.files.fopen(minion_config_file, 'w') as fh_:
fh_.write(
yaml.dump(minion_config, default_flow_style=False)
)
out = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
),
timeout=timeout,
)
try:
self.assertIn(
'Process took more than {0} seconds to complete. '
'Process Killed!'.format(timeout),
out
)
except AssertionError:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
raise
# Should work with --local
ret = self.run_script(
'salt-call',
'--config-dir {0} --local cmd.run "echo foo"'.format(
config_dir
),
timeout=60
)
try:
self.assertIn('local:', ret)
except AssertionError:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
raise
# Should work with local file client
minion_config['file_client'] = 'local'
with salt.utils.files.fopen(minion_config_file, 'w') as fh_:
fh_.write(
yaml.dump(minion_config, default_flow_style=False)
)
ret = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
),
timeout=60
)
self.assertIn('local:', ret)
finally:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
def test_issue_7754(self):
old_cwd = os.getcwd()
config_dir = os.path.join(TMP, 'issue-7754')
if not os.path.isdir(config_dir):
os.makedirs(config_dir)
os.chdir(config_dir)
with salt.utils.files.fopen(self.get_config_file_path('minion'), 'r') as fh_:
minion_config = yaml.load(fh_.read())
minion_config['log_file'] = 'file:///dev/log/LOG_LOCAL3'
with salt.utils.files.fopen(os.path.join(config_dir, 'minion'), 'w') as fh_:
fh_.write(
yaml.dump(minion_config, default_flow_style=False)
)
ret = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
),
timeout=60,
catch_stderr=True,
with_retcode=True
)
try:
self.assertIn('local:', ret[0])
self.assertFalse(os.path.isdir(os.path.join(config_dir, 'file:')))
except AssertionError:
# We now fail when we're unable to properly set the syslog logger
self.assertIn(
'Failed to setup the Syslog logging handler', '\n'.join(ret[1])
)
self.assertEqual(ret[2], 2)
finally:
self.chdir(old_cwd)
if os.path.isdir(config_dir):
shutil.rmtree(config_dir)
def test_issue_15074_output_file_append(self):
output_file_append = os.path.join(TMP, 'issue-15074')
try:
# Let's create an initial output file with some data
_ = self.run_script(
'salt-call',
'-c {0} --output-file={1} test.versions'.format(
self.get_config_dir(),
output_file_append
),
catch_stderr=True,
with_retcode=True
)
with salt.utils.files.fopen(output_file_append) as ofa:
output = ofa.read()
self.run_script(
'salt-call',
'-c {0} --output-file={1} --output-file-append test.versions'.format(
self.get_config_dir(),
output_file_append
),
catch_stderr=True,
with_retcode=True
)
with salt.utils.files.fopen(output_file_append) as ofa:
self.assertEqual(ofa.read(), output + output)
finally:
if os.path.exists(output_file_append):
os.unlink(output_file_append)
def test_issue_14979_output_file_permissions(self):
output_file = os.path.join(TMP, 'issue-14979')
current_umask = os.umask(0o077)
try:
# Let's create an initial output file with some data
self.run_script(
'salt-call',
'-c {0} --output-file={1} -g'.format(
self.get_config_dir(),
output_file
),
catch_stderr=True,
with_retcode=True
)
stat1 = os.stat(output_file)
# Let's change umask
os.umask(0o777)
self.run_script(
'salt-call',
'-c {0} --output-file={1} --output-file-append -g'.format(
self.get_config_dir(),
output_file
),
catch_stderr=True,
with_retcode=True
)
stat2 = os.stat(output_file)
self.assertEqual(stat1.st_mode, stat2.st_mode)
            # Data was appended to file
self.assertTrue(stat1.st_size < stat2.st_size)
# Let's remove the output file
os.unlink(output_file)
# Not appending data
self.run_script(
'salt-call',
'-c {0} --output-file={1} -g'.format(
self.get_config_dir(),
output_file
),
catch_stderr=True,
with_retcode=True
)
stat3 = os.stat(output_file)
# Mode must have changed since we're creating a new log file
self.assertNotEqual(stat1.st_mode, stat3.st_mode)
finally:
if os.path.exists(output_file):
os.unlink(output_file)
# Restore umask
os.umask(current_umask)
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
def test_42116_cli_pillar_override(self):
ret = self.run_call(
'state.apply issue-42116-cli-pillar-override '
'pillar=\'{"myhost": "localhost"}\''
)
for line in ret:
line = line.lstrip()
if line == 'Comment: Command "ping -c 2 localhost" run':
# Successful test
break
else:
log.debug('salt-call output:\n\n%s', '\n'.join(ret))
self.fail('CLI pillar override not found in pillar data')
def tearDown(self):
'''
Teardown method to remove installed packages
'''
user = ''
user_info = self.run_call('--local grains.get username')
if user_info and isinstance(user_info, (list, tuple)) and isinstance(user_info[-1], six.string_types):
user = user_info[-1].strip()
if user == 'root':
for pkg in _PKGS_INSTALLED:
_ = self.run_call('--local pkg.remove {0}'.format(pkg))
super(CallTest, self).tearDown()
# pylint: disable=invalid-name
def test_exit_status_unknown_argument(self):
'''
Ensure correct exit status when an unknown argument is passed to salt-call.
'''
call = testprogram.TestProgramSaltCall(
name='unknown_argument',
parent_dir=self._test_dir,
)
# Call setup here to ensure config and script exist
call.setup()
stdout, stderr, status = call.run(
args=['--unknown-argument'],
catch_stderr=True,
with_retcode=True,
)
self.assert_exit_status(
status, 'EX_USAGE',
message='unknown argument',
stdout=stdout, stderr=stderr
)
def test_exit_status_correct_usage(self):
'''
Ensure correct exit status when salt-call starts correctly.
'''
call = testprogram.TestProgramSaltCall(
name='correct_usage',
parent_dir=self._test_dir,
)
# Call setup here to ensure config and script exist
call.setup()
stdout, stderr, status = call.run(
args=['--local', 'test.true'],
catch_stderr=True,
with_retcode=True,
)
self.assert_exit_status(
status, 'EX_OK',
message='correct usage',
stdout=stdout, stderr=stderr
)
avg_line_length: 35.790419 | max_line_length: 110 | alphanum_fraction: 0.541799
hexsha: 4a3cf952f6d43a71b73f8438e110a062a1bcc724 | size: 5,269 | ext: py | lang: Python
max_stars_repo: DelphyAlan/AON @ 7db113d20ee6b17f9fbc2f627e059fd46d134535 | path: train.py | licenses: ["MIT"] | max_stars_count: null | stars events: null
max_issues_repo: DelphyAlan/AON @ 7db113d20ee6b17f9fbc2f627e059fd46d134535 | path: train.py | licenses: ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: DelphyAlan/AON @ 7db113d20ee6b17f9fbc2f627e059fd46d134535 | path: train.py | licenses: ["MIT"] | max_forks_count: null | forks events: null
content:
import tensorflow.compat.v1 as tf
from model_aon import inference, get_train_op, get_init_op
from input_data import get_batch_data
import os
import numpy as np
tf.disable_v2_behavior()
flags = tf.app.flags
flags.DEFINE_string('exp_dir', 'exp_log', 'experiment model save directory')
flags.DEFINE_integer('batch_size', 32, 'define train batch size')
flags.DEFINE_integer('max_steps', 200000, 'step nums for training')
flags.DEFINE_boolean('restore', False, 'restore model parameter from checkpoint file')
flags.DEFINE_string('tfrecord_file_path', 'C://Users/Delphy/Downloads/train/train.tfrecord', 'tfrecord file path')
flags.DEFINE_boolean('single_seq', False, 'Use FG or not')
FLAGS = flags.FLAGS
def main(unused_argv):
if FLAGS.exp_dir:
checkpoint_dir = os.path.join(FLAGS.exp_dir, 'model.ckpt')
train_log_write_dir = os.path.join(FLAGS.exp_dir, 'log/train')
global_step = tf.Variable(0, name='global_step', trainable=False)
with tf.name_scope('input'):
image_placeholder = tf.placeholder(shape=[None, 100, 100, 3], dtype=tf.float32)
groundtruth_text_placeholder = tf.placeholder(shape=[None,], dtype=tf.string)
tf.summary.image('input_image', image_placeholder, FLAGS.batch_size)
print('image_placeholder', image_placeholder)
print('groundtruth_placeholder', groundtruth_text_placeholder)
output_tensor_dict, eval_output_tensor_dict = inference(
image_placeholder, groundtruth_text_placeholder, FLAGS.single_seq)
loss_tensor = output_tensor_dict['loss']
output_labels_tensor = output_tensor_dict['labels']
output_predict_text_tensor = output_tensor_dict['predict_text']
print('output_predict_text_tensor', output_predict_text_tensor)
probabilities_tensor = output_tensor_dict['probabilities']
output_eval_text_tensor = eval_output_tensor_dict['predict_text'] # For EVAL
print('output_eval_text_tensor', output_eval_text_tensor)
train_op = get_train_op(loss_tensor, global_step)
batch_tensor_dict = get_batch_data(FLAGS.tfrecord_file_path, mode='train', batch_size=FLAGS.batch_size)
decoder_inputs_tensor = tf.get_default_graph().get_tensor_by_name("attention_decoder/concat:0")
decoder_targets_tensor = tf.get_default_graph().get_tensor_by_name("attention_decoder/concat_1:0")
sess = tf.Session()
train_writer = tf.summary.FileWriter(train_log_write_dir, sess.graph)
summary_merge_tensor = tf.summary.merge_all()
sess.run(get_init_op())
total_loss = 0.0
begin_step = 0
saver = tf.train.Saver()
if os.path.exists(os.path.join(FLAGS.exp_dir, 'checkpoint')) and FLAGS.restore:
save_path = tf.train.latest_checkpoint(FLAGS.exp_dir)
saver.restore(sess, save_path=save_path)
begin_step = sess.run(global_step)
print('Restore model from {} successful, continue training from step {}'.format(save_path, begin_step))
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
try:
for step in range(begin_step, FLAGS.max_steps):
if coord.should_stop():
break
batch_dict = sess.run(batch_tensor_dict)
images = batch_dict['images']
groundtruth_text = np.char.lower(batch_dict['groundtruth_text'].astype('str'))
feed_dict = {image_placeholder: images, groundtruth_text_placeholder: groundtruth_text}
_, loss = sess.run([train_op, loss_tensor], feed_dict=feed_dict)
total_loss += loss
if step % 100 == 0:
summary, output_labels, output_predict_text, decoder_inputs, decoder_targets= sess.run(
[summary_merge_tensor, output_labels_tensor, output_predict_text_tensor, decoder_inputs_tensor, decoder_targets_tensor],
feed_dict=feed_dict
)
probabilities = sess.run(probabilities_tensor, feed_dict)
eval_text = sess.run(output_eval_text_tensor, feed_dict={image_placeholder: images})
train_writer.add_summary(summary, step)
print('Step {}, loss {}'.format(step, total_loss / 100))
print('out_labels\n', output_labels[:5])
print('predict_text\n', output_predict_text[:5])
print('probabilities\n', probabilities[:5])
print('groundtruth_text\n', groundtruth_text[:5])
print('decoder_inputs\n', decoder_inputs[:5])
print('decoder_targets\n', decoder_targets[:5])
print('eval_text\n', eval_text[:5])
sample_image = images[:1]
print('Use a sample: ', sess.run(output_eval_text_tensor, feed_dict={image_placeholder: sample_image}))
print()
print()
total_loss = 0.0
if step % 1000 == 0:
saver.save(sess, save_path=checkpoint_dir, global_step=global_step)
print('Write checkpoint {}'.format(sess.run(global_step)))
except tf.errors.OutOfRangeError:
print('All finished')
finally:
coord.request_stop()
coord.join(threads)
sess.close()
if __name__ == '__main__':
tf.app.run()
avg_line_length: 45.034188 | max_line_length: 140 | alphanum_fraction: 0.683242
hexsha: 64c2c81aa04f3ca05115b16cc85f6e6ad79635f6 | size: 14,945 | ext: py | lang: Python
max_stars_repo: jbpoline/dcmstack @ f93e8ea8ee6aedc2aef041876687243ad99fad7e | path: src/dcmstack/extract.py | licenses: ["MIT"] | max_stars_count: 1 | stars events: 2018-09-05T11:33:24.000Z to 2018-09-05T11:33:24.000Z
max_issues_repo: jbpoline/dcmstack @ f93e8ea8ee6aedc2aef041876687243ad99fad7e | path: src/dcmstack/extract.py | licenses: ["MIT"] | max_issues_count: null | issues events: null
max_forks_repo: jbpoline/dcmstack @ f93e8ea8ee6aedc2aef041876687243ad99fad7e | path: src/dcmstack/extract.py | licenses: ["MIT"] | max_forks_count: null | forks events: null
content:
"""
Extract meta data from a DICOM data set.
"""
import struct, warnings
from collections import OrderedDict, namedtuple, Counter
import dicom
from dicom.datadict import keyword_for_tag
from nibabel.nicom import csareader
#This is needed to allow extraction on files with invalid values (e.g. too
#long of a decimal string)
dicom.config.enforce_valid_values = False
def ignore_private(elem):
'''Ignore rule for `MetaExtractor` to skip private DICOM elements (odd
group number).'''
if elem.tag.group % 2 == 1:
return True
return False
def ignore_non_ascii_bytes(elem):
'''Ignore rule for `MetaExtractor` to skip elements with VR of 'OW', 'OB',
or 'UN' if the byte string contains non ASCII characters.'''
if elem.VR in ('OW', 'OB', 'OW or OB', 'UN'):
if not all(' ' <= c <= '~' for c in elem.value):
return True
return False
default_ignore_rules = (ignore_private,
ignore_non_ascii_bytes)
'''The default tuple of ignore rules for `MetaExtractor`.'''
Translator = namedtuple('Translator', ['name',
'tag',
'priv_creator',
'trans_func']
)
'''A namedtuple for storing the four elements of a translator: a name, the
dicom.tag.Tag that can be translated, the private creator string (optional), and
the function which takes the DICOM element and returns a dictionary.'''
def simplify_csa_dict(csa_dict):
'''Simplify the result of nibabel.nicom.csareader.
Parameters
----------
csa_dict : dict
The result from nibabel.nicom.csareader
Returns
-------
result : OrderedDict
Result where the keys come from the 'tags' sub dictionary of `csa_dict`.
The values come from the 'items' within that tags sub sub dictionary.
If items has only one element it will be unpacked from the list.
'''
if csa_dict is None:
return None
result = OrderedDict()
for tag in csa_dict['tags']:
items = csa_dict['tags'][tag]['items']
if len(items) == 0:
continue
elif len(items) == 1:
result[tag] = items[0]
else:
result[tag] = items
return result
def csa_image_trans_func(elem):
'''Function for translating the CSA image sub header.'''
return simplify_csa_dict(csareader.read(elem.value))
csa_image_trans = Translator('CsaImage',
dicom.tag.Tag(0x29, 0x1010),
'SIEMENS CSA HEADER',
csa_image_trans_func)
'''Translator for the CSA image sub header.'''
class PhoenixParseError(Exception):
'''Exception indicating a error parsing a line from the Phoenix Protocol.
'''
def __init__(self, line):
self.line = line
def __str__(self):
return 'Unable to parse phoenix protocol line: %s' % self.line
def _parse_phoenix_line(line, str_delim='""'):
delim_len = len(str_delim)
#Handle most comments (not always when string literal involved)
comment_idx = line.find('#')
if comment_idx != -1:
#Check if the pound sign is in a string literal
if line[:comment_idx].count(str_delim) == 1:
if line[comment_idx:].find(str_delim) == -1:
raise PhoenixParseError(line)
else:
line = line[:comment_idx]
#Allow empty lines
if line.strip() == '':
return None
#Find the first equals sign and use that to split key/value
equals_idx = line.find('=')
if equals_idx == -1:
raise PhoenixParseError(line)
key = line[:equals_idx].strip()
val_str = line[equals_idx + 1:].strip()
#If there is a string literal, pull that out
if val_str.startswith(str_delim):
end_quote = val_str[delim_len:].find(str_delim) + delim_len
if end_quote == -1:
raise PhoenixParseError(line)
elif not end_quote == len(val_str) - delim_len:
#Make sure remainder is just comment
if not val_str[end_quote+delim_len:].strip().startswith('#'):
raise PhoenixParseError(line)
return (key, val_str[2:end_quote])
else: #Otherwise try to convert to an int or float
val = None
try:
val = int(val_str)
except ValueError:
pass
else:
return (key, val)
try:
val = int(val_str, 16)
except ValueError:
pass
else:
return (key, val)
try:
val = float(val_str)
except ValueError:
pass
else:
return (key, val)
raise PhoenixParseError(line)
def parse_phoenix_prot(prot_key, prot_val):
'''Parse the MrPheonixProtocol string.
Parameters
----------
prot_str : str
The 'MrPheonixProtocol' string from the CSA Series sub header.
Returns
-------
prot_dict : OrderedDict
Meta data pulled from the ASCCONV section.
Raises
------
PhoenixParseError : A line of the ASCCONV section could not be parsed.
'''
if prot_key == 'MrPhoenixProtocol':
str_delim = '""'
elif prot_key == 'MrProtocol':
str_delim = '"'
else:
raise ValueError('Unknown protocol key: %s' % prot_key)
ascconv_start = prot_val.find('### ASCCONV BEGIN ###')
ascconv_end = prot_val.find('### ASCCONV END ###')
ascconv = prot_val[ascconv_start:ascconv_end].split('\n')[1:-1]
result = OrderedDict()
for line in ascconv:
parse_result = _parse_phoenix_line(line, str_delim)
if parse_result:
result[parse_result[0]] = parse_result[1]
return result
def csa_series_trans_func(elem):
'''Function for parsing the CSA series sub header.'''
csa_dict = simplify_csa_dict(csareader.read(elem.value))
#If there is a phoenix protocol, parse it and dump it into the csa_dict
phx_src = None
if 'MrPhoenixProtocol' in csa_dict:
phx_src = 'MrPhoenixProtocol'
elif 'MrProtocol' in csa_dict:
phx_src = 'MrProtocol'
    if phx_src is not None:
phoenix_dict = parse_phoenix_prot(phx_src, csa_dict[phx_src])
del csa_dict[phx_src]
for key, val in phoenix_dict.iteritems():
new_key = '%s.%s' % ('MrPhoenixProtocol', key)
csa_dict[new_key] = val
return csa_dict
csa_series_trans = Translator('CsaSeries',
dicom.tag.Tag(0x29, 0x1020),
'SIEMENS CSA HEADER',
csa_series_trans_func)
'''Translator for parsing the CSA series sub header.'''
default_translators = (csa_image_trans,
csa_series_trans,
)
'''Default translators for MetaExtractor.'''
def tag_to_str(tag):
'''Convert a DICOM tag to a string representation using the group and
element hex values seprated by an underscore.'''
return '%#X_%#X' % (tag.group, tag.elem)
unpack_vr_map = {'SL' : 'i',
'UL' : 'I',
'FL' : 'f',
'FD' : 'd',
'SS' : 'h',
'US' : 'H',
'US or SS' : 'H',
}
'''Dictionary mapping value representations to corresponding format strings for
the struct.unpack function.'''
class MetaExtractor(object):
'''Callable object for extracting meta data from a dicom dataset'''
def __init__(self, ignore_rules=None, translators=None,
warn_on_trans_except=True):
'''Initialize a `MetaExtractor` with a set of ignore rules and
translators.
Parameters
----------
ignore_rules : sequence
A sequence of callables, each of which should take a DICOM element
and return True if it should be ignored. If None the module
default is used.
translators : sequence
A sequence of `Translator` objects each of which can convert a
DICOM element into a dictionary. Overrides any ignore rules. If
None the module default is used.
warn_on_trans_except : bool
Convert any exceptions from translators into warnings.
'''
if ignore_rules is None:
self.ignore_rules = default_ignore_rules
else:
self.ignore_rules = ignore_rules
if translators is None:
self.translators = default_translators
else:
self.translators = translators
self.warn_on_trans_except = warn_on_trans_except
def _get_elem_key(self, elem):
'''Get the key for any non-translated elements.'''
#Use standard DICOM keywords if possible
key = keyword_for_tag(elem.tag)
#For private tags we take elem.name and convert to camel case
if key == '':
key = elem.name
if key.startswith('[') and key.endswith(']'):
key = key[1:-1]
tokens = [token[0].upper() + token[1:]
for token in key.split()]
key = ''.join(tokens)
return key
def _get_elem_value(self, elem):
'''Get the value for any non-translated elements'''
if elem.VR in unpack_vr_map and isinstance(elem.value, str):
if elem.VM == 1:
return struct.unpack(unpack_vr_map[elem.VR], elem.value)[0]
else:
return list(struct.unpack(unpack_vr_map[elem.VR], elem.value))
if elem.VM == 1:
if elem.VR == 'DS':
return float(elem.value)
elif elem.VR == 'IS':
return int(elem.value)
else:
return elem.value
else:
if elem.VR == 'DS':
return [float(val) for val in elem.value]
elif elem.VR == 'IS':
return [int(val) for val in elem.value]
else:
return elem.value[:]
def __call__(self, dcm):
'''Extract the meta data from a DICOM dataset.
Parameters
----------
dcm : dicom.dataset.Dataset
The DICOM dataset to extract the meta data from.
Returns
-------
meta : dict
A dictionary of extracted meta data.
Notes
-----
Non-private tags use the DICOM keywords as keys. Translators have their
name, followed by a dot, prepended to the keys of any meta elements
they produce. Values are unchanged, except when the value
representation is 'DS' or 'IS' (decimal/integer strings) they are
converted to float and int types.
'''
standard_meta = []
trans_meta_dicts = OrderedDict()
#Make dict mapping tags to tranlators
trans_map = {}
for translator in self.translators:
if translator.tag in trans_map:
raise ValueError('More than one translator given for tag: '
'%s' % translator.tag)
trans_map[translator.tag] = translator
for elem in dcm:
if isinstance(elem.value, str) and elem.value.strip() == '':
continue
#Get the name for non-translated elements
name = self._get_elem_key(elem)
#If it is a private creator element, handle any corresponding
#translators
if elem.name == "Private Creator":
moves = []
for curr_tag, translator in trans_map.iteritems():
if translator.priv_creator == elem.value:
new_elem = ((translator.tag.elem & 0xff) |
(elem.tag.elem * 16**2))
new_tag = dicom.tag.Tag(elem.tag.group, new_elem)
if new_tag != curr_tag:
if (new_tag in trans_map or
any(new_tag == move[0] for move in moves)
):
raise ValueError('More than one translator '
'for tag: %s' % new_tag)
moves.append((curr_tag, new_tag))
for curr_tag, new_tag in moves:
trans_map[new_tag] = trans_map[curr_tag]
del trans_map[curr_tag]
#If there is a translator for this element, use it
if elem.tag in trans_map:
try:
meta = trans_map[elem.tag].trans_func(elem)
                except Exception as e:
if self.warn_on_trans_except:
warnings.warn("Exception from translator %s: %s" %
(trans_map[elem.tag].name,
str(e)))
else:
raise
else:
if meta:
trans_meta_dicts[trans_map[elem.tag].name] = meta
#Otherwise see if we are supposed to ignore the element
elif any(rule(elem) for rule in self.ignore_rules):
continue
#Handle elements that are sequences with recursion
elif isinstance(elem.value, dicom.sequence.Sequence):
value = []
for val in elem.value:
value.append(self(val))
standard_meta.append((name, value, elem.tag))
#Otherwise just make sure the value is unpacked
else:
standard_meta.append((name,
self._get_elem_value(elem),
elem.tag
)
)
#Handle name collisions
name_counts = Counter(elem[0] for elem in standard_meta)
result = OrderedDict()
for name, value, tag in standard_meta:
if name_counts[name] > 1:
name = name + '_' + tag_to_str(tag)
result[name] = value
#Inject translator results
for trans_name, meta in trans_meta_dicts.iteritems():
for name, value in meta.iteritems():
name = '%s.%s' % (trans_name, name)
result[name] = value
return result
default_extractor = MetaExtractor()
'''The default `MetaExtractor`.'''
avg_line_length: 36.099034 | max_line_length: 81 | alphanum_fraction: 0.542389
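A hedged usage sketch for the extractor above (my own example, written against the legacy dicom package that the module imports; the file name is a placeholder):

# Read one DICOM file and dump the flat meta-data dict produced by the default
# extractor; translator output appears under keys like "CsaSeries.<name>".
import dicom
ds = dicom.read_file('example.dcm')  # placeholder path
meta = default_extractor(ds)
for key, value in meta.items():
    print('%s: %r' % (key, value))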
hexsha: bce57df2eeb7b5aba8e62b7afb1ea5b9999221de | size: 8,837 | ext: py | lang: Python
max_stars_repo: YaNgZhAnG-V5/covid-attribution @ b3cb2efe6de9db79ab68f0d7c3d1264d2cfab030 | path: IBA/test/test_tensorflow_v1.py | licenses: ["MIT"] | max_stars_count: 57 | stars events: 2020-03-09T20:39:04.000Z to 2022-03-28T12:54:20.000Z
max_issues_repo: YaNgZhAnG-V5/covid-attribution @ b3cb2efe6de9db79ab68f0d7c3d1264d2cfab030 | path: IBA/test/test_tensorflow_v1.py | licenses: ["MIT"] | max_issues_count: 25 | issues events: 2020-03-09T23:20:47.000Z to 2022-03-12T00:17:53.000Z
max_forks_repo: YaNgZhAnG-V5/covid-attribution @ b3cb2efe6de9db79ab68f0d7c3d1264d2cfab030 | path: IBA/test/test_tensorflow_v1.py | licenses: ["MIT"] | max_forks_count: 7 | forks events: 2020-03-10T13:13:51.000Z to 2022-01-04T09:31:30.000Z
content:
# Copyright (c) Karl Schulz, Leon Sixt
#
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import pytest
import numpy as np
try:
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
tf.disable_v2_behavior()
tf.disable_v2_tensorshape()
except ModuleNotFoundError:
import tensorflow as tf
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Conv1D, MaxPooling1D
from IBA.tensorflow_v1 import model_wo_softmax, IBACopy, IBACopyInnvestigate
from IBA.tensorflow_v1 import IBALayer, to_saliency_map
INPUT_SHAPE = (32, 32, 3)
def simple_model(with_iba, with_softmax=False):
model = Sequential()
model.add(Conv2D(8, (3, 3), padding='same', name='conv1',
input_shape=(32, 32, 3)))
model.add(Activation('relu', name='relu1'))
model.add(Conv2D(8, (3, 3), padding='same', name='conv2'))
model.add(Activation('relu', name='relu2'))
model.add(MaxPooling2D(pool_size=(2, 2), name='pool2')) # 8
model.add(Conv2D(8, (3, 3), padding='same', name='conv3'))
if with_iba:
# add iba to model definition
model.add(IBALayer(name='iba'))
model.add(Activation('relu', name='relu3'))
model.add(Conv2D(8, (3, 3), padding='same', name='conv4'))
model.add(Activation('relu', name='relu4'))
model.add(MaxPooling2D(pool_size=(2, 2), name='pool4'))
model.add(Flatten())
model.add(Dropout(0.5, name='dropout1'))
model.add(Dense(64, name='fc1'))
model.add(Activation('relu', name='relu5'))
model.add(Dropout(0.5, name='dropout2'))
if with_softmax:
softmax = 'softmax'
else:
softmax = None
model.add(Dense(10, name='fc2', activation=softmax))
return model
def test_iba_layer(tmpdir):
K.clear_session()
model = simple_model(with_iba=True, with_softmax=True)
# check if softmax is removed
model = model_wo_softmax(model)
x = np.random.uniform(size=(10, 32, 32, 3))
model.predict(x)
iba = model.get_layer(name='iba') # type: IBALayer
# fit a single sample
iba.fit({model.input: x})
# fit a generator
def generator():
for i in range(10):
x = np.random.uniform(size=(2, 32, 32, 3))
yield {model.input: x}
hyperparams = {'batch_size': 10, 'steps': 10, 'beta': 1, 'learning_rate': 1,
'min_std': 0.01, 'smooth_std': 1, 'normalize_beta': True}
iba.set_default(**hyperparams)
assert iba.get_default() == hyperparams
iba.fit_generator(generator(), n_samples=15)
# setup model loss
iba.set_classification_loss(model.output)
# analyze
x = np.random.uniform(size=(1, 32, 32, 3))
iba.analyze({model.input: x, iba.target: np.array([4])})
iba.collect_all()
iba.analyze({model.input: x, iba.target: np.array([4])})
report = iba.get_report()
assert 'alpha' in report['init']
assert 'alpha' in report['final']
assert 'loss' in report[0]
iba.collect('loss')
capacity = iba.analyze({model.input: x, iba.target: np.array([4])})
saliency_map = to_saliency_map(capacity, shape=(32, 32))
assert saliency_map.shape == (32, 32)
report = iba.get_report()
assert 'alpha' not in report['init']
# for the test, remove all information
capacity = iba.analyze({model.input: x, iba.target: np.array([4])}, beta=100000)
print("STATE", iba._get_session().run([iba._restrict_flow, iba._use_layer_input]))
logits = model.predict(x)
with iba.restrict_flow():
print("STATE 2", iba._get_session().run([iba._restrict_flow, iba._use_layer_input]))
# computes logits using only a subset of all information
logits_restricted = model.predict(x)
# logits should be considerable different
assert np.abs(logits - logits_restricted).mean() > 1e-3
def test_iba_layer_1d(tmpdir):
K.clear_session()
model = Sequential()
model.add(Conv1D(32, 3, padding='same', name='conv1',
input_shape=(32, 3)))
model.add(Activation('relu', name='relu1'))
model.add(Conv1D(32, 3, padding='same', name='conv2'))
model.add(Activation('relu', name='relu2'))
model.add(MaxPooling1D(pool_size=2, name='pool2')) # 8
model.add(Conv1D(32, 3, padding='same', name='conv3'))
# add iba to model definition
model.add(IBALayer(name='iba'))
model.add(Activation('relu', name='relu3'))
model.add(Conv1D(32, 3, padding='same', name='conv4'))
model.add(Activation('relu', name='relu4'))
model.add(MaxPooling1D(pool_size=2, name='pool4'))
model.add(Flatten())
model.add(Dropout(0.5, name='dropout1'))
model.add(Dense(256, name='fc1'))
model.add(Activation('relu', name='relu5'))
model.add(Dropout(0.5, name='dropout2'))
model.add(Dense(10, name='fc2'))
x = np.random.uniform(size=(10, 32, 3))
model.predict(x)
iba = model.get_layer(name='iba')
iba.fit({model.input: x})
iba.set_classification_loss(model.output)
x = np.random.uniform(size=(1, 32, 3))
iba.analyze({model.input: x, iba.target: np.array([4])})
def test_copy_graph_innvestigate(tmpdir):
INPUT_SHAPE = (32, 32, 3)
def random_input_generator():
while True:
yield np.random.uniform(size=(5,) + INPUT_SHAPE), np.random.normal(size=5) > 0
K.clear_session()
model = simple_model(with_iba=False)
feat_layer = model.get_layer(name='conv3')
x = np.random.normal(size=(1,) + INPUT_SHAPE)
logits_before = model.predict(x)
analyzer = IBACopyInnvestigate(model, feature_name=feat_layer.output.name)
logits_after_copy = model.predict(x)
assert (logits_before == logits_after_copy).all()
analyzer.fit_generator(random_input_generator(), steps_per_epoch=2)
analyzer.analyze(np.random.normal(size=(1, ) + INPUT_SHAPE))
fname = str(tmpdir.join('innvestigate.npz'))
analyzer.save_npz(fname)
load_graph = tf.Graph()
sess = tf.Session(graph=load_graph)
with sess.as_default(), load_graph.as_default():
analyzer_loaded = IBACopyInnvestigate.load_npz(fname)
x = np.random.normal(size=(1,) + INPUT_SHAPE)
logit_copied = analyzer.predict(x)
with sess.as_default(), load_graph.as_default():
logit_loaded = analyzer_loaded.predict(x)
logit_model = model.predict(x)
assert np.abs(logit_model - logit_copied).mean() < 1e-5
assert np.abs(logit_model - logit_loaded).mean() < 1e-5
def test_copy_graph_raw():
K.clear_session()
model = simple_model(with_iba=False)
feat_layer = model.get_layer(name='conv2')
shape = [int(d) for d in feat_layer.output.shape[1:]]
mean = np.random.uniform(size=shape)
std = np.random.uniform(size=shape)
analyzer = IBACopy(feat_layer.output, [model.output.name],
feature_mean=mean, feature_std=std,
feature_active=np.ones_like(std))
analyzer.assert_variables_equal()
x = np.random.normal(size=(1, ) + INPUT_SHAPE)
# check weights are copied, i.e. same prediction
logit_model = model.predict(x)
logit_copied = analyzer.predict({model.input: x})
assert np.abs(logit_copied - logit_model).mean() < 1e-5
analyzer._outputs[0]
analyzer.set_classification_loss()
analyzer.analyze({model.input: x}, {analyzer.target: np.array([30])})
kernel = K.get_session().run(feat_layer.kernel)
new_kernel = kernel + np.ones_like(kernel)
K.get_session().run(tf.assign(feat_layer.kernel, new_kernel))
with pytest.raises(AssertionError):
analyzer.assert_variables_equal()
analyzer.update_variables()
analyzer.assert_variables_equal()
| 33.858238
| 92
| 0.677606
|
2c601292e948f53e48dfb96626fd4386acf57ee4
| 5,138
|
py
|
Python
|
project/data_analysis/python/montecarlo/mc_analysis.py
|
simonsobs/ps_py
|
fd34612f6756f693df92e01912fd71b291f1774a
|
[
"BSD-3-Clause"
] | 12
|
2019-01-25T13:42:52.000Z
|
2022-03-30T22:07:33.000Z
|
project/data_analysis/python/montecarlo/mc_analysis.py
|
simonsobs/ps_py
|
fd34612f6756f693df92e01912fd71b291f1774a
|
[
"BSD-3-Clause"
] | 20
|
2018-11-22T06:51:44.000Z
|
2022-03-22T19:31:14.000Z
|
project/data_analysis/python/montecarlo/mc_analysis.py
|
sgiardie/PSpipe
|
046c1d68c06fd3e8b7f0d9c068d0ff999bf95a0b
|
[
"BSD-3-Clause"
] | 10
|
2019-04-19T09:32:11.000Z
|
2022-01-21T10:26:09.000Z
|
"""
This script analyzes the simulations generated by mc_get_spectra.py.
It estimates the mean and the numerical covariances of the spectra from the simulations.
"""
from pspy import pspy_utils, so_dict, so_spectra
import numpy as np
import sys
d = so_dict.so_dict()
d.read_from_file(sys.argv[1])
type = d["type"]
surveys = d["surveys"]
iStart = d["iStart"]
iStop = d["iStop"]
lmax = d["lmax"]
sim_alm_dtype = d["sim_alm_dtype"]
if sim_alm_dtype == "complex64":
spec_dtype = np.float32
elif sim_alm_dtype == "complex128":
spec_dtype = np.float64
spec_dir = "sim_spectra"
mcm_dir = "mcms"
mc_dir = "montecarlo"
cov_dir = "covariances"
pspy_utils.create_directory(mc_dir)
pspy_utils.create_directory(cov_dir)
spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"]
# we compute the full covariance matrix of the data
# for each sim we build three vectors: vec_list, which concatenates all power spectra,
# vec_list_restricted, which keeps only the TT, TE, EE spectra (plus ET where relevant),
# and vec_list_EB, which keeps the symmetrised (EB + BE)/2 spectrum
# the means and covariances of these vectors are computed and written to disk
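# A minimal sketch (illustrative only; the helper name _mc_mean_and_cov is not part
# of the original pipeline) of the estimator applied below to each list of vectors:
# cov = <v v^T> - <v><v>^T over the simulations.
def _mc_mean_and_cov(vectors):
    vecs = np.array(vectors)
    mean = vecs.mean(axis=0)
    cov = np.einsum("si,sj->ij", vecs, vecs) / len(vecs) - np.outer(mean, mean)
    return mean, cov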
for kind in ["cross", "noise", "auto"]:
vec_list = []
vec_list_restricted = []
vec_list_EB = []
for iii in range(iStart, iStop):
vec = []
vec_restricted = []
vec_EB = []
for spec in spectra:
for id_sv1, sv1 in enumerate(surveys):
arrays_1 = d["arrays_%s" % sv1]
for id_ar1, ar1 in enumerate(arrays_1):
for id_sv2, sv2 in enumerate(surveys):
arrays_2 = d["arrays_%s" % sv2]
for id_ar2, ar2 in enumerate(arrays_2):
if (id_sv1 == id_sv2) & (id_ar1 > id_ar2) : continue
if (id_sv1 > id_sv2) : continue
if (sv1 != sv2) & (kind == "noise"): continue
if (sv1 != sv2) & (kind == "auto"): continue
spec_name = "%s_%s_%sx%s_%s_%s_%05d" % (type, sv1, ar1, sv2, ar2, kind, iii)
lb, Db = so_spectra.read_ps(spec_dir + "/%s.dat" % spec_name, spectra=spectra)
n_bins = len(lb)
vec = np.append(vec, Db[spec])
if (sv1 == sv2) & (ar1 == ar2):
if spec == "TT" or spec == "EE" or spec == "TE" :
vec_restricted = np.append(vec_restricted, Db[spec])
else:
if spec == "TT" or spec == "EE" or spec == "TE" or spec == "ET":
vec_restricted = np.append(vec_restricted, Db[spec])
if spec == "EB":
vec_EB = np.append(vec_EB, (Db["EB"] + Db["BE"])/2 )
vec_list += [vec.astype(spec_dtype)]
vec_list_restricted += [vec_restricted.astype(spec_dtype)]
vec_list_EB += [vec_EB.astype(spec_dtype)]
mean_vec = np.mean(vec_list, axis=0)
mean_vec_restricted = np.mean(vec_list_restricted, axis=0)
mean_vec_EB = np.mean(vec_list_EB, axis=0)
cov = 0
cov_restricted = 0
cov_EB = 0
    for iii in range(iStop - iStart):
        cov += np.outer(vec_list[iii], vec_list[iii])
        cov_restricted += np.outer(vec_list_restricted[iii], vec_list_restricted[iii])
        cov_EB += np.outer(vec_list_EB[iii], vec_list_EB[iii])
cov = cov / (iStop-iStart) - np.outer(mean_vec, mean_vec)
cov_restricted = cov_restricted / (iStop-iStart) - np.outer(mean_vec_restricted, mean_vec_restricted)
cov_EB = cov_EB / (iStop-iStart) - np.outer(mean_vec_EB, mean_vec_EB)
np.save("%s/cov_all_%s.npy" % (mc_dir, kind), cov)
np.save("%s/cov_restricted_all_%s.npy" % (mc_dir, kind), cov_restricted)
np.save("%s/cov_EB_all_%s.npy" % (mc_dir, kind), cov_EB)
id_spec = 0
for spec in spectra:
for id_sv1, sv1 in enumerate(surveys):
arrays_1 = d["arrays_%s" % sv1]
for id_ar1, ar1 in enumerate(arrays_1):
for id_sv2, sv2 in enumerate(surveys):
arrays_2 = d["arrays_%s" % sv2]
for id_ar2, ar2 in enumerate(arrays_2):
if (id_sv1 == id_sv2) & (id_ar1 > id_ar2) : continue
if (id_sv1 > id_sv2) : continue
if (sv1 != sv2) & (kind == "noise"): continue
if (sv1 != sv2) & (kind == "auto"): continue
mean = mean_vec[id_spec * n_bins:(id_spec + 1) * n_bins]
std = np.sqrt(cov[id_spec * n_bins:(id_spec + 1) * n_bins, id_spec * n_bins:(id_spec + 1) * n_bins].diagonal())
np.savetxt("%s/spectra_%s_%s_%sx%s_%s_%s.dat" % (mc_dir, spec, sv1, ar1, sv2, ar2, kind), np.array([lb,mean,std]).T)
id_spec += 1
| 38.924242
| 140
| 0.526664
|
aaf919d53aab4bd4c9f8913967a2936a9a9def34
| 259
|
py
|
Python
|
main/forms.py
|
Shanmuga-raj/TXT2HTML
|
9d63afed3e9e0f1f198e6cc75dc3252ecb29e1ba
|
[
"MIT"
] | null | null | null |
main/forms.py
|
Shanmuga-raj/TXT2HTML
|
9d63afed3e9e0f1f198e6cc75dc3252ecb29e1ba
|
[
"MIT"
] | null | null | null |
main/forms.py
|
Shanmuga-raj/TXT2HTML
|
9d63afed3e9e0f1f198e6cc75dc3252ecb29e1ba
|
[
"MIT"
] | null | null | null |
from django import forms
from .models import Post
from ckeditor.widgets import CKEditorWidget
class PostForm(forms.ModelForm):
body = forms.CharField(widget=CKEditorWidget(),label="Text Editor")
class Meta:
model=Post
fields=('body',)
| 28.777778
| 71
| 0.725869
|
331e46f3591f3fdab61c3bd28053553d1f8598d8
| 4,759
|
py
|
Python
|
Python/iris_plain_mlp.py
|
kingmbc/iris-python
|
824a3dd9cf9fd9685ece05298423a4b8e78dccb7
|
[
"MIT"
] | 39
|
2017-04-11T19:31:53.000Z
|
2021-12-31T18:52:16.000Z
|
Python/iris_plain_mlp.py
|
kingmbc/iris-python
|
824a3dd9cf9fd9685ece05298423a4b8e78dccb7
|
[
"MIT"
] | null | null | null |
Python/iris_plain_mlp.py
|
kingmbc/iris-python
|
824a3dd9cf9fd9685ece05298423a4b8e78dccb7
|
[
"MIT"
] | 57
|
2017-03-09T10:27:54.000Z
|
2022-01-06T06:02:05.000Z
|
from __future__ import print_function
from builtins import range
"""
SECTION 1 : Load and setup data for training
"""
import csv
import random
import math
random.seed(113)
# Load dataset
with open('../Datasets/iris/iris.csv') as csvfile:
csvreader = csv.reader(csvfile)
next(csvreader, None) # skip header
dataset = list(csvreader)
# Change string value to numeric
for row in dataset:
row[4] = ["Iris-setosa", "Iris-versicolor", "Iris-virginica"].index(row[4])
    row[:4] = [float(row[j]) for j in range(4)]  # only the four feature columns are floats
# Split x and y (feature and target)
random.shuffle(dataset)
datatrain = dataset[:int(len(dataset) * 0.8)]
datatest = dataset[int(len(dataset) * 0.8):]
train_X = [data[:4] for data in datatrain]
train_y = [data[4] for data in datatrain]
test_X = [data[:4] for data in datatest]
test_y = [data[4] for data in datatest]
"""
SECTION 2 : Build and Train Model
Multilayer perceptron model, with one hidden layer.
input layer : 4 neurons, represents the features of Iris
hidden layer : 4 neurons, activation using sigmoid
output layer : 3 neurons, represents the class of Iris
optimizer = stochastic gradient descent (per-sample updates)
loss function = mean squared error
learning rate = 0.005
epoch = 400
best result = 96.67%
"""
def matrix_mul_bias(A, B, bias): # Matrix multiplication (for Testing)
C = [[0 for i in range(len(B[0]))] for i in range(len(A))]
for i in range(len(A)):
for j in range(len(B[0])):
for k in range(len(B)):
C[i][j] += A[i][k] * B[k][j]
C[i][j] += bias[j]
return C
def vec_mat_bias(A, B, bias): # Vector (A) x matrix (B) multiplication
C = [0 for i in range(len(B[0]))]
for j in range(len(B[0])):
for k in range(len(B)):
C[j] += A[k] * B[k][j]
C[j] += bias[j]
return C
def mat_vec(A, B): # Matrix (A) x vector (B) multiplication (for backprop)
C = [0 for i in range(len(A))]
for i in range(len(A)):
for j in range(len(B)):
C[i] += A[i][j] * B[j]
return C
def sigmoid(A, deriv=False):
if deriv: # derivation of sigmoid (for backprop)
for i in range(len(A)):
A[i] = A[i] * (1 - A[i])
else:
for i in range(len(A)):
A[i] = 1 / (1 + math.exp(-A[i]))
return A
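# Note: sigmoid() modifies its argument in place and also returns it; when called
# with deriv=True the caller is expected to pass values that are already sigmoid
# outputs, since the derivative is computed as a * (1 - a).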
# Define parameter
alfa = 0.005
epoch = 400
neuron = [4, 4, 3] # number of neurons in each layer
# Initialize weights and biases to 0
weight = [[0 for j in range(neuron[1])] for i in range(neuron[0])]
weight_2 = [[0 for j in range(neuron[2])] for i in range(neuron[1])]
bias = [0 for i in range(neuron[1])]
bias_2 = [0 for i in range(neuron[2])]
# Initialize weights with random values between -1.0 and 1.0
for i in range(neuron[0]):
for j in range(neuron[1]):
weight[i][j] = 2 * random.random() - 1
for i in range(neuron[1]):
for j in range(neuron[2]):
weight_2[i][j] = 2 * random.random() - 1
for e in range(epoch):
cost_total = 0
    for idx, x in enumerate(train_X): # update for each sample (stochastic gradient descent)
# Forward propagation
h_1 = vec_mat_bias(x, weight, bias)
X_1 = sigmoid(h_1)
h_2 = vec_mat_bias(X_1, weight_2, bias_2)
X_2 = sigmoid(h_2)
# Convert to One-hot target
target = [0, 0, 0]
target[int(train_y[idx])] = 1
        # Cost function: mean squared error over the output neurons
        error = 0
        for i in range(neuron[2]):
            error += (target[i] - X_2[i]) ** 2
        cost_total += error * 1 / neuron[2]
# Backward propagation
# Update weight_2 and bias_2 (layer 2)
delta_2 = []
for j in range(neuron[2]):
delta_2.append(-1 * 2. / neuron[2] * (target[j]-X_2[j]) * X_2[j] * (1-X_2[j]))
for i in range(neuron[1]):
for j in range(neuron[2]):
weight_2[i][j] -= alfa * (delta_2[j] * X_1[i])
bias_2[j] -= alfa * delta_2[j]
# Update weight and bias (layer 1)
delta_1 = mat_vec(weight_2, delta_2)
for j in range(neuron[1]):
delta_1[j] = delta_1[j] * (X_1[j] * (1-X_1[j]))
for i in range(neuron[0]):
for j in range(neuron[1]):
weight[i][j] -= alfa * (delta_1[j] * x[i])
bias[j] -= alfa * delta_1[j]
cost_total /= len(train_X)
if(e % 100 == 0):
print(cost_total)
"""
SECTION 3 : Testing
"""
res = matrix_mul_bias(test_X, weight, bias)
res = [sigmoid(r) for r in res]  # apply the hidden-layer activation, as in training
res_2 = matrix_mul_bias(res, weight_2, bias_2)
# Get prediction
preds = []
for r in res_2:
preds.append(max(enumerate(r), key=lambda x:x[1])[0])
# Print prediction
print(preds)
# Calculate accuracy
acc = 0.0
for i in range(len(preds)):
if preds[i] == int(test_y[i]):
acc += 1
print(acc / len(preds) * 100, "%")
| 28.668675
| 90
| 0.578903
|
4dde16c1f3f9096478e4b1b49ae1a60cc7a678b7
| 8,656
|
py
|
Python
|
SRC/lexer.py
|
Tim8Trudeau/T3001
|
bdac8936e83906f1da1f9d60037f68755e770786
|
[
"MIT"
] | null | null | null |
SRC/lexer.py
|
Tim8Trudeau/T3001
|
bdac8936e83906f1da1f9d60037f68755e770786
|
[
"MIT"
] | null | null | null |
SRC/lexer.py
|
Tim8Trudeau/T3001
|
bdac8936e83906f1da1f9d60037f68755e770786
|
[
"MIT"
] | null | null | null |
""" Lexer module for CLIQ test robot interpreter"""
###############################################################################
# #
# LEXER #
# #
###############################################################################
from token_types import *
class Token(object):
"""Tokens contain a type and a value"""
def __init__(self, type_, value):
self.type = type_
self.value = value
def __eq__(self, other):
if self.type == other.type and self.value == other.value:
return True
else:
return False
def __str__(self):
"""String representation of the token class instance. Used to
convert a token to a string.
Examples:
Token(INTEGER_CONST, 3)
Token(PLUS, '+')
Token(MUL, '*')
"""
return 'Token({type}, {value})'.format(
type=self.type,
value=repr(self.value)
)
def __repr__(self):
"""Same as __str__
"""
return self.__str__()
VAR_TYPES = [INTEGER, BOOL, REAL, INTEGER_CONST, BOOL_CONST, REAL_CONST]
"""Dictionary of reserved words
"""
RESERVED_KEYWORDS: dict[str, Token] = {
'BEGIN': Token('BEGIN', 'BEGIN'),
'BOOL': Token('BOOL', 'BOOL'),
'DIV': Token('INTEGER_DIV', 'DIV'),
'ELSE': Token('ELSE', 'ELSE'),
'END': Token('END', 'END'),
'ENDIF': Token('ENDIF', 'ENDIF'),
'FALSE': Token('BOOL_CONST', 'FALSE'),
'HOME': Token('HOME', 'HOME'),
'IF': Token('IF', 'IF'),
'INTEGER': Token('INTEGER', 'INTEGER'),
'IO': Token('IO', 'PIN'),
'LOOP': Token('LOOP', 'LOOP'),
'MOVETO': Token('MOVETO', 'MOVETO'),
'NOT': Token('NOT', 'NOT'),
'PROGRAM': Token('PROGRAM', 'PROGRAM'),
'REAL': Token('REAL', 'REAL'),
'ROTATE': Token('ROTATE', 'ROTATE'),
'STOP' : Token('STOP', 'STOP'),
'THEN': Token('THEN', 'THEN'),
'TRUE': Token('BOOL_CONST', 'TRUE'),
'TURN': Token('TURN', 'TURN'),
'UNTIL': Token('UNTIL', 'UNTIL'),
'VAR': Token('VAR', 'VAR'),
'WAIT': Token('WAIT', 'WAIT'),
'WAYPOINT': Token('WAYPOINT', 'WAYPOINT'),
}
class Lexer(object):
""" This CLASS is responsible for breaking a program text
apart into token objects.
Tokenize string, e.g. '(4 + 2) * 3 - 6 / 2'.
Each character is represented by a token
"""
def __init__(self, text):
"""self.pos: an index into self.text
self.line: the line number count used to report error location.
self.line_pos: index within active line used to report position of error.
"""
self.text = text
# self.pos is an index into self.text.
#line_count and line_pos are used to report the location of syntax errors.
self.line_count = 0
self.line_pos = 0
self.pos = 0
self.current_char = self.text[self.pos]
def error(self):
""" Reports the location of an invalid character in the input text
"""
raise ValueError(f"Invalid character '{self.current_char}' in line {self.line_count} at position {self.line_pos}")
def advance(self):
"""Advance the `pos` pointer and set the `current_char` variable.
Also keeps track of where we are in the line
"""
self.pos += 1
self.line_pos += 1
if self.pos > len(self.text) - 1:
self.current_char = None # Indicates end of input
else:
self.current_char = self.text[self.pos]
def peek(self):
"""Look at next character in text without consuming it.
"""
peek_pos = self.pos + 1
if peek_pos > len(self.text) - 1:
return None
else:
return self.text[peek_pos]
def skip_whitespace(self):
"""White space is removed by skipping.
"""
while self.current_char is not None and self.current_char.isspace():
if self.current_char == '\n':
self.line_count += 1
self.line_pos = 0
self.advance()
def skip_comment(self):
"""Scans text for closing brace of a comment. Raises exception on EOL.
"""
while self.current_char != '}':
self.advance()
if self.current_char == '\n':
raise Exception(f'Missing closing brace at line {self.line_count} position {self.line_pos}')
self.advance() # the closing curly brace
def number(self):
"""Return a (multi-digit) integer or float consumed from the input.
"""
result = ''
while self.current_char is not None and self.current_char.isdigit():
result += self.current_char
self.advance()
if self.current_char == '.':
result += self.current_char
self.advance()
while self.current_char is not None and self.current_char.isdigit():
result += self.current_char
self.advance()
token = Token('REAL_CONST', float(result))
else:
token = Token('INTEGER_CONST', int(result))
return token
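    # For example (illustrative): the input "3.14" yields Token(REAL_CONST, 3.14),
    # while "42" yields Token(INTEGER_CONST, 42).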
def _id(self):
"""Handle identifiers and reserved keywords
"""
result = ''
while self.current_char is not None and self.current_char.isidentifier():
result += self.current_char
self.advance()
token = RESERVED_KEYWORDS.get(result, Token(ID, result))
return token
def get_next_token(self):
"""Identifies operators and return a token for the operator.
Removes whitespace from input and skips over {comments}
"""
while self.current_char is not None:
if self.current_char.isspace():
self.skip_whitespace()
continue
if self.current_char == '{':
self.advance()
self.skip_comment()
continue
if self.current_char.isidentifier():
return self._id()
if self.current_char.isdigit():
return self.number()
if self.current_char == ':' and self.peek() == '=':
self.advance()
self.advance()
return Token(ASSIGN, ':=')
if self.current_char == '=' and self.peek() == '=':
self.advance()
self.advance()
return Token(EQUAL, '==')
if self.current_char == '!' and self.peek() == '=':
self.advance()
self.advance()
return Token(NEQUAL, '!=')
if self.current_char == '<' and self.peek() == '=':
self.advance()
self.advance()
return Token(LTE, '<=')
if self.current_char == '>' and self.peek() == '=':
self.advance()
self.advance()
return Token(GTE, '>=')
if self.current_char == '<' and self.peek() != '=':
self.advance()
return Token(LT, '<')
if self.current_char == '>' and self.peek() != '=':
self.advance()
return Token(GT, '>')
if self.current_char == ';':
self.advance()
return Token(SEMI, ';')
if self.current_char == ':':
self.advance()
return Token(COLON, ':')
if self.current_char == ',':
self.advance()
return Token(COMMA, ',')
if self.current_char == '+':
self.advance()
return Token(PLUS, '+')
if self.current_char == '-':
self.advance()
return Token(MINUS, '-')
if self.current_char == '*':
self.advance()
return Token(MUL, '*')
if self.current_char == '/':
self.advance()
return Token(FLOAT_DIV, '/')
if self.current_char == '(':
self.advance()
return Token(LPAREN, '(')
if self.current_char == ')':
self.advance()
return Token(RPAREN, ')')
if self.current_char == '.':
self.advance()
return Token(DOT, '.')
else:
self.error()
return Token(EOF, None)
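# Minimal usage sketch (hypothetical driver, not part of the original module):
# tokens are pulled until EOF, mirroring the example in the Lexer docstring.
if __name__ == "__main__":
    _lexer = Lexer("(4 + 2) * 3 - 6 / 2;")
    _token = _lexer.get_next_token()
    while _token.type != EOF:
        print(_token)
        _token = _lexer.get_next_token()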
| 32.912548
| 122
| 0.490065
|
3d83a88ae4d4a8a25087b43e2c679023f098ce88
| 1,100
|
py
|
Python
|
ComputerManagement/serializacion.py
|
hugobarzano/IV_hugobarzano
|
4638750e1e7c16608e17e34744f38176144e1a15
|
[
"Artistic-2.0"
] | null | null | null |
ComputerManagement/serializacion.py
|
hugobarzano/IV_hugobarzano
|
4638750e1e7c16608e17e34744f38176144e1a15
|
[
"Artistic-2.0"
] | 14
|
2015-10-13T09:07:12.000Z
|
2015-12-11T15:49:08.000Z
|
ComputerManagement/serializacion.py
|
hugobarzano/IV_hugobarzano
|
4638750e1e7c16608e17e34744f38176144e1a15
|
[
"Artistic-2.0"
] | 1
|
2016-01-03T19:32:09.000Z
|
2016-01-03T19:32:09.000Z
|
from rest_framework import serializers
from ComputerManagement.models import Dispositivo
class DispositivoSerializado(serializers.Serializer):
id_dispositivo = serializers.IntegerField()
nombre_dispositivo = serializers.CharField(max_length=200)
fabricante = serializers.CharField(max_length=200)
caracteristicas = serializers.CharField(max_length=400)
def create(self, validated_data):
"""
Crea y vevuelve una nueva instancia de un Dispositivo
"""
return Dispositivo.objects.create(**validated_data)
def update(self, instance, validated_data):
"""
Actualiza y devuelve una instancia de Dispositivo, teniendo en cuenta los datos validados
"""
instance.id_dispositivo = validated_data.get('id_dispositivo', instance.id_dispositivo)
instance.nombre_dispositivo = validated_data.get('nombre_dispositivo', instance.nombre_dispositivo)
instance.fabricante = validated_data.get('fabricante', instance.fabricante)
instance.caracteristicas = validated_data.get('caracteristicas', instance.caracteristicas)
instance.save()
return instance
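# Illustrative usage (not part of the original module; `payload` is a hypothetical dict):
#   serializer = DispositivoSerializado(data=payload)
#   if serializer.is_valid():
#       dispositivo = serializer.save()  # dispatches to create() or update() above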
| 42.307692
| 101
| 0.784545
|
d4eaa39b9c8f57fac1085ae4adb14038bb544516
| 1,659
|
py
|
Python
|
seqdesign/scripts/library_selection_birch.py
|
eembees/SeqDesign
|
436b8ae55824957b444c312c2799f2bcbce4a103
|
[
"MIT"
] | 38
|
2020-11-26T15:54:05.000Z
|
2022-03-23T18:46:27.000Z
|
seqdesign/scripts/library_selection_birch.py
|
debbiemarkslab/SeqDesign
|
436b8ae55824957b444c312c2799f2bcbce4a103
|
[
"MIT"
] | 2
|
2021-04-02T20:58:44.000Z
|
2021-08-24T03:19:06.000Z
|
seqdesign/scripts/library_selection_birch.py
|
eembees/SeqDesign
|
436b8ae55824957b444c312c2799f2bcbce4a103
|
[
"MIT"
] | 9
|
2021-04-27T19:28:30.000Z
|
2022-03-23T18:46:31.000Z
|
#!/usr/bin/env python3
from seqdesign import birch
import argparse
import os
def main():
    parser = argparse.ArgumentParser(description="Run BIRCH clustering on a feature matrix csv.")
parser.add_argument("--input", type=str, required=True,
help="Input feature matrix csv.")
parser.add_argument("--output-prefix", type=str, default=None,
help="Output cluster prefix.")
parser.add_argument("--threshold", type=float, default=0.575, help="Birch threshold.")
parser.add_argument("--branching-factor", type=int, default=1000, help="Birch branching factor.")
parser.add_argument("--batch-size", type=int, default=1000, help="Birch batch size.")
parser.add_argument("--r-seed", type=int, default=42, help="Birch branching factor.")
args = parser.parse_args()
data_helper = birch.NanobodyDataBirchCluster(
input_filename=args.input,
minibatch_size=args.batch_size,
r_seed=args.r_seed)
birch_inst = birch.BirchIter(threshold=args.threshold, branching_factor=args.branching_factor)
birch_inst.fit(data_helper)
if args.output_prefix is None:
os.makedirs('clusters', exist_ok=True)
args.output_prefix = args.input.rsplit('/', 1)[-1].rsplit('.', 1)[0]
output_name = (
f"clusters/{args.output_prefix}_birch_thresh-{args.threshold}_branch-{args.branching_factor}_"
f"num_clusters-{birch_inst.num_clusters}.csv"
)
print("\nPREDICTING LABELS\n", flush=True)
birch_inst.predict(data_helper, minibatch_size=args.batch_size, output_name=output_name)
if __name__ == "__main__":
main()
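# Example invocation (file names are illustrative):
#   python library_selection_birch.py --input features.csv --threshold 0.575 --branching-factor 1000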
| 40.463415
| 113
| 0.694997
|
a0ed793813bca0beb98d07819c1edb767adcb45d
| 1,019
|
py
|
Python
|
hdd_pred/scripts/train_test.py
|
Reshirin/hdd_pred
|
f0065771fb2ca7ae1ff8492432a0c8bac21a4a19
|
[
"MIT"
] | null | null | null |
hdd_pred/scripts/train_test.py
|
Reshirin/hdd_pred
|
f0065771fb2ca7ae1ff8492432a0c8bac21a4a19
|
[
"MIT"
] | null | null | null |
hdd_pred/scripts/train_test.py
|
Reshirin/hdd_pred
|
f0065771fb2ca7ae1ff8492432a0c8bac21a4a19
|
[
"MIT"
] | null | null | null |
import pytest
import pathlib
from click.testing import CliRunner
from hdd_pred.scripts.train import main, _main
_HERE = pathlib.Path(__file__).resolve().parent
@pytest.mark.parametrize("config_file", [("configs/config.yml")])
def test_click_main(config_file):
"""Tests both _main() click runner and its call to main()"""
runner = CliRunner()
try:
result = runner.invoke(_main, [config_file])
assert result.exit_code == 0
except AssertionError:
import traceback
traceback.print_exception(*result.exc_info)
raise
def test_main():
"""Tests the function main() independently from main() click command
This pattern aids in writing tests, as the function with the click decorators can be
separately tested from the script functionality itself. In addition, the main()
function can now be imported and used in other python modules
"""
config_path = _HERE.parents[1].joinpath("configs", "config.yml")
assert main(config_path) is None
| 29.970588
| 88
| 0.713445
|
8e220a7b28e3bedcea371a951493e1bb4623f684
| 6,779
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/string/haemophilushaemolyticusm21639.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/haemophilushaemolyticusm21639.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/haemophilushaemolyticusm21639.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph Haemophilus haemolyticus M21639.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 22:10:17.262524
The undirected graph Haemophilus haemolyticus M21639 has 2204 nodes and
134950 weighted edges, of which none are self-loops. The graph is dense
as it has a density of 0.05559 and has 12 connected components, where the
component with most nodes has 2165 nodes and the component with the least
nodes has 2 nodes. The graph median node degree is 100, the mean node degree
is 122.46, and the node degree mode is 5. The top 5 most central nodes
are 1028806.GGE_0147 (degree 799), 1028806.GGE_0050 (degree 735), 1028806.GGE_2154
(degree 660), 1028806.GGE_1472 (degree 657) and 1028806.GGE_1471 (degree
620).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import HaemophilusHaemolyticusM21639
# Then load the graph
graph = HaemophilusHaemolyticusM21639()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def HaemophilusHaemolyticusM21639(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Haemophilus haemolyticus M21639 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of Haemophilus haemolyticus M21639 graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 22:10:17.262524
The undirected graph Haemophilus haemolyticus M21639 has 2204 nodes and
134950 weighted edges, of which none are self-loops. The graph is dense
as it has a density of 0.05559 and has 12 connected components, where the
component with most nodes has 2165 nodes and the component with the least
nodes has 2 nodes. The graph median node degree is 100, the mean node degree
is 122.46, and the node degree mode is 5. The top 5 most central nodes
are 1028806.GGE_0147 (degree 799), 1028806.GGE_0050 (degree 735), 1028806.GGE_2154
(degree 660), 1028806.GGE_1472 (degree 657) and 1028806.GGE_1471 (degree
620).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import HaemophilusHaemolyticusM21639
# Then load the graph
graph = HaemophilusHaemolyticusM21639()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="HaemophilusHaemolyticusM21639",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 35.492147
| 223
| 0.706889
|
5e83d8cc2bb610155c2390094487e06f9213eb67
| 6,838
|
py
|
Python
|
tests/python/pygmod/test_lua.py
|
javabird25/gpython
|
f58df9c0c626ccd3f1dbd8a2d762ae62b9f22caf
|
[
"MIT"
] | 26
|
2018-11-27T16:40:30.000Z
|
2022-01-27T12:37:34.000Z
|
tests/python/pygmod/test_lua.py
|
javabird25/gpython
|
f58df9c0c626ccd3f1dbd8a2d762ae62b9f22caf
|
[
"MIT"
] | 9
|
2019-05-05T22:00:00.000Z
|
2021-01-18T13:43:21.000Z
|
tests/python/pygmod/test_lua.py
|
javabird25/gpython
|
f58df9c0c626ccd3f1dbd8a2d762ae62b9f22caf
|
[
"MIT"
] | 4
|
2019-05-05T18:10:35.000Z
|
2022-01-18T14:42:31.000Z
|
import logging
from collections import defaultdict
import pytest
import _luastack
from pygmod import lua
logging.basicConfig(level=logging.DEBUG)
@pytest.fixture(autouse=True)
def luastack():
yield
_luastack.stack = [_luastack.StackPad()]
def test_auto_pop_from_empty_stack():
@lua.auto_pop
def func():
_luastack.push_nil()
_luastack.push_nil()
func()
assert _luastack.top() == 0
def test_auto_pop_from_non_empty_stack():
_luastack.push_nil()
_luastack.push_nil()
@lua.auto_pop
def func():
_luastack.push_nil()
_luastack.push_nil()
func()
assert _luastack.top() == 2
def test_exec_no_error(mocker):
mocker.patch("pygmod.lua.G")
lua.G.RunString.return_value = None
lua.exec_lua("") # Shouldn't raise LuaError
def test_exec_error(mocker):
mocker.patch("pygmod.lua.G")
lua.G.RunString.return_value = "you should not see a LuaError"
with pytest.raises(lua.LuaError):
lua.exec_lua("")
def test_eval(mocker):
mocker.patch("pygmod.lua.exec_lua")
mocker.patch.object(lua.G, "_get")
lua.G._get.return_value = 1
result = lua.eval_lua("smth")
lua.exec_lua.assert_called_with("_pygmod_eval_result = smth")
assert result == 1
@pytest.fixture
def base_get_namespace_instance():
class A(lua.BaseGetNamespace):
def __init__(self):
self.dict = {'a': 1, 'b': 2, 'c': 3}
def _get(self, item):
return self.dict[item]
return A()
def test_base_get_namespace_getitem(base_get_namespace_instance):
assert base_get_namespace_instance["b"] == 2
def test_base_get_namespace_getitem_missing(base_get_namespace_instance):
with pytest.raises(KeyError):
base_get_namespace_instance["no"]
def test_base_get_namespace_getattr(base_get_namespace_instance):
assert base_get_namespace_instance.b == 2
def test_base_get_namespace_getattr_missing(base_get_namespace_instance):
with pytest.raises(KeyError):
base_get_namespace_instance["no"]
def test_base_get_namespace_getattr_underscore(base_get_namespace_instance):
with pytest.raises(AttributeError):
base_get_namespace_instance._no
@pytest.fixture
def lua_namespace_instance():
class A(lua.LuaNamespace):
def __init__(self):
self._table = defaultdict(lambda: None, {"a": "1", 2: 3})
self._to_delete = 1
def _push_namespace_object(self):
_luastack.stack.append(self._table)
return A()
def test_lua_namespace_getattr(lua_namespace_instance):
assert lua_namespace_instance.a == lua_namespace_instance["a"] == "1"
def test_lua_namespace_getitem_int_key(lua_namespace_instance):
assert lua_namespace_instance[2] == 3
def test_lua_namespace_setattr(lua_namespace_instance):
lua_namespace_instance.abc = 1
assert lua_namespace_instance.abc == lua_namespace_instance["abc"] == 1
def test_lua_namespace_setattr_underscore(lua_namespace_instance):
lua_namespace_instance._abc = 1
assert lua_namespace_instance["_abc"] is None
assert getattr(lua_namespace_instance, "_abc") == 1
def test_lua_namespace_setattr_int_key(lua_namespace_instance):
lua_namespace_instance[4] = 1
assert lua_namespace_instance[4] == 1
def test_lua_namespace_delattr(lua_namespace_instance):
del lua_namespace_instance.a
del lua_namespace_instance.b
assert lua_namespace_instance.a is lua_namespace_instance["a"] is None
assert lua_namespace_instance.b is lua_namespace_instance["b"] is None
def test_lua_namespace_delitem(lua_namespace_instance):
del lua_namespace_instance["a"]
del lua_namespace_instance["b"]
del lua_namespace_instance[2]
assert lua_namespace_instance.a is lua_namespace_instance["a"] is None
assert lua_namespace_instance.b is lua_namespace_instance["b"] is None
assert lua_namespace_instance[2] is None
def test_lua_namespace_delattr_underscore(lua_namespace_instance):
del lua_namespace_instance._to_delete
assert not hasattr(lua_namespace_instance, "_to_delete")
def test_callable_constructor(mocker):
mock = mocker.Mock()
_luastack.stack.append(mock)
func = lua._lua_func_from_stack(-1)
assert mock == _luastack.references[func._ref]
def test_callable_returns_none(mocker):
def call(*_):
_luastack.pop(3 + 1) # Arguments 1, 2, 3 + the function itself
mocker.patch("_luastack.call", side_effect=call)
mock = mocker.Mock()
_luastack.stack.append(mock)
func = lua._lua_func_from_stack(-1)
returned_value = func(1, 2, 3)
assert returned_value is None
def test_callable_returns_one_val(mocker):
def call(*_):
_luastack.pop(3 + 1) # Arguments 1, 2, 3 + the function itself
_luastack.stack.append("returned value")
mocker.patch("_luastack.call", side_effect=call)
mock = mocker.Mock()
_luastack.stack.append(mock)
func = lua._lua_func_from_stack(-1)
returned_value = func(1, 2, 3)
assert returned_value == "returned value"
def test_callable_returns_many_vals(mocker):
def call(*_):
_luastack.pop(3 + 1) # Arguments 1, 2, 3 + the function itself
_luastack.stack.append("returned value 1")
_luastack.stack.append("returned value 2")
mocker.patch("_luastack.call", side_effect=call)
mock = mocker.Mock()
_luastack.stack.append(mock)
func = lua._lua_func_from_stack(-1)
returned_value = func(1, 2, 3)
assert returned_value == ("returned value 1", "returned value 2")
def test_method_call_namespace(mocker):
mock = mocker.Mock()
table = {"method": mock}
class A(lua.MethodCallNamespace):
# noinspection PyMissingConstructor
def __init__(self):
self._tbl = table
a = A()
a.method(1, 2, 3)
mock.assert_called_with(table, 1, 2, 3)
a["method"]()
mock.assert_called_with(table)
# TODO: Table tests: iterator classes
def test_table_from_dict():
d = {"a": 1, "_b": 2, "c": {"d": 0, "e": [1, 2, 3]}}
tbl = lua.Table(d)
assert _luastack.references[tbl._ref] == d
def test_table_from_list(mocker):
mocker.patch("pygmod.lua.G")
i = [1, 2, [3, 4, {"a": "?"}]]
lua.Table(i)
table_insert_calls = lua.G.table.insert.call_args_list
table_insert_call_args = [call[0] for call in table_insert_calls]
table_insert_call_second_args = [args[1] for args in table_insert_call_args]
assert table_insert_call_second_args == i
def test_table_unknown_constructor_arg():
with pytest.raises(ValueError):
lua.Table(...)
def test_table_call(mocker):
mocker.patch("pygmod.lua.G")
mocker.patch.object(lua.Table, "__call__")
lua.G.getmetatable.return_value = {"__call": 1}
t = lua.Table()
t(1, 2, 3)
t.__call__.assert_called_with(1, 2, 3)
| 27.134921
| 80
| 0.708541
|
565762297a6987f94b0c5c408a4fac3b5f071dea
| 300
|
py
|
Python
|
erpnext_connector/erpnext_connector/doctype/erpnext_price_list/erpnext_price_list.py
|
Athenolabs/erpnext_connector
|
ce52c298b5a3b370f1def8d4df00cda5541aeabd
|
[
"MIT"
] | 3
|
2017-09-21T13:50:26.000Z
|
2020-12-31T11:48:36.000Z
|
erpnext_connector/erpnext_connector/doctype/erpnext_price_list/erpnext_price_list.py
|
Athenolabs/erpnext_connector
|
ce52c298b5a3b370f1def8d4df00cda5541aeabd
|
[
"MIT"
] | 1
|
2018-02-09T13:01:52.000Z
|
2021-03-31T12:30:44.000Z
|
erpnext_connector/erpnext_connector/doctype/erpnext_price_list/erpnext_price_list.py
|
mntechnique/erpnext_connector
|
5bb5d0bb418d57a5877b172546f0292420cfd678
|
[
"MIT"
] | 4
|
2017-10-26T03:59:43.000Z
|
2020-08-16T15:30:28.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, MN Technique and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, requests
import erpnext_connector
from frappe.model.document import Document
class ERPNextPriceList(Document):
pass
| 25
| 51
| 0.796667
|
992e756f1db73fb34b103dadf61736a6e0515822
| 6,723
|
py
|
Python
|
examples/pwr_run/checkpointing/nonpc_short/final3/job15.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/nonpc_short/final3/job15.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/nonpc_short/final3/job15.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
"""
Trains a MobileNetV2 on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.mobilenet_v2 import MobileNetV2
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.005
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_final3/' + job_name + '*'
total_epochs = 15
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
base_model = MobileNetV2(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
epoch_waste_dict[job_name] += epoch_waste_time
json_file3 = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file3)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_final3/' + job_name + '_' + str(current_epoch) + '.h5')
    print('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
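# For reference: the scheduler (or a manual `kill -15 <pid>`) sends SIGTERM, which
# runs terminateProcess above to checkpoint the model and record the wasted epoch time.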
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| 30.559091
| 118
| 0.70296
|
597e5e074a2ff04b268c7479be43c71d8a831512
| 419
|
py
|
Python
|
jogadorAleatorio.py
|
kevenLeandro/simula-oBancoImobiliario
|
d79fd91b94eb6f05c9088223f8ba4e4dc1c58b90
|
[
"Apache-2.0"
] | null | null | null |
jogadorAleatorio.py
|
kevenLeandro/simula-oBancoImobiliario
|
d79fd91b94eb6f05c9088223f8ba4e4dc1c58b90
|
[
"Apache-2.0"
] | null | null | null |
jogadorAleatorio.py
|
kevenLeandro/simula-oBancoImobiliario
|
d79fd91b94eb6f05c9088223f8ba4e4dc1c58b90
|
[
"Apache-2.0"
] | null | null | null |
import jogador
import comportamento
class jogadorAleatorio(jogador.jogador):
def __init__(self, posicao, volta, saldo):
super().__init__(posicao, volta, saldo)
def comportamento(self, propriedade,jogador):
comportamento.aleatorio(propriedade, jogador)
def get_definition(self):
return "O jogador aleatรณrio compra a propriedade que ele parar em cima com probabilidade de 50%."
| 24.647059
| 105
| 0.73031
|
753ce1b686a74ec97686fe8ac1f6b167a3966b2e
| 1,429
|
py
|
Python
|
test_app_data/settings.py
|
ella/django-appdata
|
2a1bbc2c4b4569ebcd1bee98b29431df206912c9
|
[
"BSD-3-Clause"
] | 13
|
2015-03-13T11:40:43.000Z
|
2020-07-19T04:33:59.000Z
|
test_app_data/settings.py
|
ella/django-appdata
|
2a1bbc2c4b4569ebcd1bee98b29431df206912c9
|
[
"BSD-3-Clause"
] | 20
|
2015-01-03T07:26:56.000Z
|
2020-10-29T09:44:29.000Z
|
test_app_data/settings.py
|
ella/django-appdata
|
2a1bbc2c4b4569ebcd1bee98b29431df206912c9
|
[
"BSD-3-Clause"
] | 15
|
2015-01-03T07:27:09.000Z
|
2020-05-14T07:15:27.000Z
|
DEBUG = True
ROOT_URLCONF = 'test_app_data.urls'
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
TEMPLATE_LOADERS = (
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_OPTIONS = {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
],
'loaders': TEMPLATE_LOADERS,
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': TEMPLATE_OPTIONS
},
]
SECRET_KEY = 'very-secret'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/tmp/app_data.db',
}
}
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
MIDDLEWARE = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.messages',
'test_app_data',
)
| 24.637931
| 69
| 0.69909
|
0105a6595061636f079690bbd87d145909c155f7
| 7,297
|
py
|
Python
|
hsv_dot_beer/config/common.py
|
it-avenger/hsv-beer
|
039611bbcf260ad24c9a829fca0af96b4c7da014
|
[
"Apache-2.0"
] | null | null | null |
hsv_dot_beer/config/common.py
|
it-avenger/hsv-beer
|
039611bbcf260ad24c9a829fca0af96b4c7da014
|
[
"Apache-2.0"
] | 6
|
2020-08-03T09:50:01.000Z
|
2021-06-10T18:17:28.000Z
|
hsv_dot_beer/config/common.py
|
cartacode/hsv_dot_beer
|
039611bbcf260ad24c9a829fca0af96b4c7da014
|
[
"Apache-2.0"
] | null | null | null |
import os
from os.path import join
from distutils.util import strtobool
import dj_database_url
from configurations import Configuration
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class Common(Configuration):
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third party apps
'corsheaders',
'rest_framework', # utilities for rest apis
'rest_framework.authtoken', # token authentication
'django_filters', # for filtering rest endpoints
'django_countries', # for ease of using countries
'django_celery_beat', # use django admin to set up scheduled tasks
# Your apps
'hsv_dot_beer.users',
'venues',
'events',
'beers',
'taps',
'tap_list_providers',
)
# https://docs.djangoproject.com/en/2.0/topics/http/middleware/
MIDDLEWARE = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ALLOWED_HOSTS = ["*"]
ROOT_URLCONF = 'hsv_dot_beer.urls'
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY')
WSGI_APPLICATION = 'hsv_dot_beer.wsgi.application'
# Email
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
ADMINS = (
('Author', 'drewbrew@users.noreply.github.com'),
)
# Postgres
DATABASES = {
'default': dj_database_url.config(
default='postgres://postgres:@postgres:5432/postgres',
conn_max_age=int(os.getenv('POSTGRES_CONN_MAX_AGE', 600))
)
}
# General
APPEND_SLASH = False
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
USE_L10N = True
USE_TZ = True
LOGIN_REDIRECT_URL = '/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_ROOT = os.path.normpath(join(os.path.dirname(BASE_DIR), 'static'))
STATICFILES_DIRS = []
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Media files
MEDIA_ROOT = join(os.path.dirname(BASE_DIR), 'media')
MEDIA_URL = '/media/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': STATICFILES_DIRS,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Set DEBUG to False as a default for safety
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = strtobool(os.getenv('DJANGO_DEBUG', 'no'))
# Password Validation
# https://docs.djangoproject.com/en/2.0/topics/auth/passwords/#module-django.contrib.auth.password_validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'django.server': {
'()': 'django.utils.log.ServerFormatter',
'format': '[%(server_time)s] %(message)s',
},
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'django.server': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'django.server',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['console'],
'propagate': True,
},
'django.server': {
'handlers': ['django.server'],
'level': 'INFO',
'propagate': False,
},
'django.request': {
'handlers': ['mail_admins', 'console'],
'level': 'ERROR',
'propagate': False,
},
'django.db.backends': {
'handlers': ['console'],
'level': 'INFO'
},
}
}
# Custom user app
AUTH_USER_MODEL = 'users.User'
# Django Rest Framework
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': int(os.getenv('DJANGO_PAGINATION_LIMIT', 10)),
'DATETIME_FORMAT': '%Y-%m-%dT%H:%M:%S%z',
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_PERMISSION_CLASSES': [
'hsv_dot_beer.permissions.IsAdminOrReadOnly',
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
),
}
# Default Venue time zone
DEFAULT_VENUE_TIME_ZONE = 'America/Chicago'
# Celery info
CELERY_BROKER_URL = os.environ.get('REDIS_URL', 'redis://redis:6379/')
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
CELERY_IMPORTS = (
'tap_list_providers.tasks',
'beers.tasks',
)
| 32.431111
| 113
| 0.570371
|
35ee0627b0d757fb74fbc6be0ef6c100781f2bfd
| 2,052
|
py
|
Python
|
test/vanilla/Expected/AcceptanceTests/BodyDate/bodydate/aio/_configuration.py
|
tasherif-msft/autorest.python
|
5b0121bcfa802aedaeda36990e8bcaa2b7e26b14
|
[
"MIT"
] | null | null | null |
test/vanilla/Expected/AcceptanceTests/BodyDate/bodydate/aio/_configuration.py
|
tasherif-msft/autorest.python
|
5b0121bcfa802aedaeda36990e8bcaa2b7e26b14
|
[
"MIT"
] | null | null | null |
test/vanilla/Expected/AcceptanceTests/BodyDate/bodydate/aio/_configuration.py
|
tasherif-msft/autorest.python
|
5b0121bcfa802aedaeda36990e8bcaa2b7e26b14
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from .._version import VERSION
class AutoRestDateTestServiceConfiguration(Configuration):
"""Configuration for AutoRestDateTestService.
Note that all parameters used to create this instance are saved as instance
attributes.
"""
def __init__(
self,
**kwargs: Any
) -> None:
super(AutoRestDateTestServiceConfiguration, self).__init__(**kwargs)
kwargs.setdefault('sdk_moniker', 'autorestdatetestservice/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
| 44.608696
| 108
| 0.680799
|
380c1507bc8ddf9fcb2c8a4ec0c0f2867645c02d
| 9,449
|
py
|
Python
|
src/triage/tracking.py
|
dssg/triage
|
9f96a6be3df6bb04f654f33e127c53351a301d07
|
[
"MIT"
] | 160
|
2017-06-13T09:59:59.000Z
|
2022-03-21T22:00:35.000Z
|
src/triage/tracking.py
|
dssg/triage
|
9f96a6be3df6bb04f654f33e127c53351a301d07
|
[
"MIT"
] | 803
|
2016-10-21T19:44:02.000Z
|
2022-03-29T00:02:33.000Z
|
src/triage/tracking.py
|
dssg/triage
|
9f96a6be3df6bb04f654f33e127c53351a301d07
|
[
"MIT"
] | 59
|
2017-01-31T22:10:22.000Z
|
2022-03-19T12:35:03.000Z
|
import sys
import datetime
import platform
import getpass
import os
import requests
import subprocess
import verboselogs, logging
logger = verboselogs.VerboseLogger(__name__)
from functools import wraps
from triage.util.db import scoped_session, get_for_update
from triage.util.introspection import classpath
from triage import __version__
try:
try:
from pip._internal.operations import freeze as pip_freeze
except ImportError: # pip < 10.0
from pip.operations import freeze as pip_freeze
except ImportError:
pip_freeze = None
from triage.component.results_schema import TriageRun, TriageRunStatus
def infer_git_hash():
"""Attempt to infer the git hash of the repository in the current working directory
Returns: Either the 'git rev-parse HEAD' output or None
"""
try:
git_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip().decode('utf-8')
except Exception as exc:
logger.spam("Unable to infer git hash")
git_hash = None
return git_hash
def infer_triage_version():
return __version__
def infer_python_version():
""" Returns python version """
return sys.version.replace("\r", "").replace("\n", "")
def infer_installed_libraries():
"""Attempt to infer the installed libraries by running pip freeze and formatting as a list
Returns: Either a list, or None
"""
if pip_freeze is not None:
installed_libraries = pip_freeze.freeze()
else:
logger.spam("Unable to pip freeze, cannot list installed libraries")
installed_libraries = []
return installed_libraries
def infer_ec2_instance_type():
"""Attempt to infer the instance type of the ec2 instance by querying Amazon's endpoint
Returns: Either the ec2 instance type as returned by Amazon's endpoint, or None
"""
try:
ec2_instance_type = requests.get(
'http://169.254.169.254/latest/meta-data/instance-type',
timeout=0.01
).text
except requests.exceptions.RequestException:
logger.spam(
"Unable to retrieve metadata about ec2 instance, will not set ec2 instance type"
)
ec2_instance_type = None
return ec2_instance_type
def infer_log_location():
"""Attempt to infer the location of the log file of the root logger
Returns: Either the baseFilename of the first FileHandler on the root logger, or None
"""
root_logger_handlers = [
handler
for handler in logging.getLoggerClass().root.handlers
if isinstance(handler, logging.FileHandler)
]
if root_logger_handlers:
log_location = root_logger_handlers[0].baseFilename
else:
logger.spam("No FileHandler found in root logger, cannot record logging filename")
log_location = None
return log_location
def initialize_tracking_and_get_run_id(
experiment_hash,
experiment_class_path,
random_seed,
experiment_kwargs,
db_engine
):
"""Create a row in the TriageRun table with some initial info and return the created run_id
Args:
experiment_hash (str) An experiment hash that exists in the experiments table
experiment_class_path (str) The name of the experiment subclass used
random_seed (int) Random seed used to run the experiment
experiment_kwargs (dict) Any runtime Experiment keyword arguments that should be saved
db_engine (sqlalchemy.engine)
"""
# Any experiment kwargs that are types (e.g. MatrixStorageClass) can't
# be serialized, so just use the class name if so
cleaned_experiment_kwargs = {
k: (classpath(v) if isinstance(v, type) else v)
for k, v in experiment_kwargs.items()
}
run = TriageRun(
start_time=datetime.datetime.now(),
git_hash=infer_git_hash(),
triage_version=infer_triage_version(),
python_version=infer_python_version(),
run_type="experiment",
run_hash=experiment_hash,
last_updated_time=datetime.datetime.now(),
current_status=TriageRunStatus.started,
installed_libraries=infer_installed_libraries(),
platform=platform.platform(),
os_user=getpass.getuser(),
working_directory=os.getcwd(),
ec2_instance_type=infer_ec2_instance_type(),
log_location=infer_log_location(),
experiment_class_path=experiment_class_path,
        random_seed=random_seed,
experiment_kwargs=cleaned_experiment_kwargs,
)
run_id = None
with scoped_session(db_engine) as session:
session.add(run)
session.commit()
run_id = run.run_id
if not run_id:
raise ValueError("Failed to retrieve run_id from saved row")
return run_id
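def _example_initialize_tracking(db_engine):
    # Editor's sketch (not project code): the engine comes from the caller, and the
    # hash, class path and kwargs below are placeholder values for illustration only.
    return initialize_tracking_and_get_run_id(
        experiment_hash="0123abcd",  # must already exist in the experiments table
        experiment_class_path="triage.experiments.SingleThreadedExperiment",
        random_seed=42,
        experiment_kwargs={"n_processes": 1},
        db_engine=db_engine,
    )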
def get_run_for_update(db_engine, run_id):
"""Yields an TriageRun at the given run_id for update
Will kick the last_update_time timestamp of the row each time.
Args:
db_engine (sqlalchemy.engine)
run_id (int) The identifier/primary key of the run
"""
return get_for_update(db_engine, TriageRun, run_id)
def experiment_entrypoint(entrypoint_func):
"""Decorator to control tracking of an experiment run at the wrapped method
To update the database, it requires the instance of the wrapped method to have a
db_engine and run_id.
Upon method entry, will update the TriageRun row with the wrapped method name.
Upon method exit, will update the TriageRun row with the status (either failed or completed)
"""
@wraps(entrypoint_func)
def with_entrypoint(self, *args, **kwargs):
entrypoint_name = entrypoint_func.__name__
with get_run_for_update(self.db_engine, self.run_id) as run_obj:
if not run_obj.start_method:
run_obj.start_method = entrypoint_name
try:
return_value = entrypoint_func(self, *args, **kwargs)
except Exception as exc:
with get_run_for_update(self.db_engine, self.run_id) as run_obj:
run_obj.current_status = TriageRunStatus.failed
run_obj.stacktrace = str(exc)
raise exc
with get_run_for_update(self.db_engine, self.run_id) as run_obj:
run_obj.current_status = TriageRunStatus.completed
return return_value
return with_entrypoint
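class _ExampleTrackedRun:
    # Editor's sketch (not project code): the decorator only needs the instance to
    # expose `db_engine` and `run_id`, as the docstring above notes; on success the
    # run row is marked completed, on an exception it is marked failed.
    def __init__(self, db_engine, run_id):
        self.db_engine = db_engine
        self.run_id = run_id
    @experiment_entrypoint
    def run(self):
        return "done"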
def increment_field(field, run_id, db_engine):
"""Increment an TriageRun's named field.
Expects that the field is an integer in the database.
Will also kick the last_updated_time timestamp.
Args:
field (str) The name of the field
run_id (int) The identifier/primary key of the run
db_engine (sqlalchemy.engine)
"""
with scoped_session(db_engine) as session:
# Use an update query instead of a session merge so it happens in one atomic query
        # and protects against race conditions
session.query(TriageRun).filter_by(run_id=run_id).update({
field: getattr(TriageRun, field) + 1,
'last_updated_time': datetime.datetime.now()
})
def record_matrix_building_started(run_id, db_engine):
"""Mark the current timestamp as the time at which matrix building started
Args:
run_id (int) The identifier/primary key of the run
db_engine (sqlalchemy.engine)
"""
with get_run_for_update(db_engine, run_id) as run_obj:
run_obj.matrix_building_started = datetime.datetime.now()
def record_model_building_started(run_id, db_engine):
"""Mark the current timestamp as the time at which model building started
Args:
run_id (int) The identifier/primary key of the run
db_engine (sqlalchemy.engine)
"""
with get_run_for_update(db_engine, run_id) as run_obj:
run_obj.model_building_started = datetime.datetime.now()
def built_matrix(run_id, db_engine):
"""Increment the matrix build counter for the TriageRun
Args:
run_id (int) The identifier/primary key of the run
db_engine (sqlalchemy.engine)
"""
increment_field('matrices_made', run_id, db_engine)
def skipped_matrix(run_id, db_engine):
"""Increment the matrix skip counter for the TriageRun
Args:
run_id (int) The identifier/primary key of the run
db_engine (sqlalchemy.engine)
"""
increment_field('matrices_skipped', run_id, db_engine)
def errored_matrix(run_id, db_engine):
"""Increment the matrix error counter for the TriageRun
Args:
run_id (int) The identifier/primary key of the run
db_engine (sqlalchemy.engine)
"""
increment_field('matrices_errored', run_id, db_engine)
def built_model(run_id, db_engine):
"""Increment the model build counter for the TriageRun
Args:
run_id (int) The identifier/primary key of the run
db_engine (sqlalchemy.engine)
"""
increment_field('models_made', run_id, db_engine)
def skipped_model(run_id, db_engine):
"""Increment the model skip counter for the TriageRun
Args:
run_id (int) The identifier/primary key of the run
db_engine (sqlalchemy.engine)
"""
increment_field('models_skipped', run_id, db_engine)
def errored_model(run_id, db_engine):
"""Increment the model error counter for the TriageRun
Args:
run_id (int) The identifier/primary key of the run
db_engine (sqlalchemy.engine)
"""
increment_field('models_errored', run_id, db_engine)
| 32.47079
| 96
| 0.696793
|
b9077fa89c8246f8b65314b84ac6a321505041c2
| 308
|
py
|
Python
|
pyembroidery-convert.py
|
teosavv/pyembroidery
|
00985f423e64ea1a454e5484012c19a64f26eb2c
|
[
"MIT"
] | 45
|
2018-07-08T09:49:30.000Z
|
2022-03-23T07:01:15.000Z
|
pyembroidery-convert.py
|
teosavv/pyembroidery
|
00985f423e64ea1a454e5484012c19a64f26eb2c
|
[
"MIT"
] | 59
|
2018-07-05T22:05:58.000Z
|
2022-02-20T01:01:20.000Z
|
pyembroidery-convert.py
|
teosavv/pyembroidery
|
00985f423e64ea1a454e5484012c19a64f26eb2c
|
[
"MIT"
] | 23
|
2018-08-10T17:58:04.000Z
|
2022-03-29T03:41:46.000Z
|
from __future__ import print_function
import sys
from pyembroidery import *
if len(sys.argv) <= 1:
print("No command arguments")
    sys.exit(1)
input_path = sys.argv[1]
if len(sys.argv) >= 3:
    output_path = sys.argv[2]
else:
    output_path = input_path + ".csv"
pattern = read(input_path)
write(pattern, output_path)
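# Editor's note -- example invocation (file names are placeholders):
#   python pyembroidery-convert.py design.dst design.pes
# With only an input argument the output falls back to "<input>.csv", as handled above.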
| 18.117647
| 37
| 0.668831
|
c51ebb956084607c3f7a3b2793698b665b55fd6a
| 926
|
py
|
Python
|
museosMadrid/migrations/0002_auto_20180517_1948.py
|
AlbertoCoding/X-Serv-Practica-Museos
|
5d3e3c99b8750ece9973f4e04ae3c3bfe77f3946
|
[
"Apache-2.0"
] | null | null | null |
museosMadrid/migrations/0002_auto_20180517_1948.py
|
AlbertoCoding/X-Serv-Practica-Museos
|
5d3e3c99b8750ece9973f4e04ae3c3bfe77f3946
|
[
"Apache-2.0"
] | null | null | null |
museosMadrid/migrations/0002_auto_20180517_1948.py
|
AlbertoCoding/X-Serv-Practica-Museos
|
5d3e3c99b8750ece9973f4e04ae3c3bfe77f3946
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('museosMadrid', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='museo',
name='mostrar',
field=models.PositiveSmallIntegerField(default=1),
),
migrations.AlterField(
model_name='museo',
name='fax',
field=models.CharField(max_length=40, null=True, blank=True),
),
migrations.AlterField(
model_name='museo',
name='num',
field=models.CharField(max_length=8, null=True, blank=True),
),
migrations.AlterField(
model_name='museo',
name='telefono',
field=models.CharField(max_length=40, null=True, blank=True),
),
]
| 26.457143
| 73
| 0.565875
|
59a55c99b76c84dd0197728ea8fdf8f86a6b45b4
| 33,585
|
py
|
Python
|
Lib/os.py
|
lazylife7157/RustPython
|
84f08c85d9f0295d2a1905b5065498181eb442c7
|
[
"MIT"
] | null | null | null |
Lib/os.py
|
lazylife7157/RustPython
|
84f08c85d9f0295d2a1905b5065498181eb442c7
|
[
"MIT"
] | null | null | null |
Lib/os.py
|
lazylife7157/RustPython
|
84f08c85d9f0295d2a1905b5065498181eb442c7
|
[
"MIT"
] | null | null | null |
r"""OS routines for NT or Posix depending on what system we're on.
This exports:
- all functions from posix or nt, e.g. unlink, stat, etc.
- os.path is either posixpath or ntpath
- os.name is either 'posix' or 'nt'
- os.curdir is a string representing the current directory (always '.')
- os.pardir is a string representing the parent directory (always '..')
- os.sep is the (or a most common) pathname separator ('/' or '\\')
- os.extsep is the extension separator (always '.')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import abc
import sys, errno
import stat as st
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR",
"SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen",
"popen", "extsep"]
def _exists(name):
return name in globals()
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
import _os
from _os import *
__all__.extend(_get_exports_list(_os))
del _os
# Any new dependencies of the os module and/or changes in path separator
# requires updating importlib as well.
if name == 'nt':
linesep = '\r\n'
import ntpath as path
else:
linesep = '\n'
import posixpath as path
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
# Other possible SEEK values are directly imported from posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works like
mkdir, except that any intermediate path segment (not just the rightmost)
will be created if it does not exist. If the target directory already
exists, raise an OSError if exist_ok is False. Otherwise no exception is
raised. This is recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode, exist_ok)
except FileExistsError:
# Defeats race condition when another thread created the path
pass
cdir = curdir
if isinstance(tail, bytes):
cdir = bytes(curdir, 'ASCII')
if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists
return
try:
mkdir(name, mode)
except OSError:
# Cannot rely on checking for EEXIST, since the operating system
# could give priority to other errors like EACCES or EROFS
if not exist_ok or not path.isdir(name):
raise
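def _example_makedirs(base):
    # Editor's sketch, not part of the module API: with exist_ok=True a repeated
    # call is silent; without it the second call would raise FileExistsError.
    target = path.join(base, "a", "b", "c")
    makedirs(target, exist_ok=True)
    makedirs(target, exist_ok=True)
    return target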
def removedirs(name):
"""removedirs(name)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except OSError:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except OSError:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune the
search, or to impose a specific order of visiting. Modifying dirnames when
topdown is false is ineffective, since the directories in dirnames have
already been generated by the time dirnames itself is generated. No matter
the value of topdown, the list of subdirectories is retrieved before the
tuples for the directory and its subdirectories are generated.
By default errors from the os.scandir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an OSError instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([getsize(join(root, name)) for name in files]), end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
top = fspath(top)
dirs = []
nondirs = []
walk_dirs = []
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that scandir is global in this module due
# to earlier import-*.
scandir_it = scandir(top)
except OSError as error:
if onerror is not None:
onerror(error)
return
with scandir_it:
while True:
try:
try:
entry = next(scandir_it)
except StopIteration:
break
except OSError as error:
if onerror is not None:
onerror(error)
return
try:
is_dir = entry.is_dir()
except OSError:
# If is_dir() raises an OSError, consider that the entry is not
                # a directory, same behaviour as os.path.isdir().
is_dir = False
if is_dir:
dirs.append(entry.name)
else:
nondirs.append(entry.name)
if not topdown and is_dir:
# Bottom-up: recurse into sub-directory, but exclude symlinks to
# directories if followlinks is False
if followlinks:
walk_into = True
else:
try:
is_symlink = entry.is_symlink()
except OSError:
# If is_symlink() raises an OSError, consider that the
                        # entry is not a symbolic link, same behaviour as
# os.path.islink().
is_symlink = False
walk_into = not is_symlink
if walk_into:
walk_dirs.append(entry.path)
# Yield before recursion if going top down
if topdown:
yield top, dirs, nondirs
# Recurse into sub-directories
islink, join = path.islink, path.join
for dirname in dirs:
new_path = join(top, dirname)
# Issue #23605: os.path.islink() is used instead of caching
# entry.is_symlink() result during the loop on os.scandir() because
# the caller can replace the directory entry during the "yield"
# above.
if followlinks or not islink(new_path):
yield from walk(new_path, topdown, onerror, followlinks)
else:
# Recurse into sub-directories
for new_path in walk_dirs:
yield from walk(new_path, topdown, onerror, followlinks)
# Yield after recursion if going bottom up
yield top, dirs, nondirs
__all__.append("walk")
if {open, stat} <= supports_dir_fd and {listdir, stat} <= supports_fd:
def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
"""Directory tree generator.
This behaves exactly like walk(), except that it yields a 4-tuple
dirpath, dirnames, filenames, dirfd
`dirpath`, `dirnames` and `filenames` are identical to walk() output,
and `dirfd` is a file descriptor referring to the directory `dirpath`.
The advantage of fwalk() over walk() is that it's safe against symlink
races (when follow_symlinks is False).
If dir_fd is not None, it should be a file descriptor open to a directory,
and top should be relative; top will then be relative to that directory.
(dir_fd is always supported for fwalk.)
Caution:
Since fwalk() yields file descriptors, those are only valid until the
next iteration step, so you should dup() them if you want to keep them
for a longer period.
Example:
import os
for root, dirs, files, rootfd in os.fwalk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]),
end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
if not isinstance(top, int) or not hasattr(top, '__index__'):
top = fspath(top)
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd)
topfd = open(top, O_RDONLY, dir_fd=dir_fd)
try:
if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and
path.samestat(orig_st, stat(topfd)))):
yield from _fwalk(topfd, top, topdown, onerror, follow_symlinks)
finally:
close(topfd)
def _fwalk(topfd, toppath, topdown, onerror, follow_symlinks):
# Note: This uses O(depth of the directory tree) file descriptors: if
# necessary, it can be adapted to only require O(1) FDs, see issue
# #13734.
names = listdir(topfd)
dirs, nondirs = [], []
for name in names:
try:
# Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with
# walk() which reports symlinks to directories as directories.
# We do however check for symlinks before recursing into
# a subdirectory.
if st.S_ISDIR(stat(name, dir_fd=topfd).st_mode):
dirs.append(name)
else:
nondirs.append(name)
except OSError:
try:
# Add dangling symlinks, ignore disappeared files
if st.S_ISLNK(stat(name, dir_fd=topfd, follow_symlinks=False)
.st_mode):
nondirs.append(name)
except OSError:
continue
if topdown:
yield toppath, dirs, nondirs, topfd
for name in dirs:
try:
orig_st = stat(name, dir_fd=topfd, follow_symlinks=follow_symlinks)
dirfd = open(name, O_RDONLY, dir_fd=topfd)
except OSError as err:
if onerror is not None:
onerror(err)
continue
try:
if follow_symlinks or path.samestat(orig_st, stat(dirfd)):
dirpath = path.join(toppath, name)
yield from _fwalk(dirfd, dirpath, topdown, onerror, follow_symlinks)
finally:
close(dirfd)
if not topdown:
yield toppath, dirs, nondirs, topfd
__all__.append("fwalk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execvp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
    with argument list args and environment env, replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
exec_func = execve
argrest = (args, env)
else:
exec_func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
exec_func(file, *argrest)
return
last_exc = saved_exc = None
saved_tb = None
path_list = get_exec_path(env)
if name != 'nt':
file = fsencode(file)
path_list = map(fsencode, path_list)
for dir in path_list:
fullname = path.join(dir, file)
try:
exec_func(fullname, *argrest)
except OSError as e:
last_exc = e
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise saved_exc.with_traceback(saved_tb)
raise last_exc.with_traceback(tb)
def get_exec_path(env=None):
"""Returns the sequence of directories that will be searched for the
named executable (similar to a shell) when launching a process.
*env* must be an environment variable dict or None. If *env* is None,
os.environ will be used.
"""
# Use a local import instead of a global import to limit the number of
# modules loaded at startup: the os module is always loaded at startup by
# Python. It may also avoid a bootstrap issue.
import warnings
if env is None:
env = environ
# {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a
# BytesWarning when using python -b or python -bb: ignore the warning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BytesWarning)
try:
path_list = env.get('PATH')
except TypeError:
path_list = None
if supports_bytes_environ:
try:
path_listb = env[b'PATH']
except (KeyError, TypeError):
pass
else:
if path_list is not None:
raise ValueError(
"env cannot contain 'PATH' and b'PATH' keys")
path_list = path_listb
if path_list is not None and isinstance(path_list, bytes):
path_list = fsdecode(path_list)
if path_list is None:
path_list = defpath
return path_list.split(pathsep)
# Change environ to automatically call putenv(), unsetenv if they exist.
from _collections_abc import MutableMapping
class _Environ(MutableMapping):
def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv):
self.encodekey = encodekey
self.decodekey = decodekey
self.encodevalue = encodevalue
self.decodevalue = decodevalue
self.putenv = putenv
self.unsetenv = unsetenv
self._data = data
def __getitem__(self, key):
try:
value = self._data[self.encodekey(key)]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
return self.decodevalue(value)
def __setitem__(self, key, value):
key = self.encodekey(key)
value = self.encodevalue(value)
self.putenv(key, value)
self._data[key] = value
def __delitem__(self, key):
encodedkey = self.encodekey(key)
self.unsetenv(encodedkey)
try:
del self._data[encodedkey]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
def __iter__(self):
for key in self._data:
yield self.decodekey(key)
def __len__(self):
return len(self._data)
def __repr__(self):
return 'environ({{{}}})'.format(', '.join(
('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value))
for key, value in self._data.items())))
def copy(self):
return dict(self)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
try:
_putenv = putenv
except NameError:
_putenv = lambda key, value: None
else:
if "putenv" not in __all__:
__all__.append("putenv")
try:
_unsetenv = unsetenv
except NameError:
_unsetenv = lambda key: _putenv(key, "")
else:
if "unsetenv" not in __all__:
__all__.append("unsetenv")
def _createenviron():
if name == 'nt':
# Where Env Var Names Must Be UPPERCASE
def check_str(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value
encode = check_str
decode = str
def encodekey(key):
return encode(key).upper()
data = {}
for key, value in environ.items():
data[encodekey(key)] = value
else:
# Where Env Var Names Can Be Mixed Case
encoding = sys.getfilesystemencoding()
def encode(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value.encode(encoding, 'surrogateescape')
def decode(value):
return value.decode(encoding, 'surrogateescape')
encodekey = encode
data = environ
return _Environ(data,
encodekey, decode,
encode, decode,
_putenv, _unsetenv)
# unicode environ
environ = _createenviron()
del _createenviron
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are str."""
return environ.get(key, default)
supports_bytes_environ = (name != 'nt')
__all__.extend(("getenv", "supports_bytes_environ"))
if supports_bytes_environ:
def _check_bytes(value):
if not isinstance(value, bytes):
raise TypeError("bytes expected, not %s" % type(value).__name__)
return value
# bytes environ
environb = _Environ(environ._data,
_check_bytes, bytes,
_check_bytes, bytes,
_putenv, _unsetenv)
del _check_bytes
def getenvb(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are bytes."""
return environb.get(key, default)
__all__.extend(("environb", "getenvb"))
def _fscodec():
encoding = sys.getfilesystemencoding()
errors = sys.getfilesystemencodeerrors()
def fsencode(filename):
"""Encode filename (an os.PathLike, bytes, or str) to the filesystem
encoding with 'surrogateescape' error handler, return bytes unchanged.
On Windows, use 'strict' error handler if the file system encoding is
'mbcs' (which is the default encoding).
"""
filename = fspath(filename) # Does type-checking of `filename`.
if isinstance(filename, str):
return filename.encode(encoding, errors)
else:
return filename
def fsdecode(filename):
"""Decode filename (an os.PathLike, bytes, or str) from the filesystem
encoding with 'surrogateescape' error handler, return str unchanged. On
Windows, use 'strict' error handler if the file system encoding is
'mbcs' (which is the default encoding).
"""
filename = fspath(filename) # Does type-checking of `filename`.
if isinstance(filename, bytes):
return filename.decode(encoding, errors)
else:
return filename
return fsencode, fsdecode
fsencode, fsdecode = _fscodec()
del _fscodec
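def _example_fscodec_roundtrip():
    # Editor's sketch, not part of the module API: a str path round-trips through
    # the filesystem encoding chosen by _fscodec above.
    raw = fsencode("data.txt")
    assert isinstance(raw, bytes)
    return fsdecode(raw)  # -> "data.txt"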
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
__all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"])
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
if not isinstance(args, (tuple, list)):
raise TypeError('argv must be a tuple or a list')
if not args or not args[0]:
raise ValueError('argv first element cannot be empty')
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise OSError("Not stopped, signaled or exited???")
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
    # Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
__all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"])
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnl", "spawnle"])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnlp", "spawnlpe"])
# Supply os.popen()
def popen(cmd, mode="r", buffering=-1):
if not isinstance(cmd, str):
raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
if mode not in ("r", "w"):
raise ValueError("invalid mode %r" % mode)
if buffering == 0 or buffering is None:
raise ValueError("popen() does not support unbuffered streams")
import subprocess, io
if mode == "r":
proc = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdout), proc)
else:
proc = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdin), proc)
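def _example_popen():
    # Editor's sketch, not part of the module API: read-mode popen exposes the
    # child's stdout as a text stream; closing it (here via the with-block) reports
    # the exit status through the _wrap_close helper below.
    with popen("echo hello") as stream:  # command is illustrative
        return stream.read()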
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close:
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
if name == 'nt':
return returncode
else:
return returncode << 8 # Shift left to match old behavior
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
# Supply os.fdopen()
def fdopen(fd, *args, **kwargs):
if not isinstance(fd, int):
raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
import io
return io.open(fd, *args, **kwargs)
# For testing purposes, make sure the function is available when the C
# implementation exists.
def _fspath(path):
"""Return the path representation of a path-like object.
If str or bytes is passed in, it is returned unchanged. Otherwise the
os.PathLike interface is used to get the path representation. If the
path representation is not str or bytes, TypeError is raised. If the
provided path is not str, bytes, or os.PathLike, TypeError is raised.
"""
if isinstance(path, (str, bytes)):
return path
# Work from the object's type to match method resolution of other magic
# methods.
path_type = type(path)
try:
path_repr = path_type.__fspath__(path)
except AttributeError:
if hasattr(path_type, '__fspath__'):
raise
else:
raise TypeError("expected str, bytes or os.PathLike object, "
"not " + path_type.__name__)
if isinstance(path_repr, (str, bytes)):
return path_repr
else:
raise TypeError("expected {}.__fspath__() to return str or bytes, "
"not {}".format(path_type.__name__,
type(path_repr).__name__))
# If there is no C implementation, make the pure Python version the
# implementation as transparently as possible.
if not _exists('fspath'):
fspath = _fspath
fspath.__name__ = "fspath"
class PathLike(abc.ABC):
"""Abstract base class for implementing the file system path protocol."""
@abc.abstractmethod
def __fspath__(self):
"""Return the file system path representation of the object."""
raise NotImplementedError
@classmethod
def __subclasshook__(cls, subclass):
return hasattr(subclass, '__fspath__')
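class _ExamplePath(PathLike):
    # Editor's sketch, not part of the module API: any object implementing
    # __fspath__ is accepted by fspath(), per the subclass hook above.
    def __init__(self, name):
        self._name = name
    def __fspath__(self):
        return self._name
# e.g. fspath(_ExamplePath("notes.txt")) returns "notes.txt"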
| 35.427215
| 95
| 0.624833
|
fa6b60c245da8136f6d940c68e7d2ea60515cc22
| 657
|
py
|
Python
|
pilot/pilot.py
|
pointerboy/atspilot
|
a70bf67c5dd6360cd5e1fe7dce50c18266560e5f
|
[
"MIT"
] | null | null | null |
pilot/pilot.py
|
pointerboy/atspilot
|
a70bf67c5dd6360cd5e1fe7dce50c18266560e5f
|
[
"MIT"
] | null | null | null |
pilot/pilot.py
|
pointerboy/atspilot
|
a70bf67c5dd6360cd5e1fe7dce50c18266560e5f
|
[
"MIT"
] | null | null | null |
import numpy as np
from PIL import ImageGrab
import cv2
import time
from pilot.input.d3_directkeys import ReleaseKey, PressKey, Keys
last_time = time.time()
def process_img(original):
new_img = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
new_img = cv2.Canny(new_img, threshold1=200, threshold2=300)
return new_img
while True:
screen = np.array(ImageGrab.grab(bbox=(0, 40, 800, 640)))
new_screen = process_img(screen)
    print('Loop took {} seconds'.format(time.time() - last_time))
    last_time = time.time()
cv2.imshow('window', new_screen)
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
| 27.375
| 64
| 0.695586
|
a5d26bb656b38b9af4e3be7d0a397eb732c98fcb
| 98,142
|
py
|
Python
|
droidlet/perception/semantic_parsing/nsp_transformer_model/modeling_utils.py
|
colesbury/fairo
|
9e50a3aa7369c68c80e84d80abd5fcdee8a9277a
|
[
"MIT"
] | 73
|
2021-09-14T19:24:45.000Z
|
2022-03-27T06:43:26.000Z
|
droidlet/perception/semantic_parsing/nsp_transformer_model/modeling_utils.py
|
colesbury/fairo
|
9e50a3aa7369c68c80e84d80abd5fcdee8a9277a
|
[
"MIT"
] | 268
|
2021-09-14T22:40:23.000Z
|
2022-03-31T23:01:54.000Z
|
droidlet/perception/semantic_parsing/nsp_transformer_model/modeling_utils.py
|
colesbury/fairo
|
9e50a3aa7369c68c80e84d80abd5fcdee8a9277a
|
[
"MIT"
] | 20
|
2021-09-14T19:24:47.000Z
|
2022-03-30T19:03:44.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import re
import warnings
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
import torch
from torch import Tensor, device, dtype, nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from transformers.activations import get_activation
from transformers.configuration_utils import PretrainedConfig
from transformers.file_utils import (
CONFIG_NAME,
DUMMY_INPUTS,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
WEIGHTS_NAME,
ModelOutput,
PushToHubMixin,
cached_path,
hf_bucket_url,
is_offline_mode,
is_remote_url,
replace_return_docstrings,
)
from transformers.generation_utils import GenerationMixin
from transformers.integrations import deepspeed_config, is_deepspeed_zero3_enabled
from transformers.utils import logging
logger = logging.get_logger(__name__)
_init_weights = True
@contextmanager
def no_init_weights(_enable=True):
"""
Context manager to globally disable weight initialization to speed up loading large models.
    TODO(Patrick): Delete safety argument `_enable=True` at next major version.
"""
global _init_weights
if _enable:
_init_weights = False
try:
yield
finally:
_init_weights = True
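def _example_no_init_weights(model_cls, config):
    # Editor's sketch, not part of the original file: `model_cls` and `config` are
    # caller-supplied; weights are left uninitialized inside the context and are
    # expected to be overwritten immediately afterwards by a checkpoint load.
    with no_init_weights():
        return model_cls(config)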
try:
from torch.nn import Identity
except ImportError:
# Older PyTorch compatibility
class Identity(nn.Module):
r"""A placeholder identity operator that is argument-insensitive."""
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, input):
return input
def find_pruneable_heads_and_indices(
heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int]
) -> Tuple[Set[int], torch.LongTensor]:
"""
Finds the heads and their indices taking :obj:`already_pruned_heads` into account.
Args:
heads (:obj:`List[int]`): List of the indices of heads to prune.
n_heads (:obj:`int`): The number of heads in the model.
head_size (:obj:`int`): The size of each head.
already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.
Returns:
:obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
"""
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads # Convert to set and remove already pruned heads
for head in heads:
# Compute how many pruned heads are before the head and move the index accordingly
head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: torch.LongTensor = torch.arange(len(mask))[mask].long()
return heads, index
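def _example_prune_indices():
    # Editor's sketch, not part of the original file: prune heads 1 and 3 out of 4
    # heads of size 2 when head 3 is already gone -- only head 1 remains to prune,
    # and `index` lists the 6 flat weight positions (out of 8) that survive.
    heads, index = find_pruneable_heads_and_indices(
        heads=[1, 3], n_heads=4, head_size=2, already_pruned_heads={3}
    )
    return heads, index  # ({1}, tensor([0, 1, 4, 5, 6, 7]))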
def get_parameter_device(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]):
try:
return next(parameter.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = parameter._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
def get_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]):
try:
return next(parameter.parameters()).dtype
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = parameter._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].dtype
class ModuleUtilsMixin:
"""
A few utilities for :obj:`torch.nn.Modules`, to be used as a mixin.
"""
@staticmethod
def _hook_rss_memory_pre_forward(module, *args, **kwargs):
try:
import psutil
        except ImportError:
raise ImportError(
"You need to install psutil (pip install psutil) to use memory tracing."
)
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_pre_forward = mem.rss
return None
@staticmethod
def _hook_rss_memory_post_forward(module, *args, **kwargs):
try:
import psutil
        except ImportError:
raise ImportError(
"You need to install psutil (pip install psutil) to use memory tracing."
)
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_post_forward = mem.rss
mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward
module.mem_rss_diff = mem_rss_diff + (
module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0
)
return None
def add_memory_hooks(self):
"""
Add a memory hook before and after each sub-module forward pass to record increase in memory consumption.
Increase in memory consumption is stored in a :obj:`mem_rss_diff` attribute for each module and can be reset to
zero with :obj:`model.reset_memory_hooks_state()`.
"""
for module in self.modules():
module.register_forward_pre_hook(self._hook_rss_memory_pre_forward)
module.register_forward_hook(self._hook_rss_memory_post_forward)
self.reset_memory_hooks_state()
def reset_memory_hooks_state(self):
"""
Reset the :obj:`mem_rss_diff` attribute of each module (see
:func:`~transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks`).
"""
for module in self.modules():
module.mem_rss_diff = 0
module.mem_rss_post_forward = 0
module.mem_rss_pre_forward = 0
@property
def device(self) -> device:
"""
:obj:`torch.device`: The device on which the module is (assuming that all the module parameters are on the same
device).
"""
return get_parameter_device(self)
@property
def dtype(self) -> dtype:
"""
:obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
"""
return get_parameter_dtype(self)
def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor:
"""
Invert an attention mask (e.g., switches 0. and 1.).
Args:
encoder_attention_mask (:obj:`torch.Tensor`): An attention mask.
Returns:
:obj:`torch.Tensor`: The inverted attention mask.
"""
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
encoder_extended_attention_mask = encoder_extended_attention_mask.to(
dtype=self.dtype
) # fp16 compatibility
if self.dtype == torch.float16:
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4
elif self.dtype == torch.float32:
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
else:
raise ValueError(
f"{self.dtype} not recognized. `dtype` should be set to either `torch.float32` or `torch.float16`"
)
return encoder_extended_attention_mask
def get_extended_attention_mask(
self, attention_mask: Tensor, input_shape: Tuple[int], device: device
) -> Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
            :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = (
seq_ids[None, None, :].repeat(batch_size, seq_length, 1)
<= seq_ids[None, :, None]
)
# in case past_key_values are used we need to add a prefix ones mask to the causal mask
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
causal_mask = torch.cat(
[
torch.ones(
(batch_size, seq_length, prefix_seq_len),
device=device,
dtype=causal_mask.dtype,
),
causal_mask,
],
axis=-1,
)
extended_attention_mask = (
causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
)
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(
dtype=self.dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
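    @staticmethod
    def _example_extended_mask(attention_mask: Tensor) -> Tensor:
        # Editor's sketch, not part of the original file: the encoder-side
        # transform above shown in isolation -- a [batch, seq] padding mask
        # becomes a [batch, 1, 1, seq] additive mask, 0.0 for kept tokens and
        # -10000.0 for masked ones, ready to be added to raw attention scores.
        extended = attention_mask[:, None, None, :].to(dtype=torch.float32)
        return (1.0 - extended) * -10000.0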
def get_head_mask(
self,
head_mask: Optional[Tensor],
num_hidden_layers: int,
is_attention_chunked: bool = False,
) -> Tensor:
"""
Prepare the head mask if needed.
Args:
head_mask (:obj:`torch.Tensor` with shape :obj:`[num_heads]` or :obj:`[num_hidden_layers x num_heads]`, `optional`):
The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
num_hidden_layers (:obj:`int`):
The number of hidden layers in the model.
is_attention_chunked: (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the attentions scores are computed by chunks or not.
Returns:
:obj:`torch.Tensor` with shape :obj:`[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or
list with :obj:`[None]` for each layer.
"""
if head_mask is not None:
head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
if is_attention_chunked is True:
head_mask = head_mask.unsqueeze(-1)
else:
head_mask = [None] * num_hidden_layers
return head_mask
def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
"""-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
head_mask = head_mask.to(dtype=self.dtype) # switch to float if need + fp16 compatibility
return head_mask
def num_parameters(
self, only_trainable: bool = False, exclude_embeddings: bool = False
) -> int:
"""
Get number of (optionally, trainable or non-embeddings) parameters in the module.
Args:
only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return only the number of trainable parameters
exclude_embeddings (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return only the number of non-embeddings parameters
Returns:
:obj:`int`: The number of parameters.
"""
def parameter_filter(x):
return (x.requires_grad or not only_trainable) and not (
isinstance(x, torch.nn.Embedding) and exclude_embeddings
)
params = (
filter(parameter_filter, self.parameters()) if only_trainable else self.parameters()
)
return sum(p.numel() for p in params)
def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int:
"""
Helper function to estimate the total number of tokens from the model inputs.
Args:
inputs (:obj:`dict`): The model inputs.
Returns:
:obj:`int`: The total number of tokens.
"""
token_inputs = [tensor for key, tensor in input_dict.items() if "input" in key]
if token_inputs:
return sum([token_input.numel() for token_input in token_inputs])
else:
warnings.warn(
"Could not estimate the number of tokens of the input, floating-point operations will not be computed"
)
return 0
def floating_point_ops(
self, input_dict: Dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool = True
) -> int:
"""
Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a
batch with this transformer model. Default approximation neglects the quadratic dependency on the number of
tokens (valid if :obj:`12 * d_model << sequence_length`) as laid out in `this paper
<https://arxiv.org/pdf/2001.08361.pdf>`__ section 2.1. Should be overridden for transformers with parameter
re-use e.g. Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths.
Args:
batch_size (:obj:`int`):
The batch size for the forward pass.
sequence_length (:obj:`int`):
The number of tokens in each line of the batch.
exclude_embeddings (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to count embedding and softmax operations.
Returns:
:obj:`int`: The number of floating-point operations.
"""
return (
6
* self.estimate_tokens(input_dict)
* self.num_parameters(exclude_embeddings=exclude_embeddings)
)
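    # Editor's note: a worked instance of the 6 * tokens * parameters estimate
    # above -- a 110e6-parameter model on 4 sequences of 512 tokens (2048 tokens)
    # counts as roughly 6 * 2048 * 110e6, i.e. about 1.35e12 floating-point operations.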
class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMixin):
r"""
Base class for all models.
:class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods
for loading, downloading and saving models as well as a few methods common to all models to:
* resize the input embeddings,
* prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of
:class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- **load_tf_weights** (:obj:`Callable`) -- A python `method` for loading a TensorFlow checkpoint in a PyTorch
model, taking as arguments:
- **model** (:class:`~transformers.PreTrainedModel`) -- An instance of the model on which to load the
TensorFlow checkpoint.
- **config** (:class:`~transformers.PreTrainedConfig`) -- An instance of the configuration associated to
the model.
- **path** (:obj:`str`) -- A path to the TensorFlow checkpoint.
- **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in
derived classes of the same architecture adding modules on top of the base model.
- **is_parallelizable** (:obj:`bool`) -- A flag indicating whether this model supports model parallelization.
"""
config_class = None
base_model_prefix = ""
# a list of re pattern of tensor names to ignore from the model when loading the model weights
# (and avoid unnecessary warnings).
_keys_to_ignore_on_load_missing = None
# a list of re pattern of tensor names to ignore from the weights when loading the model weights
# (and avoid unnecessary warnings).
_keys_to_ignore_on_load_unexpected = None
# a list of tensor names to ignore when saving the model (useful for keys that aren't
# trained, but which are deterministic)
_keys_to_ignore_on_save = None
is_parallelizable = False
@property
def dummy_inputs(self) -> Dict[str, torch.Tensor]:
"""
:obj:`Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network.
"""
return {"input_ids": torch.tensor(DUMMY_INPUTS)}
def __init__(self, config: PretrainedConfig, *inputs, **kwargs):
super().__init__()
if not isinstance(config, PretrainedConfig):
raise ValueError(
f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
"`PretrainedConfig`. To create a model from a pretrained model use "
f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
)
# Save config and origin of the pretrained weights if given in model
self.config = config
self.name_or_path = config.name_or_path
@property
def base_model(self) -> nn.Module:
"""
:obj:`torch.nn.Module`: The main body of the model.
"""
return getattr(self, self.base_model_prefix, self)
def get_input_embeddings(self) -> nn.Module:
"""
Returns the model's input embeddings.
Returns:
:obj:`nn.Module`: A torch module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
return base_model.get_input_embeddings()
else:
raise NotImplementedError
def set_input_embeddings(self, value: nn.Module):
"""
Set model's input embeddings.
Args:
value (:obj:`nn.Module`): A module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
base_model.set_input_embeddings(value)
else:
raise NotImplementedError
def get_output_embeddings(self) -> nn.Module:
"""
Returns the model's output embeddings.
Returns:
:obj:`nn.Module`: A torch module mapping hidden states to vocabulary.
"""
return None # Overwrite for models with output embeddings
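# Usage sketch (illustrative only): reading and replacing the input embeddings of a model.
# `model` stands for any concrete PreTrainedModel subclass instance.
#
#     old_emb = model.get_input_embeddings()                  # nn.Embedding(vocab_size, hidden_size)
#     new_emb = nn.Embedding(old_emb.num_embeddings, old_emb.embedding_dim)
#     new_emb.weight.data.copy_(old_emb.weight.data)
#     model.set_input_embeddings(new_emb)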
def tie_weights(self):
"""
Tie the weights between the input embeddings and the output embeddings.
If the :obj:`torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning
the weights instead.
"""
output_embeddings = self.get_output_embeddings()
if output_embeddings is not None and self.config.tie_word_embeddings:
self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
if self.config.is_encoder_decoder and self.config.tie_encoder_decoder:
if hasattr(self, self.base_model_prefix):
self = getattr(self, self.base_model_prefix)
self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix)
@staticmethod
def _tie_encoder_decoder_weights(
encoder: nn.Module, decoder: nn.Module, base_model_prefix: str
):
uninitialized_encoder_weights: List[str] = []
if decoder.__class__ != encoder.__class__:
logger.info(
f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder weights are correctly initialized."
)
def tie_encoder_to_decoder_recursively(
decoder_pointer: nn.Module,
encoder_pointer: nn.Module,
module_name: str,
uninitialized_encoder_weights: List[str],
depth=0,
):
assert isinstance(decoder_pointer, nn.Module) and isinstance(
encoder_pointer, nn.Module
), f"{decoder_pointer} and {encoder_pointer} have to be of type torch.nn.Module"
if hasattr(decoder_pointer, "weight"):
assert hasattr(encoder_pointer, "weight")
encoder_pointer.weight = decoder_pointer.weight
if hasattr(decoder_pointer, "bias"):
assert hasattr(encoder_pointer, "bias")
encoder_pointer.bias = decoder_pointer.bias
return
encoder_modules = encoder_pointer._modules
decoder_modules = decoder_pointer._modules
if len(decoder_modules) > 0:
assert (
len(encoder_modules) > 0
), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"
all_encoder_weights = set(
[module_name + "/" + sub_name for sub_name in encoder_modules.keys()]
)
encoder_layer_pos = 0
for name, module in decoder_modules.items():
if name.isdigit():
encoder_name = str(int(name) + encoder_layer_pos)
decoder_name = name
if not isinstance(
decoder_modules[decoder_name], type(encoder_modules[encoder_name])
) and len(encoder_modules) != len(decoder_modules):
# this can happen if the name corresponds to the position in a list module list of layers
# in this case the decoder has added a cross-attention that the encoder does not have
# thus skip this step and subtract one layer pos from encoder
encoder_layer_pos -= 1
continue
elif name not in encoder_modules:
continue
elif depth > 500:
raise ValueError(
"Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model."
)
else:
decoder_name = encoder_name = name
tie_encoder_to_decoder_recursively(
decoder_modules[decoder_name],
encoder_modules[encoder_name],
module_name + "/" + name,
uninitialized_encoder_weights,
depth=depth + 1,
)
all_encoder_weights.remove(module_name + "/" + encoder_name)
uninitialized_encoder_weights += list(all_encoder_weights)
# tie weights recursively
tie_encoder_to_decoder_recursively(
decoder, encoder, base_model_prefix, uninitialized_encoder_weights
)
if len(uninitialized_encoder_weights) > 0:
logger.warning(
f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}"
)
def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
"""Tie or clone module weights depending of whether we are using TorchScript or not"""
if self.config.torchscript:
output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
else:
output_embeddings.weight = input_embeddings.weight
if getattr(output_embeddings, "bias", None) is not None:
output_embeddings.bias.data = torch.nn.functional.pad(
output_embeddings.bias.data,
(
0,
output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],
),
"constant",
0,
)
if hasattr(output_embeddings, "out_features") and hasattr(
input_embeddings, "num_embeddings"
):
output_embeddings.out_features = input_embeddings.num_embeddings
def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> torch.nn.Embedding:
"""
Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`.
Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method.
Arguments:
new_num_tokens (:obj:`int`, `optional`):
The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`,
just returns a pointer to the input tokens :obj:`torch.nn.Embedding` module of the model without doing
anything.
Return:
:obj:`torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model.
"""
model_embeds = self._resize_token_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
# Update base model and current model config
self.config.vocab_size = new_num_tokens
self.vocab_size = new_num_tokens
# Tie weights again if needed
self.tie_weights()
return model_embeds
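# Usage sketch (illustrative only): growing the vocabulary after adding tokens to a matching
# tokenizer. `model` and `tokenizer` stand for a pretrained model/tokenizer pair.
#
#     tokenizer.add_tokens(["<new_token_1>", "<new_token_2>"])
#     model.resize_token_embeddings(len(tokenizer))           # appends newly initialized embedding rows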
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.get_input_embeddings()
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.set_input_embeddings(new_embeddings)
# if word embeddings are not tied, make sure that lm head is resized as well
if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings:
old_lm_head = self.get_output_embeddings()
new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens)
self.set_output_embeddings(new_lm_head)
return self.get_input_embeddings()
def _get_resized_embeddings(
self, old_embeddings: torch.nn.Embedding, new_num_tokens: Optional[int] = None
) -> torch.nn.Embedding:
"""
Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly
initialized vectors at the end. Reducing the size will remove vectors from the end
Args:
old_embeddings (:obj:`torch.nn.Embedding`):
Old embeddings to be resized.
new_num_tokens (:obj:`int`, `optional`):
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
:obj:`torch.nn.Embedding` module of the model without doing anything.
Return:
:obj:`torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if
:obj:`new_num_tokens` is :obj:`None`
"""
if new_num_tokens is None:
return old_embeddings
if is_deepspeed_zero3_enabled():
import deepspeed
with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=None):
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
else:
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
if old_num_tokens == new_num_tokens:
return old_embeddings
if not isinstance(old_embeddings, nn.Embedding):
raise TypeError(
    f"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}. "
    f"You should either use a different resize function or make sure that `old_embeddings` is an instance of {nn.Embedding}."
)
# Build new embeddings
new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim).to(
self.device, dtype=old_embeddings.weight.dtype
)
# initialize all new embeddings (in particular added tokens)
self._init_weights(new_embeddings)
# Copy token embeddings from the previous weights
# numbers of tokens to copy
n = min(old_num_tokens, new_num_tokens)
if is_deepspeed_zero3_enabled():
import deepspeed
with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=0):
if torch.distributed.get_rank() == 0:
new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]
else:
new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]
return new_embeddings
def _get_resized_lm_head(
self,
old_lm_head: torch.nn.Linear,
new_num_tokens: Optional[int] = None,
transposed: Optional[bool] = False,
) -> torch.nn.Linear:
"""
Build a resized Linear Module from a provided old Linear Module. Increasing the size will add newly initialized
vectors at the end. Reducing the size will remove vectors from the end
Args:
old_lm_head (:obj:`torch.nn.Linear`):
Old lm head linear layer to be resized.
new_num_tokens (:obj:`int`, `optional`):
New number of tokens in the linear matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
:obj:`torch.nn.Linear` module of the model without doing anything.
transposed (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether ``old_lm_head`` is transposed or not. If :obj:`True`, ``old_lm_head.size()`` is
``lm_head_dim, vocab_size``; otherwise it is ``vocab_size, lm_head_dim``.
Return:
:obj:`torch.nn.Linear`: Pointer to the resized Linear Module or the old Linear Module if
:obj:`new_num_tokens` is :obj:`None`
"""
if new_num_tokens is None:
return old_lm_head
old_num_tokens, old_lm_head_dim = (
old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
)
if old_num_tokens == new_num_tokens:
return old_lm_head
if not isinstance(old_lm_head, nn.Linear):
raise TypeError(
    f"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}. "
    f"You should either use a different resize function or make sure that `old_lm_head` is an instance of {nn.Linear}."
)
# Build new lm head
new_lm_head_shape = (
(old_lm_head_dim, new_num_tokens)
if not transposed
else (new_num_tokens, old_lm_head_dim)
)
has_new_lm_head_bias = old_lm_head.bias is not None
new_lm_head = nn.Linear(*new_lm_head_shape, bias=has_new_lm_head_bias).to(self.device)
# initialize new lm head (in particular added tokens)
self._init_weights(new_lm_head)
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
# Copy old lm head weights to new lm head
if not transposed:
new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[
:num_tokens_to_copy, :
]
else:
new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[
:, :num_tokens_to_copy
]
# Copy bias weights to new lm head
if has_new_lm_head_bias:
new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy]
return new_lm_head
def init_weights(self):
"""
If needed prunes and maybe initializes weights.
"""
# Prune heads if needed
if self.config.pruned_heads:
self.prune_heads(self.config.pruned_heads)
if _init_weights:
# Initialize weights
self.apply(self._init_weights)
# Tie weights should be skipped when not initializing all weights
# since from_pretrained(...) calls tie weights anyways
self.tie_weights()
def prune_heads(self, heads_to_prune: Dict[int, List[int]]):
"""
Prunes heads of the base model.
Arguments:
heads_to_prune (:obj:`Dict[int, List[int]]`):
Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of
heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads
0 and 2 on layer 1 and heads 2 and 3 on layer 2.
"""
# save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
for layer, heads in heads_to_prune.items():
union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
self.config.pruned_heads[layer] = list(
union_heads
) # Unfortunately we have to store it as list for JSON
self.base_model._prune_heads(heads_to_prune)
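# Usage sketch (illustrative only): pruning heads 0 and 2 on layer 1 and heads 2 and 3 on
# layer 2 of a model whose base model implements `_prune_heads`.
#
#     model.prune_heads({1: [0, 2], 2: [2, 3]})
#     print(model.config.pruned_heads)                        # {1: [0, 2], 2: [2, 3]}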
def save_pretrained(
self,
save_directory: Union[str, os.PathLike],
save_config: bool = True,
state_dict: Optional[dict] = None,
save_function: Callable = torch.save,
push_to_hub: bool = False,
**kwargs,
):
"""
Save a model and its configuration file to a directory, so that it can be re-loaded using the
:func:`~transformers.PreTrainedModel.from_pretrained` class method.
Arguments:
save_directory (:obj:`str` or :obj:`os.PathLike`):
Directory to which to save. Will be created if it doesn't exist.
save_config (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to save the config of the model. Useful in distributed training (e.g. on TPUs) when this
function needs to be called on all processes; in that case, set :obj:`save_config=True` only on the main
process to avoid race conditions.
state_dict (nested dictionary of :obj:`torch.Tensor`):
The state dictionary of the model to save. Will default to :obj:`self.state_dict()`, but can be used to
only save parts of the model or if special precautions need to be taken when recovering the state
dictionary of a model (like when using model parallelism).
save_function (:obj:`Callable`):
The function to use to save the state dictionary. Useful in distributed training (e.g. on TPUs) when
:obj:`torch.save` needs to be replaced with another method.
push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to push your model to the Hugging Face model hub after saving it.
kwargs:
Additional keyword arguments passed along to the
:meth:`~transformers.file_utils.PushToHubMixin.push_to_hub` method.
"""
if os.path.isfile(save_directory):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
return
os.makedirs(save_directory, exist_ok=True)
# Only save the model itself if we are using distributed training
model_to_save = unwrap_model(self)
# Attach architecture to the config
model_to_save.config.architectures = [model_to_save.__class__.__name__]
# Save the config
if save_config:
model_to_save.config.save_pretrained(save_directory)
# Save the model
if state_dict is None:
state_dict = model_to_save.state_dict()
# Handle the case where some state_dict keys shouldn't be saved
if self._keys_to_ignore_on_save is not None:
state_dict = {
k: v for k, v in state_dict.items() if k not in self._keys_to_ignore_on_save
}
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
save_function(state_dict, output_model_file)
logger.info(f"Model weights saved in {output_model_file}")
if push_to_hub:
saved_files = [output_model_file]
if save_config:
saved_files.append(os.path.join(save_directory, CONFIG_NAME))
url = self._push_to_hub(save_files=saved_files, **kwargs)
logger.info(f"Model pushed to the hub in this commit: {url}")
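# Usage sketch (illustrative only): saving a model to disk and reloading it. The directory
# name is arbitrary.
#
#     model.save_pretrained("./my_model_directory/")          # writes config.json and pytorch_model.bin
#     reloaded = model.__class__.from_pretrained("./my_model_directory/")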
@classmethod
def from_pretrained(
cls,
pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
*model_args,
**kwargs,
):
r"""
Instantiate a pretrained PyTorch model from a pre-trained model configuration.
The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated). To
train the model, you should first set it back in training mode with ``model.train()``.
The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come
pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
task.
The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those
weights are discarded.
Parameters:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`, `optional`):
Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `tensorflow index checkpoint file` (e.g., ``./tf_model/model.ckpt.index``). In
this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in
a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
- A path or url to a model folder containing a `flax checkpoint file` in `.msgpack` format (e.g.,
``./flax_model/`` containing ``flax_model.msgpack``). In this case, ``from_flax`` should be set
to :obj:`True`.
- :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
arguments ``config`` and ``state_dict``).
model_args (sequence of positional arguments, `optional`):
All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
config (:obj:`Union[PretrainedConfig, str, os.PathLike]`, `optional`):
Can be either:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`,
- a string or path valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`.
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the `model id` string of a pretrained
model).
- The model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
by supplying the save directory.
- The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
configuration JSON file named `config.json` is found in the directory.
state_dict (:obj:`Dict[str, torch.Tensor]`, `optional`):
A state dictionary to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own
weights. In this case though, you should check if using
:func:`~transformers.PreTrainedModel.save_pretrained` and
:func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir (:obj:`Union[str, os.PathLike]`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`):
Load the model weights from a TensorFlow checkpoint save file (see docstring of
``pretrained_model_name_or_path`` argument).
from_flax (:obj:`bool`, `optional`, defaults to :obj:`False`):
Load the model weights from a Flax checkpoint save file (see docstring of
``pretrained_model_name_or_path`` argument).
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to only look at local files (i.e., do not try to download the model).
use_auth_token (:obj:`str` or `bool`, `optional`):
The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
mirror(:obj:`str`, `optional`, defaults to :obj:`None`):
Mirror source to accelerate downloads in China. If you are from China and have an accessibility
problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
Please refer to the mirror site for more information.
_fast_init(:obj:`bool`, `optional`, defaults to :obj:`True`):
    Whether or not to use fast initialization, i.e. skip initializing the weights that will be
    overwritten by the loaded checkpoint.
.. warning::
One should only disable `_fast_init` to ensure backwards compatibility with
``transformers.__version__ < 4.6.0`` for seeded model initialization. This argument will be removed
at the next major version. See `pull request 11471
<https://github.com/huggingface/transformers/pull/11471>`__ for more information.
kwargs (remaining dictionary of keyword arguments, `optional`):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
:obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
.. note::
Passing :obj:`use_auth_token=True` is required when you want to use a private model.
.. note::
Activate the special `"offline-mode"
<https://huggingface.co/transformers/installation.html#offline-mode>`__ to use this method in a firewalled
environment.
Examples::
>>> from transformers import BertConfig, BertModel
>>> # Download model and configuration from huggingface.co and cache.
>>> model = BertModel.from_pretrained('bert-base-uncased')
>>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable).
>>> model = BertModel.from_pretrained('./test/saved_model/')
>>> # Update configuration during loading.
>>> model = BertModel.from_pretrained('bert-base-uncased', output_attentions=True)
>>> assert model.config.output_attentions == True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).
>>> config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
>>> model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
>>> # Loading from a Flax checkpoint file instead of a PyTorch model (slower)
>>> model = BertModel.from_pretrained('bert-base-uncased', from_flax=True)
"""
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
from_flax = kwargs.pop("from_flax", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
use_auth_token = kwargs.pop("use_auth_token", None)
revision = kwargs.pop("revision", None)
mirror = kwargs.pop("mirror", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
_fast_init = kwargs.pop("_fast_init", True)
user_agent = {
"file_type": "model",
"framework": "pytorch",
"from_auto_class": from_auto_class,
}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
revision=revision,
_from_auto=from_auto_class,
_from_pipeline=from_pipeline,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if os.path.isdir(pretrained_model_name_or_path):
if from_tf and os.path.isfile(
os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
):
# Load from a TF 1.0 checkpoint in priority if from_tf
archive_file = os.path.join(
pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index"
)
elif from_tf and os.path.isfile(
os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
):
# Load from a TF 2.0 checkpoint in priority if from_tf
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
elif from_flax and os.path.isfile(
os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)
):
# Load from a Flax checkpoint in priority if from_flax
archive_file = os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
raise EnvironmentError(
f"Error no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + '.index', FLAX_WEIGHTS_NAME]} found in "
f"directory {pretrained_model_name_or_path} or `from_tf` and `from_flax` set to False."
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(
pretrained_model_name_or_path
):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
if not from_tf:
raise ValueError(
f"We found a TensorFlow checkpoint at {pretrained_model_name_or_path + '.index'}, please set "
"from_tf to True to load from this checkpoint."
)
archive_file = pretrained_model_name_or_path + ".index"
else:
# set correct filename
if from_tf:
filename = TF2_WEIGHTS_NAME
elif from_flax:
filename = FLAX_WEIGHTS_NAME
else:
filename = WEIGHTS_NAME
archive_file = hf_bucket_url(
pretrained_model_name_or_path,
filename=filename,
revision=revision,
mirror=mirror,
)
try:
# Load from URL or cache if already cached
resolved_archive_file = cached_path(
archive_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
)
except EnvironmentError as err:
logger.error(err)
msg = (
    f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
    f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
    f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or {FLAX_WEIGHTS_NAME}.\n\n"
)
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
logger.info(f"loading weights file {archive_file}")
else:
logger.info(
f"loading weights file {archive_file} from cache at {resolved_archive_file}"
)
else:
resolved_archive_file = None
config.name_or_path = pretrained_model_name_or_path
# Instantiate model.
if is_deepspeed_zero3_enabled():
import deepspeed
logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
# this immediately partitions the model across all gpus, to avoid the overhead in time
# and memory copying it on CPU or each GPU first
with deepspeed.zero.Init(config=deepspeed_config()):
with no_init_weights(_enable=_fast_init):
model = cls(config, *model_args, **model_kwargs)
else:
with no_init_weights(_enable=_fast_init):
model = cls(config, *model_args, **model_kwargs)
if from_tf:
if resolved_archive_file.endswith(".index"):
# Load from a TensorFlow 1.X checkpoint - provided by original authors
model = cls.load_tf_weights(
model, config, resolved_archive_file[:-6]
) # Remove the '.index'
else:
# Load from our TensorFlow 2.0 checkpoints
try:
from .modeling_tf_pytorch_utils import load_tf2_checkpoint_in_pytorch_model
model = load_tf2_checkpoint_in_pytorch_model(
model, resolved_archive_file, allow_missing_keys=True
)
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
elif from_flax:
try:
from .modeling_flax_pytorch_utils import load_flax_checkpoint_in_pytorch_model
model = load_flax_checkpoint_in_pytorch_model(model, resolved_archive_file)
except ImportError:
logger.error(
"Loading a Flax model in PyTorch, requires both PyTorch and Flax to be installed. Please see "
"https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation instructions."
)
raise
else:
if state_dict is None:
try:
state_dict = torch.load(resolved_archive_file, map_location="cpu")
except Exception:
raise OSError(
    f"Unable to load weights from pytorch checkpoint file for '{pretrained_model_name_or_path}' "
    f"at '{resolved_archive_file}'. "
    "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True."
)
model, missing_keys, unexpected_keys, error_msgs = cls._load_state_dict_into_model(
model, state_dict, pretrained_model_name_or_path
)
# make sure token embedding weights are still tied if needed
model.tie_weights()
# Set model in evaluation mode to deactivate DropOut modules by default
model.eval()
if output_loading_info:
loading_info = {
"missing_keys": missing_keys,
"unexpected_keys": unexpected_keys,
"error_msgs": error_msgs,
}
return model, loading_info
return model
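# Usage sketch (illustrative only): inspecting which checkpoint keys were missing or unexpected
# while loading, via the `output_loading_info` flag documented above (assumes
# `from transformers import BertModel`).
#
#     model, loading_info = BertModel.from_pretrained("bert-base-uncased", output_loading_info=True)
#     print(loading_info["missing_keys"], loading_info["unexpected_keys"])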
@classmethod
def _load_state_dict_into_model(cls, model, state_dict, pretrained_model_name_or_path):
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
# Retrieve missing & unexpected_keys
expected_keys = list(model.state_dict().keys())
loaded_keys = list(state_dict.keys())
prefix = model.base_model_prefix
has_prefix_module = any(s.startswith(prefix) for s in loaded_keys)
expects_prefix_module = any(s.startswith(prefix) for s in expected_keys)
# key re-naming operations are never done on the keys
# that are loaded, but always on the keys of the newly initialized model
remove_prefix = not has_prefix_module and expects_prefix_module
add_prefix = has_prefix_module and not expects_prefix_module
if remove_prefix:
expected_keys = [
".".join(s.split(".")[1:]) if s.startswith(prefix) else s for s in expected_keys
]
elif add_prefix:
expected_keys = [".".join([prefix, s]) for s in expected_keys]
missing_keys = list(set(expected_keys) - set(loaded_keys))
unexpected_keys = list(set(loaded_keys) - set(expected_keys))
# Some models may have keys that are not in the state by design, removing them before needlessly warning
# the user.
if cls._keys_to_ignore_on_load_missing is not None:
for pat in cls._keys_to_ignore_on_load_missing:
missing_keys = [k for k in missing_keys if re.search(pat, k) is None]
if cls._keys_to_ignore_on_load_unexpected is not None:
for pat in cls._keys_to_ignore_on_load_unexpected:
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
# initialize modules whose weights are missing from the checkpoint
uninitialized_modules = model.retrieve_modules_from_names(
    missing_keys, add_prefix=add_prefix, remove_prefix=remove_prefix
)
for module in uninitialized_modules:
    model._init_weights(module)
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
error_msgs = []
# PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
# so we need to apply the function recursively.
def load(module: nn.Module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
args = (state_dict, prefix, local_metadata, True, [], [], error_msgs)
if is_deepspeed_zero3_enabled():
import deepspeed
# because zero3 puts placeholders in model params, this context
# manager gathers (unpartitions) the params of the current layer, then loads from
# the state dict and then re-partitions them again
with deepspeed.zero.GatheredParameters(
list(module.parameters(recurse=False)), modifier_rank=0
):
if torch.distributed.get_rank() == 0:
module._load_from_state_dict(*args)
else:
module._load_from_state_dict(*args)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
# Make sure we are able to load base models as well as derived models (with heads)
start_prefix = ""
model_to_load = model
if not hasattr(model, cls.base_model_prefix) and has_prefix_module:
start_prefix = cls.base_model_prefix + "."
if hasattr(model, cls.base_model_prefix) and not has_prefix_module:
model_to_load = getattr(model, cls.base_model_prefix)
load(model_to_load, prefix=start_prefix)
if len(unexpected_keys) > 0:
logger.warning(
f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when "
f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
)
else:
logger.info(
f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n"
)
if len(missing_keys) > 0:
logger.warning(
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
else:
logger.info(
f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
f"If your task is similar to the task the model of the checkpoint was trained on, "
f"you can already use {model.__class__.__name__} for predictions without further training."
)
if len(error_msgs) > 0:
error_msg = "\n\t".join(error_msgs)
raise RuntimeError(
f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}"
)
return model, missing_keys, unexpected_keys, error_msgs
def retrieve_modules_from_names(self, names, add_prefix=False, remove_prefix=False):
module_keys = set([".".join(key.split(".")[:-1]) for key in names])
# torch.nn.ParameterList is a special case where two parameter keywords
# are appended to the module name, *e.g.* bert.special_embeddings.0
module_keys = module_keys.union(
set([".".join(key.split(".")[:-2]) for key in names if key[-1].isdigit()])
)
retrieved_modules = []
# retrieve all modules that have at least one missing weight name
for name, module in self.named_modules():
if remove_prefix:
name = (
".".join(name.split(".")[1:])
if name.startswith(self.base_model_prefix)
else name
)
elif add_prefix:
name = (
".".join([self.base_model_prefix, name])
if len(name) > 0
else self.base_model_prefix
)
if name in module_keys:
retrieved_modules.append(module)
return retrieved_modules
class Conv1D(nn.Module):
"""
1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).
Basically works like a linear layer but the weights are transposed.
Args:
nf (:obj:`int`): The number of output features.
nx (:obj:`int`): The number of input features.
"""
def __init__(self, nf, nx):
super().__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = nn.Parameter(w)
self.bias = nn.Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
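# Usage sketch (illustrative only): Conv1D behaves like nn.Linear with a transposed weight;
# the shapes below are arbitrary.
#
#     layer = Conv1D(nf=3072, nx=768)                         # maps 768 input features to 3072 output features
#     x = torch.randn(2, 10, 768)
#     y = layer(x)                                            # shape (2, 10, 3072)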
class PoolerStartLogits(nn.Module):
"""
Compute SQuAD start logits from sequence hidden states.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model, will be used to grab the :obj:`hidden_size` of the model.
"""
def __init__(self, config: PretrainedConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, 1)
def forward(
self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None
) -> torch.FloatTensor:
"""
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
The final hidden states of the model.
p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the
token should be masked.
Returns:
:obj:`torch.FloatTensor`: The start logits for SQuAD.
"""
x = self.dense(hidden_states).squeeze(-1)
if p_mask is not None:
if get_parameter_dtype(self) == torch.float16:
x = x * (1 - p_mask) - 65500 * p_mask
else:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
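# Usage sketch (illustrative only): computing start logits for dummy hidden states. `config`
# stands for a PretrainedConfig-like object with `hidden_size` set.
#
#     pooler = PoolerStartLogits(config)
#     hidden = torch.randn(2, 384, config.hidden_size)
#     p_mask = torch.zeros(2, 384)                            # 1.0 marks positions to mask out
#     start_logits = pooler(hidden, p_mask=p_mask)            # shape (2, 384)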
class PoolerEndLogits(nn.Module):
"""
Compute SQuAD end logits from sequence hidden states.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the
:obj:`layer_norm_eps` to use.
"""
def __init__(self, config: PretrainedConfig):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dense_1 = nn.Linear(config.hidden_size, 1)
def forward(
self,
hidden_states: torch.FloatTensor,
start_states: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
p_mask: Optional[torch.FloatTensor] = None,
) -> torch.FloatTensor:
"""
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
The final hidden states of the model.
start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`):
The hidden states of the first tokens for the labeled span.
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
The position of the first token for the labeled span.
p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the
token should be masked.
.. note::
One of ``start_states`` or ``start_positions`` should not be :obj:`None`. If both are set,
``start_positions`` overrides ``start_states``.
Returns:
:obj:`torch.FloatTensor`: The end logits for SQuAD.
"""
assert (
    start_states is not None or start_positions is not None
), "One of start_states or start_positions should not be None"
if start_positions is not None:
slen, hsz = hidden_states.shape[-2:]
start_positions = start_positions[:, None, None].expand(
-1, -1, hsz
) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz)
start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz)
x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
x = self.activation(x)
x = self.LayerNorm(x)
x = self.dense_1(x).squeeze(-1)
if p_mask is not None:
if get_parameter_dtype(self) == torch.float16:
x = x * (1 - p_mask) - 65500 * p_mask
else:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
class PoolerAnswerClass(nn.Module):
"""
Compute SQuAD 2.0 answer class from classification and start tokens hidden states.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model, will be used to grab the :obj:`hidden_size` of the model.
"""
def __init__(self, config):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)
def forward(
self,
hidden_states: torch.FloatTensor,
start_states: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
cls_index: Optional[torch.LongTensor] = None,
) -> torch.FloatTensor:
"""
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
The final hidden states of the model.
start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`):
The hidden states of the first tokens for the labeled span.
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
The position of the first token for the labeled span.
cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token.
.. note::
One of ``start_states`` or ``start_positions`` should not be :obj:`None`. If both are set,
``start_positions`` overrides ``start_states``.
Returns:
:obj:`torch.FloatTensor`: The SQuAD 2.0 answer class.
"""
# No dependency on end_feature so that we can obtain one single `cls_logits` for each sample.
hsz = hidden_states.shape[-1]
assert (
    start_states is not None or start_positions is not None
), "One of start_states or start_positions should not be None"
if start_positions is not None:
start_positions = start_positions[:, None, None].expand(
-1, -1, hsz
) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions).squeeze(
-2
) # shape (bsz, hsz)
if cls_index is not None:
cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz)
else:
cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz)
x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
x = self.activation(x)
x = self.dense_1(x).squeeze(-1)
return x
@dataclass
class SquadHeadOutput(ModelOutput):
"""
Base class for outputs of question answering models using a :class:`~transformers.modeling_utils.SQuADHead`.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):
Classification loss as the sum of start token, end token (and is_impossible if provided) classification
losses.
start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top config.start_n_top start token possibilities (beam-search).
start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top config.start_n_top start token possibilities (beam-search).
end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities
(beam-search).
end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the ``is_impossible`` label of the answers.
"""
loss: Optional[torch.FloatTensor] = None
start_top_log_probs: Optional[torch.FloatTensor] = None
start_top_index: Optional[torch.LongTensor] = None
end_top_log_probs: Optional[torch.FloatTensor] = None
end_top_index: Optional[torch.LongTensor] = None
cls_logits: Optional[torch.FloatTensor] = None
class SQuADHead(nn.Module):
r"""
A SQuAD head inspired by XLNet.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the
:obj:`layer_norm_eps` to use.
"""
def __init__(self, config):
super().__init__()
self.start_n_top = config.start_n_top
self.end_n_top = config.end_n_top
self.start_logits = PoolerStartLogits(config)
self.end_logits = PoolerEndLogits(config)
self.answer_class = PoolerAnswerClass(config)
@replace_return_docstrings(output_type=SquadHeadOutput, config_class=PretrainedConfig)
def forward(
self,
hidden_states: torch.FloatTensor,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
cls_index: Optional[torch.LongTensor] = None,
is_impossible: Optional[torch.LongTensor] = None,
p_mask: Optional[torch.FloatTensor] = None,
return_dict: bool = False,
) -> Union[SquadHeadOutput, Tuple[torch.FloatTensor]]:
"""
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
Final hidden states of the model on the sequence tokens.
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Positions of the first token for the labeled span.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Positions of the last token for the labeled span.
cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token.
is_impossible (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Whether the question has a possible answer in the paragraph or not.
p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the
token should be masked.
return_dict (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
Returns:
"""
start_logits = self.start_logits(hidden_states, p_mask=p_mask)
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, let's remove the dimension added by batch splitting
for x in (start_positions, end_positions, cls_index, is_impossible):
if x is not None and x.dim() > 1:
x.squeeze_(-1)
# during training, compute the end logits based on the ground truth of the start position
end_logits = self.end_logits(
hidden_states, start_positions=start_positions, p_mask=p_mask
)
loss_fct = CrossEntropyLoss()
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if cls_index is not None and is_impossible is not None:
# Predict answerability from the representation of CLS and START
cls_logits = self.answer_class(
hidden_states, start_positions=start_positions, cls_index=cls_index
)
loss_fct_cls = nn.BCEWithLogitsLoss()
cls_loss = loss_fct_cls(cls_logits, is_impossible)
# note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
total_loss += cls_loss * 0.5
return SquadHeadOutput(loss=total_loss) if return_dict else (total_loss,)
else:
# during inference, compute the end logits based on beam search
bsz, slen, hsz = hidden_states.size()
start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
start_top_log_probs, start_top_index = torch.topk(
start_log_probs, self.start_n_top, dim=-1
) # shape (bsz, start_n_top)
start_top_index_exp = start_top_index.unsqueeze(-1).expand(
-1, -1, hsz
) # shape (bsz, start_n_top, hsz)
start_states = torch.gather(
hidden_states, -2, start_top_index_exp
) # shape (bsz, start_n_top, hsz)
start_states = start_states.unsqueeze(1).expand(
-1, slen, -1, -1
) # shape (bsz, slen, start_n_top, hsz)
hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
start_states
) # shape (bsz, slen, start_n_top, hsz)
p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
end_logits = self.end_logits(
hidden_states_expanded, start_states=start_states, p_mask=p_mask
)
end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
end_top_log_probs, end_top_index = torch.topk(
end_log_probs, self.end_n_top, dim=1
) # shape (bsz, end_n_top, start_n_top)
end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
cls_logits = self.answer_class(
hidden_states, start_states=start_states, cls_index=cls_index
)
if not return_dict:
return (
start_top_log_probs,
start_top_index,
end_top_log_probs,
end_top_index,
cls_logits,
)
else:
return SquadHeadOutput(
start_top_log_probs=start_top_log_probs,
start_top_index=start_top_index,
end_top_log_probs=end_top_log_probs,
end_top_index=end_top_index,
cls_logits=cls_logits,
)
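# Usage sketch (illustrative only): running the SQuAD head in inference mode on dummy hidden
# states. `config` stands for a config defining `hidden_size`, `layer_norm_eps`, `start_n_top`
# and `end_n_top`.
#
#     head = SQuADHead(config)
#     hidden = torch.randn(2, 384, config.hidden_size)
#     outputs = head(hidden, return_dict=True)                # beam-search style top-k start/end predictions
#     print(outputs.start_top_index.shape)                    # (2, config.start_n_top)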
class SequenceSummary(nn.Module):
r"""
Compute a single vector summary of a sequence hidden states.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
config class of your model for the default values it uses):
- **summary_type** (:obj:`str`) -- The method to use to make this summary. Accepted values are:
- :obj:`"last"` -- Take the last token hidden state (like XLNet)
- :obj:`"first"` -- Take the first token hidden state (like Bert)
- :obj:`"mean"` -- Take the mean of all tokens hidden states
- :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
- :obj:`"attn"` -- Not implemented now, use multi-head attention
- **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction.
- **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to
:obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`).
- **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the
output, another string or :obj:`None` will add no activation.
- **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and
activation.
- **summary_last_dropout** (:obj:`float`) -- Optional dropout probability after the projection and
activation.
"""
def __init__(self, config: PretrainedConfig):
super().__init__()
self.summary_type = getattr(config, "summary_type", "last")
if self.summary_type == "attn":
# We should use a standard multi-head attention module with absolute positional embedding for that.
# Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
# We can probably just use the multi-head attention module of PyTorch >=1.1.0
raise NotImplementedError
self.summary = Identity()
if hasattr(config, "summary_use_proj") and config.summary_use_proj:
if (
hasattr(config, "summary_proj_to_labels")
and config.summary_proj_to_labels
and config.num_labels > 0
):
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = nn.Linear(config.hidden_size, num_classes)
activation_string = getattr(config, "summary_activation", None)
self.activation: Callable = (
get_activation(activation_string) if activation_string else Identity()
)
self.first_dropout = Identity()
if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
self.first_dropout = nn.Dropout(config.summary_first_dropout)
self.last_dropout = Identity()
if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
self.last_dropout = nn.Dropout(config.summary_last_dropout)
def forward(
self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None
) -> torch.FloatTensor:
"""
Compute a single vector summary of a sequence hidden states.
Args:
hidden_states (:obj:`torch.FloatTensor` of shape :obj:`[batch_size, seq_len, hidden_size]`):
The hidden states of the last layer.
cls_index (:obj:`torch.LongTensor` of shape :obj:`[batch_size]` or :obj:`[batch_size, ...]` where ... are optional leading dimensions of :obj:`hidden_states`, `optional`):
Used if :obj:`summary_type == "cls_index"` and takes the last token of the sequence as classification
token.
Returns:
:obj:`torch.FloatTensor`: The summary of the sequence hidden states.
"""
if self.summary_type == "last":
output = hidden_states[:, -1]
elif self.summary_type == "first":
output = hidden_states[:, 0]
elif self.summary_type == "mean":
output = hidden_states.mean(dim=1)
elif self.summary_type == "cls_index":
if cls_index is None:
cls_index = torch.full_like(
hidden_states[..., :1, :],
hidden_states.shape[-2] - 1,
dtype=torch.long,
)
else:
cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
cls_index = cls_index.expand(
(-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),)
)
# shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
output = hidden_states.gather(-2, cls_index).squeeze(
-2
) # shape (bsz, XX, hidden_size)
elif self.summary_type == "attn":
raise NotImplementedError
output = self.first_dropout(output)
output = self.summary(output)
output = self.activation(output)
output = self.last_dropout(output)
return output
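# Usage sketch (illustrative only): pooling a sequence of hidden states into a single vector.
# `config` stands for a config with `summary_type` (e.g. "mean") and `hidden_size` set.
#
#     summary = SequenceSummary(config)
#     hidden = torch.randn(4, 32, config.hidden_size)
#     pooled = summary(hidden)                                # shape (4, hidden_size), or (4, num_labels) with a projection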
def unwrap_model(model: torch.nn.Module) -> torch.nn.Module:
"""
Recursively unwraps a model from potential containers (as used in distributed training).
Args:
model (:obj:`torch.nn.Module`): The model to unwrap.
"""
# since there could be multiple levels of wrapping, unwrap recursively
if hasattr(model, "module"):
return unwrap_model(model.module)
else:
return model
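# Usage sketch (illustrative only): recovering the underlying model from a wrapper such as
# torch.nn.DataParallel, which exposes it through a `.module` attribute.
#
#     wrapped = torch.nn.DataParallel(model)
#     assert unwrap_model(wrapped) is model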
def prune_linear_layer(
layer: torch.nn.Linear, index: torch.LongTensor, dim: int = 0
) -> torch.nn.Linear:
"""
Prune a linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`torch.nn.Linear`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(
layer.weight.device
)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
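# Hedged usage sketch (not part of the original module): keeping only output units
# 0 and 2 of a 4-output linear layer shrinks the weight matrix along dim 0.
#
#     layer = nn.Linear(8, 4)
#     index = torch.tensor([0, 2])
#     pruned = prune_linear_layer(layer, index, dim=0)
#     # pruned.weight.shape == (2, 8), pruned.bias.shape == (2,)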
def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D:
"""
    Prune a Conv1D layer to keep only entries in index. A Conv1D works as a Linear layer (see e.g. BERT) but the weights
are transposed.
Used to remove heads.
Args:
layer (:class:`~transformers.modeling_utils.Conv1D`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 1): The dimension on which to keep the indices.
Returns:
:class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if dim == 0:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
def prune_layer(
layer: Union[torch.nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None
) -> Union[torch.nn.Linear, Conv1D]:
"""
Prune a Conv1D or linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`Union[torch.nn.Linear, Conv1D]`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear` or :class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with
:obj:`requires_grad=True`.
"""
if isinstance(layer, nn.Linear):
return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
elif isinstance(layer, Conv1D):
return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
else:
raise ValueError(f"Can't prune layer of class {layer.__class__}")
def apply_chunking_to_forward(
forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors
) -> torch.Tensor:
"""
This function chunks the :obj:`input_tensors` into smaller input tensor parts of size :obj:`chunk_size` over the
dimension :obj:`chunk_dim`. It then applies a layer :obj:`forward_fn` to each chunk independently to save memory.
If the :obj:`forward_fn` is independent across the :obj:`chunk_dim` this function will yield the same result as
directly applying :obj:`forward_fn` to :obj:`input_tensors`.
Args:
forward_fn (:obj:`Callable[..., torch.Tensor]`):
The forward function of the model.
chunk_size (:obj:`int`):
The chunk size of a chunked tensor: :obj:`num_chunks = len(input_tensors[0]) / chunk_size`.
chunk_dim (:obj:`int`):
The dimension over which the :obj:`input_tensors` should be chunked.
input_tensors (:obj:`Tuple[torch.Tensor]`):
The input tensors of ``forward_fn`` which will be chunked
Returns:
        :obj:`torch.Tensor`: A tensor with the same shape as the one :obj:`forward_fn` would have given if applied directly.
Examples::
# rename the usual forward() fn to forward_chunk()
def forward_chunk(self, hidden_states):
hidden_states = self.decoder(hidden_states)
return hidden_states
# implement a chunked forward function
def forward(self, hidden_states):
return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
"""
assert len(input_tensors) > 0, f"{input_tensors} has to be a tuple/list of tensors"
tensor_shape = input_tensors[0].shape[chunk_dim]
assert all(
input_tensor.shape[chunk_dim] == tensor_shape for input_tensor in input_tensors
), "All input tenors have to be of the same shape"
    # inspect.signature exists since Python 3.5 and is a Python method -> no problem with backward compatibility
num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
if num_args_in_forward_chunk_fn != len(input_tensors):
raise ValueError(
f"forward_chunk_fn expects {num_args_in_forward_chunk_fn} arguments, but only {len(input_tensors)} input "
"tensors are given"
)
if chunk_size > 0:
if input_tensors[0].shape[chunk_dim] % chunk_size != 0:
raise ValueError(
f"The dimension to be chunked {input_tensors[0].shape[chunk_dim]} has to be a multiple of the chunk "
f"size {chunk_size}"
)
num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size
# chunk input tensor into tuples
input_tensors_chunks = tuple(
input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors
)
# apply forward fn to every tuple
output_chunks = tuple(
forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks)
)
# concatenate output at same dimension
return torch.cat(output_chunks, dim=chunk_dim)
return forward_fn(*input_tensors)
| 46.076056
| 197
| 0.621385
|
5cd22db48003f7443b23858a0621a32de463ca88
| 4,364
|
py
|
Python
|
deprecated/gen_soap_kmat_deprecated.py
|
BingqingCheng/ASAP
|
a92dd34eaa092dcbe46163e000ebd2ccee22f8ae
|
[
"MIT"
] | 74
|
2020-01-09T10:38:39.000Z
|
2022-03-04T15:09:05.000Z
|
deprecated/gen_soap_kmat_deprecated.py
|
FelixWodaczek/ASAP
|
d34a064cd7e409ad8b5ae0dec4f1c0a621717773
|
[
"MIT"
] | 31
|
2020-01-30T13:15:42.000Z
|
2022-03-03T05:42:51.000Z
|
deprecated/gen_soap_kmat_deprecated.py
|
FelixWodaczek/ASAP
|
d34a064cd7e409ad8b5ae0dec4f1c0a621717773
|
[
"MIT"
] | 14
|
2020-02-23T15:03:31.000Z
|
2022-03-04T15:04:04.000Z
|
#!/usr/bin/python3
"""
python3 gen_soap_kmat.py -fxyz *.xyz -fdict *.xyz --prefix $prefix
--rcut $rcut --n $nmax --l $lmax --g $g --periodic True/False --plot True/False
"""
import argparse
import matplotlib.pyplot as plt
import numpy as np
from ase.io import read
from dscribe.descriptors import SOAP
from dscribe.kernels import AverageKernel
from asaplib.io import str2bool
def main(fxyz, dictxyz, prefix, soap_rcut, soap_g, soap_n, soap_l, soap_periodic, matrix_plot):
"""
Generate the SOAP kernel matrix.
Parameters
----------
fxyz: string giving location of xyz file
dictxyz: string giving location of xyz file that is used as a dictionary
prefix: string giving the filename prefix
soap_rcut: float giving the cutoff radius, default value is 3.0
soap_g: float giving the atom width
soap_n: int giving the maximum radial label
soap_l: int giving the maximum angular label. Must be less than or equal to 9
    soap_periodic: bool indicating whether the system is periodic
    matrix_plot: bool indicating whether a plot of the kernel matrix
    is to be generated
"""
soap_periodic = bool(soap_periodic)
fframes = []
dictframes = []
# read frames
if fxyz != 'none':
fframes = read(fxyz, ':')
nfframes = len(fframes)
print("read xyz file:", fxyz, ", a total of", nfframes, "frames")
# read frames in the dictionary
if dictxyz != 'none':
dictframes = read(dictxyz, ':')
ndictframes = len(dictframes)
print("read xyz file used for a dictionary:", dictxyz, ", a total of",
ndictframes, "frames")
frames = dictframes + fframes
nframes = len(frames)
global_species = []
for frame in frames:
global_species.extend(frame.get_atomic_numbers())
if not soap_periodic:
frame.set_pbc([False, False, False])
global_species = np.unique(global_species)
print("a total of", nframes, "frames, with elements: ", global_species)
if nframes > 1:
# set up the soap descriptors
soap_desc = SOAP(species=global_species, rcut=soap_rcut, nmax=soap_n, lmax=soap_l,
sigma=soap_g, crossover=False, average=True, periodic=soap_periodic)
else:
# if only one frame we compute the kernel matrix (kmat) between the atomic environments
# within this frame
soap_desc = SOAP(species=global_species, rcut=soap_rcut, nmax=soap_n, lmax=soap_l,
sigma=soap_g, crossover=False, average=False, periodic=soap_periodic)
# compute soap fingerprints
fall = soap_desc.create(frames, n_jobs=8)
# compute kmat
fshape = np.shape(fall)
re = AverageKernel(metric="linear")
kNN = re.create(fall.reshape((fshape[0], 1, fshape[1])))
# save
np.savetxt(prefix + "-n" + str(soap_n) + "-l" + str(soap_l) + "-c" + str(soap_rcut) + "-g" + str(soap_g) + ".kmat",
kNN, fmt='%4.8f')
# plot
if matrix_plot:
plt.matshow(kNN)
plt.title('Kernel matrix: ' + prefix)
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-fxyz', type=str, required=True, help='Location of xyz file')
parser.add_argument('-fdict', type=str, default='none', help='Location of xyz file '
'that is used for a dictionary')
parser.add_argument('--prefix', type=str, default='ASAP', help='Filename prefix')
parser.add_argument('--rcut', type=float, default=3.0, help='Cutoff radius')
parser.add_argument('--n', type=int, default=6, help='Maximum radial label')
parser.add_argument('--l', type=int, default=6, help='Maximum angular label (<= 9)')
parser.add_argument('--g', type=float, default=0.5, help='Atom width')
parser.add_argument('--periodic', type=str2bool, nargs='?', const=True, default=True,
help='Is the system periodic (True/False)?')
parser.add_argument('--plot', type=str2bool, nargs='?', const=True, default=False,
help='Do you want to plot the kernel matrix (True/False)?')
args = parser.parse_args()
main(args.fxyz, args.fdict, args.prefix, args.rcut, args.g, args.n, args.l, args.periodic, args.plot)
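# Hedged usage sketch (not part of the original script): the same computation can also be
# driven programmatically; the file name 'frames.xyz' is an illustrative placeholder.
#
#     main(fxyz='frames.xyz', dictxyz='none', prefix='ASAP', soap_rcut=3.0,
#          soap_g=0.5, soap_n=6, soap_l=6, soap_periodic=True, matrix_plot=False)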
| 39.672727
| 119
| 0.64253
|
effccdfece600e5beffda080862898774b76c397
| 13,489
|
py
|
Python
|
pytorch_sound/trainer.py
|
lunarbridge/pytorch_sound
|
61270221d85fefd5aee4f015caf9a0375575019e
|
[
"BSD-2-Clause"
] | null | null | null |
pytorch_sound/trainer.py
|
lunarbridge/pytorch_sound
|
61270221d85fefd5aee4f015caf9a0375575019e
|
[
"BSD-2-Clause"
] | null | null | null |
pytorch_sound/trainer.py
|
lunarbridge/pytorch_sound
|
61270221d85fefd5aee4f015caf9a0375575019e
|
[
"BSD-2-Clause"
] | null | null | null |
import abc
import glob
import os
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
import torch
import enum
from pathlib import Path
from typing import Tuple, Dict, Any
from tensorboardX import SummaryWriter
from collections import defaultdict
from pytorch_sound.settings import SAMPLE_RATE
from pytorch_sound.utils.commons import get_loadable_checkpoint, log
from pytorch_sound.utils.plots import imshow_to_buf, plot_to_buf
from pytorch_sound.utils.tensor import to_device, to_numpy
# switch matplotlib backend
plt.switch_backend('Agg')
class LogType(enum.Enum):
SCALAR: int = 1
IMAGE: int = 2
ENG: int = 3
AUDIO: int = 4
PLOT: int = 5
TEXT: int = 6
class Trainer:
"""
Generalized training helper class.
    This class removes the repetitive boilerplate of a general training pipeline.
    Most models are trained with a very similar loop; what usually differs is the
    forward/loss computation.
    So, to adapt this class to a new case, an engineer only needs to override the forward function.
Args:
model: a main model to be saved and to be forwarded
optimizer: optimizer module
train_dataset: dataset on train phase
valid_dataset: dataset on validation phase
max_step: maximum iteration step
valid_max_step: maximum iteration steps on each validation time.
save_interval: save and validate interval (in iteration)
log_interval: log interval (in iteration)
save_dir: base directory to save checkpoints and logs
save_prefix: a prefix to categorize each experiment
grad_clip: scalars to clamp gradients
grad_norm: maximum norm of gradients to be clipped
pretrained_path: specific file path of checkpoint
sr: sampling rate
scheduler: learning rate scheduler
Examples::
class MyTrainer(Trainer):
def forward(self, input: torch.tensor, target: torch.tensor, is_logging: bool):
# forward model
out = self.model(input)
# calc your own loss
loss = calc_loss(out, target)
# build meta for logging
meta = {
'loss': (loss.item(), LogType.SCALAR),
'out': (out[0], LogType.PLOT)
}
return loss, meta
"""
def __init__(self, model: nn.Module, optimizer: torch.optim.Optimizer,
train_dataset, valid_dataset,
max_step: int, valid_max_step: int, save_interval: int, log_interval: int,
save_dir: str, save_prefix: str = 'save',
grad_clip: float = 0.0, grad_norm: float = 0.0,
pretrained_path: str = None, sr: int = None, scheduler: torch.optim.lr_scheduler._LRScheduler = None,
seed: int = None):
# save project info
self.pretrained_path = pretrained_path
# model
self.model = model
self.optimizer = optimizer
self.scheduler = scheduler
# log how many parameters in the model
n_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
log('Model {} was loaded. Total {} params.'.format(self.model.__class__.__name__, n_params))
# adopt repeating function on datasets
self.train_dataset = self.repeat(train_dataset)
self.valid_dataset = self.repeat(valid_dataset)
# save parameters
self.step = 0
if sr:
self.sr = sr
else:
self.sr = SAMPLE_RATE
self.max_step = max_step
self.save_interval = save_interval
self.log_interval = log_interval
self.save_dir = save_dir
self.save_prefix = save_prefix
self.grad_clip = grad_clip
self.grad_norm = grad_norm
self.valid_max_step = valid_max_step
# make dirs
self.log_dir = os.path.join(save_dir, 'logs', self.save_prefix)
self.model_dir = os.path.join(save_dir, 'models')
os.makedirs(self.model_dir, exist_ok=True)
os.makedirs(self.log_dir, exist_ok=True)
self.writer = SummaryWriter(log_dir=self.log_dir, flush_secs=10)
# load previous checkpoint
self.load()
# set seed
self.seed = seed
if not self.seed:
self.seed = np.random.randint(np.iinfo(np.int32).max)
np.random.seed(self.seed)
torch.manual_seed(self.seed)
torch.cuda.manual_seed(self.seed)
# load pretrained model
# if self.step == 0 and pretrained_path:
# self.load_pretrained_model()
# valid loss
self.best_valid_loss = np.finfo(np.float32).max
self.cur_best_valid_loss = self.best_valid_loss
self.save_valid_loss = np.finfo(np.float32).max
@abc.abstractmethod
def forward(self, *inputs, is_logging: bool = False) -> Tuple[torch.Tensor, Dict]:
"""
:param inputs: Loaded Data Points from Speech Loader
:param is_logging: log or not
:return: Loss Tensor, Log Dictionary
"""
        raise NotImplementedError
def run(self) -> float:
try:
# training loop
for i in range(self.step + 1, self.max_step + 1):
# update step
self.step = i
# logging
if i % self.save_interval == 1:
log('------------- TRAIN step : %d -------------' % i)
# do training step
self.model.train()
self.train(i)
# save model
if i % self.save_interval == 0:
log('------------- VALID step : %d -------------' % i)
# valid
self.model.eval()
self.validate(i)
# save model checkpoint file
self.save(i)
except KeyboardInterrupt:
log('Train is canceled !!')
return self.best_valid_loss
def clip_grad(self):
if self.grad_clip:
for p in self.model.parameters():
if p.grad is not None:
p.grad = p.grad.clamp(-self.grad_clip, self.grad_clip)
if self.grad_norm:
torch.nn.utils.clip_grad_norm_([p for p in self.model.parameters() if p.requires_grad],
self.grad_norm)
def train(self, step: int) -> torch.Tensor:
# update model
self.optimizer.zero_grad()
# flag for logging
log_flag = step % self.log_interval == 0
# forward model
loss, meta = self.forward(*to_device(next(self.train_dataset)), is_logging=log_flag)
# check loss nan
if loss != loss:
            log('NaN loss occurred at step {}; skipping update'.format(step))
return
loss.backward()
self.clip_grad()
self.optimizer.step()
if self.scheduler is not None:
self.scheduler.step()
# logging
if log_flag:
# console logging
self.console_log('train', meta, step)
try:
# tensorboard logging
self.tensorboard_log('train', meta, step)
except OverflowError:
pass
def validate(self, step: int):
loss = 0.
count = 0
stat = defaultdict(float)
for i in range(self.valid_max_step):
# forward model
with torch.no_grad():
batch_loss, meta = self.forward(*to_device(next(self.valid_dataset)), is_logging=True)
loss += batch_loss
for key, (value, log_type) in meta.items():
if log_type == LogType.SCALAR:
stat[key] += value
if i % self.log_interval == 0 or i == self.valid_max_step - 1:
self.console_log('valid', meta, i + 1)
# averaging stat
loss /= self.valid_max_step
for key in stat.keys():
if key == 'loss':
continue
stat[key] = stat[key] / self.valid_max_step
stat['loss'] = loss
# update best valid loss
if loss < self.best_valid_loss:
self.best_valid_loss = loss
# console logging of total stat
msg = 'step {} / total stat'.format(step)
for key, value in sorted(stat.items()):
msg += '\t{}: {:.6f}'.format(key, value)
log(msg)
# tensor board logging of scalar stat
for key, value in stat.items():
self.writer.add_scalar('valid/{}'.format(key), value, global_step=step)
@property
def save_name(self):
if isinstance(self.model, nn.parallel.DataParallel):
module = self.model.module
else:
module = self.model
return self.save_prefix + '/' + module.__class__.__name__
def load(self, load_optim: bool = True):
load_target_path = None
if self.pretrained_path:
log(f'load checkpoint from "{self.pretrained_path}"')
if not Path(self.pretrained_path).exists():
                raise FileNotFoundError(f'pretrained path "{self.pretrained_path}" does not exist.')
load_target_path = self.pretrained_path
else:
# make name
save_name = self.save_name
# save path
save_path = os.path.join(self.model_dir, save_name)
log(f'load latest checkpoint from model save path "{save_path}".')
# get latest file
check_files = glob.glob(os.path.join(save_path, '*'))
if check_files:
# load latest state dict
load_target_path = max(check_files, key=os.path.getctime)
if load_target_path:
state_dict = torch.load(load_target_path)
if 'seed' in state_dict:
self.seed = state_dict['seed']
# load model
if isinstance(self.model, nn.DataParallel):
self.model.module.load_state_dict(get_loadable_checkpoint(state_dict['model']))
else:
self.model.load_state_dict(get_loadable_checkpoint(state_dict['model']))
if load_optim:
self.optimizer.load_state_dict(state_dict['optim'])
if self.scheduler is not None:
self.scheduler.load_state_dict(state_dict['scheduler'])
self.step = state_dict['step']
log('checkpoint \'{}\' is loaded. previous step={}'.format(load_target_path, self.step))
else:
            log('No checkpoint found in "{}" and no pretrained path is defined. Skipped loading the network.'.format(save_path))
def save(self, step: int):
# state dict
state_dict = get_loadable_checkpoint(self.model.state_dict())
# train
state_dict = {
'step': step,
'model': state_dict,
'optim': self.optimizer.state_dict(),
'pretrained_step': step,
'seed': self.seed
}
if self.scheduler is not None:
state_dict.update({
'scheduler': self.scheduler.state_dict()
})
# save for training
save_name = self.save_name
save_path = os.path.join(self.model_dir, save_name)
os.makedirs(save_path, exist_ok=True)
torch.save(state_dict, os.path.join(save_path, 'step_{:06d}.chkpt'.format(step)))
# save best
if self.best_valid_loss != self.cur_best_valid_loss:
save_path = os.path.join(self.model_dir, save_name + '.best.chkpt')
torch.save(state_dict, save_path)
self.cur_best_valid_loss = self.best_valid_loss
# logging
log('step %d / saved model.' % step)
def load_pretrained_model(self):
assert os.path.exists(self.pretrained_path), 'You must define pretrained path!'
self.model.load_state_dict(get_loadable_checkpoint(torch.load(self.pretrained_path)['model']))
def console_log(self, tag: str, meta: Dict[str, Any], step: int):
# console logging
msg = '{}\t{:06d} it'.format(tag, step)
for key, (value, log_type) in sorted(meta.items()):
if log_type == LogType.SCALAR:
msg += '\t{}: {:.6f}'.format(key, value)
log(msg)
def tensorboard_log(self, tag: str, meta: Dict[str, Any], step: int):
for key, (value, log_type) in meta.items():
if log_type != LogType.SCALAR and type(value) == torch.Tensor:
value = to_numpy(value)
if log_type == LogType.IMAGE:
self.writer.add_image('{}/{}'.format(tag, key), imshow_to_buf(value), global_step=step)
elif log_type == LogType.AUDIO:
self.writer.add_audio('{}/{}'.format(tag, key), value, global_step=step, sample_rate=self.sr)
elif log_type == LogType.SCALAR:
self.writer.add_scalar('{}/{}'.format(tag, key), value, global_step=step)
elif log_type == LogType.PLOT:
self.writer.add_image('{}/{}'.format(tag, key), plot_to_buf(value), global_step=step)
elif log_type == LogType.TEXT:
self.writer.add_text('{}/{}'.format(tag, key), value, global_step=step)
@staticmethod
def repeat(iterable):
while True:
for x in iterable:
yield x
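# Hedged usage sketch (not part of the original module): a concrete subclass such as the
# MyTrainer shown in the class docstring would typically be driven like this; the model,
# optimizer and the two datasets are placeholders supplied by the caller.
#
#     trainer = MyTrainer(model, optimizer, train_dataset, valid_dataset,
#                         max_step=10000, valid_max_step=50, save_interval=1000,
#                         log_interval=100, save_dir='out', save_prefix='exp1')
#     best_valid_loss = trainer.run()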
| 35.311518
| 123
| 0.581956
|
090cf28077c12f4fe796d3972039330f125d63d6
| 12,490
|
py
|
Python
|
filebeat/tests/system/test_modules.py
|
ByteInternet/beats
|
fe23344906fbd4f0db3142738071225ae7c64644
|
[
"Apache-2.0"
] | 4
|
2018-05-28T00:45:15.000Z
|
2022-01-10T16:36:40.000Z
|
filebeat/tests/system/test_modules.py
|
ByteInternet/beats
|
fe23344906fbd4f0db3142738071225ae7c64644
|
[
"Apache-2.0"
] | null | null | null |
filebeat/tests/system/test_modules.py
|
ByteInternet/beats
|
fe23344906fbd4f0db3142738071225ae7c64644
|
[
"Apache-2.0"
] | 3
|
2019-10-29T11:33:17.000Z
|
2020-10-16T09:11:00.000Z
|
from filebeat import BaseTest
from beat.beat import INTEGRATION_TESTS
import os
import unittest
import glob
import shutil
import subprocess
from elasticsearch import Elasticsearch
import json
import logging
from parameterized import parameterized
def load_fileset_test_cases():
"""
Creates a list of all modules, filesets and testfiles inside for testing.
To execute tests for only 1 module, set the env variable TESTING_FILEBEAT_MODULES
to the specific module name or a , separated lists of modules.
"""
current_dir = os.path.dirname(os.path.abspath(__file__))
modules_dir = os.path.join(current_dir, "..", "..", "module")
modules = os.getenv("TESTING_FILEBEAT_MODULES")
if modules:
modules = modules.split(",")
else:
modules = os.listdir(modules_dir)
test_cases = []
for module in modules:
path = os.path.join(modules_dir, module)
if not os.path.isdir(path):
continue
for fileset in os.listdir(path):
if not os.path.isdir(os.path.join(path, fileset)):
continue
if not os.path.isfile(os.path.join(path, fileset, "manifest.yml")):
continue
test_files = glob.glob(os.path.join(modules_dir, module,
fileset, "test", "*.log"))
for test_file in test_files:
test_cases.append([module, fileset, test_file])
return test_cases
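# Hedged usage note (not part of the original test suite): to restrict collection to a
# subset of modules, set the environment variable before running the tests; the module
# names and the test-runner invocation below are illustrative.
#
#     TESTING_FILEBEAT_MODULES=nginx,system INTEGRATION_TESTS=1 nosetests test_modules.py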
class Test(BaseTest):
def init(self):
self.elasticsearch_url = self.get_elasticsearch_url()
self.kibana_url = self.get_kibana_url()
print("Using elasticsearch: {}".format(self.elasticsearch_url))
self.es = Elasticsearch([self.elasticsearch_url])
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("elasticsearch").setLevel(logging.ERROR)
self.modules_path = os.path.abspath(self.working_dir +
"/../../../../module")
self.kibana_path = os.path.abspath(self.working_dir +
"/../../../../_meta/kibana")
self.filebeat = os.path.abspath(self.working_dir +
"/../../../../filebeat.test")
self.index_name = "test-filebeat-modules"
@parameterized.expand(load_fileset_test_cases)
@unittest.skipIf(not INTEGRATION_TESTS or
os.getenv("TESTING_ENVIRONMENT") == "2x",
"integration test not available on 2.x")
def test_fileset_file(self, module, fileset, test_file):
self.init()
# generate a minimal configuration
cfgfile = os.path.join(self.working_dir, "filebeat.yml")
self.render_config_template(
template_name="filebeat_modules",
output=cfgfile,
index_name=self.index_name,
elasticsearch_url=self.elasticsearch_url
)
self.run_on_file(
module=module,
fileset=fileset,
test_file=test_file,
cfgfile=cfgfile)
def _test_expected_events(self, module, test_file, res, objects):
with open(test_file + "-expected.json", "r") as f:
expected = json.load(f)
if len(expected) > len(objects):
res = self.es.search(index=self.index_name,
body={"query": {"match_all": {}},
"size": len(expected)})
objects = [o["_source"] for o in res["hits"]["hits"]]
assert len(expected) == res['hits']['total'], "expected {} but got {}".format(
len(expected), res['hits']['total'])
for ev in expected:
found = False
for obj in objects:
if ev["_source"][module] == obj[module]:
found = True
break
assert found, "The following expected object was not found:\n {}\nSearched in: \n{}".format(
ev["_source"][module], objects)
def run_on_file(self, module, fileset, test_file, cfgfile):
print("Testing {}/{} on {}".format(module, fileset, test_file))
try:
self.es.indices.delete(index=self.index_name)
except:
pass
self.wait_until(lambda: not self.es.indices.exists(self.index_name))
cmd = [
self.filebeat, "-systemTest",
"-e", "-d", "*", "-once",
"-c", cfgfile,
"-modules={}".format(module),
"-M", "{module}.*.enabled=false".format(module=module),
"-M", "{module}.{fileset}.enabled=true".format(module=module, fileset=fileset),
"-M", "{module}.{fileset}.var.paths=[{test_file}]".format(
module=module, fileset=fileset, test_file=test_file),
"-M", "*.*.input.close_eof=true",
]
output_path = os.path.join(self.working_dir)
output = open(os.path.join(output_path, "output.log"), "ab")
output.write(" ".join(cmd) + "\n")
subprocess.Popen(cmd,
stdin=None,
stdout=output,
stderr=subprocess.STDOUT,
bufsize=0).wait()
# Make sure index exists
self.wait_until(lambda: self.es.indices.exists(self.index_name))
self.es.indices.refresh(index=self.index_name)
res = self.es.search(index=self.index_name,
body={"query": {"match_all": {}}})
objects = [o["_source"] for o in res["hits"]["hits"]]
assert len(objects) > 0
for obj in objects:
assert obj["fileset"]["module"] == module, "expected fileset.module={} but got {}".format(
module, obj["fileset"]["module"])
assert "error" not in obj, "not error expected but got: {}".format(obj)
if (module == "auditd" and fileset == "log") \
or (module == "osquery" and fileset == "result"):
# There are dynamic fields that are not documented.
pass
else:
self.assert_fields_are_documented(obj)
if os.path.exists(test_file + "-expected.json"):
self._test_expected_events(module, test_file, res, objects)
@unittest.skipIf(not INTEGRATION_TESTS or
os.getenv("TESTING_ENVIRONMENT") == "2x",
"integration test not available on 2.x")
def test_input_pipeline_config(self):
"""
Tests that the pipeline configured in the input overwrites
the one from the output.
"""
self.init()
index_name = "filebeat-test-input"
try:
self.es.indices.delete(index=index_name)
except:
pass
self.wait_until(lambda: not self.es.indices.exists(index_name))
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
elasticsearch=dict(
host=self.elasticsearch_url,
pipeline="estest",
index=index_name),
pipeline="test",
setup_template_name=index_name,
setup_template_pattern=index_name + "*",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
with open(testfile, 'a') as file:
file.write("Hello World1\n")
# put pipeline
self.es.transport.perform_request("PUT", "/_ingest/pipeline/test",
body={
"processors": [{
"set": {
"field": "x-pipeline",
"value": "test-pipeline",
}
}]})
filebeat = self.start_beat()
# Wait until the event is in ES
self.wait_until(lambda: self.es.indices.exists(index_name))
def search_objects():
try:
self.es.indices.refresh(index=index_name)
res = self.es.search(index=index_name,
body={"query": {"match_all": {}}})
return [o["_source"] for o in res["hits"]["hits"]]
except:
return []
self.wait_until(lambda: len(search_objects()) > 0, max_timeout=20)
filebeat.check_kill_and_wait()
objects = search_objects()
assert len(objects) == 1
o = objects[0]
assert o["x-pipeline"] == "test-pipeline"
@unittest.skipIf(not INTEGRATION_TESTS or
os.getenv("TESTING_ENVIRONMENT") == "2x",
"integration test not available on 2.x")
def test_ml_setup(self):
""" Test ML are installed in all possible ways """
for setup_flag in (True, False):
for modules_flag in (True, False):
self._run_ml_test(setup_flag, modules_flag)
def _run_ml_test(self, setup_flag, modules_flag):
self.init()
# Clean any previous state
for df in self.es.transport.perform_request("GET", "/_xpack/ml/datafeeds/")["datafeeds"]:
if df["datafeed_id"] == 'filebeat-nginx-access-response_code':
self.es.transport.perform_request("DELETE", "/_xpack/ml/datafeeds/" + df["datafeed_id"])
for df in self.es.transport.perform_request("GET", "/_xpack/ml/anomaly_detectors/")["jobs"]:
if df["job_id"] == 'datafeed-filebeat-nginx-access-response_code':
self.es.transport.perform_request("DELETE", "/_xpack/ml/anomaly_detectors/" + df["job_id"])
shutil.rmtree(os.path.join(self.working_dir, "modules.d"), ignore_errors=True)
# generate a minimal configuration
cfgfile = os.path.join(self.working_dir, "filebeat.yml")
self.render_config_template(
template_name="filebeat_modules",
output=cfgfile,
index_name=self.index_name,
elasticsearch_url=self.elasticsearch_url,
kibana_url=self.kibana_url,
kibana_path=self.kibana_path)
if not modules_flag:
# Enable nginx
os.mkdir(os.path.join(self.working_dir, "modules.d"))
with open(os.path.join(self.working_dir, "modules.d/nginx.yml"), "wb") as nginx:
nginx.write("- module: nginx")
cmd = [
self.filebeat, "-systemTest",
"-e", "-d", "*",
"-c", cfgfile
]
if setup_flag:
cmd += ["--setup"]
else:
cmd += ["setup", "--machine-learning"]
if modules_flag:
cmd += ["--modules=nginx"]
output_path = os.path.join(self.working_dir, "output.log")
output = open(output_path, "ab")
output.write(" ".join(cmd) + "\n")
beat = subprocess.Popen(cmd,
stdin=None,
stdout=output,
stderr=output,
bufsize=0)
# Check result
self.wait_until(lambda: "filebeat-nginx-access-response_code" in
(df["job_id"] for df in self.es.transport.perform_request(
"GET", "/_xpack/ml/anomaly_detectors/")["jobs"]),
max_timeout=30)
self.wait_until(lambda: "datafeed-filebeat-nginx-access-response_code" in
(df["datafeed_id"] for df in self.es.transport.perform_request("GET", "/_xpack/ml/datafeeds/")["datafeeds"]))
beat.kill()
        # check that running setup again reports existing objects instead of failing
output = open(output_path, "ab")
output.write(" ".join(cmd) + "\n")
beat = subprocess.Popen(cmd,
stdin=None,
stdout=output,
stderr=output,
bufsize=0)
output = open(output_path, "r")
for obj in ["Datafeed", "Job", "Dashboard", "Search", "Visualization"]:
self.wait_log_contains("{obj} already exists".format(obj=obj),
logfile=output_path,
max_timeout=30)
beat.kill()
| 38.549383
| 133
| 0.53787
|
fc4b13778f3cf2b1da5356b54a3909eb06b975ef
| 4,318
|
py
|
Python
|
src/LossFunction.py
|
askarum/CS547project
|
73e8da0d4113f9ec3be374958da9dc165212d722
|
[
"MIT"
] | null | null | null |
src/LossFunction.py
|
askarum/CS547project
|
73e8da0d4113f9ec3be374958da9dc165212d722
|
[
"MIT"
] | 3
|
2021-05-15T03:15:45.000Z
|
2021-05-15T03:16:36.000Z
|
src/LossFunction.py
|
askarum/CS547project
|
73e8da0d4113f9ec3be374958da9dc165212d722
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn
import torch.nn.functional as _F
#Thanks to Askar for finding this.
#TODO: Make sure to cite the underlying paper in our writeup.
#https://pytorch.org/docs/stable/generated/torch.nn.TripletMarginLoss.html
LossFunction = torch.nn.TripletMarginLoss
class SphereNormLoss(torch.nn.modules.loss._Loss):
def __init__(self, r: float = 1.0, r2: float = 2.0, r_strength: float = 1.0,
size_average=None,
reduce=None, reduction: str = 'mean'):
super(SphereNormLoss, self).__init__(size_average, reduce, reduction)
self.r = r #must be positive
self.r2 = r2 #must be positive and greater than r
self.r_strength = r_strength #negative to keep in, positive to keep out
def forward(self, vectors: torch.Tensor) -> torch.Tensor:
#distance_inside will be negative if the vectors are outside the r-sphere
n = torch.norm(vectors,dim=1)
d_in = self.r - n
d_in = d_in * self.r_strength #Doing this first automatically handles which side it should be on.
d_out = n - self.r2
d_out = d_out * self.r_strength
x = torch.nn.functional.relu(d_in) + torch.nn.functional.relu(d_out)
if "mean" == self.reduction:
x = x.mean()
else:
            raise NotImplementedError("Can't do reduction {}".format(self.reduction))
return x
class SphereTML(torch.nn.modules.loss._Loss):
def __init__(self, margin: float = 1.0, r: float = 1.0, r_strength: float = 1.0,
p: float = 2., eps: float = 1e-6,
swap: bool = False, size_average=None,
reduce=None, reduction: str = 'mean'):
super(SphereTML, self).__init__(size_average, reduce, reduction)
self.margin = margin
self.r = r
self.tml = torch.nn.TripletMarginLoss(margin = self.margin, p=p,eps=eps,swap=swap,size_average=size_average,reduce=reduce,reduction=reduction)
        # pass by keyword so r_strength is not consumed by the r2 positional slot
        self.snl = SphereNormLoss(r=r, r_strength=r_strength, size_average=size_average, reduce=reduce, reduction=reduction)
def forward(self, anchor: torch.Tensor, positive: torch.Tensor, negative: torch.Tensor) -> torch.Tensor:
tml_loss = self.tml(anchor, positive, negative)
a_norm_loss = self.snl(anchor)
p_norm_loss = self.snl(positive)
n_norm_loss = self.snl(negative)
return tml_loss + a_norm_loss + p_norm_loss + n_norm_loss
class NormedTML(torch.nn.TripletMarginLoss):
def __init__(self, *args,**kwargs):
super(NormedTML, self).__init__(*args,**kwargs)
def forward(self, anchor: torch.Tensor, positive: torch.Tensor, negative: torch.Tensor) -> torch.Tensor:
a = torch.nn.functional.normalize(anchor)
p = torch.nn.functional.normalize(positive)
n = torch.nn.functional.normalize(negative)
return super(NormedTML, self).forward(a,p,n)
class TripletAccuracy(torch.nn.Module):
def __init__(self, *args,**kwargs):
super(TripletAccuracy,self).__init__()
self.pairwise = torch.nn.PairwiseDistance(p=2.0)
self.reduction = "sum"
def forward(self, anchor: torch.Tensor, positive: torch.Tensor, negative: torch.Tensor) -> torch.Tensor:
dist_q_p = self.pairwise(anchor, positive)
dist_q_n = self.pairwise(anchor, negative)
differences = torch.lt(dist_q_p, dist_q_n)
#TODO: add an option to use sigmoid and be differentiable.
return differences.sum()
def create_loss(name="default"):
#TODO: Handle additional arguments and pass them to the constructor
if name in [None, "default","TripletMarginLoss","torch.nn.TripletMarginLoss"]:
return torch.nn.TripletMarginLoss(margin=1.0)
elif name in ["sphere","sphere_tml"]:
return SphereTML(margin=1.0)
elif name in ["normed"]:
return NormedTML(margin=1.0)
elif name in ["cosine","triplet_cosine"]:
#Not available in 1.5.0!
return torch.nn.TripletMarginWithDistanceLoss(margin=1.0,distance_function=torch.nn.CosineSimilarity())
#TODO: Add options for other models as we implement them.
raise Exception("No or invalid loss requested : '{}' ".format(name))
| 42.333333
| 150
| 0.649375
|
0dda0edfa26992ab1b5a69e34dd4e8c2dd0e4cd2
| 517
|
py
|
Python
|
src/Pyro4/constants.py
|
wronglink/Pyro4
|
3f78dcb6dc163913b385f22b318b94c9806df839
|
[
"MIT"
] | null | null | null |
src/Pyro4/constants.py
|
wronglink/Pyro4
|
3f78dcb6dc163913b385f22b318b94c9806df839
|
[
"MIT"
] | null | null | null |
src/Pyro4/constants.py
|
wronglink/Pyro4
|
3f78dcb6dc163913b385f22b318b94c9806df839
|
[
"MIT"
] | null | null | null |
"""
Definitions of various hard coded constants.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
# Pyro version
VERSION = "4.35"
# standard object name for the Daemon object
DAEMON_NAME = "Pyro.Daemon"
# standard name for the Name server itself
NAMESERVER_NAME = "Pyro.NameServer"
# standard name for Flame server
FLAME_NAME = "Pyro.Flame"
# wire protocol version. Note that if this gets updated, Pyrolite might need an update too.
PROTOCOL_VERSION = 47
| 24.619048
| 92
| 0.727273
|
d13fd45d5c3c776e634ffd61d4e813dd5bf10b0a
| 260
|
py
|
Python
|
nexus/meta_api/query_extensionner/grammar/__init__.py
|
RobbiNespu/hyperboria
|
7db858386f1a20e8d49bc16f53bfd7f1e4d03f7e
|
[
"Unlicense"
] | 54
|
2021-01-07T03:02:36.000Z
|
2022-03-28T17:19:29.000Z
|
nexus/meta_api/query_extensionner/grammar/__init__.py
|
the-superpirate/hyperboria
|
74776166158d07b199677f9738862e5f1fa54367
|
[
"Unlicense"
] | 10
|
2021-01-08T17:38:59.000Z
|
2022-02-28T14:34:45.000Z
|
nexus/meta_api/query_extensionner/grammar/__init__.py
|
the-superpirate/hyperboria
|
74776166158d07b199677f9738862e5f1fa54367
|
[
"Unlicense"
] | 16
|
2020-12-28T18:31:44.000Z
|
2022-02-22T15:00:53.000Z
|
from .parser import parser
from .tree import OrOperation
from .tree_transformer import (
FieldResolver,
MorphyResolver,
UnknownOperationResolver,
)
__all__ = ['parser', 'FieldResolver', 'MorphyResolver', 'OrOperation', 'UnknownOperationResolver']
| 26
| 98
| 0.765385
|
3227d0139adc9bbb2497a66ac09b994ea1bde168
| 272
|
py
|
Python
|
unique_word_occurences.py
|
krishnakanth-G/python-works
|
1186c40da20ca94be3b84089ffcf8130be3448de
|
[
"MIT"
] | null | null | null |
unique_word_occurences.py
|
krishnakanth-G/python-works
|
1186c40da20ca94be3b84089ffcf8130be3448de
|
[
"MIT"
] | null | null | null |
unique_word_occurences.py
|
krishnakanth-G/python-works
|
1186c40da20ca94be3b84089ffcf8130be3448de
|
[
"MIT"
] | null | null | null |
string = "one two three one one two three"
words = string.split()
unique = []
for word in words:
if word not in unique:
unique.append(word)
for i in range(0,len(unique)):
    counts = words.count(unique[i])  # count whole-word occurrences, not substrings
print("('",unique[i],"',",counts,")",end =" ")
| 24.727273
| 50
| 0.610294
|
9ef21cc156b03201198a75956c6a75111fb9e90f
| 2,446
|
py
|
Python
|
gcloud/apigw/views/get_task_detail.py
|
wkma/bk-sops
|
8fb5609c0c4495c28d588fbafa9d9f5f2976929b
|
[
"Apache-2.0"
] | 881
|
2019-03-25T02:45:42.000Z
|
2022-03-30T09:10:49.000Z
|
gcloud/apigw/views/get_task_detail.py
|
wkma/bk-sops
|
8fb5609c0c4495c28d588fbafa9d9f5f2976929b
|
[
"Apache-2.0"
] | 3,303
|
2019-03-25T04:18:03.000Z
|
2022-03-31T11:52:03.000Z
|
gcloud/apigw/views/get_task_detail.py
|
wkma/bk-sops
|
8fb5609c0c4495c28d588fbafa9d9f5f2976929b
|
[
"Apache-2.0"
] | 395
|
2019-03-25T02:53:36.000Z
|
2022-03-31T08:37:28.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making ่้ฒธๆบไบPaaSๅนณๅฐ็คพๅบ็ (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from cachetools import TTLCache
from django.views.decorators.http import require_GET
from blueapps.account.decorators import login_exempt
from gcloud import err_code
from gcloud.apigw.decorators import mark_request_whether_is_trust
from gcloud.apigw.decorators import project_inject
from gcloud.apigw.utils import bucket_cached, BucketTTLCache, api_bucket_and_key
from gcloud.taskflow3.models import TaskFlowInstance
from gcloud.apigw.views.utils import logger
from gcloud.iam_auth.intercept import iam_intercept
from gcloud.iam_auth.view_interceptors.apigw import TaskViewInterceptor
from packages.bkoauth.decorators import apigw_required
@login_exempt
@require_GET
@apigw_required
@mark_request_whether_is_trust
@project_inject
@iam_intercept(TaskViewInterceptor())
@bucket_cached(BucketTTLCache(TTLCache, {"maxsize": 1024, "ttl": 60}), bucket_and_key_func=api_bucket_and_key)
def get_task_detail(request, task_id, project_id):
"""
    @summary: Get the task detail
@param request:
@param task_id:
@param project_id:
@return:
"""
project = request.project
try:
task = TaskFlowInstance.objects.get(id=task_id, project_id=project.id)
except TaskFlowInstance.DoesNotExist:
message = (
"[API] get_task_detail task[id={task_id}] "
"of project[project_id={project_id}, biz_id{biz_id}] does not exist".format(
task_id=task_id, project_id=project.id, biz_id=project.bk_biz_id
)
)
logger.exception(message)
return {"result": False, "message": message, "code": err_code.CONTENT_NOT_EXIST.code}
data = task.get_task_detail()
return {"result": True, "data": data, "code": err_code.SUCCESS.code}
| 42.172414
| 115
| 0.763696
|
d7b4c2dd4fe5034b95d4435cc084f46dc264e1a7
| 1,749
|
py
|
Python
|
test/viz/test_heatmap.py
|
khourhin/sequana
|
c56d4490b7c8edc8a0f63fd60578eb72ed64f1b5
|
[
"BSD-3-Clause"
] | 138
|
2016-07-13T06:24:45.000Z
|
2022-03-28T13:12:03.000Z
|
test/viz/test_heatmap.py
|
khourhin/sequana
|
c56d4490b7c8edc8a0f63fd60578eb72ed64f1b5
|
[
"BSD-3-Clause"
] | 655
|
2016-03-10T17:33:40.000Z
|
2022-03-30T16:10:45.000Z
|
test/viz/test_heatmap.py
|
khourhin/sequana
|
c56d4490b7c8edc8a0f63fd60578eb72ed64f1b5
|
[
"BSD-3-Clause"
] | 39
|
2016-11-04T11:40:58.000Z
|
2022-03-15T08:12:29.000Z
|
from sequana.viz import Heatmap
from sequana import sequana_data
def test_heatmap():
filename = sequana_data("test_heatmap.csv")
import pandas as pd
data = pd.read_csv(filename, skiprows=2, index_col=0)
h = Heatmap(data)
h.plot(cmap='hot')
    h.row_method = 'single'
    h.col_method = 'single'
# category_cols=[0,0,1,1],
# category_rows=[0,1,2,0,0,1,2,2,2,1])
def test_doc_example():
from sequana.viz import heatmap
df = heatmap.get_heatmap_df()
h = heatmap.Heatmap(df)
h.category_column['A'] = 1
h.category_column['B'] = 1
h.category_column['C'] = 2
h.category_column['D'] = 2
h.category_row[2] = 2
h.category_row[3] = 1
h.category_row[0] = 1
h.category_row[1] = 2
h.plot()
def test_methods_and_metrics():
from sequana.viz import heatmap
df = heatmap.get_heatmap_df()
h = heatmap.Heatmap(df, row_method="average", row_metric="jaccard",
column_metric="jaccard", column_method="average")
h.column_method = "average"
h.column_metric = "jaccard"
h.row_method = "average"
h.row_metric = "jaccard"
h.plot()
def test_misc():
from sequana.viz import heatmap
df = heatmap.get_heatmap_df()
h = heatmap.Heatmap(df)
h.plot(colorbar_position="top left")
h.plot(colorbar_position="right")
try:
h.plot(colorbar_position="left")
assert False
except:
assert True
h.plot(gradient_span="min_to_max_centered")
h.plot(gradient_span="only_max")
h.plot(gradient_span="only_min")
def test_others():
h = Heatmap(None, verbose=True)
h = Heatmap(None, verbose=False)
try:
h = Heatmap(1, verbose=True)
assert False
except:
assert True
| 25.720588
| 71
| 0.643796
|
a89fd375a2fe3143c53fd46cf21acd16ca5ecd55
| 3,874
|
py
|
Python
|
src/gui/icon.py
|
Epihaius/panda3dstudio
|
f5c62ca49617cae1aa5aa5b695200027da99e242
|
[
"BSD-3-Clause"
] | 63
|
2016-01-02T16:28:47.000Z
|
2022-01-19T11:29:51.000Z
|
src/gui/icon.py
|
Epihaius/panda3dstudio
|
f5c62ca49617cae1aa5aa5b695200027da99e242
|
[
"BSD-3-Clause"
] | 12
|
2016-06-12T14:14:15.000Z
|
2020-12-18T16:11:45.000Z
|
src/gui/icon.py
|
Epihaius/panda3dstudio
|
f5c62ca49617cae1aa5aa5b695200027da99e242
|
[
"BSD-3-Clause"
] | 17
|
2016-05-23T00:02:27.000Z
|
2021-04-25T17:48:27.000Z
|
from .base import *
class Icon(Widget):
def __init__(self, parent, icon_id):
self.type = "widget"
self.widget_type = "icon"
self._parent = parent
self._id = icon_id
self.node = parent.node.attach_new_node("icon_widget")
self.image_offset = (0, 0)
self.outer_borders = (0, 0, 0, 0)
self._image = img = self.__create_icon(icon_id)
self._size = self._min_size = (w, h) = img.size
self._sizer = None
self.sizer_cell = None
self.mouse_region = None
self._is_hidden = False
def __create_icon(self, icon_id):
x, y, w, h = Skin.atlas.regions[icon_id]
image = PNMImage(w, h, 4)
image.copy_sub_image(Skin.atlas.image, 0, 0, x, y, w, h)
return image
def destroy(self):
if self.node:
self.node.detach_node()
self.node = None
self._parent = None
self.sizer_cell = None
def set_icon(self, icon_id):
if not icon_id or self._id == icon_id:
return False
self._image = img = self.__create_icon(icon_id)
self._size = (w, h) = img.size
self._id = icon_id
return True
def set_size(self, size, includes_borders=True, is_min=False):
self._size = size
if is_min:
self._min_size = size
return size
def update_images(self, recurse=True, size=None): pass
def get_image(self, state=None, composed=True):
return PNMImage(self._image)
def enable(self, enable=True): pass
class LayeredIcon(Widget):
def __init__(self, parent, icon_ids):
self.type = "widget"
self.widget_type = "layered_icon"
self._parent = parent
self._ids = icon_ids
self._icons_shown = [icon_ids[0]]
self.node = parent.node.attach_new_node("layered_icon_widget")
self.image_offset = (0, 0)
self.outer_borders = (0, 0, 0, 0)
self._icon_images = images = {}
for icon_id in icon_ids:
img = self.__create_icon(icon_id)
images[icon_id] = img
self._image = img = images[icon_ids[0]]
self._size = self._min_size = (w, h) = img.size
self._sizer = None
self.sizer_cell = None
self.mouse_region = None
self._is_hidden = False
def __create_icon(self, icon_id):
x, y, w, h = Skin.atlas.regions[icon_id]
image = PNMImage(w, h, 4)
image.copy_sub_image(Skin.atlas.image, 0, 0, x, y, w, h)
return image
def destroy(self):
if self.node:
self.node.detach_node()
self.node = None
self._parent = None
self.sizer_cell = None
def update(self):
w, h = self._size
self._image = img = PNMImage(w, h, 4)
images = self._icon_images
icons_shown = self._icons_shown
for i_id in self._ids:
if i_id in icons_shown:
img.blend_sub_image(images[i_id], 0, 0, 0, 0)
def show_icon(self, icon_id, show=True, update=False):
if icon_id not in self._ids:
return False
if show:
if icon_id in self._icons_shown:
return False
elif icon_id not in self._icons_shown:
return False
icons_shown = self._icons_shown
        if show:
            icons_shown.append(icon_id)
        else:
            icons_shown.remove(icon_id)
if update:
self.update()
return True
def set_size(self, size, includes_borders=True, is_min=False):
self._size = size
if is_min:
self._min_size = size
return size
def update_images(self, recurse=True, size=None): pass
def get_image(self, state=None, composed=True):
return PNMImage(self._image)
def enable(self, enable=True): pass
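# Hedged usage sketch (not part of the original module): toggling a layer and recompositing.
# "parent" must be an existing GUI widget and the icon ids must exist in Skin.atlas.regions;
# the names below are illustrative only.
#
#     icon = LayeredIcon(parent, ["icon_base", "icon_overlay"])
#     icon.show_icon("icon_overlay", show=True, update=True)   # blend the overlay onto the base
#     icon.show_icon("icon_overlay", show=False, update=True)  # back to the base layer only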
| 24.36478
| 76
| 0.581569
|
702f54af93e4b35cb7420af24a69635f81f0b010
| 10,453
|
py
|
Python
|
src/image_builder.py
|
Lokiiiiii/deep-learning-containers
|
f54b733567fd741b12362dc71cf93a72b5da1c82
|
[
"Apache-2.0"
] | 1
|
2021-07-10T14:01:23.000Z
|
2021-07-10T14:01:23.000Z
|
src/image_builder.py
|
Lokiiiiii/deep-learning-containers
|
f54b733567fd741b12362dc71cf93a72b5da1c82
|
[
"Apache-2.0"
] | null | null | null |
src/image_builder.py
|
Lokiiiiii/deep-learning-containers
|
f54b733567fd741b12362dc71cf93a72b5da1c82
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You
may not use this file except in compliance with the License. A copy of
the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License.
"""
import concurrent.futures
import datetime
import os
from copy import deepcopy
import constants
import utils
from context import Context
from metrics import Metrics
from image import DockerImage
from buildspec import Buildspec
from output import OutputFormatter
from config import build_config
def _find_image_object(images_list, image_name):
"""
Find and return an image object from images_list with a name that matches image_name
:param images_list: <list> List of <DockerImage> objects
:param image_name: <str> Name of image as per buildspec
:return: <DockerImage> Object with image_name as "name" attribute
"""
ret_image_object = None
for image in images_list:
if image.name == image_name:
ret_image_object = image
break
return ret_image_object
# TODO: Abstract away to ImageBuilder class
def image_builder(buildspec):
FORMATTER = OutputFormatter(constants.PADDING)
BUILDSPEC = Buildspec()
BUILDSPEC.load(buildspec)
IMAGES = []
if "huggingface" in str(BUILDSPEC["framework"]):
os.system("echo login into public ECR")
os.system("aws ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin 763104351884.dkr.ecr.us-west-2.amazonaws.com")
for image_name, image_config in BUILDSPEC["images"].items():
ARTIFACTS = deepcopy(BUILDSPEC["context"]) if BUILDSPEC.get("context") else {}
extra_build_args = {}
labels = {}
if image_config.get("version") is not None:
if BUILDSPEC["version"] != image_config.get("version"):
continue
if image_config.get("context") is not None:
ARTIFACTS.update(image_config["context"])
build_context = os.getenv("BUILD_CONTEXT")
image_tag = (
tag_image_with_pr_number(image_config["tag"])
if build_context == "PR"
else image_config["tag"]
)
if not build_config.DISABLE_DATETIME_TAG or build_context != "PR":
image_tag = tag_image_with_datetime(image_tag)
image_repo_uri = (
image_config["repository"]
if build_context == "PR"
else modify_repository_name_for_context(str(image_config["repository"]), build_context)
)
base_image_uri = None
if image_config.get("base_image_name") is not None:
base_image_object = _find_image_object(IMAGES, image_config["base_image_name"])
base_image_uri = base_image_object.ecr_url
if image_config.get("download_artifacts") is not None:
for artifact_name, artifact in image_config.get("download_artifacts").items():
type = artifact["type"]
uri = artifact["URI"]
var = artifact["VAR_IN_DOCKERFILE"]
try:
file_name = utils.download_file(uri, type).strip()
except ValueError:
FORMATTER.print(f"Artifact download failed: {uri} of type {type}.")
ARTIFACTS.update({
f"{artifact_name}": {
"source": f"{os.path.join(os.sep, os.path.abspath(os.getcwd()), file_name)}",
"target": file_name
}
})
extra_build_args[var] = file_name
labels[var] = file_name
labels[f"{var}_URI"] = uri
if str(BUILDSPEC["framework"]).startswith("huggingface"):
if "transformers_version" in image_config:
extra_build_args["TRANSFORMERS_VERSION"] = image_config.get("transformers_version")
else:
raise KeyError(f"HuggingFace buildspec.yml must contain 'transformers_version' field for each image")
if "datasets_version" in image_config:
extra_build_args["DATASETS_VERSION"] = image_config.get("datasets_version")
elif str(image_config["image_type"]) == "training":
raise KeyError(f"HuggingFace buildspec.yml must contain 'datasets_version' field for each image")
ARTIFACTS.update(
{
"dockerfile": {
"source": image_config["docker_file"],
"target": "Dockerfile",
}
}
)
context = Context(ARTIFACTS, f"build/{image_name}.tar.gz", image_config["root"])
if "labels" in image_config:
labels.update(image_config.get("labels"))
"""
Override parameters from parent in child.
"""
info = {
"account_id": str(BUILDSPEC["account_id"]),
"region": str(BUILDSPEC["region"]),
"framework": str(BUILDSPEC["framework"]),
"version": str(BUILDSPEC["version"]),
"root": str(image_config["root"]),
"name": str(image_name),
"device_type": str(image_config["device_type"]),
"python_version": str(image_config["python_version"]),
"image_type": str(image_config["image_type"]),
"image_size_baseline": int(image_config["image_size_baseline"]),
"base_image_uri": base_image_uri,
"labels": labels,
"extra_build_args": extra_build_args
}
image_object = DockerImage(
info=info,
dockerfile=image_config["docker_file"],
repository=image_repo_uri,
tag=image_tag,
to_build=image_config["build"],
context=context,
)
IMAGES.append(image_object)
FORMATTER.banner("DLC")
FORMATTER.title("Status")
THREADS = {}
# In the context of the ThreadPoolExecutor each instance of image.build submitted
# to it is executed concurrently in a separate thread.
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
# Standard images must be built before example images
# Example images will use standard images as base
standard_images = [image for image in IMAGES if "example" not in image.name.lower()]
example_images = [image for image in IMAGES if "example" in image.name.lower()]
for image in standard_images:
THREADS[image.name] = executor.submit(image.build)
# the FORMATTER.progress(THREADS) function call also waits until all threads have completed
FORMATTER.progress(THREADS)
for image in example_images:
THREADS[image.name] = executor.submit(image.build)
# the FORMATTER.progress(THREADS) function call also waits until all threads have completed
FORMATTER.progress(THREADS)
FORMATTER.title("Build Logs")
if not os.path.isdir("logs"):
os.makedirs("logs")
for image in IMAGES:
FORMATTER.title(image.name)
FORMATTER.table(image.info.items())
FORMATTER.separator()
FORMATTER.print_lines(image.log)
with open(f"logs/{image.name}", "w") as fp:
fp.write("/n".join(image.log))
image.summary["log"] = f"logs/{image.name}"
FORMATTER.title("Summary")
for image in IMAGES:
FORMATTER.title(image.name)
FORMATTER.table(image.summary.items())
FORMATTER.title("Errors")
is_any_build_failed = False
is_any_build_failed_size_limit = False
for image in IMAGES:
if image.build_status == constants.FAIL:
FORMATTER.title(image.name)
FORMATTER.print_lines(image.log[-10:])
is_any_build_failed = True
else:
if image.build_status == constants.FAIL_IMAGE_SIZE_LIMIT:
is_any_build_failed_size_limit = True
if is_any_build_failed:
raise Exception("Build failed")
else:
if is_any_build_failed_size_limit:
FORMATTER.print("Build failed. Image size limit breached.")
else:
FORMATTER.print("No errors")
FORMATTER.title("Uploading Metrics")
metrics = Metrics(
context=constants.BUILD_CONTEXT,
region=BUILDSPEC["region"],
namespace=constants.METRICS_NAMESPACE,
)
for image in IMAGES:
try:
metrics.push_image_metrics(image)
except Exception as e:
if is_any_build_failed or is_any_build_failed_size_limit:
raise Exception(f"Build failed.{e}")
else:
raise Exception(f"Build passed. {e}")
if is_any_build_failed_size_limit:
raise Exception("Build failed because of file limit")
FORMATTER.separator()
# Set environment variables to be consumed by test jobs
test_trigger_job = utils.get_codebuild_project_name()
utils.set_test_env(
IMAGES,
BUILD_CONTEXT=os.getenv("BUILD_CONTEXT"),
TEST_TRIGGER=test_trigger_job,
)
def tag_image_with_pr_number(image_tag):
pr_number = os.getenv("CODEBUILD_SOURCE_VERSION").replace("/", "-")
return f"{image_tag}-{pr_number}"
def tag_image_with_datetime(image_tag):
datetime_suffix = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
return f"{image_tag}-{datetime_suffix}"
def modify_repository_name_for_context(image_repo_uri, build_context):
repo_uri_values = image_repo_uri.split("/")
repo_name = repo_uri_values[-1]
if build_context == "MAINLINE":
repo_uri_values[-1] = repo_name.replace(
constants.PR_REPO_PREFIX, constants.MAINLINE_REPO_PREFIX
)
elif build_context == "NIGHTLY":
repo_uri_values[-1] = repo_name.replace(
constants.PR_REPO_PREFIX, constants.NIGHTLY_REPO_PREFIX
)
return "/".join(repo_uri_values)
| 36.936396
| 158
| 0.623553
|
34075f0c35d1358a80efd22671b1553e0b415a15
| 650
|
py
|
Python
|
interaction/migrations/0004_auto_20220225_1650.py
|
protwis/Protwis
|
fdcad0a2790721b02c0d12d8de754313714c575e
|
[
"Apache-2.0"
] | null | null | null |
interaction/migrations/0004_auto_20220225_1650.py
|
protwis/Protwis
|
fdcad0a2790721b02c0d12d8de754313714c575e
|
[
"Apache-2.0"
] | null | null | null |
interaction/migrations/0004_auto_20220225_1650.py
|
protwis/Protwis
|
fdcad0a2790721b02c0d12d8de754313714c575e
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.1 on 2022-02-25 15:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('interaction', '0003_auto_20180117_1457'),
]
operations = [
migrations.RemoveField(
model_name='residuefragmentatom',
name='interaction',
),
migrations.RemoveField(
model_name='residuefragmentatom',
name='structureligandpair',
),
migrations.DeleteModel(
name='ProteinLigandInteraction',
),
migrations.DeleteModel(
name='ResidueFragmentAtom',
),
]
| 23.214286
| 51
| 0.587692
|
dc6e0cfac3804c44d3f12e1f88d1982300419fdb
| 17,110
|
py
|
Python
|
tb_rest_client/models/models_pe/__init__.py
|
fargiolas/thingsboard-python-rest-client
|
8c439e27218226b356e8203c2a7f1239278669c0
|
[
"Apache-2.0"
] | null | null | null |
tb_rest_client/models/models_pe/__init__.py
|
fargiolas/thingsboard-python-rest-client
|
8c439e27218226b356e8203c2a7f1239278669c0
|
[
"Apache-2.0"
] | null | null | null |
tb_rest_client/models/models_pe/__init__.py
|
fargiolas/thingsboard-python-rest-client
|
8c439e27218226b356e8203c2a7f1239278669c0
|
[
"Apache-2.0"
] | null | null | null |
from .default_device_profile_transport_configuration import DefaultDeviceProfileTransportConfiguration
from .json_node import JsonNode
from .merged_user_permissions import MergedUserPermissions
from .page_data_edge import PageDataEdge
from .tenant_solution_template_info import TenantSolutionTemplateInfo
from .tenant_info import TenantInfo
from .debug_rule_node_event_filter import DebugRuleNodeEventFilter
from .admin_settings_id import AdminSettingsId
from .entity_data import EntityData
from .page_data_device import PageDataDevice
from .server_security_config import ServerSecurityConfig
from .home_dashboard_info import HomeDashboardInfo
from .login_response import LoginResponse
from .widget_type import WidgetType
from .event_id import EventId
from .scheduler_event_info import SchedulerEventInfo
from .test_sms_request import TestSmsRequest
from .ota_package import OtaPackage
from .group_permission import GroupPermission
from .user import User
from .o_auth2_mobile_info import OAuth2MobileInfo
from .numeric_filter_predicate import NumericFilterPredicate
from .device_profile_alarm import DeviceProfileAlarm
from .ota_package_info import OtaPackageInfo
from .alarm_data import AlarmData
from .entity_id import EntityId
from .event import Event
from .complex_filter_predicate import ComplexFilterPredicate
from .edge_id import EdgeId
from .device_profile_data import DeviceProfileData
from .allowed_permissions_info import AllowedPermissionsInfo
from .page_data_customer import PageDataCustomer
from .device_group_ota_package import DeviceGroupOtaPackage
from .login_white_labeling_params import LoginWhiteLabelingParams
from .sign_up_request import SignUpRequest
from .share_group_request import ShareGroupRequest
from .save_ota_package_info_request import SaveOtaPackageInfoRequest
from .mqtt_device_transport_configuration import MqttDeviceTransportConfiguration
from .page_data_tb_resource_info import PageDataTbResourceInfo
from .home_dashboard import HomeDashboard
from .bulk_import_result_device import BulkImportResultDevice
from .device_search_query_filter import DeviceSearchQueryFilter
from .page_data_device_profile import PageDataDeviceProfile
from .dashboard_info import DashboardInfo
from .byte_buffer import ByteBuffer
from .entity_info import EntityInfo
from .edge import Edge
from .scheduler_event_with_customer_info import SchedulerEventWithCustomerInfo
from .tenant import Tenant
from .entity_relations_query import EntityRelationsQuery
from .sms_provider_configuration import SmsProviderConfiguration
from .entity_relation_info import EntityRelationInfo
from .tenant_id import TenantId
from .filter_predicate_valueboolean import FilterPredicateValueboolean
from .component_descriptor import ComponentDescriptor
from .short_customer_info import ShortCustomerInfo
from .device_profile_info import DeviceProfileInfo
from .duration_alarm_condition_spec import DurationAlarmConditionSpec
from .group_permission_id import GroupPermissionId
from .o_auth2_registration_info import OAuth2RegistrationInfo
from .twilio_sms_provider_configuration import TwilioSmsProviderConfiguration
from .device_profile import DeviceProfile
from .page_data_converter import PageDataConverter
from .lw_m2m_resource_observe import LwM2mResourceObserve
from .default_tenant_profile_configuration import DefaultTenantProfileConfiguration
from .role_id import RoleId
from .check_pre_provisioned_devices_device_profile_provision_configuration import \
CheckPreProvisionedDevicesDeviceProfileProvisionConfiguration
from .page_data_dashboard_info import PageDataDashboardInfo
from .alarm_info import AlarmInfo
from .asset import Asset
from .debug_converter_event_filter import DebugConverterEventFilter
from .o_auth2_client_info import OAuth2ClientInfo
from .page_data_user import PageDataUser
from .boolean_filter_predicate import BooleanFilterPredicate
from .rule_chain_id import RuleChainId
from .admin_settings import AdminSettings
from .o_auth2_client_registration_template import OAuth2ClientRegistrationTemplate
from .rule_node import RuleNode
from .other_configuration import OtherConfiguration
from .device_data import DeviceData
from .page_data_integration import PageDataIntegration
from .proto_transport_payload_configuration import ProtoTransportPayloadConfiguration
from .dashboard_id import DashboardId
from .change_password_request import ChangePasswordRequest
from .tenant_profile_data import TenantProfileData
from .device import Device
from .shared_attributes_setting_snmp_communication_config import SharedAttributesSettingSnmpCommunicationConfig
from .sign_up_self_registration_params import SignUpSelfRegistrationParams
from .report_config import ReportConfig
from .o_auth2_custom_mapper_config import OAuth2CustomMapperConfig
from .update_message import UpdateMessage
from .power_saving_configuration import PowerSavingConfiguration
from .entity_group_filter import EntityGroupFilter
from .ota_package_id import OtaPackageId
from .error_event_filter import ErrorEventFilter
from .jwt_token_pair import JWTTokenPair
from .page_data_short_entity_view import PageDataShortEntityView
from .alarm_schedule import AlarmSchedule
from .user_id import UserId
from .entity_group_list_filter import EntityGroupListFilter
from .integration_id import IntegrationId
from .asset_type_filter import AssetTypeFilter
from .statistics_event_filter import StatisticsEventFilter
from .page_data_entity_group_info import PageDataEntityGroupInfo
from .api_usage_state_filter import ApiUsageStateFilter
from .merged_group_permission_info import MergedGroupPermissionInfo
from .widgets_bundle_id import WidgetsBundleId
from .atomic_integer import AtomicInteger
from .security_settings import SecuritySettings
from .event_filter import EventFilter
from .lw_m2m_object import LwM2mObject
from .edge_search_query import EdgeSearchQuery
from .page_data_scheduler_event_info import PageDataSchedulerEventInfo
from .state_entity_owner_filter import StateEntityOwnerFilter
from .o_auth2_params_info import OAuth2ParamsInfo
from .entity_view_id import EntityViewId
from .alarm_condition_filter_key import AlarmConditionFilterKey
from .merged_group_type_permission_info import MergedGroupTypePermissionInfo
from .device_transport_configuration import DeviceTransportConfiguration
from .filter_predicate_valuedouble import FilterPredicateValuedouble
from .filter_predicate_valuestring import FilterPredicateValuestring
from .page_data_role import PageDataRole
from .alarm_condition_filter import AlarmConditionFilter
from .alarm import Alarm
from .attributes_entity_view import AttributesEntityView
from .login_request import LoginRequest
from .entity_view import EntityView
from .page_data_device_profile_info import PageDataDeviceProfileInfo
from .device_profile_provision_configuration import DeviceProfileProvisionConfiguration
from .specific_time_schedule import SpecificTimeSchedule
from .favicon import Favicon
from .o_auth2_info import OAuth2Info
from .activate_user_request import ActivateUserRequest
from .converter import Converter
from .resource import Resource
from .subscription_usage import SubscriptionUsage
from .default_device_transport_configuration import DefaultDeviceTransportConfiguration
from .entity_group_id import EntityGroupId
from .telemetry_mapping_configuration import TelemetryMappingConfiguration
from .default_device_profile_configuration import DefaultDeviceProfileConfiguration
from .any_time_schedule import AnyTimeSchedule
from .page_data_tenant import PageDataTenant
from .allow_create_new_devices_device_profile_provision_configuration import \
AllowCreateNewDevicesDeviceProfileProvisionConfiguration
from .to_device_rpc_request_snmp_communication_config import ToDeviceRpcRequestSnmpCommunicationConfig
from .default_device_configuration import DefaultDeviceConfiguration
from .widget_type_info import WidgetTypeInfo
from .entity_name_filter import EntityNameFilter
from .tb_resource_id import TbResourceId
from .efento_coap_device_type_configuration import EfentoCoapDeviceTypeConfiguration
from .edge_event import EdgeEvent
from .page_data_rule_chain import PageDataRuleChain
from .customer_id import CustomerId
from .snmp_device_transport_configuration import SnmpDeviceTransportConfiguration
from .short_entity_view import ShortEntityView
from .alarm_rule import AlarmRule
from .key_filter import KeyFilter
from .client_attributes_querying_snmp_communication_config import ClientAttributesQueryingSnmpCommunicationConfig
from .rule_chain_import_result import RuleChainImportResult
from .custom_menu_item import CustomMenuItem
from .role import Role
from .entity_group_info import EntityGroupInfo
from .input_stream import InputStream
from .edge_type_filter import EdgeTypeFilter
from .palette import Palette
from .object_node import ObjectNode
from .device_configuration import DeviceConfiguration
from .entity_subtype import EntitySubtype
from .entity_key import EntityKey
from .integration import Integration
from .device_type_filter import DeviceTypeFilter
from .edge_search_query_filter import EdgeSearchQueryFilter
from .save_device_with_credentials_request import SaveDeviceWithCredentialsRequest
from .bulk_import_result_edge import BulkImportResultEdge
from .lwm2m_device_transport_configuration import Lwm2mDeviceTransportConfiguration
from .palette_settings import PaletteSettings
from .response_entity import ResponseEntity
from .page_data_event import PageDataEvent
from .entity_list_filter import EntityListFilter
from .deferred_result_response_entity import DeferredResultResponseEntity
from .entity_type_filter import EntityTypeFilter
from .custom_time_schedule import CustomTimeSchedule
from .tenant_solution_template_instructions import TenantSolutionTemplateInstructions
from .snmp_communication_config import SnmpCommunicationConfig
from .dashboard import Dashboard
from .rule_chain_meta_data import RuleChainMetaData
from .filter_predicate_valueint import FilterPredicateValueint
from .bulk_import_result_asset import BulkImportResultAsset
from .edge_event_id import EdgeEventId
from .column_mapping import ColumnMapping
from .claim_request import ClaimRequest
from .filter_predicate_valuelong import FilterPredicateValuelong
from .widget_type_id import WidgetTypeId
from .custom_menu import CustomMenu
from .relations_search_parameters import RelationsSearchParameters
from .thingsboard_credentials_expired_response import ThingsboardCredentialsExpiredResponse
from .o_auth2_basic_mapper_config import OAuth2BasicMapperConfig
from .page_data_widgets_bundle import PageDataWidgetsBundle
from .simple_alarm_condition_spec import SimpleAlarmConditionSpec
from .rpc import Rpc
from .group_permission_info import GroupPermissionInfo
from .widgets_bundle import WidgetsBundle
from .rpc_id import RpcId
from .page_data_entity_info import PageDataEntityInfo
from .page_data_alarm_data import PageDataAlarmData
from .default_rule_chain_create_request import DefaultRuleChainCreateRequest
from .transport_payload_type_configuration import TransportPayloadTypeConfiguration
from .entity_group import EntityGroup
from .ts_value import TsValue
from .solution_install_response import SolutionInstallResponse
from .telemetry_querying_snmp_communication_config import TelemetryQueryingSnmpCommunicationConfig
from .device_profile_configuration import DeviceProfileConfiguration
from .page_data_asset import PageDataAsset
from .entity_group_name_filter import EntityGroupNameFilter
from .entity_data_query import EntityDataQuery
from .custom_translation import CustomTranslation
from .entity_count_query import EntityCountQuery
from .contact_basedobject import ContactBasedobject
from .entity_view_search_query import EntityViewSearchQuery
from .bootstrap_configuration import BootstrapConfiguration
from .o_auth2_domain_info import OAuth2DomainInfo
from .bulk_import_request import BulkImportRequest
from .node_connection_info import NodeConnectionInfo
from .entity_data_page_link import EntityDataPageLink
from .dynamic_valueint import DynamicValueint
from .thingsboard_error_response import ThingsboardErrorResponse
from .coap_device_transport_configuration import CoapDeviceTransportConfiguration
from .string_filter_predicate import StringFilterPredicate
from .snmp_mapping import SnmpMapping
from .mqtt_device_profile_transport_configuration import MqttDeviceProfileTransportConfiguration
from .device_credentials import DeviceCredentials
from .telemetry_entity_view import TelemetryEntityView
from .single_entity_filter import SingleEntityFilter
from .entity_view_search_query_filter import EntityViewSearchQueryFilter
from .disabled_device_profile_provision_configuration import DisabledDeviceProfileProvisionConfiguration
from .asset_search_query import AssetSearchQuery
from .entity_filter import EntityFilter
from .debug_integration_event_filter import DebugIntegrationEventFilter
from .entity_view_type_filter import EntityViewTypeFilter
from .page_data_alarm_info import PageDataAlarmInfo
from .page_data_entity_data import PageDataEntityData
from .dynamic_valueboolean import DynamicValueboolean
from .page_data_tenant_info import PageDataTenantInfo
from .page_data_audit_log import PageDataAuditLog
from .tenant_profile_configuration import TenantProfileConfiguration
from .customer import Customer
from .dynamic_valuelong import DynamicValuelong
from .device_profile_transport_configuration import DeviceProfileTransportConfiguration
from .tb_resource_info import TbResourceInfo
from .widget_type_details import WidgetTypeDetails
from .object_attributes import ObjectAttributes
from .relation_entity_type_filter import RelationEntityTypeFilter
from .asset_search_query_filter import AssetSearchQueryFilter
from .reset_password_email_request import ResetPasswordEmailRequest
from .tenant_solution_template_details import TenantSolutionTemplateDetails
from .tenant_profile_id import TenantProfileId
from .tenant_profile import TenantProfile
from .blob_entity_id import BlobEntityId
from .key_filter_predicate import KeyFilterPredicate
from .o_auth2_mapper_config import OAuth2MapperConfig
from .default_coap_device_type_configuration import DefaultCoapDeviceTypeConfiguration
from .snmp_device_profile_transport_configuration import SnmpDeviceProfileTransportConfiguration
from .life_cycle_event_filter import LifeCycleEventFilter
from .blob_entity_with_customer_info import BlobEntityWithCustomerInfo
from .relations_query_filter import RelationsQueryFilter
from .alarm_condition import AlarmCondition
from .self_registration_params import SelfRegistrationParams
from .rule_chain_data import RuleChainData
from .dynamic_valuedouble import DynamicValuedouble
from .dynamic_valuestring import DynamicValuestring
from .lw_m2m_instance import LwM2mInstance
from .repeating_alarm_condition_spec import RepeatingAlarmConditionSpec
from .page_data_tenant_profile import PageDataTenantProfile
from .custom_time_schedule_item import CustomTimeScheduleItem
from .mapping import Mapping
from .page_data_entity_view import PageDataEntityView
from .user_password_policy import UserPasswordPolicy
from .delete_tenant_request import DeleteTenantRequest
from .page_data_edge_event import PageDataEdgeEvent
from .device_id import DeviceId
from .converter_id import ConverterId
from .aws_sns_sms_provider_configuration import AwsSnsSmsProviderConfiguration
from .scheduler_event import SchedulerEvent
from .lwm2m_device_profile_transport_configuration import Lwm2mDeviceProfileTransportConfiguration
from .page_data_blob_entity_with_customer_info import PageDataBlobEntityWithCustomerInfo
from .component_descriptor_id import ComponentDescriptorId
from .entity_relation import EntityRelation
from .o_auth2_client_registration_template_id import OAuth2ClientRegistrationTemplateId
from .alarm_id import AlarmId
from .audit_log import AuditLog
from .scheduler_event_id import SchedulerEventId
from .alarm_data_page_link import AlarmDataPageLink
from .device_search_query import DeviceSearchQuery
from .debug_rule_chain_event_filter import DebugRuleChainEventFilter
from .alarm_data_query import AlarmDataQuery
from .alarm_condition_spec import AlarmConditionSpec
from .coap_device_type_configuration import CoapDeviceTypeConfiguration
from .reset_password_request import ResetPasswordRequest
from .white_labeling_params import WhiteLabelingParams
from .asset_id import AssetId
from .tb_resource import TbResource
from .blob_entity_info import BlobEntityInfo
from .device_credentials_id import DeviceCredentialsId
from .rule_node_id import RuleNodeId
from .rule_chain_connection_info import RuleChainConnectionInfo
from .audit_log_id import AuditLogId
from .device_profile_id import DeviceProfileId
from .coap_device_profile_transport_configuration import CoapDeviceProfileTransportConfiguration
from .json_transport_payload_configuration import JsonTransportPayloadConfiguration
from .entity_data_sort_order import EntityDataSortOrder
from .page_data_ota_package_info import PageDataOtaPackageInfo
from .page_data_contact_basedobject import PageDataContactBasedobject
from .rule_chain import RuleChain
from .entities_by_group_name_filter import EntitiesByGroupNameFilter
| 54.14557
| 113
| 0.907832
|
5ac64234df577f2d75b9f1c40f5e0556db76caca
| 5,135
|
py
|
Python
|
testfixtures/tdatetime.py
|
cjw296/testfixtures
|
1bf1e6fe1e111210d6d7fbcd00feb564095ffd02
|
[
"MIT"
] | null | null | null |
testfixtures/tdatetime.py
|
cjw296/testfixtures
|
1bf1e6fe1e111210d6d7fbcd00feb564095ffd02
|
[
"MIT"
] | null | null | null |
testfixtures/tdatetime.py
|
cjw296/testfixtures
|
1bf1e6fe1e111210d6d7fbcd00feb564095ffd02
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2008-2013 Simplistix Ltd
# See license.txt for license details.
from calendar import timegm
from datetime import datetime, timedelta, date
from testfixtures.compat import new_class
@classmethod
def add(cls,*args,**kw):
if 'tzinfo' in kw or len(args)>7:
raise TypeError('Cannot add tzinfo to %s' % cls.__name__)
if args and isinstance(args[0], cls.__bases__[0]):
inst = args[0]
if getattr(inst, 'tzinfo', None):
raise ValueError(
'Cannot add %s with tzinfo set' % inst.__class__.__name__
)
if cls._ct:
inst = cls._ct(inst)
cls._q.append(inst)
else:
cls._q.append(cls(*args,**kw))
@classmethod
def set_(cls,*args,**kw):
if 'tzinfo' in kw or len(args)>7:
raise TypeError('Cannot set tzinfo on %s' % cls.__name__)
if args and isinstance(args[0], cls.__bases__[0]):
inst = args[0]
if getattr(inst, 'tzinfo', None):
raise ValueError(
'Cannot set %s with tzinfo set' % inst.__class__.__name__
)
if cls._q:
cls._q=[]
cls.add(*args,**kw)
def __add__(self,other):
r = super(self.__class__,self).__add__(other)
if self._ct:
r = self._ct(r)
return r
def __new__(cls, *args, **kw):
if cls is cls._cls:
return super(cls, cls).__new__(cls, *args,**kw)
else:
return cls._cls(*args, **kw)
@classmethod
def instantiate(cls):
r = cls._q.pop(0)
if not cls._q:
cls._gap += cls._gap_d
n = r+timedelta(**{cls._gap_t:cls._gap})
if cls._ct:
n = cls._ct(n)
cls._q.append(n)
return r
@classmethod
def now(cls,tz=None):
r = cls._instantiate()
if tz is not None:
if cls._tzta:
r = r - cls._tzta.utcoffset(r)
r = tz.fromutc(r.replace(tzinfo=tz))
return cls._ct(r)
@classmethod
def utcnow(cls):
r = cls._instantiate()
if cls._tzta is not None:
r = r - cls._tzta.utcoffset(r)
return r
def test_factory(n,type,default,args,kw,tz=None,**to_patch):
q = []
to_patch['_q']=q
to_patch['_tzta']=tz
to_patch['add']=add
to_patch['set']=set_
to_patch['__add__']=__add__
if '__new__' not in to_patch:
to_patch['__new__'] = __new__
class_ = new_class(n, (type, ), to_patch)
strict = kw.pop('strict', False)
if strict:
class_._cls = class_
else:
class_._cls = type
if args==(None,):
pass
elif args or kw:
q.append(class_(*args,**kw))
else:
q.append(class_(*default))
return class_
def correct_date_method(self):
return self._date_type(
self.year,
self.month,
self.day
)
@classmethod
def correct_datetime(cls,dt):
return cls._cls(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dt.tzinfo,
)
def test_datetime(*args,**kw):
tz = None
if len(args) > 7:
tz = args[7]
args = args[:7]
else:
tz = kw.pop('tzinfo', None)
if 'delta' in kw:
gap = kw.pop('delta')
gap_delta = 0
else:
gap = 0
gap_delta = 10
delta_type = kw.pop('delta_type','seconds')
date_type = kw.pop('date_type',date)
return test_factory(
'tdatetime',datetime,(2001,1,1,0,0,0),args,kw,tz,
_ct=correct_datetime,
_instantiate=instantiate,
now=now,
utcnow=utcnow,
_gap = gap,
_gap_d = gap_delta,
_gap_t = delta_type,
date = correct_date_method,
_date_type = date_type,
)
test_datetime.__test__ = False
@classmethod
def correct_date(cls,d):
return cls._cls(
d.year,
d.month,
d.day,
)
def test_date(*args,**kw):
if 'delta' in kw:
gap = kw.pop('delta')
gap_delta = 0
else:
gap = 0
gap_delta = 1
delta_type = kw.pop('delta_type','days')
return test_factory(
'tdate',date,(2001,1,1),args,kw,
_ct=correct_date,
today=instantiate,
_gap = gap,
_gap_d = gap_delta,
_gap_t = delta_type,
)
ms = 10**6
def __time_new__(cls, *args, **kw):
if args or kw:
return super(cls, cls).__new__(cls, *args, **kw)
else:
val = cls.instantiate()
t = timegm(val.utctimetuple())
t += (float(val.microsecond)/ms)
return t
test_date.__test__ = False
def test_time(*args,**kw):
if 'tzinfo' in kw or len(args)>7:
raise TypeError("You don't want to use tzinfo with test_time")
if 'delta' in kw:
gap = kw.pop('delta')
gap_delta = 0
else:
gap = 0
gap_delta = 1
delta_type = kw.pop('delta_type','seconds')
return test_factory(
'ttime',datetime,(2001,1,1,0,0,0),args,kw,
_ct=None,
instantiate=instantiate,
_gap = gap,
_gap_d = gap_delta,
_gap_t = delta_type,
__new__ = __time_new__,
)
test_time.__test__ = False
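# A minimal usage sketch of the factories above. With no arguments the queue starts at
# 2001-01-01 00:00:00 and, per instantiate(), each later call advances by a growing gap
# (10s, then 20s, and so on); set() restarts the sequence at an explicit moment.
def _demo_test_datetime():
    d = test_datetime()          # a datetime subclass with a controllable now()/utcnow()
    print(d.now())               # 2001-01-01 00:00:00
    print(d.now())               # 2001-01-01 00:00:10
    print(d.now())               # 2001-01-01 00:00:30
    d.set(2002, 1, 1)            # reset the queue
    print(d.now())               # 2002-01-01 00:00:00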
| 24.6875
| 73
| 0.559299
|
09413fe43ff8fd03255899ddd2ff3133ad56eb7f
| 3,637
|
py
|
Python
|
scripts/analog_io_rampup.py
|
maymohan/baxter_examples
|
37dbb35ccb02356de76ea64fda61b5389b0a8f8c
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/analog_io_rampup.py
|
maymohan/baxter_examples
|
37dbb35ccb02356de76ea64fda61b5389b0a8f8c
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/analog_io_rampup.py
|
maymohan/baxter_examples
|
37dbb35ccb02356de76ea64fda61b5389b0a8f8c
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2013-2015, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import argparse
import rospy
import baxter_interface.analog_io as AIO
def test_interface(io_component='torso_fan'):
"""Ramps an Analog component from 0 to 100, then back down to 0."""
rospy.loginfo("Ramping output of Analog IO component: %s", io_component)
b = AIO.AnalogIO(io_component)
rate = rospy.Rate(2)
# start: 0.0
print(b.state())
# ramp up
for i in range(0, 101, 10):
b.set_output(i)
print(i)
rate.sleep()
# max: 100.0
print(b.state())
# ramp down
for i in range(100, -1, -10):
b.set_output(i)
print(i)
rate.sleep()
# (fans off)
b.set_output(0)
def main():
"""RSDK Analog IO Example: Ramp
Ramps the output of an AnalogIO component from 0 to 100,
and then back down again. Demonstrates the use of the
baxter_interface.AnalogIO class.
Run this example and listen to the fan as output changes.
"""
epilog = """
ROS Parameters:
~component_id - name of AnalogIO component to use
Baxter AnalogIO
Note that 'AnalogIO' components are only those that use
the custom ROS Messages baxter_core_msgs/AnalogIOState
and baxter_core_msgs/AnalogOutputCommand.
AnalogIO component names can be found on the Wiki or by
echoing the names field of the analog_io_states topic:
$ rostopic echo -n 1 /robot/analog_io_states/names
"""
arg_fmt = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(formatter_class=arg_fmt,
description=main.__doc__,
epilog=epilog)
parser.add_argument(
'-c', '--component', dest='component_id', default='torso_fan',
help='name of Analog IO component to use (default:= torso_fan)'
)
args = parser.parse_args(rospy.myargv()[1:])
rospy.init_node('rsdk_analog_io_rampup', anonymous=True)
io_component = rospy.get_param('~component_id', args.component_id)
test_interface(io_component)
if __name__ == '__main__':
main()
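# A minimal programmatic sketch of the same AnalogIO interface, without argparse. It assumes
# a running ROS master and a Baxter robot; 'torso_fan' is the example's default component,
# and state()/set_output() are the same calls used in test_interface() above.
def _demo_direct_ramp():
    rospy.init_node('analog_io_demo', anonymous=True)
    fan = AIO.AnalogIO('torso_fan')
    print(fan.state())        # current analog output value
    fan.set_output(50)        # half scale
    rospy.sleep(2.0)          # let the fan spin up briefly
    fan.set_output(0)         # back off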
| 35.656863
| 77
| 0.708826
|
829b61452447b60eace17e4106202fc37202d7e7
| 73
|
py
|
Python
|
klickbrick/script.py
|
jms/klickbrick
|
af2ae2c6692f91639dd6f9cf67fac8acaa48e115
|
[
"MIT"
] | 1
|
2021-07-16T03:59:22.000Z
|
2021-07-16T03:59:22.000Z
|
klickbrick/script.py
|
jms/klickbrick
|
af2ae2c6692f91639dd6f9cf67fac8acaa48e115
|
[
"MIT"
] | null | null | null |
klickbrick/script.py
|
jms/klickbrick
|
af2ae2c6692f91639dd6f9cf67fac8acaa48e115
|
[
"MIT"
] | null | null | null |
def greeting(name):
return f"Hello {name}"
def onboard():
pass
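# A minimal usage sketch of greeting() above; onboard() is still an empty stub.
def _demo_greeting():
    assert greeting("World") == "Hello World"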
| 10.428571
| 26
| 0.616438
|
e65ee5885917b60296b659bacf7a0cf5d6ab344b
| 32,158
|
py
|
Python
|
test/functional/fundrawtransaction.py
|
Pakcoin-project/pakcoinold
|
02cb74e55daecaaf2c590753cdc6be245b448dd0
|
[
"MIT"
] | 3
|
2019-02-08T10:36:22.000Z
|
2021-02-17T22:05:25.000Z
|
test/functional/fundrawtransaction.py
|
Pakcoin-project/pakcoinold
|
02cb74e55daecaaf2c590753cdc6be245b448dd0
|
[
"MIT"
] | null | null | null |
test/functional/fundrawtransaction.py
|
Pakcoin-project/pakcoinold
|
02cb74e55daecaaf2c590753cdc6be245b448dd0
|
[
"MIT"
] | 3
|
2019-02-08T10:36:26.000Z
|
2020-04-10T09:35:16.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# If the fee's positive delta is higher than this value, the tests will fail;
# a negative delta always fails the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-5, "changeAddress must be a valid pakcoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.stop_node(0)
self.nodes[1].node_encrypt_wallet("test")
self.stop_node(2)
self.stop_node(3)
self.start_nodes()
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 1*min_relay_tx_fee}) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
################################
# Test no address reuse occurs #
################################
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much share of the fee as outputs 2 and 3, and no more than 2 satoshis more
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
if __name__ == '__main__':
RawTransactionsTest().main()
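# Condensed sketch of the create -> fund -> sign -> send round trip the test exercises
# repeatedly above. `node` is assumed to expose the same RPC wrappers the test uses
# (createrawtransaction, fundrawtransaction, signrawtransaction, sendrawtransaction);
# the empty input list lets the wallet pick inputs and add a change output.
def fund_and_send(node, address, amount):
    raw = node.createrawtransaction([], {address: amount})
    funded = node.fundrawtransaction(raw)
    signed = node.signrawtransaction(funded['hex'])
    assert signed['complete']
    return node.sendrawtransaction(signed['hex'])   # returns the txid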
| 44.601942
| 223
| 0.569812
|
6da27a5bbeffb8372223cf31f66f93b9ab4a8c44
| 753
|
py
|
Python
|
python/20200714/ex03.py
|
kogepanh/class-numeric
|
4fd4cb56818339c6348f9f691c64fb33a09e1b69
|
[
"MIT"
] | null | null | null |
python/20200714/ex03.py
|
kogepanh/class-numeric
|
4fd4cb56818339c6348f9f691c64fb33a09e1b69
|
[
"MIT"
] | null | null | null |
python/20200714/ex03.py
|
kogepanh/class-numeric
|
4fd4cb56818339c6348f9f691c64fb33a09e1b69
|
[
"MIT"
] | null | null | null |
#ex03
def rev(lst):
if lst == []:
return []
else:
return rev(lst[1:]) + [lst[0]]
def deeprev(lst):
if lst == []:
return []
if isinstance(lst[0], list):
lst[0] = deeprev(lst[0])
return deeprev(lst[1:]) + [lst[0]]
def count_items(itemlist):
    # Deep count of the elements in a (possibly nested) list:
    # a non-list value counts as a single item, so check the type first.
    if not isinstance(itemlist, list):
        return 1
    if len(itemlist) == 0:
        return 0
    # Count the head (recursing into nested lists) plus the rest of the list.
    return count_items(itemlist[0]) + count_items(itemlist[1:])
# main
lst1 = [0, 1, 2]
print 'deeprev(', lst1, ') =', deeprev(lst1)
lst2 = [[0, 1], [2, 3], [4, 5]]
print 'deeprev(', lst2, ') =', deeprev(lst2)
lst3 = [[[0, 1], [2, 3]], 4, [5, 6]]
print 'deeprev(', lst3, ') =', deeprev(lst3)
lst4 = [0, 1, [2, [3, 4]]]
print 'deeprev(', lst4, ') =', deeprev(lst4)
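# A quick check of count_items against the nested lists above; the expected totals assume
# the deep count where every non-list element counts exactly once.
print 'count_items(', lst3, ') =', count_items(lst3)   # [[[0, 1], [2, 3]], 4, [5, 6]] -> 7
print 'count_items(', lst4, ') =', count_items(lst4)   # [0, 1, [2, [3, 4]]] -> 5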
| 22.147059
| 55
| 0.5166
|
f20d93afee69f62e6c751a48f7934766e7db26bb
| 2,314
|
py
|
Python
|
anthos/replace_ws_vars.py
|
datapipertech/google-anthos
|
48148457e884ba818a3d57bb4a8420907087e588
|
[
"Apache-2.0"
] | 6
|
2021-05-12T08:08:55.000Z
|
2021-12-13T08:09:48.000Z
|
anthos/replace_ws_vars.py
|
datapipertech/google-anthos
|
48148457e884ba818a3d57bb4a8420907087e588
|
[
"Apache-2.0"
] | 15
|
2021-03-29T22:45:48.000Z
|
2021-07-28T20:36:00.000Z
|
anthos/replace_ws_vars.py
|
datapipertech/google-anthos
|
48148457e884ba818a3d57bb4a8420907087e588
|
[
"Apache-2.0"
] | 7
|
2021-04-19T19:48:08.000Z
|
2022-02-22T05:21:42.000Z
|
import json
import ipaddress
import os
import errno  # used by the makedirs error handler below
from subprocess import Popen
private_subnets = '${private_subnets}'
vsphere_network = '${vsphere_network}'
domain_name = '${domain_name}'
hostname = 'admin-workstation'
subnets = json.loads(private_subnets)
for subnet in subnets:
if subnet['name'] == vsphere_network:
workstation_ip = list(ipaddress.ip_network(subnet['cidr']).hosts())[2].compressed
gateway_ip = list(ipaddress.ip_network(subnet['cidr']).hosts())[0].compressed
prefix_length = int(subnet['cidr'].split('/')[1])
network = ipaddress.IPv4Network(subnet['cidr'], strict=True)
netmask = network.netmask
os.system("sed -i 's/__IP_ADDRESS__/{}/g' /root/anthos/terraform.tfvars".format(workstation_ip))
os.system("sed -i 's/__IP_PREFIX_LENGTH__/{}/g' /root/anthos/terraform.tfvars".format(prefix_length))
os.system("sed -i 's/__GATEWAY__/{}/g' /root/anthos/terraform.tfvars".format(gateway_ip))
os.system("sed -i 's/__IP_ADDRESS__/{}/g' /root/anthos/admin-ws-config.yaml".format(workstation_ip))
os.system("sed -i 's/__NETMASK__/{}/g' /root/anthos/admin-ws-config.yaml".format(netmask))
os.system("sed -i 's/__GATEWAY__/{}/g' /root/anthos/admin-ws-config.yaml".format(gateway_ip))
os.system("sed -i 's/__IP_ADDRESS__/{}/g' /root/anthos/deploy_admin_ws.sh".format(workstation_ip))
# Reserve IP in dnsmasq
dnsmasq_conf = open('/etc/dnsmasq.d/dhcp.conf', 'a+')
dnsmasq_conf.write("dhcp-host=00:00:00:00:00:98, {} # {} IP\n".format(workstation_ip, hostname))
dnsmasq_conf.close()
# DNS record for Admin Workstation
etc_hosts = open('/etc/hosts', 'a+')
etc_hosts.write('{}\t{}\t{}.{}\n'.format(workstation_ip, hostname, hostname, domain_name))
etc_hosts.close()
# Restart dnsmasq service
Popen(["systemctl restart dnsmasq"], shell=True, stdin=None, stdout=None, stderr=None, close_fds=True)
# Tell future Terraform Script where the admin workstation is
try:
os.makedirs('/root/anthos/cluster/')
except OSError as e:
if e.errno != errno.EEXIST:
raise
cluster_tf_var = '/root/anthos/cluster/terraform.tfvars'
if os.path.exists(cluster_tf_var):
append_write = 'a'
else:
append_write = 'w'
cluster_tf_vars = open(cluster_tf_var, append_write)
cluster_tf_vars.write('admin_workstation_ip="{}"'.format(workstation_ip))
cluster_tf_vars.close()
| 36.730159
| 102
| 0.725583
|
c432aa6709b8d6d28387ede6e024a95f95c80988
| 1,459
|
py
|
Python
|
tests/integration/goldens/logging/samples/generated_samples/logging_generated_logging_v2_config_service_v2_delete_sink_sync.py
|
znowdev/gapic-generator-python
|
18ba7a0933461dfa3ecfccf48f2233d65824144a
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/goldens/logging/samples/generated_samples/logging_generated_logging_v2_config_service_v2_delete_sink_sync.py
|
znowdev/gapic-generator-python
|
18ba7a0933461dfa3ecfccf48f2233d65824144a
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/goldens/logging/samples/generated_samples/logging_generated_logging_v2_config_service_v2_delete_sink_sync.py
|
znowdev/gapic-generator-python
|
18ba7a0933461dfa3ecfccf48f2233d65824144a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteSink
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-logging
# [START logging_generated_logging_v2_ConfigServiceV2_DeleteSink_sync]
from google.cloud import logging_v2
def sample_delete_sink():
"""Snippet for delete_sink"""
# Create a client
client = logging_v2.ConfigServiceV2Client()
# Initialize request argument(s)
request = logging_v2.DeleteSinkRequest(
sink_name="projects/{project}/sinks/{sink}",
)
# Make the request
response = client.delete_sink(request=request)
# [END logging_generated_logging_v2_ConfigServiceV2_DeleteSink_sync]
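# Example invocation (assumes valid Google Cloud credentials and an existing sink
# matching the hard-coded name above; not part of the generated snippet):
# sample_delete_sink()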
| 31.717391
| 85
| 0.757368
|
3c86e794250c54a081f2f417cd8ed72cd3d76bc8
| 28,227
|
py
|
Python
|
filer/models/filemodels.py
|
pbs/django-filer
|
c862a84d4e1d86c14eeb509e341f6a7d39a421bf
|
[
"BSD-3-Clause"
] | 1
|
2015-03-03T15:49:14.000Z
|
2015-03-03T15:49:14.000Z
|
filer/models/filemodels.py
|
pbs/django-filer
|
c862a84d4e1d86c14eeb509e341f6a7d39a421bf
|
[
"BSD-3-Clause"
] | 10
|
2015-04-08T14:16:52.000Z
|
2021-12-15T16:17:57.000Z
|
filer/models/filemodels.py
|
pbs/django-filer
|
c862a84d4e1d86c14eeb509e341f6a7d39a421bf
|
[
"BSD-3-Clause"
] | null | null | null |
#-*- coding: utf-8 -*-
import polymorphic
import hashlib
import os
import filer
import logging
import operator
from django.contrib.auth import models as auth_models
from django.urls import reverse
from django.core.files.base import ContentFile
from django.core.exceptions import ValidationError
from django.db import (models, IntegrityError, transaction)
from django.utils.translation import ugettext_lazy as _
from filer.fields.multistorage_file import MultiStorageFileField
from filer.models import mixins
from filer.utils.cms_roles import *
from filer.utils.files import matching_file_subtypes
from filer import settings as filer_settings
from django.db.models import Count
from django.utils import timezone
from polymorphic.models import PolymorphicModel
from polymorphic.managers import PolymorphicManager
from polymorphic.query import PolymorphicQuerySet
logger = logging.getLogger(__name__)
def silence_error_if_missing_file(exception):
"""
Ugly way of checking in an exception describes a 'missing file'.
"""
missing_files_errs = ('no such file', 'does not exist', )
def find_msg_in_error(msg):
return msg in str(exception).lower()
if not any(map(find_msg_in_error, missing_files_errs)):
raise exception
class FileQuerySet(PolymorphicQuerySet):
def readonly(self, user):
Folder = filer.models.foldermodels.Folder
return self.filter(folder__folder_type=Folder.CORE_FOLDER)
def find_duplicates(self, file_obj):
return self.exclude(pk=file_obj.pk).filter(sha1=file_obj.sha1)
def restricted(self, user):
sites = get_sites_without_restriction_perm(user)
if not sites:
return self.none()
return self.filter(
restricted=True,
folder__site__in=sites)
def unrestricted(self, user):
sites = get_sites_without_restriction_perm(user)
if not sites:
return self
return self.exclude(
restricted=True,
folder__site__in=sites)
class FileManager(PolymorphicManager):
queryset_class = FileQuerySet
# Proxy all unknown method calls to the queryset, so that its members are
# directly accessible as PolymorphicModel.objects.*
# Exclude any special functions (__) from this automatic proxying.
def __getattr__(self, name):
if name.startswith('__'):
return super(PolymorphicManager, self).__getattr__(self, name)
return getattr(self.all(), name)
def find_all_duplicates(self):
return {file_data['sha1']: file_data['count']
for file_data in self.get_queryset().values('sha1').annotate(
count=Count('id')).filter(count__gt=1)}
class AliveFileManager(FileManager):
# this is required in order to make sure that other models that are
# related to filer files will get an DoesNotExist exception if the file
# is in trash
use_for_related_fields = True
def get_queryset(self):
return super(AliveFileManager, self).get_queryset().filter(
deleted_at__isnull=True)
class TrashFileManager(FileManager):
def get_queryset(self):
return super(TrashFileManager, self).get_queryset().filter(
deleted_at__isnull=False)
@mixins.trashable
class File(PolymorphicModel,
mixins.IconsMixin):
file_type = 'File'
_icon = "file"
folder = models.ForeignKey('filer.Folder', verbose_name=_('folder'), related_name='all_files',
null=True, blank=True, on_delete=models.deletion.CASCADE)
file = MultiStorageFileField(_('file'), null=True, blank=True, db_index=True, max_length=255)
_file_size = models.IntegerField(_('file size'), null=True, blank=True)
sha1 = models.CharField(_('sha1'), max_length=40, blank=True, default='')
has_all_mandatory_data = models.BooleanField(_('has all mandatory data'), default=False, editable=False)
original_filename = models.CharField(_('original filename'), max_length=255, blank=True, null=True)
name = models.CharField(
max_length=255, null=True, blank=True, verbose_name=_('file name'),
help_text=_('Change the FILE name for an image in the cloud storage'
' system; be sure to include the extension '
'(.jpg or .png, for example) to ensure asset remains '
'valid.'))
title = models.CharField(
max_length=255, null=True, blank=True, verbose_name=_('name'),
help_text=_('Used in the Photo Gallery plugin as a title or name for'
' an image; not displayed via the image plugin.'))
description = models.TextField(
null=True, blank=True, verbose_name=_('description'),
help_text=_('Used in the Photo Gallery plugin as a description;'
' not displayed via the image plugin.'))
owner = models.ForeignKey(auth_models.User,
related_name='owned_%(class)ss', on_delete=models.SET_NULL,
null=True, blank=True, verbose_name=_('owner'))
uploaded_at = models.DateTimeField(_('uploaded at'), auto_now_add=True)
modified_at = models.DateTimeField(_('modified at'), auto_now=True)
is_public = models.BooleanField(
default=filer_settings.FILER_IS_PUBLIC_DEFAULT,
verbose_name=_('Permissions disabled'),
help_text=_('Disable any permission checking for this ' +\
'file. File will be publicly accessible ' +\
'to anyone.'))
restricted = models.BooleanField(
_("Restrict Editors and Writers from being able to edit "
"or delete this asset"), default=False,
help_text=_('If this box is checked, '
'Editors and Writers will still be able to '
'view the asset, add it to a plugin or smart '
'snippet but will not be able to delete or '
'modify the current version of the asset.'))
objects = AliveFileManager()
trash = TrashFileManager()
all_objects = FileManager()
@classmethod
def matches_file_type(cls, iname, ifile, request):
return True # I match all files...
def __init__(self, *args, **kwargs):
super(File, self).__init__(*args, **kwargs)
self._old_is_public = self.is_public
self._old_sha1 = self.sha1
self._force_commit = False
# see method _is_path_changed
self._old_name = self.name
self._current_file_location = self.file.name
self._old_folder_id = self.folder_id
def clean(self):
if self.name:
self.name = self.name.strip()
if "/" in self.name:
raise ValidationError(
"Slashes are not allowed in file names.")
extension = os.path.splitext(self.name)[1]
if not extension:
raise ValidationError(
"File name without extension is not allowed.")
old_file_type = self.get_real_instance_class()
new_file_type = matching_file_subtypes(self.name, None, None)[0]
if not old_file_type is new_file_type:
supported_extensions = getattr(
old_file_type, '_filename_extensions', [])
if supported_extensions:
err_msg = "File name (%s) for this %s should preserve " \
"one of the supported extensions %s" % (
self.name, old_file_type.file_type.lower(),
', '.join(supported_extensions))
else:
err_msg = "Extension %s is not allowed for this file " \
"type." % (extension, )
raise ValidationError(err_msg)
if self.folder:
entries = self.folder.entries_with_names([self.actual_name])
if entries and any(entry.pk != self.pk for entry in entries):
raise ValidationError(
_('Current folder already contains a file named %s') % \
self.actual_name)
def _move_file(self):
"""
Move the file from src to dst.
"""
src_file_name = self.file.name
dst_file_name = self._meta.get_field('file').generate_filename(
self, self.original_filename)
if self.is_public:
src_storage = self.file.storages['private']
dst_storage = self.file.storages['public']
else:
src_storage = self.file.storages['public']
dst_storage = self.file.storages['private']
# delete the thumbnail
# We are toggling the is_public to make sure that easy_thumbnails can
# delete the thumbnails
self.is_public = not self.is_public
self.file.delete_thumbnails()
self.is_public = not self.is_public
# This is needed because most of the remote File Storage backend do not
# open the file.
src_file = src_storage.open(src_file_name)
src_file.open()
self.file = dst_storage.save(dst_file_name,
ContentFile(src_file.read()))
src_file.close()
src_storage.delete(src_file_name)
def _copy_file(self, destination, overwrite=False):
"""
Copies the file to a destination files and returns it.
"""
if overwrite:
# If the destination file already exists default storage backend
# does not overwrite it but generates another filename.
# TODO: Find a way to override this behavior.
raise NotImplementedError
src_file_name = self._current_file_location
storage = self.file.storages['public' if self.is_public else 'private']
if hasattr(storage, 'copy'):
storage.copy(src_file_name, destination)
else:
# This is needed because most of the remote File Storage backend do not
# open the file.
src_file = storage.open(src_file_name)
src_file.open()
destination = storage.save(destination,
ContentFile(src_file.read()))
src_file.close()
self._current_file_location = destination
        self._old_name = self.name
self._old_folder_id = getattr(self.folder, 'id', None)
return destination
def generate_sha1(self):
sha = hashlib.sha1()
self.file.seek(0)
sha.update(self.file.read())
self.sha1 = sha.hexdigest()
# to make sure later operations can read the whole file
self.file.seek(0)
def set_restricted_from_folder(self):
if self.folder and self.folder.restricted:
self.restricted = self.folder.restricted
def save(self, *args, **kwargs):
self.set_restricted_from_folder()
# check if this is a subclass of "File" or not and set
# _file_type_plugin_name
if self.__class__ == File:
# what should we do now?
# maybe this has a subclass, but is being saved as a File instance
# anyway. do we need to go check all possible subclasses?
pass
elif issubclass(self.__class__, File):
self._file_type_plugin_name = self.__class__.__name__
# cache the file size
# TODO: only do this if needed (depending on the storage backend the whole file will be downloaded)
try:
self._file_size = self.file.size
except:
pass
if self._old_is_public != self.is_public and self.pk:
self._move_file()
self._old_is_public = self.is_public
# generate SHA1 hash
# TODO: only do this if needed (depending on the storage backend the whole file will be downloaded)
try:
self.generate_sha1()
except (IOError, TypeError, ValueError) as e:
pass
if filer_settings.FOLDER_AFFECTS_URL and self._is_path_changed():
self._force_commit = True
self.update_location_on_storage(*args, **kwargs)
else:
super(File, self).save(*args, **kwargs)
save.alters_data = True
def _is_path_changed(self):
"""
Used to detect if file location on storage should be updated or not.
Since this is used only to check if location should be updated,
the values will be reset after the file is copied in the
destination location on storage.
"""
# check if file name changed
if self._old_name in ('', None):
name_changed = self.name not in ('', None)
else:
name_changed = self._old_name != self.name
folder_changed = self._old_folder_id != getattr(self.folder, 'id', None)
return name_changed or folder_changed
def _delete_thumbnails(self):
source = self.file.get_source_cache()
if source:
self.file.delete_thumbnails()
source.delete()
def update_location_on_storage(self, *args, **kwargs):
old_location = self._current_file_location
# thumbnails might get physically deleted evenif the transaction fails
# though luck... they get re-created anyway...
self._delete_thumbnails()
# check if file content has changed
if self._old_sha1 != self.sha1:
# actual file content needs to be replaced on storage prior to
# filer file instance save
self.file.storage.save(self._current_file_location, self.file)
self._old_sha1 = self.sha1
new_location = self.file.field.upload_to(self, self.actual_name)
storage = self.file.storage
def copy_and_save():
saved_as = self._copy_file(new_location)
assert saved_as == new_location, '%s %s' % (saved_as, new_location)
self.file = saved_as
super(File, self).save(*args, **kwargs)
if self._force_commit:
try:
with transaction.atomic(savepoint=False):
# The manual transaction management here breaks the transaction management
# from django.contrib.admin.options.ModelAdmin.change_view
# This isn't a big problem because the only CRUD operation done afterwards
# is an insertion in django_admin_log. If this method rollbacks the transaction
# then we will have an entry in the admin log describing an action
# that didn't actually finish succesfull.
# This 'hack' can be removed once django adds support for on_commit and
# on_rollback hooks (see: https://code.djangoproject.com/ticket/14051)
copy_and_save()
except:
# delete the file from new_location if the db update failed
if old_location != new_location:
storage.delete(new_location)
raise
else:
# only delete the file on the old_location if all went OK
if old_location != new_location:
storage.delete(old_location)
else:
copy_and_save()
return new_location
def soft_delete(self, *args, **kwargs):
"""
This method works as a default delete action of a filer file.
It will not actually delete the item from the database, instead it
will make it inaccessible for the default manager.
It just `fakes` a deletion by doing the following:
1. sets a deletion time that will be used to distinguish
`alive` and `trashed` filer files.
2. makes a copy of the actual file on storage and saves it to
a trash location on storage. Also tries to ignore if the
actual file is missing from storage.
3. updates only the filer file path in the database (no model
save is done since it tries to bypass the logic defined
in the save method)
4. deletes the file(and all it's thumbnails) from the
original location if no other filer files are referencing
it.
All the metadata of this filer file will remain intact.
"""
deletion_time = kwargs.pop('deletion_time', timezone.now())
# move file to a `trash` location
to_trash = filer.utils.generate_filename.get_trash_path(self)
old_location, new_location = self.file.name, None
try:
new_location = self._copy_file(to_trash)
except Exception as e:
silence_error_if_missing_file(e)
if filer_settings.FILER_ENABLE_LOGGING:
logger.error('Error while trying to copy file: %s to %s.' % (
old_location, to_trash), e)
else:
# if there are no more references to the file on storage delete it
# and all its thumbnails
if not File.objects.exclude(pk=self.pk).filter(
file=old_location, is_public=self.is_public).exists():
self.file.delete(False)
finally:
# even if `copy_file` fails, user is trying to delete this file so
# in worse case scenario this file is not restorable
new_location = new_location or to_trash
File.objects.filter(pk=self.pk).update(
deleted_at=deletion_time, file=new_location)
self.deleted_at = deletion_time
self.file = new_location
def hard_delete(self, *args, **kwargs):
"""
This method deletes the filer file from the database and from storage.
"""
# delete the model before deleting the file from storage
super(File, self).delete(*args, **kwargs)
# delete the actual file from storage and all its thumbnails
# if there are no other filer files referencing it.
if not File.objects.filter(file=self.file.name,
is_public=self.is_public).exists():
self.file.delete(False)
def delete(self, *args, **kwargs):
super(File, self).delete_restorable(*args, **kwargs)
delete.alters_data = True
def _set_valid_name_for_restore(self):
"""
Generates the first available name so this file
can be restored in the folder.
"""
basename, extension = os.path.splitext(self.clean_actual_name)
if self.folder:
files = self.folder.files
elif self.owner:
files = filer.models.tools.get_user_clipboard(self.owner).files.all()
else:
from filer.models.virtualitems import UnfiledImages
files = UnfiledImages().files
existing_file_names = [f.clean_actual_name for f in files]
i = 1
while self.clean_actual_name in existing_file_names:
filename = "%s_%s%s" % (basename, i, extension)
# set actual name
if self.name in ('', None):
self.original_filename = filename
else:
self.name = filename
i += 1
def restore(self):
"""
Restores the file to its folder location.
If there's already an existing file with the same name, it will
generate a new filename.
"""
if self.folder_id:
Folder = filer.models.foldermodels.Folder
try:
self.folder
except Folder.DoesNotExist:
self.folder = Folder.trash.get(id=self.folder_id)
self.folder.restore_path()
# at this point this file's folder should be `alive`
self.folder = filer.models.Folder.objects.get(id=self.folder_id)
old_location, new_location = self.file.name, None
self._set_valid_name_for_restore()
destination = self.file.field.upload_to(self, self.upload_to_name)
try:
new_location = self._copy_file(destination)
except Exception as e:
silence_error_if_missing_file(e)
if filer_settings.FILER_ENABLE_LOGGING:
logger.error('Error while trying to copy file: %s to %s.' % (
old_location, destination), e)
else:
self.file.delete(False)
finally:
new_location = new_location or destination
File.trash.filter(pk=self.pk).update(
deleted_at=None, file=new_location,
name=self.name, original_filename=self.original_filename)
self.deleted_at = None
self.file.name = new_location
# restore to user clipboard
if self.owner_id and not self.folder_id:
clipboard = filer.models.tools.get_user_clipboard(self.owner)
clipboard.append_file(File.objects.get(id=self.id))
@property
def label(self):
if self.name in ['', None]:
text = self.original_filename or 'unnamed file'
else:
text = self.name
text = "%s" % (text,)
return text
def _cmp(self, a, b):
return (a > b) - (a < b)
def __lt__(self, other):
return self._cmp(self.label.lower(), other.label.lower()) < 0
@property
def actual_name(self):
if not self.sha1:
try:
self.generate_sha1()
except (IOError, TypeError, ValueError):
return self.clean_actual_name
try:
folder = self.folder.get_ancestors().first()
root_folder = getattr(folder, 'name', None)
except:
root_folder = None
if root_folder in filer_settings.FILER_NOHASH_ROOTFOLDERS:
name_fmt = '{actual_name}'
else:
name_fmt = '{hashcode}_{actual_name}'
name = name_fmt.format(hashcode=self.sha1[:10],
actual_name=self.clean_actual_name)
return name
@property
def upload_to_name(self):
"""
For normal files this is the actual name with the hash but clipboard
file upload locations are the clean names.
"""
if self.folder:
return self.actual_name
else:
return self.clean_actual_name
@property
def clean_actual_name(self):
"""The name displayed to the user.
Uses self.name if set, otherwise it falls back on self.original_filename.
This property is used for enforcing unique filenames within the same folder.
"""
if self.name in ('', None):
name = "%s" % (self.original_filename,)
else:
name = "%s" % (self.name,)
return name
@property
def pretty_logical_path(self):
its_dir = self.logical_folder
if its_dir.is_root:
directory_path = ''
else:
directory_path = its_dir.pretty_logical_path
full_path = '{}{}{}'.format(directory_path, os.sep, self.actual_name)
return full_path
def __unicode__(self):
try:
name = self.pretty_logical_path
except:
name = self.actual_name
return name
def get_admin_url_path(self):
return reverse(
'admin:%s_%s_change' % (self._meta.app_label,
self._meta.model_name,),
args=(self.pk,)
)
def get_admin_delete_url(self):
return reverse(
'admin:{0}_{1}_delete'.format(self._meta.app_label, self._meta.model_name,),
args=(self.pk,))
@property
def url(self):
"""
to make the model behave like a file field
"""
try:
r = self.file.url
except:
r = ''
return r
@property
def path(self):
try:
return self.file.path
except:
return ""
@property
def size(self):
return self._file_size or 0
@property
def extension(self):
filetype = os.path.splitext(self.file.name)[1].lower()
if len(filetype) > 0:
filetype = filetype[1:]
return filetype
@property
def logical_folder(self):
"""
if this file is not in a specific folder return the Special "unfiled"
Folder object
"""
if not self.folder:
from filer.models.virtualitems import UnfiledImages
return UnfiledImages()
else:
return self.folder
@property
def logical_path(self):
"""
Gets logical path of the folder in the tree structure.
Used to generate breadcrumbs
"""
folder_path = []
if self.folder:
folder_path.extend(self.folder.get_ancestors())
folder_path.append(self.logical_folder)
return folder_path
@property
def duplicates(self):
return list(File.objects.find_duplicates(self))
def is_core(self):
if self.folder:
return self.folder.is_core()
return False
def is_readonly_for_user(self, user):
if self.folder:
return self.folder.is_readonly_for_user(user)
return False
def is_restricted_for_user(self, user):
perm = 'filer.can_restrict_operations'
return (self.restricted and (
not (user.has_perm(perm, self) or user.has_perm(perm)) or
not can_restrict_on_site(user, self.folder.site)))
def can_change_restricted(self, user):
"""
Checks if restriction operation is available for this file.
"""
perm = 'filer.can_restrict_operations'
if not user.has_perm(perm, self) and not user.has_perm(perm):
return False
if not self.folder:
# cannot restrict unfiled files
return False
if not can_restrict_on_site(user, self.folder.site):
return False
if self.folder.restricted == self.restricted == True:
# only parent can be set to True
return False
if self.folder.restricted == self.restricted == False:
return True
if self.folder.restricted == True and self.restricted == False:
raise IntegrityError(
'Re-save folder %s to fix restricted property' % (
self.folder.pretty_logical_path))
return True
def has_change_permission(self, user):
if not self.folder:
# clipboard and unfiled files
return True
if self.is_readonly_for_user(user):
# nobody can change core folder
# leaving these on True based on the fact that core folders are
# displayed as readonly fields
return True
# only admins can change site folders with no site owner
if not self.folder.site and has_admin_role(user):
return True
if self.folder.site:
can_change_file = (user.has_perm('filer.change_file', self) or
user.has_perm('filer.change_file'))
return can_change_file and has_role_on_site(user, self.folder.site)
return False
def has_delete_permission(self, user):
if not self.folder:
# clipboard and unfiled files
return True
# nobody can delete core files
if self.is_readonly_for_user(user):
return False
# only admins can delete site files with no site owner
if not self.folder.site and has_admin_role(user):
return True
if self.folder.site:
can_delete_file = (user.has_perm('filer.delete_file', self) or
user.has_perm('filer.delete_file'))
return can_delete_file and has_role_on_site(user, self.folder.site)
return False
def __str__(self):
return self.__unicode__()
class Meta:
app_label = 'filer'
verbose_name = _('file')
verbose_name_plural = _('files')
| 37.837802
| 108
| 0.609735
|
856e0fc41f3da9e4faff817456e02f16eaf84e02
| 90
|
py
|
Python
|
katas/kyu_7/how_many_are_smaller_than_me.py
|
the-zebulan/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
katas/kyu_7/how_many_are_smaller_than_me.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | null | null | null |
katas/kyu_7/how_many_are_smaller_than_me.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
def smaller(arr):
return [sum(b < a for b in arr[i + 1:]) for i, a in enumerate(arr)]
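# Illustrative usage (not part of the original kata file):
# smaller([5, 4, 3, 2, 1]) -> [4, 3, 2, 1, 0]
# smaller([1, 2, 0]) -> [1, 1, 0]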
| 30
| 71
| 0.6
|
dcf9b52ce4e3f3b709da9963eebaeb4cbcf2020b
| 1,805
|
py
|
Python
|
allauth/socialaccount/providers/openstreetmap/views.py
|
christopherpoland/django-allauth
|
df78274669a87f5c1a9147843b353353230d1940
|
[
"MIT"
] | 2
|
2022-01-24T23:30:18.000Z
|
2022-01-26T00:21:22.000Z
|
allauth/socialaccount/providers/openstreetmap/views.py
|
christopherpoland/django-allauth
|
df78274669a87f5c1a9147843b353353230d1940
|
[
"MIT"
] | 8
|
2021-04-08T21:58:27.000Z
|
2022-03-12T00:44:58.000Z
|
allauth/socialaccount/providers/openstreetmap/views.py
|
safwanrahman/django-allauth
|
dac31fdac2c13a97829e6879e09431c735abab4e
|
[
"MIT"
] | 3
|
2020-06-21T20:52:12.000Z
|
2021-07-31T11:07:21.000Z
|
from xml.etree import ElementTree
from xml.parsers.expat import ExpatError
from allauth.compat import six
from allauth.socialaccount.providers.oauth.client import OAuth
from allauth.socialaccount.providers.oauth.views import (
OAuthAdapter,
OAuthCallbackView,
OAuthLoginView,
)
from .provider import OpenStreetMapProvider
class OpenStreetMapAPI(OAuth):
url = 'https://www.openstreetmap.org/api/0.6/user/details'
def get_user_info(self):
raw_xml = self.query(self.url)
if not six.PY3:
raw_xml = raw_xml.encode('utf8')
try:
user_element = ElementTree.fromstring(raw_xml).find('user')
user_info = user_element.attrib
user_avatar = user_element.find('img')
if user_avatar is not None:
user_info.update({'avatar': user_avatar.attrib.get('href')})
return user_info
except (ExpatError, KeyError, IndexError):
return None
class OpenStreetMapOAuthAdapter(OAuthAdapter):
provider_id = OpenStreetMapProvider.id
request_token_url = 'https://www.openstreetmap.org/oauth/request_token'
access_token_url = 'https://www.openstreetmap.org/oauth/access_token'
authorize_url = 'https://www.openstreetmap.org/oauth/authorize'
def complete_login(self, request, app, token, response):
client = OpenStreetMapAPI(request, app.client_id, app.secret,
self.request_token_url)
extra_data = client.get_user_info()
return self.get_provider().sociallogin_from_response(request,
extra_data)
oauth_login = OAuthLoginView.adapter_view(OpenStreetMapOAuthAdapter)
oauth_callback = OAuthCallbackView.adapter_view(OpenStreetMapOAuthAdapter)
| 36.1
| 76
| 0.685873
|
a20cecf843606dcb3aab541a89a20c8dcf81ca9e
| 935
|
py
|
Python
|
Graphs/KruskalAlog.py
|
Saicharan67/Interview-Coding-Questions
|
b28cccd41e380f15b833039d687931570908adfb
|
[
"MIT"
] | 12
|
2021-06-18T16:24:27.000Z
|
2021-11-04T03:30:00.000Z
|
Graphs/KruskalAlog.py
|
Saicharan67/Interview-Coding-Questions
|
b28cccd41e380f15b833039d687931570908adfb
|
[
"MIT"
] | 32
|
2021-10-01T07:15:00.000Z
|
2021-11-05T15:35:53.000Z
|
Graphs/KruskalAlog.py
|
Saicharan67/Interview-Coding-Questions
|
b28cccd41e380f15b833039d687931570908adfb
|
[
"MIT"
] | 21
|
2021-09-29T09:16:31.000Z
|
2021-10-30T10:06:21.000Z
|
def GetParent(node, parent):
if node == parent[node]:
return node
parent[node] = GetParent(parent[node], parent)
return parent[node]
def union(u, v, parent, rank):
u = GetParent(u, parent)
v = GetParent(v, parent)
if rank[u] < rank[v]:
parent[u] = v
elif rank[u] > rank[v]:
parent[v] = u
else:
parent[v] = u
rank[u] += 1
# union
def Kruskal(n, m):
edges = []
for _ in range(m):
x, y, w = map(int, input().split())
        edges.append((x, y, w))
edges = sorted(edges, key=lambda x: x[2])
parent = [0]*n
rank = [0]*n
for i in range(n):
parent[i] = i
cost = 0
mst = []
for x, y, w in edges:
if(GetParent(x, parent) != GetParent(y, parent)):
cost += w
mst.append([x, y])
union(x, y, parent, rank)
for i, j in mst:
print(i, '-', j)
return cost
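# Illustrative driver (hypothetical, not part of the original file): for a graph
# with n nodes labelled 0..n-1 and m weighted edges supplied on stdin as "u v w"
# lines, the call below reads the edges, prints the MST edges and returns its cost:
# print(Kruskal(4, 5))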
| 18.7
| 57
| 0.48877
|
92858c527c1a133eed23663de4dbb5004052fbbd
| 1,240
|
py
|
Python
|
base/configs/custom.py
|
titorenko/compartmental-model
|
fe022ef754a2b07351db8f48e5b0fdfdbbcebdea
|
[
"MIT"
] | null | null | null |
base/configs/custom.py
|
titorenko/compartmental-model
|
fe022ef754a2b07351db8f48e5b0fdfdbbcebdea
|
[
"MIT"
] | null | null | null |
base/configs/custom.py
|
titorenko/compartmental-model
|
fe022ef754a2b07351db8f48e5b0fdfdbbcebdea
|
[
"MIT"
] | null | null | null |
from base.initialise_parameters import preparePopulationFrame, params
# camp
camp = 'Moria'
population_frame, population = preparePopulationFrame(camp)
# from github issue
# if not used, set timings to e.g. [0,0] or any other interval of 0 length or outside the calculated window
control_dict = dict( # contains our 6 different control options. Can choose any combination of these 6. Suggest limiting to all occurring at similar times
# 1
# if True, reduces transmission rate by params.better_hygiene
better_hygiene = dict(value = 0.7,
timing = [0,200]),
ICU_capacity = dict(value = 6/population),
# 4
# move symptomatic cases off site
remove_symptomatic = dict(rate = 50/population, # people per day
timing = [30,90]),
# 5
# partially separate low and high risk
# (for now) assumed that if do this, do for entire course of epidemic
shielding = dict(used= False),
# 6
# move uninfected high risk people off site
remove_high_risk = dict(rate = 50/population, # people per day
n_categories_removed = 2, # remove oldest n categories
timing = [0,12])
)
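# Illustrative note (not part of the original config): following the comment above,
# an intervention can be switched off by giving it a zero-length window, e.g.
# better_hygiene = dict(value=0.7, timing=[0, 0])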
| 35.428571
| 150
| 0.637903
|
cf7b1209097cecdc280f18cbfdf6fd6fd5b0eaac
| 981
|
py
|
Python
|
services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/dependencies.py
|
elisabettai/osparc-simcore
|
ad7b6e05111b50fe95e49306a992170490a7247f
|
[
"MIT"
] | null | null | null |
services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/dependencies.py
|
elisabettai/osparc-simcore
|
ad7b6e05111b50fe95e49306a992170490a7247f
|
[
"MIT"
] | 55
|
2018-05-15T09:47:00.000Z
|
2022-03-31T06:56:50.000Z
|
services/dynamic-sidecar/src/simcore_service_dynamic_sidecar/core/dependencies.py
|
odeimaiz/osparc-simcore
|
71c2fc58dcfe067487dcd75cb70298a4d6237e97
|
[
"MIT"
] | 1
|
2020-04-22T15:06:58.000Z
|
2020-04-22T15:06:58.000Z
|
from fastapi import Depends, FastAPI, Request
from fastapi.datastructures import State
from ..models.domains.shared_store import SharedStore
from ..models.schemas.application_health import ApplicationHealth
from .rabbitmq import RabbitMQ
from .settings import DynamicSidecarSettings
def get_application(request: Request) -> FastAPI:
return request.app
def get_app_state(request: Request) -> State:
return request.app.state
def get_application_health(
app_state: State = Depends(get_app_state),
) -> ApplicationHealth:
return app_state.application_health # type: ignore
def get_settings(app_state: State = Depends(get_app_state)) -> DynamicSidecarSettings:
return app_state.settings # type: ignore
def get_shared_store(app_state: State = Depends(get_app_state)) -> SharedStore:
return app_state.shared_store # type: ignore
def get_rabbitmq(app_state: State = Depends(get_app_state)) -> RabbitMQ:
return app_state.rabbitmq # type: ignore
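# Illustrative usage in a route (hypothetical, not part of this module; assumes a
# FastAPI instance named `app`):
# @app.get("/health")
# def health(application_health: ApplicationHealth = Depends(get_application_health)):
#     return application_health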
| 28.852941
| 86
| 0.783894
|
36c672f6786c4de09170b8a5229faf7a70b3d09e
| 6,293
|
py
|
Python
|
hlso/web/callbacks.py
|
holtgrewe/clsify
|
ad5efecd1d0114898e10498434bded32840d9a4b
|
[
"MIT"
] | null | null | null |
hlso/web/callbacks.py
|
holtgrewe/clsify
|
ad5efecd1d0114898e10498434bded32840d9a4b
|
[
"MIT"
] | 4
|
2019-12-26T22:28:06.000Z
|
2020-01-16T21:37:58.000Z
|
hlso/web/callbacks.py
|
holtgrewe/clsify
|
ad5efecd1d0114898e10498434bded32840d9a4b
|
[
"MIT"
] | null | null | null |
"""Callback code."""
import base64
import json
import os
import tempfile
from dash.dependencies import Input, Output, State
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from logzero import logger
import pandas as pd
from ..conversion import convert_seqs
from ..export import write_excel
from ..workflow import blast_and_haplotype_many, results_to_data_frames
from ..phylo import phylo_analysis
from .settings import FILE_NAME_TO_SAMPLE_NAME, SAMPLE_REGEX
from . import ui
def register_upload(app):
@app.callback(
Output("hidden-data", "children"),
[Input("upload-data", "contents")],
[State("hidden-data", "children"), State("upload-data", "filename")],
)
def data_uploaded(list_of_contents, hidden_data, list_of_names):
if list_of_contents:
with tempfile.TemporaryDirectory() as tmpdir:
paths_reads = []
for content, name in zip(list_of_contents, list_of_names):
paths_reads.append(os.path.join(tmpdir, name))
with open(paths_reads[-1], "wb") as tmp_file:
logger.info("Writing to %s", paths_reads[-1])
_, content = content.split(",", 1)
tmp_file.write(base64.b64decode(content))
seq_files = convert_seqs(paths_reads, tmpdir, FILE_NAME_TO_SAMPLE_NAME)
results = blast_and_haplotype_many(seq_files)
df_summary, df_blast, df_haplotyping = results_to_data_frames(results, SAMPLE_REGEX)
row_select = (df_summary.orig_sequence != "-") & (df_summary.region != "-")
columns = ["query", "region", "orig_sequence"]
phylo_result = phylo_analysis(df_summary[row_select][columns])
return json.dumps(
{
"summary": df_summary.to_dict(),
"blast": df_blast.to_dict(),
"haplotyping": df_haplotyping.to_dict(),
"phylo": phylo_result,
}
)
def load_hidden_data(hidden_data):
raw_data = json.loads(hidden_data)
def decode(data, key):
if key in ("summary", "blast", "haplotyping"):
return pd.DataFrame.from_dict(data)
else:
return data
return {key: decode(raw_data[key], key) for key in raw_data}
def register_computation_complete(app):
@app.callback(Output("page-content", "children"), [Input("hidden-data", "children")])
def computation_complete(hidden_data):
if not hidden_data:
return ui.render_page_content_empty_children()
else:
data = load_hidden_data(hidden_data)
mime = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
mime = "application/octet-stream"
with tempfile.NamedTemporaryFile() as tmpf:
# df_summary, df_blast, df_haplotyping, path):
write_excel(data["summary"], data["blast"], data["haplotyping"], tmpf.name)
tmpf.seek(0)
xlsx = base64.b64encode(tmpf.read()).decode("utf-8")
return [
html.P(
children=[
html.A(
children=[
html.I(className="fas fa-file-excel ml-2 mr-2"),
"Download XLSX",
],
# href="data:text/html,<script>alert('hi');</script>",
download="hlso_result.xlsx",
target="_blank",
href="data:%s;base64,%s" % (mime, xlsx),
)
]
),
dbc.Tabs(
children=[
dbc.Tab(ui.render_tab_summary(data), label="Summary", id="tab-summary"),
dbc.Tab(ui.render_tab_blast(data), label="BLAST", id="tab-blast"),
dbc.Tab(
ui.render_tab_haplotyping(data),
label="Haplotyping",
id="tab-haplotyping",
),
dbc.Tab(
ui.render_tab_dendrograms(data),
label="Dendrograms",
id="tab-dendgrograms",
),
]
),
]
def register_row_clicks(app):
@app.callback(
Output("blast-current-match", "children"),
[Input("hidden-data", "children"), Input("blast-table", "selected_row_ids")],
)
def update_haplotype_match(hidden_data, selected_row_ids):
# logger.info("Selected %s from %s", selected_row_ids, hidden_data)
if selected_row_ids and hidden_data:
selected_row_ids = list(map(str, selected_row_ids))[0]
data = load_hidden_data(hidden_data)
df_blast = data["blast"]
alignment = df_blast.loc[selected_row_ids].alignment
ncbi_tpl = "%s?DATABASE=nt&PROGRAM=blastn&MEGABLAST=on&QUERY=>%s%%0A%s"
ncbi_url = ncbi_tpl % (
"https://blast.ncbi.nlm.nih.gov/Blast.cgi",
df_blast.loc[selected_row_ids]["query"],
df_blast.loc[selected_row_ids].orig_sequence,
)
return [
html.P(
children=[
html.Div(
children=[
html.A(
children=[
html.I(className="fas fa-external-link-alt"),
" RunNCBI BLAST for this sequence",
],
href=ncbi_url,
)
],
className="mt-3",
)
]
),
dcc.Markdown("```text\n%s\n```" % alignment, className="mt-3"),
]
return [html.P(["no match selected yet"])]
| 40.6
| 100
| 0.505959
|
c0a9e8cca691fd9a9993816af7e07d8c83bbd71a
| 829
|
py
|
Python
|
settings/local_template.py
|
evertrol/det
|
5d397010bc9a608dcb38c176bb3f89e4f17ab272
|
[
"MIT"
] | null | null | null |
settings/local_template.py
|
evertrol/det
|
5d397010bc9a608dcb38c176bb3f89e4f17ab272
|
[
"MIT"
] | null | null | null |
settings/local_template.py
|
evertrol/det
|
5d397010bc9a608dcb38c176bb3f89e4f17ab272
|
[
"MIT"
] | null | null | null |
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
# SECURITY WARNING: keep the secret key used in production secret!
# Use for example the command-line command below to generate a secret key
# $ python -c 'from django.core.management import utils; print(utils.get_random_secret_key())'
SECRET_KEY = ''
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
# Locate the database in a 'database' directory *outside* the
# project directory. The 'database' directory needs to be
# created first by the user.
'NAME': BASE_DIR / '..' / 'database' / 'db.sqlite3',
}
}
| 30.703704
| 95
| 0.691194
|
3fcffddc8ab8002248a2e6632d9865088a173714
| 2,278
|
py
|
Python
|
checker.py
|
nwtnni/java-autograder
|
acad5135fb668eaa218a314c02274be76a1911e8
|
[
"MIT"
] | 2
|
2017-09-27T12:05:56.000Z
|
2017-09-27T18:36:19.000Z
|
checker.py
|
nwtnni/submission-checker
|
acad5135fb668eaa218a314c02274be76a1911e8
|
[
"MIT"
] | null | null | null |
checker.py
|
nwtnni/submission-checker
|
acad5135fb668eaa218a314c02274be76a1911e8
|
[
"MIT"
] | null | null | null |
from util import *
from functools import reduce
ALLOWED_BY_DEFAULT = ["txt", "java", "pdf", "classpath", "jpg", "png", "DS_Store", "ctr"]
class Checker:
_REQUIRE_DIR = path("whitelists")
def __init__(self, assignment):
with open(join(Checker._REQUIRE_DIR, add_ext(assignment, ".txt")), "r") as f:
self.all = [line.strip() for line in f]
self.required = [line.split()[0] for line in self.all if (line != "" and line[0] != "*")]
def check_required(self):
success, missing = [], []
for req in self.required:
found = exists(req) or (req.endswith(".txt") and exists(req[:-4] + ".pdf"))
success.append(req) if found else missing.append(req)
return (success, missing)
def check_extra(self, path):
extra = []
for root, dirs, files in walk(path):
for name in files:
absolute = join(root, name)
relative = rel_path(absolute, path)
not_req = relative not in self.required
not_def = relative.rpartition(".")[2] not in ALLOWED_BY_DEFAULT
if not_req and not_def:
extra.append(relative)
return extra
def check(self, root):
previous = cwd(root)
success, missing = self.check_required()
extra = self.check_extra(root)
cwd(previous)
if len(success) > 0:
res = "We scanned your submission and found the following required files:\n"
res = res + arr_to_str(success) + "\n"
else:
res = ""
if len(missing) + len(extra) == 0:
return res + "\nYour submission looks good to go!\n"
if len(missing) > 0:
res = res + "Oops! Looks like you're missing some files:\n"
res = res + arr_to_str(missing) + "\n"
if len(extra) > 0:
res = res + "We've found some extraneous files. Please remove these when you resubmit:\n"
res = res + arr_to_str(extra) + "\n"
res = res + "If you think there's an issue with our script, please respond to this email.\n"
res = res + "For reference, here's the directory structure we're looking for:\n"
return res + arr_to_str(self.all)
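# Illustrative usage (hypothetical assignment name and path, not part of the original file):
# print(Checker("a1").check("/path/to/student/submission"))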
| 37.966667
| 101
| 0.565847
|
d3c5b2007a1146b84250fb0fd62ee9ab462a8de6
| 2,405
|
py
|
Python
|
source/modules/synt/variable_algorithms.py
|
SyntLang/SyntPy
|
fdbfe4c4083193d430cbdf5a0968556ba9f72f0f
|
[
"MIT"
] | null | null | null |
source/modules/synt/variable_algorithms.py
|
SyntLang/SyntPy
|
fdbfe4c4083193d430cbdf5a0968556ba9f72f0f
|
[
"MIT"
] | null | null | null |
source/modules/synt/variable_algorithms.py
|
SyntLang/SyntPy
|
fdbfe4c4083193d430cbdf5a0968556ba9f72f0f
|
[
"MIT"
] | null | null | null |
# Variable Algorithms
# meta
def meta(self, *args):
# check if run_status is run
if self.run_status == "run":
pass
else:
return
# get input data
meta_name = args[0] if len(args) > 0 else None
meta_value = args[1] if len(args) > 1 else None
# validate input data
if meta_name is None:
self.throw("Meta name not found")
if meta_value is None:
self.throw(f"Meta value not found: {meta_name}")
# set meta data
self.meta.update({meta_name : meta_value})
# var
def var(self, *args):
# check if run_status is run
if self.run_status == "run":
pass
else:
return
# variable data
variable_data = {
"type" : args[0] if len(args) > 0 else None,
"name" : args[1] if len(args) > 1 else None,
"value" : args[2] if len(args) > 2 else ""
}
# insert variable to self storage
self.create_variable(variable_data)
# alg
def alg(self, *args):
# check if run_status is run
if self.run_status == "run":
pass
else:
return
# get algorithm data
algorithm_data = {
"name": args[0] if len(args) > 0 else None,
"args_name": args[1] if len(args) > 1 else None,
"data": args[2] if len(args) > 2 else []
}
# throw error if algorithm data is not defined
if algorithm_data["name"] is None:
self.throw("Algorithm data not found: name")
# append algorithm data to self
self.script_algorithms.update({
algorithm_data["name"] : {
"args_name": algorithm_data["args_name"],
"data": algorithm_data["data"]
}
})
# result
def result(self, *args):
# check if run_status is run
if self.run_status == "run":
pass
else:
return
# get result value and variable name
result_value = args[0] if len(args) > 0 else ""
result_variable_name = self.algorithm_output_variable_name_list[-1] if len(self.algorithm_output_variable_name_list) > 0 else None
# validate result data
if result_variable_name is None:
self.throw("Result variable name not found")
return
if result_variable_name not in self.variables:
self.throw(f"Variable does not exist: {result_variable_name}")
return
# get result data
result_variable_data = self.variables[result_variable_name]
# set result data
self.update_variable({
"name": result_variable_name,
"type": result_variable_data["type"],
"value": result_value
})
del self.algorithm_output_variable_name_list[-1]
| 24.05
| 132
| 0.669023
|
a154f3539c34b3b8e540c5840fec5ff9441ad356
| 10,169
|
py
|
Python
|
mps_database/tools/mps_checkout.py
|
slaclab/mps_database
|
023ed9bb3b333e382cc612f816c3f4b295b66a4c
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mps_database/tools/mps_checkout.py
|
slaclab/mps_database
|
023ed9bb3b333e382cc612f816c3f4b295b66a4c
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2017-07-07T21:31:59.000Z
|
2017-07-07T21:31:59.000Z
|
mps_database/tools/mps_checkout.py
|
slaclab/mps_database
|
023ed9bb3b333e382cc612f816c3f4b295b66a4c
|
[
"BSD-3-Clause-LBNL"
] | 4
|
2017-07-07T20:10:54.000Z
|
2020-12-13T00:03:37.000Z
|
#This script will check that all thresholds for an app id are correctly functioning
#The user will then check that the screen shows the expected output
#
import sys
import os
from datetime import datetime, date
import time
import argparse
import epics as e
import json
def process_args(argv):
parser=argparse.ArgumentParser(description='This script is used to verify the MPS system set points')
parser.add_argument('-AppID', required=True, type = int, help = 'The MPS app id you are checking out')
parser.add_argument('-SrcFileLoc', required=True, type = str, help='The directory containing the checkout JSON files')
parser.add_argument('-LogFileLoc', required=True, type = str, help='The directory where the log files will be written to')
parser.add_argument('-UserName', required=True, type = str, help='Your username')
parser.add_argument('-FirstName',required=True, type = str)
parser.add_argument('-LastName',required=True, type = str)
parser.add_argument('-Auto',required=False, type = str, choices=['True','False'])
parser=parser.parse_args()
return parser
def query_yes_no(question, bypass):
if(bypass):
return True
valid = {"yes": True, "y": True, "no": False, "n": False}
prompt = " [y/n] "
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n")
def tell_user_fault():
print("Thanks you for confirming this problem has been logged")
def enable_thresholds(thresholdCount, device, table, mode):
for threshold in range(thresholdCount-1, -1, -1):
e.caput(device+'_T'+str(threshold)+'_'+table+'_EN',mode)
def set_thresholds_safe(thresholdCount, device, safeMargin, table):
for threshold in range(thresholdCount-1, -1, -1):
currentValue = e.caget(device)
safeSetPoint = (safeMargin+10*threshold)*abs(currentValue)
e.caput(device+'_T'+str(threshold)+'_'+table,safeSetPoint)
def test_threshold(thresholdCount, safeMargin, testMargin, device, table, logDict ,skipUser):
for threshold in range(thresholdCount-1, -1, -1):
currentValue = e.caget(device)
safeSetPoint = (safeMargin+10*threshold)*abs(currentValue)
testSetPoint = (testMargin+10*threshold)*abs(currentValue)
print(device+'_T'+str(threshold)+'_'+table)
#test High set point
e.caput(device+'_T'+str(threshold)+'_'+table,-testSetPoint, wait=True)
time.sleep(2)
thresholdState = e.caget(device+'_T'+str(threshold)+'_SCMPSC', use_monitor=False)
logicState = e.caget(device+'_LINAC_LOGIC', use_monitor=False)
if((query_yes_no("Is "+str(device)+' threshold '+str(threshold)+' in state IS_EXCEDED',skipUser)) and thresholdState):
logDict['threshold'+table+str(threshold)] = [1,1,1]
elif(not(thresholdState)):
tell_user_fault()
logDict['threshold'+table+str(threshold)] = [0,1,1]
else:
if(query_yes_no('Are you looking at '+device+'_T'+str(threshold),skipUser)):
tell_user_fault()
logDict['threshold'+table+str(threshold)] = [1,1,0]
else:
if(query_yes_no("Confirm "+str(device)+' threshold '+str(threshold)+' in state IS_EXCEDED',skipUser)):
logDict['threshold'+table+str(threshold)] = [1,1,1]
else:
tell_user_fault()
logDict['threshold'+table+str(threshold)] = [1,1,0]
if((query_yes_no("Is "+device+'_LINAC_LOGIC in state: B'+ str(threshold),skipUser)) and ((thresholdCount-threshold)==logicState)):
logDict['logic'+table+str(threshold)] = [1,1,1]
elif(not((thresholdCount-threshold)==logicState)):
tell_user_fault()
logDict['logic'+table+str(threshold)] = [0,1,1]
else:
if(query_yes_no('Are you looking at '+device+'_LINAC_LOGIC',skipUser)):
tell_user_fault()
logDict['logic'+table+str(threshold)] = [1,1,0]
else:
if(query_yes_no("Confirm "+device+'_LINAC_LOGIC is in state: B'+ str(threshold),skipUser)):
logDict['logic'+table+str(threshold)] = [1,1,1]
else:
tell_user_fault()
logDict['logic'+table+str(threshold)] = [1,1,0]
def main(argv):
controls = process_args(argv)
if(controls.Auto == 'True'):
skip = True
else:
skip = False
with open(controls.SrcFileLoc+ "/" + "App"+str(controls.AppID) + "_checkout.json") as json_file:
appInfo = json.load(json_file)
checkOutLog = {"UserInfo":{'DateTime':datetime.now().strftime("%d/%m/%Y %H:%M:%S"),'UserName':controls.UserName, 'FirstName': controls.FirstName, 'LastName': controls.LastName},
"AppInfo": {'AppID': controls.AppID, 'devices':appInfo['devices']}}
#add software and firmware version to App info
    #add appid related things enable the whole device to checkout and log
#each item checked has a list [value of bool, computer check, person check] 1 is considered normal 0 is considered fault
print(checkOutLog)
print("Checking out mps device: ", controls.AppID)#, " with software version ", softwareVersion, "\n")
for device in appInfo['devices']:
deviceFault = False
deviceLog = {}
print("\n\nWorking on device: ", device)
#verify threshold count
thresholdCount = e.caget(device+'_THR_CNT')
confirmScreen = False
while(not(confirmScreen)):
if(query_yes_no("Verify for device "+str(device)+ " threshold count = "+ str(thresholdCount),skip)):
deviceLog['ThresholdNum']= [thresholdCount, 1,1]
confirmScreen = True
elif(query_yes_no("Verify you are looking at the display for: "+ device),skip):
tell_user_fault()
deviceLog['ThresholdNum']= [thresholdCount, 1,0]
deviceFault = True
break
else:
print("Please open the display for ", device)
if(not(deviceFault)):
print("Setting all thresholds to non fault state")
########### ########### ########### ########### ########### ########### ########### ########### ########### ########### ###########
#temporary workaround since threshold 7 PVs are not hosted
if(thresholdCount == 8):
thresholdCount -= 1
########### ########### ########### ########### ########### ########### ########### ########### ########### ########### ###########
enable_thresholds(thresholdCount, device, 'H', 1)
enable_thresholds(thresholdCount, device, 'L', 1)
safeMargin = 1000
testMargin =500
set_thresholds_safe(thresholdCount, device, safeMargin, 'H')
set_thresholds_safe(thresholdCount, device, -safeMargin, 'L')
time.sleep(2)
logicState = e.caget(device+'_LINAC_LOGIC', use_monitor=False)
confirmAllInBounds = False
while(not(confirmAllInBounds)):
if((query_yes_no("Is "+str(device)+' in state IS_OK for all thresholds',skip))and(logicState==0)):
deviceLog['AllThreshold'] = [1, 1, 1]
confirmAllInBounds = True
elif(not(logicState==0)):
deviceLog['AllThreshold'] = [0, 1, 1]
tell_user_fault()
break
else:
if(query_yes_no('Are you looking at '+device+'_LINAC_LOGIC',skip)):
tell_user_fault()
deviceLog['AllThreshold'] = [1, 1, 0]
else:
if(query_yes_no("Confirm "+device+'_LINAC_LOGIC is in state: B'+ str(threshold),skip)):
deviceLog['AllThreshold'] = [1, 1, 1]
else:
tell_user_fault()
deviceLog['AllThreshold'] = [1, 1, 0]
thresholdFault = False
#test the standard table high limits
print("Testing high thresholds for device: ", device)
test_threshold(thresholdCount, safeMargin, testMargin, device, 'H', deviceLog, skip)
print("Thank you for checking out the high thresholds")
print("We will now test the low thresholds\n")
print("Setting the thresholds to a safe possition\n")
set_thresholds_safe(thresholdCount, device, safeMargin, 'H')
set_thresholds_safe(thresholdCount, device, -safeMargin, 'L')
#test standard table low limits
print("Testing low thresholds for device: ", device)
test_threshold(thresholdCount, -safeMargin, -testMargin, device, 'L', deviceLog, skip)
print(deviceLog)
checkOutLog[device] = deviceLog
deviceLog={}
print("Thank you for checking out the low thresholds")
print("You have now checked the entire standard table")
deviceFault = False
print("MPS AppID ", controls.AppID, " has now been checked out.")
logInfo = json.dumps(checkOutLog)
    with open(os.path.join(controls.LogFileLoc, "App"+str(controls.AppID)+"_checkout.json"), 'w') as checkOutFile:
        checkOutFile.write(logInfo)
print("A checkout file named: ", "App"+str(controls.AppID)+"_checkout.json", " has been written to: ", controls.LogFileLoc)
return 0
if __name__ == "__main__":
main(sys.argv[1:])
| 45.195556
| 181
| 0.567411
|
25212cc10caaa3248f7fa287ddf913dccd307ef0
| 5,274
|
py
|
Python
|
tests/providers/apache/hive/__init__.py
|
takuti/airflow
|
0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e
|
[
"Apache-2.0"
] | 8,092
|
2016-04-27T20:32:29.000Z
|
2019-01-05T07:39:33.000Z
|
tests/providers/apache/hive/__init__.py
|
takuti/airflow
|
0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e
|
[
"Apache-2.0"
] | 2,961
|
2016-05-05T07:16:16.000Z
|
2019-01-05T08:47:59.000Z
|
tests/providers/apache/hive/__init__.py
|
takuti/airflow
|
0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e
|
[
"Apache-2.0"
] | 3,546
|
2016-05-04T20:33:16.000Z
|
2019-01-05T05:14:26.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import Optional
from unittest import TestCase
from unittest.mock import MagicMock
from airflow.models.dag import DAG
from airflow.providers.apache.hive.hooks.hive import HiveCliHook, HiveMetastoreHook, HiveServer2Hook
from airflow.providers.mysql.hooks.mysql import MySqlHook
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
class TestHiveEnvironment(TestCase):
def setUp(self):
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
dag = DAG('test_dag_id', default_args=args)
self.dag = dag
self.hql = """
USE airflow;
DROP TABLE IF EXISTS static_babynames_partitioned;
CREATE TABLE IF NOT EXISTS static_babynames_partitioned (
state string,
year string,
name string,
gender string,
num int)
PARTITIONED BY (ds string);
INSERT OVERWRITE TABLE static_babynames_partitioned
PARTITION(ds='{{ ds }}')
SELECT state, year, name, gender, num FROM static_babynames;
"""
class MockHiveMetastoreHook(HiveMetastoreHook):
def __init__(self, *args, **kwargs):
self._find_valid_server = MagicMock(return_value={})
self.get_metastore_client = MagicMock(return_value=MagicMock())
super().__init__()
class MockHiveCliHook(HiveCliHook):
def __init__(self, *args, **kwargs):
super().__init__()
self.conn = MockConnectionCursor()
self.conn.schema = 'default'
self.conn.host = 'localhost'
self.conn.port = 10000
self.conn.login = None
self.conn.password = None
self.conn.execute = MagicMock()
self.get_conn = MagicMock(return_value=self.conn)
self.get_connection = MagicMock(return_value=MockDBConnection({}))
class MockHiveServer2Hook(HiveServer2Hook):
def __init__(self, *args, **kwargs):
super().__init__()
self.mock_cursor = kwargs.get('connection_cursor', MockConnectionCursor())
self.mock_cursor.execute = MagicMock()
self.get_conn = MagicMock(return_value=self.mock_cursor)
self.get_connection = MagicMock(return_value=MockDBConnection({}))
class MockMySqlHook(MySqlHook):
def __init__(self, *args, **kwargs):
self.conn = MockConnectionCursor()
self.conn.execute = MagicMock()
self.get_conn = MagicMock(return_value=self.conn)
self.get_records = MagicMock(return_value=[])
self.insert_rows = MagicMock(return_value=True)
super().__init__(*args, **kwargs)
def get_connection(self, *args, **kwargs):
return self.conn
class MockDBConnection:
def __init__(self, extra_dejson=None, *args, **kwargs):
self.extra_dejson = extra_dejson
self.get_records = MagicMock(return_value=[['test_record']])
output = kwargs.get('output', ['' for _ in range(10)])
self.readline = MagicMock(side_effect=[line.encode() for line in output])
def status(self, *args, **kwargs):
return True
class BaseMockConnectionCursor:
def __init__(self, **kwargs):
self.arraysize = None
self.description = [
('hive_server_hook.a', 'INT_TYPE', None, None, None, None, True),
('hive_server_hook.b', 'INT_TYPE', None, None, None, None, True),
]
self.conn_exists = kwargs.get('exists', True)
def close(self):
pass
def cursor(self):
return self
def execute(self, values=None):
pass
def exists(self):
return self.conn_exists
def isfile(self):
return True
def remove(self):
pass
def upload(self, local_filepath, destination_filepath):
pass
def __next__(self):
return self.iterable
def __iter__(self):
yield from self.iterable
class MockConnectionCursor(BaseMockConnectionCursor):
def __init__(self):
super().__init__()
self.iterable = [(1, 1), (2, 2)]
class MockStdOut:
def __init__(self, *args, **kwargs):
output = kwargs.get('output', ['' for _ in range(10)])
self.readline = MagicMock(side_effect=[line.encode() for line in output])
class MockSubProcess:
PIPE = -1
STDOUT = -2
returncode: Optional[int] = None
def __init__(self, *args, **kwargs):
self.stdout = MockStdOut(*args, **kwargs)
def wait(self):
return
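# --- Illustrative sketch (not part of the original Airflow test module) ---
# Shows how the canned cursor above behaves in isolation; the query string is a
# hypothetical placeholder and nothing real is executed.
def _example_mock_cursor_usage():
    cursor = MockConnectionCursor()
    cursor.execute('SELECT 1')                   # no-op stub, no real HiveServer2 needed
    assert list(cursor) == [(1, 1), (2, 2)]      # iterates the canned result rows
    assert cursor.description[0][0] == 'hive_server_hook.a'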
| 31.023529
| 100
| 0.668373
|
157e6dc70fd6712a2308dc620c14ea32d40d8156
| 147
|
py
|
Python
|
p4_5.py
|
yujie24/Fishc_Python
|
aef4bd5dbf40e01de56d3eb81c7dfb0c1a1f68d6
|
[
"MIT"
] | 1
|
2020-02-16T16:24:40.000Z
|
2020-02-16T16:24:40.000Z
|
p4_5.py
|
yujie24/Fishc_Python
|
aef4bd5dbf40e01de56d3eb81c7dfb0c1a1f68d6
|
[
"MIT"
] | null | null | null |
p4_5.py
|
yujie24/Fishc_Python
|
aef4bd5dbf40e01de56d3eb81c7dfb0c1a1f68d6
|
[
"MIT"
] | null | null | null |
# The continue statement ends the current loop iteration and starts the next one.
# Note: before the next iteration starts, the loop condition is tested again.
for i in range(10):
if i % 2 != 0:
print(i)
continue
i += 2
print(i)
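# A second sketch (not in the original lesson file): with a while loop the
# condition is re-tested right after continue, so the i += 1 placed before
# continue is what keeps the loop finite.
i = 0
while i < 10:
    i += 1
    if i % 2 == 0:
        continue
    print(i)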
| 13.363636
| 28
| 0.557823
|
a1f6d2b64b732cce94d9a48e9a53b24b588d4ca1
| 9,177
|
py
|
Python
|
worker.py
|
jet-black/ppo-lstm-parallel
|
4ab5c92602cc9f4c83a24a15717357c56c4a4372
|
[
"MIT"
] | 38
|
2018-08-15T19:38:47.000Z
|
2022-03-17T10:57:49.000Z
|
worker.py
|
JohnSun23/ppo-lstm-parallel
|
4ab5c92602cc9f4c83a24a15717357c56c4a4372
|
[
"MIT"
] | 1
|
2019-11-14T07:45:58.000Z
|
2019-11-14T09:44:45.000Z
|
worker.py
|
JohnSun23/ppo-lstm-parallel
|
4ab5c92602cc9f4c83a24a15717357c56c4a4372
|
[
"MIT"
] | 15
|
2018-08-17T10:00:47.000Z
|
2022-03-02T14:00:05.000Z
|
from multiprocessing import Queue, Process
import numpy as np
from threading import Thread
from agent import PPOAgent
from gather import GatheringWorker
from policy import get_policy
import utils
import environments
class Worker:
def __init__(self, env_producer, idx, master_weights_in_queue, master_weights_out_queue):
self.env_name = env_producer.get_env_name()
self.config = environments.get_config(self.env_name)
self.num_gather_workers = self.config["gather_per_worker"]
self.env_producer = env_producer
self.batch_size = self.config["batch_size"]
self.clip_eps = self.config["clip_eps"]
self.grad_step = self.config["grad_step"]
self.epochs = self.config["epochs"]
self.entropy_coef = self.config["entropy_coef"]
self.idx = idx
self.session = None
self.episode_step = 0
self.initialized = False
self.beta = self.config["init_beta"]
self.eta = self.config["eta"]
self.kl_target = self.config["kl_target"]
self.use_kl_loss = self.config["use_kl_loss"]
self.lr_multiplier = 1.0
self.variables_file_path = "models/%s/variables.txt" % self.env_name
self.worker_queue = Queue()
self.weights_queues = [Queue() for _ in range(self.num_gather_workers)]
self.master_weights_in_queue = master_weights_in_queue
self.master_weights_out_queue = master_weights_out_queue
self.init_workers()
self.agent = None
self.trainable_vars = None
self.accum_vars = None
self.assign_op = None
self.p_opt_vars = None
self.v_opt_vars = None
self.init_agent()
def init_agent(self):
import tensorflow as tf
env_opts = environments.get_env_options(self.env_name, self.env_producer.get_use_gpu())
self.session = utils.create_session(env_opts, True)
with tf.variable_scope("worker-%s" % self.idx):
pol = get_policy(env_opts, self.session)
self.agent = PPOAgent(pol, self.session, "worker-%s" % self.idx, env_opts)
self.trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "worker-%s" % self.idx)
self.accum_vars = [tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False) for tv in
self.trainable_vars]
p_vars = self.agent.p_opt.variables()
v_vars = self.agent.v_opt.variables()
self.p_opt_vars = [tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False) for tv in p_vars]
self.v_opt_vars = [tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False) for tv in v_vars]
p_assign_ops = [p_vars[i].assign(self.p_opt_vars[i]) for i in range(len(p_vars))]
v_assign_ops = [v_vars[i].assign(self.v_opt_vars[i]) for i in range(len(v_vars))]
assign_ops = [self.trainable_vars[i].assign(self.accum_vars[i]) for i in
range(len(self.trainable_vars))]
self.assign_op = tf.group(assign_ops + p_assign_ops + v_assign_ops)
self.session.run(tf.global_variables_initializer())
self.run()
def init_workers(self):
for i in range(self.num_gather_workers):
rollout_size = self.config["rollout_size"] // self.num_gather_workers
t = Process(target=make_worker, args=(i, self.env_producer,
self.worker_queue,
self.weights_queues[i],
rollout_size))
t.start()
def run(self):
while True:
self.apply_shared_variables()
self.apply_weights_to_gather_workers()
stats = self.compute_grads_and_stats()
self.send_to_master(stats)
def send_to_master(self, stats):
weights, p_opt_weights, v_opt_weights = self.session.run([self.trainable_vars,
self.agent.p_opt.variables(),
self.agent.v_opt.variables()])
arr = [self.beta, self.lr_multiplier, p_opt_weights, v_opt_weights, weights, stats]
self.master_weights_out_queue.put(arr)
def apply_weights_to_gather_workers(self):
weights = self.session.run(self.trainable_vars)
for q in self.weights_queues:
q.put(weights)
def apply_shared_variables(self):
beta, lr_multiplier, p_opt_weights, v_opt_weights, weights = self.master_weights_in_queue.get()
self.beta = beta
self.lr_multiplier = lr_multiplier
fd = {}
for i, t in enumerate(self.accum_vars):
fd[t] = weights[i]
for i, t in enumerate(self.p_opt_vars):
fd[t] = p_opt_weights[i]
for i, t in enumerate(self.v_opt_vars):
fd[t] = v_opt_weights[i]
self.session.run(self.assign_op, feed_dict=fd)
def compute_grads_and_stats(self):
results = []
for i in range(self.num_gather_workers):
results.append(self.worker_queue.get())
w_idx = list(range(self.num_gather_workers))
all_states = np.concatenate([results[i][0] for i in w_idx], axis=0)
all_advantages = np.concatenate([results[i][1] for i in w_idx], axis=0)
all_picked_actions = np.concatenate([results[i][2] for i in w_idx], axis=0)
all_returns = np.concatenate([results[i][3] for i in w_idx], axis=0)
all_old_actions_probs = np.concatenate([results[i][4] for i in w_idx], axis=0)
all_pred_values = np.concatenate([results[i][5] for i in w_idx], axis=0)
all_hidden_states = np.concatenate([results[i][6] for i in w_idx], axis=0)
all_advantages = (all_advantages - all_advantages.mean()) / (max(all_advantages.std(), 1e-4))
first_gather = [x for x in results if x[9] == 0][0]
self.episode_step = first_gather[7]
stats = first_gather[8]
sz = len(all_states)
n_batches = (sz - 1) // self.batch_size + 1
steps = 0
cur_kl = 0
entropy = 0
hinge = 0
src_policy_loss = 0
vloss = 0
ploss = 0
for cur_epoch in range(self.epochs):
idx = np.arange(len(all_states))
np.random.shuffle(idx)
all_states = all_states[idx]
all_returns = all_returns[idx]
all_picked_actions = all_picked_actions[idx]
all_old_actions_probs = all_old_actions_probs[idx]
all_advantages = all_advantages[idx]
all_pred_values = all_pred_values[idx]
all_hidden_states = all_hidden_states[idx]
for b in range(n_batches):
start = b * self.batch_size
end = min(sz, (b + 1) * self.batch_size)
states_b = all_states[start:end]
returns_b = all_returns[start:end]
picked_actions_b = all_picked_actions[start:end]
old_action_probs_b = all_old_actions_probs[start:end]
advantages_b = all_advantages[start:end]
hidden_states_b = all_hidden_states[start:end]
old_values_b = all_pred_values[start:end]
cur_kl, entropy, hinge, src_policy_loss, vloss, ploss = \
self.agent.train(states_b,
advantages_b,
returns_b,
picked_actions_b,
old_action_probs_b,
hidden_states_b,
old_values_b,
self.clip_eps,
self.beta,
self.eta,
self.grad_step * self.lr_multiplier)
steps += 1
if cur_kl > self.kl_target * 4 and self.use_kl_loss:
break
if self.use_kl_loss:
if cur_kl > self.kl_target * 2:
self.beta = np.minimum(35, 1.5 * self.beta)
if self.beta > 30.0:
self.lr_multiplier /= 1.5
elif cur_kl < self.kl_target / 2:
self.beta = np.maximum(1 / 35, self.beta / 1.5)
if self.beta <= 1 / 30.0:
self.lr_multiplier *= 1.5
self.lr_multiplier = max(min(self.lr_multiplier, 10.0), 0.1)
train_stats = {
"stats": stats,
"kl": cur_kl,
"entropy": entropy,
"hinge": hinge,
"src_policy_loss": src_policy_loss,
"vloss": vloss,
"ploss": ploss,
"lr_multiplier": self.lr_multiplier,
"beta": self.beta,
"step": self.episode_step,
"idx": self.idx
}
return train_stats
def make_worker(i, env_producer, worker_queue, weights_queue, rollout_size):
return GatheringWorker(i, env_producer, rollout_size, worker_queue, weights_queue)
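# --- Illustrative sketch (not part of the original worker module) ---
# The beta / lr_multiplier logic above is the adaptive-KL part of PPO: the KL
# penalty grows when the measured KL overshoots the target and shrinks when the
# policy barely moves. The same update, isolated as a pure function:
def adaptive_kl_update(beta, lr_multiplier, cur_kl, kl_target):
    if cur_kl > kl_target * 2:
        beta = min(35, 1.5 * beta)
        if beta > 30.0:
            lr_multiplier /= 1.5
    elif cur_kl < kl_target / 2:
        beta = max(1 / 35, beta / 1.5)
        if beta <= 1 / 30.0:
            lr_multiplier *= 1.5
    return beta, max(min(lr_multiplier, 10.0), 0.1)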
| 44.765854
| 116
| 0.581345
|
c79295f8d4ef8159414bad8b45c61d20ed81ee06
| 145
|
py
|
Python
|
2675/2675.py3.py
|
isac322/BOJ
|
35959dd1a63d75ebca9ed606051f7a649d5c0c7b
|
[
"MIT"
] | 14
|
2017-05-02T02:00:42.000Z
|
2021-11-16T07:25:29.000Z
|
2675/2675.py3.py
|
isac322/BOJ
|
35959dd1a63d75ebca9ed606051f7a649d5c0c7b
|
[
"MIT"
] | 1
|
2017-12-25T14:18:14.000Z
|
2018-02-07T06:49:44.000Z
|
2675/2675.py3.py
|
isac322/BOJ
|
35959dd1a63d75ebca9ed606051f7a649d5c0c7b
|
[
"MIT"
] | 9
|
2016-03-03T22:06:52.000Z
|
2020-04-30T22:06:24.000Z
|
len = int(input())
while len:
n, st = input().split()
n = int(n)
for i in st:
print(i*n, end = "")
print("")
len -= 1
| 18.125
| 28
| 0.441379
|
4ffb125d73879b0af9fd76d37e830ba53dd240e0
| 1,440
|
py
|
Python
|
resources/legacy/helper_old.py
|
andy897221/Proof-of-Play-Flow-Demo
|
018ec382801f1363711b7680e728535a2ac94d26
|
[
"MIT"
] | null | null | null |
resources/legacy/helper_old.py
|
andy897221/Proof-of-Play-Flow-Demo
|
018ec382801f1363711b7680e728535a2ac94d26
|
[
"MIT"
] | null | null | null |
resources/legacy/helper_old.py
|
andy897221/Proof-of-Play-Flow-Demo
|
018ec382801f1363711b7680e728535a2ac94d26
|
[
"MIT"
] | null | null | null |
import numpy as np
def getMVP(self):
# use highest parameter based total parameter values of all players
enum = ["gold_per_min", "xp_per_min", "kills_per_min", "last_hits_per_min", "hero_damage_per_min",
"hero_healing_per_min", "tower_damage", "stuns_per_min"]
plyrRating, ratingBase = {"param": [], "rating": []}, []
team1Wins = self.matchData[1]
matchData = self.matchData[0]
    for key, item in matchData.items():
ratingBase += [[matchData[key][j] for j in enum]]
ratingBase = list(np.asarray(ratingBase).sum(axis=0))
    for key, item in matchData.items():
plyrallParam = [(matchData[key][enum[j]] / ratingBase[j]) if ratingBase[j] != 0 else 0 for j in
range(0, len(enum))]
plyrRating["param"] += [enum[int(np.argmax(plyrallParam))]]
plyrRating["rating"] += [max(plyrallParam)]
plyrRating_np = np.asarray(plyrRating["rating"])
plyrWins = []
    for key, item in matchData.items():
if matchData[key]["isRadiant"] and team1Wins: plyrWins += [True]
elif matchData[key]["isRadiant"] and not team1Wins: plyrWins += [False]
elif not matchData[key]["isRadiant"] and team1Wins: plyrWins += [False]
elif not matchData[key]["isRadiant"] and not team1Wins: plyrWins += [True]
plyrWins = np.asarray(plyrWins)
mvpIndex = np.where(plyrWins == True)[0][np.argmax(plyrRating_np[plyrWins])]
return mvpIndex
| 46.451613
| 103
| 0.646528
|
5c69c1fd1a48cd5c04cf2fbf4b070bcc5553a510
| 2,620
|
py
|
Python
|
submodules/datasets/tests/test_open_images_v5.py
|
khy0809/WeightNet
|
cd5ea53b42c6169ffd5a0d7d883788fdc871cd1e
|
[
"MIT"
] | null | null | null |
submodules/datasets/tests/test_open_images_v5.py
|
khy0809/WeightNet
|
cd5ea53b42c6169ffd5a0d7d883788fdc871cd1e
|
[
"MIT"
] | null | null | null |
submodules/datasets/tests/test_open_images_v5.py
|
khy0809/WeightNet
|
cd5ea53b42c6169ffd5a0d7d883788fdc871cd1e
|
[
"MIT"
] | null | null | null |
import os
import sys
import time
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_dir)
import datasets
def check_target_value(ds, dst):
idx = 0
image1, (positive_ids, negative_ids) = ds[idx]
_mid_to_idx = {mid: idx for idx, mid in enumerate(ds.classes)}
indicies = [_mid_to_idx[mid] for mid in positive_ids if mid in ds.classes]
image2, labels = dst[idx]
assert labels.shape == (len(ds.classes), )
assert (labels[indicies] == 1).all()
idx = 100
image1, (positive_ids, negative_ids) = ds[idx]
indicies = [_mid_to_idx[mid] for mid in positive_ids if mid in ds.classes]
image2, labels = dst[idx]
assert labels.shape == (len(ds.classes), )
assert (labels[indicies] == 1).all()
idx = -1
image1, (positive_ids, negative_ids) = ds[idx]
indicies = [_mid_to_idx[mid] for mid in positive_ids if mid in ds.classes]
image2, labels = dst[idx]
assert labels.shape == (len(ds.classes), )
assert (labels[indicies] == 1).all()
# ImageLevelLabel
def test_open_image_level_label_train():
t0 = time.time()
ds = datasets.open_images_v5.ImageLevelLabel('train')
assert (time.time() - t0) < (60.0 * 2.5)
assert len(ds) == 5989787
transforms = datasets.open_images_v5.TargetToVector()
dst = datasets.open_images_v5.ImageLevelLabel('train', target_transform=transforms)
check_target_value(ds, dst)
def test_open_image_level_label_val():
t0 = time.time()
ds = datasets.open_images_v5.ImageLevelLabel('validation')
assert (time.time() - t0) < 30.0
assert len(ds) == 41620
transforms = datasets.open_images_v5.TargetToVector()
dst = datasets.open_images_v5.ImageLevelLabel('validation', target_transform=transforms)
check_target_value(ds, dst)
# ImageLevelLabelBoxable
def test_open_image_level_label_boxable_train():
t0 = time.time()
ds = datasets.open_images_v5.ImageLevelLabelBoxable('train')
assert (time.time() - t0) < (60.0 * 2)
assert len(ds) == 1743042
transforms = datasets.open_images_v5.TargetToVector()
dst = datasets.open_images_v5.ImageLevelLabelBoxable('train', target_transform=transforms)
check_target_value(ds, dst)
def test_open_image_level_label_boxable_val():
t0 = time.time()
ds = datasets.open_images_v5.ImageLevelLabelBoxable('validation')
assert (time.time() - t0) < 30.0
assert len(ds) == 37306
transforms = datasets.open_images_v5.TargetToVector()
dst = datasets.open_images_v5.ImageLevelLabelBoxable('validation', target_transform=transforms)
check_target_value(ds, dst)
| 33.164557
| 99
| 0.709542
|
d26a1d21b9a8fe4b753974b37d93a8b45bcf4ef7
| 10,569
|
py
|
Python
|
cyclegan/train.py
|
jesa7955/CycleGAN-PyTorch
|
521a937b23f258d54ee2a5d7a7f9d925e7e3e63c
|
[
"BSD-2-Clause"
] | 6
|
2019-11-22T12:26:40.000Z
|
2021-07-21T01:48:39.000Z
|
cyclegan/train.py
|
jesa7955/CycleGAN-PyTorch
|
521a937b23f258d54ee2a5d7a7f9d925e7e3e63c
|
[
"BSD-2-Clause"
] | null | null | null |
cyclegan/train.py
|
jesa7955/CycleGAN-PyTorch
|
521a937b23f258d54ee2a5d7a7f9d925e7e3e63c
|
[
"BSD-2-Clause"
] | 1
|
2019-12-20T05:03:14.000Z
|
2019-12-20T05:03:14.000Z
|
import argparse
import os
import sys
import itertools
import math
import datetime
import time
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torchvision import datasets
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torch
import torch.nn as nn
from PIL import Image
from models import CycleGenerator, Discriminator
from lr_helpers import get_lambda_rule
from dataset import CycleGANDataset
def train_loop(opts):
if opts.image_height == 128:
res_blocks = 6
elif opts.image_height >= 256:
res_blocks = 9
# Create networks
G_AB = CycleGenerator(opts.a_channels, opts.b_channels, res_blocks).to(device)
G_BA = CycleGenerator(opts.b_channels, opts.a_channels, res_blocks).to(device)
D_A = Discriminator(opts.a_channels, opts.d_conv_dim).to(device)
D_B = Discriminator(opts.b_channels, opts.d_conv_dim).to(device)
# Print network architecture
print(" G_AtoB ")
print("---------------------------------------")
print(G_AB)
print("---------------------------------------")
print(" G_BtoA ")
print("---------------------------------------")
print(G_BA)
print("---------------------------------------")
print(" D_A ")
print("---------------------------------------")
print(D_A)
print("---------------------------------------")
print(" D_B ")
print("---------------------------------------")
print(D_B)
print("---------------------------------------")
# Create losses
criterion_gan = torch.nn.MSELoss()
criterion_cycle = torch.nn.L1Loss()
criterion_identity = torch.nn.L1Loss()
if opts.load:
#TODO
pass
# Weights cycle loss and identity loss
lambda_cycle = 10
lambda_id = 0.5 * lambda_cycle
# Create optimizers
g_optimizer = torch.optim.Adam(itertools.chain(G_AB.parameters(), G_BA.parameters()),
lr=opts.lr, betas=(opts.beta1, opts.beta2))
d_a_optimizer = torch.optim.Adam(D_A.parameters(), lr=opts.lr, betas=(opts.beta1, opts.beta2))
d_b_optimizer = torch.optim.Adam(D_B.parameters(), lr=opts.lr, betas=(opts.beta1, opts.beta2))
# Create learning rate update schedulers
LambdaLR = get_lambda_rule(opts)
g_lr_scheduler = torch.optim.lr_scheduler.LambdaLR(g_optimizer, lr_lambda=LambdaLR)
d_a_lr_scheduler = torch.optim.lr_scheduler.LambdaLR(d_a_optimizer, lr_lambda=LambdaLR)
d_b_lr_scheduler = torch.optim.lr_scheduler.LambdaLR(d_b_optimizer, lr_lambda=LambdaLR)
# Image transformations
transform = transforms.Compose([transforms.Resize(int(opts.image_height*1.12), Image.BICUBIC),
transforms.RandomCrop((opts.image_height, opts.image_width)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))])
train_dataloader = DataLoader(CycleGANDataset(opts.dataroot_dir, opts.dataset_name, transform), batch_size=opts.batch_size, shuffle=True, num_workers=opts.n_cpu)
test_dataloader = DataLoader(CycleGANDataset(opts.dataroot_dir, opts.dataset_name, transform, mode='test'), batch_size=5, shuffle=False, num_workers=1)
end_epoch = opts.epochs + opts.start_epoch
total_batch = len(train_dataloader) * opts.epochs
for epoch in range(opts.start_epoch, end_epoch):
for index, batch in enumerate(train_dataloader):
# Create adversarial target
real_A = Variable(batch['A'].to(device))
real_B = Variable(batch['B'].to(device))
fake_A, fake_B = G_BA(real_B), G_AB(real_A)
# Train discriminator A
d_a_optimizer.zero_grad()
patch_real = D_A(real_A)
loss_a_real = criterion_gan(patch_real, torch.tensor(1.0).expand_as(patch_real).to(device))
patch_fake = D_A(fake_A)
loss_a_fake = criterion_gan(patch_fake, torch.tensor(0.0).expand_as(patch_fake).to(device))
loss_d_a = (loss_a_real + loss_a_fake) / 2
loss_d_a.backward()
d_a_optimizer.step()
# Train discriminator B
d_b_optimizer.zero_grad()
patch_real = D_B(real_B)
loss_b_real = criterion_gan(patch_real, torch.tensor(1.0).expand_as(patch_real).to(device))
patch_fake = D_B(fake_B)
loss_b_fake = criterion_gan(patch_fake, torch.tensor(0.0).expand_as(patch_fake).to(device))
loss_d_b = (loss_b_real + loss_b_fake) / 2
loss_d_b.backward()
d_b_optimizer.step()
# Train generator
g_optimizer.zero_grad()
fake_A, fake_B = G_BA(real_B), G_AB(real_A)
reconstructed_A, reconstructed_B = G_BA(fake_B), G_AB(fake_A)
# GAN loss
patch_a = D_A(fake_A)
loss_gan_ba = criterion_gan(patch_a, torch.tensor(1.0).expand_as(patch_a).to(device))
patch_b = D_B(fake_B)
loss_gan_ab = criterion_gan(patch_b, torch.tensor(1.0).expand_as(patch_b).to(device))
loss_gan = (loss_gan_ab + loss_gan_ba) / 2
# Cycle loss
loss_cycle_a = criterion_cycle(reconstructed_A, real_A)
loss_cycle_b = criterion_cycle(reconstructed_B, real_B)
loss_cycle = (loss_cycle_a + loss_cycle_b) / 2
# Identity loss
loss_id_a = criterion_identity(G_BA(real_A), real_A)
loss_id_b = criterion_identity(G_AB(real_B), real_B)
loss_identity = (loss_id_a + loss_id_b) / 2
# Total loss
loss_g = loss_gan + lambda_cycle * loss_cycle + lambda_id * loss_identity
loss_g.backward()
g_optimizer.step()
current_batch = epoch * len(train_dataloader) + index
sys.stdout.write(f"\r[Epoch {epoch+1}/{opts.epochs-opts.start_epoch}] [Index {index}/{len(train_dataloader)}] [D_A loss: {loss_d_a.item():.4f}] [D_B loss: {loss_d_b.item():.4f}] [G loss: adv: {loss_gan.item():.4f}, cycle: {loss_cycle.item():.4f}, identity: {loss_identity.item():.4f}]")
if current_batch % opts.sample_every == 0:
save_sample(G_AB, G_BA, current_batch, opts, test_dataloader)
        # Update learning rate
g_lr_scheduler.step()
d_a_lr_scheduler.step()
d_b_lr_scheduler.step()
if epoch % opts.checkpoint_every == 0:
torch.save(G_AB.state_dict(), f'{opts.checkpoint_dir}/{opts.dataset_name}/G_AB_{epoch}.pth')
torch.save(G_BA.state_dict(), f'{opts.checkpoint_dir}/{opts.dataset_name}/G_BA_{epoch}.pth')
torch.save(D_A.state_dict(), f'{opts.checkpoint_dir}/{opts.dataset_name}/D_A_{epoch}.pth')
torch.save(D_B.state_dict(), f'{opts.checkpoint_dir}/{opts.dataset_name}/D_B_{epoch}.pth')
def save_sample(G_AB, G_BA, batch, opts, test_dataloader):
images = next(iter(test_dataloader))
real_A = Variable(images['A'].to(device))
real_B = Variable(images['B'].to(device))
fake_A = G_BA(real_B)
fake_B = G_AB(real_A)
reconstructed_A = G_BA(fake_B)
reconstructed_B = G_AB(fake_A)
image_sample = torch.cat((real_A.data, fake_B.data,
real_B.data, fake_A.data,
reconstructed_A.data, reconstructed_B.data), 0)
save_image(image_sample, f"{opts.sample_dir}/{opts.dataset_name}/{batch}.png", nrow=5, normalize=True)
def create_parser():
parser = argparse.ArgumentParser()
    # Model hyperparameters
    parser.add_argument('--image_height', type=int, default=256, help='Image height.')
    parser.add_argument('--image_width', type=int, default=256, help='Image width.')
    parser.add_argument('--a_channels', type=int, default=3, help='Number of channels in domain A images.')
    parser.add_argument('--b_channels', type=int, default=3, help='Number of channels in domain B images.')
parser.add_argument('--d_conv_dim', type=int, default=64)
    # Training hyperparameters
    parser.add_argument('--dataset_name', type=str, default='facades', help='Dataset to use.')
    parser.add_argument('--epochs', type=int, default=200, help='Number of epochs.')
    parser.add_argument('--start_epoch', type=int, default=0, help='Epoch to start training from.')
    parser.add_argument('--decay_epoch', type=int, default=100, help='Epoch at which to start learning-rate decay.')
    parser.add_argument('--batch_size', type=int, default=1, help='Number of images per batch.')
    parser.add_argument('--num_workers', type=int, default=0, help='Number of threads used by the DataLoader.')
    parser.add_argument('--lr', type=float, default=0.0002, help='Learning rate (default: 0.0002).')
    parser.add_argument('--beta1', type=float, default=0.5, help='Hyperparameter for the Adam optimizer.')
    parser.add_argument('--beta2', type=float, default=0.999, help='Hyperparameter for the Adam optimizer.')
    parser.add_argument('--n_cpu', type=int, default=8, help='Number of threads used when generating batches.')
    parser.add_argument('--gpu_id', type=int, default=0, help='ID of the GPU to use.')
    # How often and where to save samples and checkpoints
parser.add_argument('--dataroot_dir', type=str, default='../data/')
parser.add_argument('--checkpoint_dir', type=str, default='checkpoints_cyclegan')
parser.add_argument('--sample_dir', type=str, default='samples_cyclegan')
parser.add_argument('--load', type=str, default=None)
parser.add_argument('--log_step', type=int , default=20)
    parser.add_argument('--sample_every', type=int, default=100, help='How often to save samples, in batches.')
    parser.add_argument('--checkpoint_every', type=int, default=1, help='How often to save checkpoints, in epochs.')
return parser
def print_opts(opts):
"""Prints the values of all command-line arguments.
"""
print('=' * 80)
print('Opts'.center(80))
print('-' * 80)
for key in opts.__dict__:
if opts.__dict__[key]:
print('{:>30}: {:<30}'.format(key, opts.__dict__[key]).center(80))
print('=' * 80)
if __name__ == '__main__':
parser = create_parser()
opts = parser.parse_args()
device = torch.device(f'cuda:{opts.gpu_id}' if torch.cuda.is_available() else 'cpu')
os.makedirs(f"{opts.sample_dir}/{opts.dataset_name}", exist_ok=True)
os.makedirs(f"{opts.checkpoint_dir}/{opts.dataset_name}", exist_ok=True)
if opts.load:
opts.sample_dir = '{}_pretrained'.format(opts.sample_dir)
opts.sample_every = 20
print_opts(opts)
train_loop(opts)
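# --- Illustrative sketch (not part of the original project) ---
# get_lambda_rule is imported from lr_helpers and not shown here. A common
# CycleGAN-style choice (an assumption, not the project's verified code) keeps
# the learning rate flat until decay_epoch and then decays it linearly to zero
# over the remaining epochs:
def example_lambda_rule(opts):
    def lr_lambda(epoch):
        total_decay_epochs = float(opts.epochs + opts.start_epoch - opts.decay_epoch)
        return 1.0 - max(0, epoch + opts.start_epoch - opts.decay_epoch) / total_decay_epochs
    return lr_lambda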
| 43.493827
| 298
| 0.633172
|
68e091b2fef8050c355269c5e4679fea9d7169d7
| 3,100
|
py
|
Python
|
bandit/plugins/general_bad_file_permissions.py
|
bittner/bandit
|
87ecc4079ea50d77be13ed72bbf5ad2eb0673c64
|
[
"Apache-2.0"
] | 1
|
2020-10-05T05:38:26.000Z
|
2020-10-05T05:38:26.000Z
|
bandit/plugins/general_bad_file_permissions.py
|
bittner/bandit
|
87ecc4079ea50d77be13ed72bbf5ad2eb0673c64
|
[
"Apache-2.0"
] | null | null | null |
bandit/plugins/general_bad_file_permissions.py
|
bittner/bandit
|
87ecc4079ea50d77be13ed72bbf5ad2eb0673c64
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# SPDX-License-Identifier: Apache-2.0
r"""
==================================================
B103: Test for setting permissive file permissions
==================================================
POSIX based operating systems utilize a permissions model to protect access to
parts of the file system. This model supports three roles "owner", "group"
and "world" each role may have a combination of "read", "write" or "execute"
flags sets. Python provides ``chmod`` to manipulate POSIX style permissions.
This plugin test looks for the use of ``chmod`` and will alert when it is used
to set particularly permissive control flags. A MEDIUM warning is generated if
a file is set to group executable and a HIGH warning is reported if a file is
set world writable. Warnings are given with HIGH confidence.
:Example:
.. code-block:: none
>> Issue: Probable insecure usage of temp file/directory.
Severity: Medium Confidence: Medium
CWE: CWE-732 (https://cwe.mitre.org/data/definitions/732.html)
Location: ./examples/os-chmod.py:15
14 os.chmod('/etc/hosts', 0o777)
15 os.chmod('/tmp/oh_hai', 0x1ff)
16 os.chmod('/etc/passwd', stat.S_IRWXU)
>> Issue: Chmod setting a permissive mask 0777 on file (key_file).
Severity: High Confidence: High
CWE: CWE-732 (https://cwe.mitre.org/data/definitions/732.html)
Location: ./examples/os-chmod.py:17
16 os.chmod('/etc/passwd', stat.S_IRWXU)
17 os.chmod(key_file, 0o777)
18
.. seealso::
- https://security.openstack.org/guidelines/dg_apply-restrictive-file-permissions.html
- https://en.wikipedia.org/wiki/File_system_permissions
- https://security.openstack.org
- https://cwe.mitre.org/data/definitions/732.html
.. versionadded:: 0.9.0
.. versionchanged:: 1.7.3
CWE information added
""" # noqa: E501
import stat
import bandit
from bandit.core import issue
from bandit.core import test_properties as test
@test.checks("Call")
@test.test_id("B103")
def set_bad_file_permissions(context):
if "chmod" in context.call_function_name:
if context.call_args_count == 2:
mode = context.get_call_arg_at_position(1)
if (
mode is not None
and isinstance(mode, int)
and (mode & stat.S_IWOTH or mode & stat.S_IXGRP)
):
# world writable is an HIGH, group executable is a MEDIUM
if mode & stat.S_IWOTH:
sev_level = bandit.HIGH
else:
sev_level = bandit.MEDIUM
filename = context.get_call_arg_at_position(0)
if filename is None:
filename = "NOT PARSED"
return bandit.Issue(
severity=sev_level,
confidence=bandit.HIGH,
cwe=issue.Cwe.INCORRECT_PERMISSION_ASSIGNMENT,
text="Chmod setting a permissive mask %s on file (%s)."
% (oct(mode), filename),
)
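# --- Illustrative sketch (not part of the bandit plugin itself) ---
# Example target code this check would flag, mirroring the severity rules in the
# docstring above; '/tmp/example' is a made-up path and the function is never called.
def _example_flagged_calls():
    import os
    os.chmod('/tmp/example', 0o777)   # HIGH: world-writable bit (S_IWOTH) is set
    os.chmod('/tmp/example', 0o710)   # MEDIUM: group-executable bit (S_IXGRP) is set
    os.chmod('/tmp/example', 0o600)   # owner read/write only: not reported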
| 35.227273
| 87
| 0.624194
|
599c4c8df9722c0f7cb01f59953ce67c59b8c6d9
| 1,745
|
py
|
Python
|
catlas/load_bulk_structures.py
|
ulissigroup/catlas
|
1796bf562f21f4afaee8999bbdf0ed76e63f8501
|
[
"MIT"
] | null | null | null |
catlas/load_bulk_structures.py
|
ulissigroup/catlas
|
1796bf562f21f4afaee8999bbdf0ed76e63f8501
|
[
"MIT"
] | 34
|
2021-12-02T16:55:34.000Z
|
2022-03-30T17:58:35.000Z
|
catlas/load_bulk_structures.py
|
ulissigroup/catlas
|
1796bf562f21f4afaee8999bbdf0ed76e63f8501
|
[
"MIT"
] | 1
|
2021-12-14T17:22:19.000Z
|
2021-12-14T17:22:19.000Z
|
"""Function to load bulks from an ase.db."""
from ase.db import connect
import os.path
import numpy as np
import pandas as pd
from .dask_utils import SizeDict
import catlas
required_fields = (
"atoms",
"mpid",
"natoms",
"xc",
"nelements",
"elements",
) # These fields are expected to exist in every input file that doesn't allow them to be directly calculated
def load_bulks(bulk_path):
"""
Load bulks from an ase.db
Args:
bulk_path: a relative path (from the main catlas directory) to the ase.db
"""
path = "%s/%s" % (
os.path.join(os.path.dirname(catlas.__file__), os.pardir),
bulk_path,
)
path_name, _ = os.path.splitext(path)
db_name = path_name.split("/")[-1]
with connect(path) as db:
# Turn each entry into a dictionary that will become the dataframe columns
bulk_list = []
for row in db.select():
bulk_list.append(
SizeDict(
{
"bulk_atoms": row.toatoms(),
"bulk_id": row.bulk_id,
"bulk_data_source": db_name,
"bulk_natoms": row.natoms,
"bulk_xc": "RPBE",
"bulk_nelements": len(
np.unique(row.toatoms().get_chemical_symbols())
),
"bulk_elements": np.unique(
row.toatoms().get_chemical_symbols()
),
"bulk_e_above_hull": row.energy_above_hull,
"bulk_band_gap": row.band_gap,
}
)
)
return bulk_list
| 28.606557
| 109
| 0.507163
|
b61d1629fbda944e517ef38687d8e8b4275a489f
| 2,636
|
py
|
Python
|
fairseq/optim/dynamic_loss_scaler.py
|
ictnlp/MoE-Waitk
|
6f8ca9834c2ab77785ebd93fd569f73c3819340b
|
[
"MIT"
] | 2
|
2022-03-21T04:35:46.000Z
|
2022-03-21T04:36:14.000Z
|
fairseq/optim/dynamic_loss_scaler.py
|
ictnlp/MoE-Waitk
|
6f8ca9834c2ab77785ebd93fd569f73c3819340b
|
[
"MIT"
] | null | null | null |
fairseq/optim/dynamic_loss_scaler.py
|
ictnlp/MoE-Waitk
|
6f8ca9834c2ab77785ebd93fd569f73c3819340b
|
[
"MIT"
] | 2
|
2022-03-21T14:22:27.000Z
|
2022-03-27T02:31:27.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
class DynamicLossScaler(object):
def __init__(
self,
init_scale=2.0**15,
scale_factor=2.0,
scale_window=2000,
tolerance=0.05,
threshold=None,
min_loss_scale=1e-4,
):
self.loss_scale = init_scale
self.scale_factor = scale_factor
self.scale_window = scale_window
self.tolerance = tolerance
self.threshold = threshold
self._iter = 0
self._last_overflow_iter = -1
self._last_rescale_iter = -1
self._overflows_since_rescale = 0
self.min_loss_scale = min_loss_scale
def scale(self, outputs):
return self.loss_scale * outputs
def update(self):
if (self._iter - self._last_overflow_iter) % self.scale_window == 0:
self.loss_scale *= self.scale_factor
self._last_rescale_iter = self._iter
self._iter += 1
def _decrease_loss_scale(self):
self.loss_scale /= self.scale_factor
if self.threshold is not None:
self.loss_scale = max(self.loss_scale, self.threshold)
def check_overflow(self, grad_norm):
# detect inf and nan
if grad_norm == float("inf") or grad_norm != grad_norm:
            # overflow has occurred
prev_scale = self.loss_scale
iter_since_rescale = self._iter - self._last_rescale_iter
self._last_overflow_iter = self._iter
self._overflows_since_rescale += 1
pct_overflow = self._overflows_since_rescale / float(iter_since_rescale)
if pct_overflow >= self.tolerance:
self._decrease_loss_scale()
self._last_rescale_iter = self._iter
self._overflows_since_rescale = 0
if self.loss_scale <= self.min_loss_scale:
# Use FloatingPointError as an uncommon error that parent
# functions can safely catch to stop training.
self.loss_scale = prev_scale
raise FloatingPointError(
(
"Minimum loss scale reached ({}). Your loss is probably exploding. "
"Try lowering the learning rate, using gradient clipping or "
"increasing the batch size."
).format(self.min_loss_scale)
)
self._iter += 1
raise OverflowError("setting loss scale to: " + str(self.loss_scale))
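# --- Illustrative sketch (not part of the original fairseq module) ---
# Minimal driver showing how the scaler is meant to be used in a mixed-precision
# step; `backward_fn`, `grad_norm_fn` and `optimizer` are hypothetical stand-ins.
def example_fp16_step(scaler, backward_fn, grad_norm_fn, optimizer):
    backward_fn(scaler.loss_scale)        # backprop on loss * loss_scale
    grad_norm = grad_norm_fn()            # norm of the (unscaled) gradients
    try:
        scaler.check_overflow(grad_norm)  # raises OverflowError on inf/nan grads
    except OverflowError:
        optimizer.zero_grad()             # skip this batch; the scale was already lowered
        return False
    optimizer.step()
    scaler.update()                       # may grow the scale after a quiet window
    return True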
| 37.126761
| 92
| 0.605083
|
f4fceb972b8681c0f2ab2e9ade35cf1d73b49b6e
| 1,426
|
py
|
Python
|
alignment/find_bug/get_max_triples.py
|
LaudateCorpus1/code-align-evals-data
|
97446d992c3785d6605f1500b2c9b95d042e7b9c
|
[
"MIT"
] | 3
|
2021-07-29T23:40:15.000Z
|
2021-08-12T10:18:09.000Z
|
alignment/find_bug/get_max_triples.py
|
openai/code-align-evals-data
|
97446d992c3785d6605f1500b2c9b95d042e7b9c
|
[
"MIT"
] | 1
|
2021-09-19T06:44:15.000Z
|
2021-09-19T06:44:15.000Z
|
alignment/find_bug/get_max_triples.py
|
LaudateCorpus1/code-align-evals-data
|
97446d992c3785d6605f1500b2c9b95d042e7b9c
|
[
"MIT"
] | 1
|
2021-09-19T06:44:03.000Z
|
2021-09-19T06:44:03.000Z
|
def get_max_triples(n):
"""
You are given a positive integer n. You have to create an integer array a of length n.
For each i (1 โค i โค n), the value of a[i] = i * i - i + 1.
Return the number of triples (a[i], a[j], a[k]) of a where i < j < k,
and a[i] + a[j] + a[k] is a multiple of 3.
Example :
Input: n = 5
Output: 1
Explanation:
a = [1, 3, 7, 13, 21]
The only valid triple is (1, 7, 13).
Example solution:
# line 1
A = [i*i - i + 1 for i in range(1,n+1)]
# line 2
ans = []
# line 3
for i in range(n):
# line 4
for j in range(i+1,n):
# line 5
for k in range(j,n):
# line 6
if (A[i]+A[j]+A[k])%3 == 0:
# line 7
ans += [(A[i],A[j],A[k])]
# line 8
return len(ans)
"""
# Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4
# END OF CONTEXT
print("5")
# END OF SOLUTION
def check(candidate):
import io
from contextlib import redirect_stdout
f = io.StringIO()
with redirect_stdout(f):
candidate([])
out = f.getvalue().strip('\n')
assert "5" == out
for i in range(0, 10):
if i != 5:
assert str(i) != out
if __name__ == '__main__':
check(get_max_triples)
| 25.464286
| 115
| 0.490884
|
2fddbbc4bf88ee87f7e9ea321d31283e576a346a
| 6,401
|
py
|
Python
|
dm_control/scripts/dataset.py
|
mhauskn/dm_control
|
b7944e0ed4392924f40a3e5c65b1a93c027b9718
|
[
"Apache-2.0"
] | null | null | null |
dm_control/scripts/dataset.py
|
mhauskn/dm_control
|
b7944e0ed4392924f40a3e5c65b1a93c027b9718
|
[
"Apache-2.0"
] | null | null | null |
dm_control/scripts/dataset.py
|
mhauskn/dm_control
|
b7944e0ed4392924f40a3e5c65b1a93c027b9718
|
[
"Apache-2.0"
] | null | null | null |
import torch
import h5py
import numpy as np
import bisect
import random
from absl import logging
from torch.utils.data import Dataset
OBS_KEYS = [
'walker/actuator_activation',
'walker/appendages_pos',
'walker/body_height',
'walker/end_effectors_pos',
'walker/joints_pos',
'walker/joints_vel',
'walker/sensors_accelerometer',
'walker/sensors_gyro',
'walker/sensors_torque',
'walker/sensors_touch',
'walker/sensors_velocimeter',
'walker/world_zaxis',
'walker/clip_id',
'walker/reference_rel_joints',
'walker/reference_rel_bodies_pos_global',
'walker/reference_rel_bodies_quats',
'walker/reference_rel_bodies_pos_local',
'walker/reference_ego_bodies_quats',
'walker/reference_rel_root_quat',
'walker/reference_rel_root_pos_local',
'walker/reference_appendages_pos',
'walker/velocimeter_control',
'walker/gyro_control',
'walker/joints_vel_control',
'walker/time_in_clip',
]
class TrajectoryDataset(Dataset):
def __init__(self, h5py_file, block_size, observables):
logging.info(f'Loading dataset from: {h5py_file}')
dset = h5py.File(h5py_file, 'r')
self.block_size = block_size
# Assemble the observables
self.observables = []
for o in observables:
if not o.strip():
continue
if not o.startswith('walker/'):
o = 'walker/' + o
assert o in OBS_KEYS, f"Unrecognized Observable: {o}"
self.observables.append(o)
# Sort the list of observables so that we are robust against changes in order
        self.observables = sorted(self.observables)
logging.info(f'Observables: {self.observables}')
# Copy the dataset into memory
self.observations = np.concatenate([dset[f'observables/{k}'][...] for k in self.observables], axis=1)
self.actions = dset['actions'][...]
self.dones = dset['dones'][...]
self._remove_short_episodes()
self._create_logical_offset()
def _remove_short_episodes(self):
""" Removes all episodes shorter than block_size. """
all_obs = []
all_acts = []
all_dones = []
episode_ends = np.nonzero(self.dones)[0]
episode_start = 0
episodes_removed = 0
for episode_end in episode_ends:
ep_length = episode_end - episode_start + 1
if ep_length >= self.block_size:
all_obs.append(self.observations[episode_start: episode_end+1])
all_acts.append(self.actions[episode_start: episode_end+1])
all_dones.append(self.dones[episode_start: episode_end+1])
else:
episodes_removed += 1
episode_start = episode_end + 1
self.observations = np.concatenate(all_obs)
self.actions = np.concatenate(all_acts)
self.dones = np.concatenate(all_dones)
logging.info(f"Removed {episodes_removed} episodes shorter than {self.block_size} steps.")
def _create_logical_offset(self):
""" The idea behind the logical offset is to avoid sampling data that crosses episode
boundaries. The strategy is to avoid sampling a datapoint in the tail of an episode
(denoted by |ooooo|) as it would cross an episode boundary when adding context.
Actual Dataset: here shown with 4 episodes, heads + tails.
|-----------|ooooo| |-----|ooooo| |---------------|ooooo| |oooo|
Logical Dataset: contains only the heads of episodes - so that we never sample from the
tail of an episode (and cross an episode boundary).
|-----------| |-----| |---------------| ||
The logical offset tells us for an index into the logical dataset, the corresponding
index in the actual dataset.
For example, if we wanted to retrieve the first timestep of Episode 2, we would need to
offset the logical index by the tail length of Episode 1 to arrive at an index into the
actual dataset.
"""
self.logical_index, self.logical_offset = [-1], [0, 0]
episode_ends = np.nonzero(self.dones)[0]
episode_start = 0
head_sum, tail_sum = 0, 0
for idx, episode_end in enumerate(episode_ends):
ep_length = episode_end - episode_start + 1
assert ep_length >= self.block_size
tail_start = (episode_end+1) - self.block_size + 1
head_steps = tail_start - episode_start
tail_steps = (episode_end+1) - tail_start
assert tail_steps == self.block_size - 1
assert head_steps + tail_steps == ep_length
head_sum += head_steps
tail_sum += tail_steps
self.logical_index.append(head_sum-1)
self.logical_offset.append(tail_sum)
episode_start = episode_end + 1
assert head_sum + tail_sum == self.dones.shape[0]
self.total_len = head_sum
@property
def observation_size(self):
""" Dimension of each observation vector. """
return self.observations.shape[1]
@property
def action_size(self):
""" Dimension of each action vector. """
return self.actions.shape[1]
def __len__(self):
return self.total_len
# return self.actions.shape[0] - self.block_size
def __getitem__(self, idx):
""" Given the logical idx, we need to find the offset to arrive at the
actual index into the dataset.
"""
z = bisect.bisect_left(self.logical_index, idx)
offset = self.logical_offset[z]
start_idx = idx + offset
end_idx = start_idx + self.block_size
# If we've sampled an episode termination, ensure it's at the final step
dones = self.dones[start_idx: end_idx]
s = sum(dones)
assert s == 0 or (s == 1 and dones[-1] == True)
x = self.observations[start_idx: end_idx]
y = self.actions[start_idx: end_idx]
return x, y
if __name__ == "__main__":
block_size = 64
d = TrajectoryDataset('data/complete.hdf5', block_size=block_size, observables=['joints_pos', 'joints_vel'])
# d[193]
N = len(d)
for idx in range(N):
# n = random.randint(0, N-1)
x, y = d[idx]
assert x.shape[0] == block_size
assert y.shape[0] == block_size
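# --- Illustrative worked example (not part of the original script) ---
# The logical -> actual index mapping from _create_logical_offset, with made-up
# episode lengths 5 and 4 and block_size = 3 (each episode keeps a tail of
# block_size - 1 = 2 steps that is never used as a start index):
def _example_logical_index_mapping():
    logical_index = [-1, 2, 4]        # last logical index of each episode head
    logical_offset = [0, 0, 2, 4]     # cumulative tail steps skipped so far
    idx = 3                           # first head step of episode 2, in logical terms
    z = bisect.bisect_left(logical_index, idx)
    assert idx + logical_offset[z] == 5   # maps to actual timestep 5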
| 37.215116
| 112
| 0.62584
|
eb87a19c45df09ce5f4c0c306bd2c68fc6742d76
| 4,521
|
py
|
Python
|
benchmark/startQiskit_noisy1633.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_noisy1633.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_noisy1633.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=5
# total number=58
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[0]) # number=41
prog.cz(input_qubit[1],input_qubit[0]) # number=42
prog.h(input_qubit[0]) # number=43
prog.z(input_qubit[1]) # number=37
prog.h(input_qubit[0]) # number=51
prog.cz(input_qubit[1],input_qubit[0]) # number=52
prog.h(input_qubit[0]) # number=53
prog.h(input_qubit[4]) # number=21
prog.x(input_qubit[2]) # number=39
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[3],input_qubit[0]) # number=33
prog.h(input_qubit[0]) # number=48
prog.cz(input_qubit[3],input_qubit[0]) # number=49
prog.h(input_qubit[0]) # number=50
prog.z(input_qubit[3]) # number=46
prog.cx(input_qubit[3],input_qubit[0]) # number=47
prog.x(input_qubit[4]) # number=40
prog.cx(input_qubit[3],input_qubit[0]) # number=35
prog.cx(input_qubit[1],input_qubit[0]) # number=55
prog.x(input_qubit[0]) # number=56
prog.cx(input_qubit[1],input_qubit[0]) # number=57
prog.cx(input_qubit[0],input_qubit[1]) # number=29
prog.x(input_qubit[1]) # number=30
prog.cx(input_qubit[0],input_qubit[1]) # number=31
prog.x(input_qubit[2]) # number=11
prog.x(input_qubit[1]) # number=44
prog.x(input_qubit[3]) # number=12
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.cx(input_qubit[1],input_qubit[0]) # number=24
prog.x(input_qubit[0]) # number=25
prog.cx(input_qubit[1],input_qubit[0]) # number=26
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.cx(input_qubit[4],input_qubit[3]) # number=54
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
prog.x(input_qubit[1]) # number=22
prog.y(input_qubit[1]) # number=32
prog.x(input_qubit[1]) # number=23
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = FakeVigo()
    sample_shot = 7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy1633.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 32.292857
| 82
| 0.61712
|
d71cedee5f4690a5e597744a3615d0446aa5c254
| 6,976
|
py
|
Python
|
sample-input/benchmarks/LRA/geometry.py
|
GiudGiud/OpenMOC
|
83623ebe64b558d45fa021132f812d425774673f
|
[
"MIT"
] | null | null | null |
sample-input/benchmarks/LRA/geometry.py
|
GiudGiud/OpenMOC
|
83623ebe64b558d45fa021132f812d425774673f
|
[
"MIT"
] | 1
|
2018-06-26T19:51:58.000Z
|
2018-06-26T19:51:58.000Z
|
sample-input/benchmarks/LRA/geometry.py
|
GiudGiud/OpenMOC
|
83623ebe64b558d45fa021132f812d425774673f
|
[
"MIT"
] | null | null | null |
import openmoc
###############################################################################
# Main Simulation Parameters
###############################################################################
options = openmoc.options.Options()
num_threads = options.getNumThreads()
track_spacing = options.getTrackSpacing()
num_azim = options.getNumAzimAngles()
tolerance = options.getTolerance()
max_iters = options.getMaxIterations()
openmoc.log.set_log_level('NORMAL')
###############################################################################
# Creating Materials
###############################################################################
openmoc.log.py_printf('NORMAL', 'Importing materials data from HDF5...')
materials = openmoc.materialize.load_from_hdf5('LRA-mgxs.h5', '')
###############################################################################
# Creating Surfaces
###############################################################################
openmoc.log.py_printf('NORMAL', 'Creating surfaces...')
left = openmoc.XPlane(x=-82.5)
right = openmoc.XPlane(x=82.5)
bottom = openmoc.YPlane(y=-82.5)
top = openmoc.YPlane(y=82.5)
left.setBoundaryType(openmoc.REFLECTIVE)
right.setBoundaryType(openmoc.VACUUM)
bottom.setBoundaryType(openmoc.REFLECTIVE)
top.setBoundaryType(openmoc.VACUUM)
###############################################################################
# Creating Cells and Universes
###############################################################################
openmoc.log.py_printf('NORMAL', 'Creating cells...')
# Region 1
region1_cell = openmoc.Cell(name='region 1')
region1_cell.setFill(materials['region_1'])
region1 = openmoc.Universe(name='region 1')
region1.addCell(region1_cell)
# Region 2
region2_cell = openmoc.Cell(name='region 2')
region2_cell.setFill(materials['region_2'])
region2 = openmoc.Universe(name='region 2')
region2.addCell(region2_cell)
# Region 3
region3_cell = openmoc.Cell(name='region 3')
region3_cell.setFill(materials['region_3'])
region3 = openmoc.Universe(name='region 3')
region3.addCell(region3_cell)
# Region 4
region4_cell = openmoc.Cell(name='region 4')
region4_cell.setFill(materials['region_4'])
region4 = openmoc.Universe(name='region 4')
region4.addCell(region4_cell)
# Region 5
region5_cell = openmoc.Cell(name='region 5')
region5_cell.setFill(materials['region_5'])
region5 = openmoc.Universe(name='region 5')
region5.addCell(region5_cell)
# Region 6
region6_cell = openmoc.Cell(name='region 6')
region6_cell.setFill(materials['region_6'])
region6 = openmoc.Universe(name='region 6')
region6.addCell(region6_cell)
# Cells
assembly1_cell = openmoc.Cell(name='assembly 1')
assembly2_cell = openmoc.Cell(name='assembly 2')
assembly3_cell = openmoc.Cell(name='assembly 3')
assembly4_cell = openmoc.Cell(name='assembly 4')
assembly5_cell = openmoc.Cell(name='assembly 5')
assembly6_cell = openmoc.Cell(name='assembly 6')
assembly1 = openmoc.Universe(name='assembly 1')
assembly2 = openmoc.Universe(name='assembly 2')
assembly3 = openmoc.Universe(name='assembly 3')
assembly4 = openmoc.Universe(name='assembly 4')
assembly5 = openmoc.Universe(name='assembly 5')
assembly6 = openmoc.Universe(name='assembly 6')
assembly1.addCell(assembly1_cell)
assembly2.addCell(assembly2_cell)
assembly3.addCell(assembly3_cell)
assembly4.addCell(assembly4_cell)
assembly5.addCell(assembly5_cell)
assembly6.addCell(assembly6_cell)
# Root cell/universe
root_cell = openmoc.Cell(name='root cell')
root_cell.addSurface(halfspace=+1, surface=left)
root_cell.addSurface(halfspace=-1, surface=right)
root_cell.addSurface(halfspace=+1, surface=bottom)
root_cell.addSurface(halfspace=-1, surface=top)
root_universe = openmoc.Universe(name='root universe')
root_universe.addCell(root_cell)
###############################################################################
# Creating Lattices
###############################################################################
openmoc.log.py_printf('NORMAL', 'Creating LRA lattices...')
# Assembly 1
assembly1_lattice = openmoc.Lattice(name='assembly 1')
assembly1_lattice.setWidth(width_x=1.5, width_y=1.5)
template = [[region1] * 10] * 10
assembly1_lattice.setUniverses([template])
assembly1_cell.setFill(assembly1_lattice)
# Assembly 2
assembly2_lattice = openmoc.Lattice(name='assembly 2')
assembly2_lattice.setWidth(width_x=1.5, width_y=1.5)
template = [[region2] * 10] * 10
assembly2_lattice.setUniverses([template])
assembly2_cell.setFill(assembly2_lattice)
# Assembly 3
assembly3_lattice = openmoc.Lattice(name='assembly 3')
assembly3_lattice.setWidth(width_x=1.5, width_y=1.5)
template = [[region3] * 10] * 10
assembly3_lattice.setUniverses([template])
assembly3_cell.setFill(assembly3_lattice)
# Assembly 4
assembly4_lattice = openmoc.Lattice(name='assembly 4')
assembly4_lattice.setWidth(width_x=1.5, width_y=1.5)
template = [[region4] * 10] * 10
assembly4_lattice.setUniverses([template])
assembly4_cell.setFill(assembly4_lattice)
# Assembly 5
assembly5_lattice = openmoc.Lattice(name='assembly 5')
assembly5_lattice.setWidth(width_x=1.5, width_y=1.5)
template = [[region5] * 10] * 10
assembly5_lattice.setUniverses([template])
assembly5_cell.setFill(assembly5_lattice)
# Assembly 6
assembly6_lattice = openmoc.Lattice(name='assembly 6')
assembly6_lattice.setWidth(width_x=1.5, width_y=1.5)
template = [[region6] * 10] * 10
assembly6_lattice.setUniverses([template])
assembly6_cell.setFill(assembly6_lattice)
# Full core
core_lattice = openmoc.Lattice(name='core')
core_lattice.setWidth(width_x=15.0, width_y=15.0)
universes = {7 : assembly1, 8 : assembly2, 9: assembly3,
10 : assembly4, 11 : assembly5, 12 : assembly6}
template = [[12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12],
[12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12],
[ 9, 9, 9, 9, 9, 9, 9, 12, 12, 12, 12],
[ 9, 9, 9, 9, 9, 9, 9, 10, 12, 12, 12],
[ 8, 7, 7, 7, 7, 8, 8, 11, 11, 12, 12],
[ 8, 7, 7, 7, 7, 8, 8, 11, 11, 12, 12],
[ 7, 7, 7, 7, 7, 7, 7, 9, 9, 12, 12],
[ 7, 7, 7, 7, 7, 7, 7, 9, 9, 12, 12],
[ 7, 7, 7, 7, 7, 7, 7, 9, 9, 12, 12],
[ 7, 7, 7, 7, 7, 7, 7, 9, 9, 12, 12],
[ 8, 7, 7, 7, 7, 8, 8, 9, 9, 12, 12]]
for i in range(11):
for j in range(11):
template[i][j] = universes[template[i][j]]
core_lattice.setUniverses([template])
root_cell.setFill(core_lattice)
###############################################################################
# Creating the Geometry
###############################################################################
openmoc.log.py_printf('NORMAL', 'Creating geometry...')
geometry = openmoc.Geometry()
geometry.setRootUniverse(root_universe)
geometry.initializeFlatSourceRegions()
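###############################################################################
# Illustrative next step (not part of the original sample input)
###############################################################################
# The option values read at the top are normally consumed by a track generator
# and a solver. The calls below follow the pattern of other OpenMOC sample
# inputs and are an assumption, not verified against this exact OpenMOC version.
# track_generator = openmoc.TrackGenerator(geometry, num_azim, track_spacing)
# track_generator.generateTracks()
# solver = openmoc.CPUSolver(track_generator)
# solver.setNumThreads(num_threads)
# solver.setConvergenceThreshold(tolerance)
# solver.computeEigenvalue(max_iters)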
| 34.88
| 79
| 0.609375
|
efedaa67534f0eea2de2b39c899b2d47ab60b189
| 11,569
|
py
|
Python
|
queenbee/repository/package.py
|
AntoineDao/queenbee
|
800d5b26a69cffbce85864ea9430304b7fb8d11a
|
[
"MIT"
] | null | null | null |
queenbee/repository/package.py
|
AntoineDao/queenbee
|
800d5b26a69cffbce85864ea9430304b7fb8d11a
|
[
"MIT"
] | null | null | null |
queenbee/repository/package.py
|
AntoineDao/queenbee
|
800d5b26a69cffbce85864ea9430304b7fb8d11a
|
[
"MIT"
] | null | null | null |
import os
import re
import hashlib
from io import BytesIO
from datetime import datetime
from tarfile import TarInfo, TarFile
from typing import Dict, Tuple, Union
from pydantic import Field, constr
from ..plugin import Plugin
from ..recipe import Recipe, BakedRecipe
from ..base.request import make_request, urljoin
from ..base.metadata import MetaData
def reset_tar(tarinfo: TarInfo) -> TarInfo:
tarinfo.uid = tarinfo.gid = 0
tarinfo.uname = tarinfo.gname = '0'
return tarinfo
def add_to_tar(tar: TarFile, data: bytes, filename: str):
tarinfo = TarInfo(name=filename)
tarinfo.size = len(data)
tarinfo.mtime = int(datetime.timestamp(datetime.utcnow()))
tarinfo.mode = 436
tarinfo.type = b'0'
tarinfo.uid = tarinfo.gid = 0
tarinfo.uname = tarinfo.gname = '0'
tar.addfile(tarinfo, BytesIO(data))
class PackageVersion(MetaData):
"""Package Version
A MetaData object to distinguish a specific package version within a repository
index.
"""
type: constr(regex='^PackageVersion$') = 'PackageVersion'
url: str
created: datetime
digest: str
slug: str = Field(
None,
description='A slug of the repository name and the package name.'
)
kind: str = Field(
'',
description='The type of Queenbee package (ie: recipe or plugin)'
)
readme: str = Field(
None,
description='The README file string for this package'
)
manifest: Union[Recipe, Plugin] = Field(
None,
description="The package Recipe or Plugin manifest"
)
@classmethod
def from_resource(
cls,
resource: Union[Plugin, Recipe],
created: datetime = None,
include_manifest: bool = False,
):
"""Generate a Package Version from a resource
Arguments:
resource {Union[Plugin, Recipe]} -- A resource to be versioned (plugin
or recipe)
Keyword Arguments:
created {datetime} -- When version was generated (default: {None})
Raises:
ValueError: The resource is invalid
Returns:
PackageVersion -- A package version object
"""
package_path = f'{resource.metadata.name}-{resource.metadata.tag}.tgz'
if created is None:
created = datetime.utcnow()
input_dict = resource.metadata.to_dict()
input_dict['type'] = 'PackageVersion'
input_dict['digest'] = resource.__hash__
input_dict['created'] = created
input_dict['url'] = package_path
if isinstance(resource, Plugin):
input_dict['kind'] = 'plugin'
elif isinstance(resource, Recipe):
input_dict['kind'] = 'recipe'
if include_manifest:
input_dict['manifest'] = resource.to_dict()
return cls.parse_obj(input_dict)
@classmethod
def pack_tar(cls,
resource: Union[Plugin, Recipe],
readme: str = None,
include_manifest: bool = False,
) -> Tuple['PackageVersion', BytesIO]:
"""Package a resource into a gzipped tar archive
Arguments:
resource {Union[Plugin, Recipe]} -- A resource to be packaged (plugin or
recipe)
Keyword Arguments:
readme {str} -- resource README.md file text if it exists
(default: {None})
Raises:
ValueError: Failed to create the package
Returns:
PackageVersion -- A package version object
BytesIO -- A BytesIO stream of the gzipped tar file
"""
file_object = BytesIO()
resource_version = cls.from_resource(resource)
tar = TarFile.open(
name=resource_version.url,
mode='w:gz',
fileobj=file_object,
)
resource_bytes = bytes(resource.json(
by_alias=True, exclude_unset=False), 'utf-8')
resource_version_bytes = bytes(resource_version.json(
by_alias=True, exclude_unset=False), 'utf-8')
add_to_tar(
tar=tar,
data=resource_bytes,
filename='resource.json'
)
add_to_tar(
tar=tar,
data=resource_version_bytes,
filename='version.json'
)
if readme is not None:
add_to_tar(
tar=tar,
data=bytes(readme, 'utf-8'),
filename='README.md'
)
tar.close()
resource_version.readme = readme
if include_manifest:
resource_version.manifest = resource
return resource_version, file_object
@classmethod
def unpack_tar(
cls,
tar_file: BytesIO,
verify_digest: bool = True,
digest: str = None
) -> 'PackageVersion':
tar = TarFile.open(fileobj=tar_file)
manifest_bytes = None
version = None
readme_string = None
read_digest = None
for member in tar.getmembers():
if member.name == 'resource.json':
manifest_bytes = tar.extractfile(member).read()
read_digest = hashlib.sha256(manifest_bytes).hexdigest()
                if verify_digest:
                    assert read_digest == digest, \
                        ValueError(
                            f'Hash of resource.json file is different from the one'
                            f' expected from the index. Expected {digest} but got'
                            f' {read_digest}'
                        )
elif member.name == 'version.json':
version = cls.parse_raw(tar.extractfile(member).read())
elif member.name == 'README.md':
readme_string = tar.extractfile(member).read().decode('utf-8')
if manifest_bytes is None:
raise ValueError(
'package tar file did not contain a resource.json file so could not be'
' decoded.'
)
try:
manifest = Plugin.parse_raw(manifest_bytes)
version.kind = 'plugin'
except Exception as error:
try:
manifest = Recipe.parse_raw(manifest_bytes)
version.kind = 'recipe'
except Exception as error:
raise ValueError(
'Package resource.json could not be read as a Recipe or a plugin')
version.manifest = manifest
version.readme = readme_string
version.digest = read_digest
return version
@classmethod
def from_package(cls, package_path: str):
"""Generate a package version from a packaged resource
Arguments:
package_path {str} -- Path to the package
Returns:
PackageVersion -- A package version object
"""
file_path = os.path.normpath(os.path.abspath(package_path)).replace('\\', '/')
with open(file_path, 'rb') as f:
filebytes = BytesIO(f.read())
version = cls.unpack_tar(tar_file=filebytes, verify_digest=False)
return version
def fetch_package(self, source_url: str = None, verify_digest: bool = True,
auth_header: Dict[str, str] = {}) -> 'PackageVersion':
if source_url.startswith('file:'):
source_path = source_url.split('file:///')[1]
if os.path.isabs(source_path):
package_path = os.path.join(source_path, self.url)
else:
package_path = os.path.join(os.getcwd(), source_path, self.url)
return self.from_package(package_path)
package_url = urljoin(source_url, self.url)
res = make_request(url=package_url, auth_header=auth_header)
filebytes = BytesIO(res.read())
return self.unpack_tar(
tar_file=filebytes,
verify_digest=verify_digest,
digest=self.digest
)
@staticmethod
def read_readme(folder_path: str) -> str:
"""Infer the path to the readme within a folder and read it
Arguments:
folder_path {str} -- Path to the folder where a readme should be found
Returns:
str -- The found Readme text (or None if no readme is found)
"""
path_to_readme = None
readme_pattern = r'^readme\.md$'
for file in os.listdir(folder_path):
res = re.match(readme_pattern, file, re.IGNORECASE)
if res is not None:
path_to_readme = os.path.join(folder_path, file)
if path_to_readme is not None:
with open(path_to_readme, 'r') as f:
return f.read()
@classmethod
def package_resource(cls,
resource: Union[Plugin, Recipe],
check_deps: bool = True,
readme: str = None
) -> Tuple['PackageVersion', BytesIO]:
"""Package a Recipe or Plugin into a gzipped tar file
Arguments:
resource {Union[Plugin, Recipe]} -- A plugin or recipe
Keyword Arguments:
readme {str} -- resource README.md file text if it exists
(default: {None})
Returns:
PackageVersion -- A plugin or recipe version object
BytesIO -- A BytesIO stream of the gzipped tar file
"""
if check_deps and isinstance(resource, Recipe):
BakedRecipe.from_recipe(resource)
return cls.pack_tar(
resource=resource,
readme=readme
)
@classmethod
def package_folder(
cls,
resource_type: str,
folder_path: str,
check_deps: bool = True,
) -> Tuple['PackageVersion', BytesIO]:
"""Package a plugin or Recipe from its folder into a gzipped tar file
Arguments:
folder_path {str} -- Path to the folder where the Plugin or Recipe is defined
Keyword Arguments:
check_deps {bool} -- Fetch the dependencies from their source and validate
the recipe by baking it (default: {True})
Returns:
PackageVersion -- A recipe or plugin version object
BytesIO -- A BytesIO stream of the gzipped tar file
"""
if resource_type == 'recipe':
resource = Recipe.from_folder(folder_path=folder_path)
elif resource_type == 'plugin':
resource = Plugin.from_folder(folder_path=folder_path)
else:
raise ValueError(
f'resource_type must be one of ["recipe", "plugin"], not: {resource_type}')
return cls.package_resource(
resource=resource,
check_deps=check_deps,
readme=cls.read_readme(folder_path)
)
def search_match(self, search_string: str = None) -> bool:
"""Return a boolean indicating whether the search string matches the given package
If no search string is specified this function will return True.
Args:
search_string (str, optional): The search string to use. Defaults to None.
Returns:
bool: Whether the search string matches the package or not
"""
if search_string is None:
return True
search_string = search_string.lower()
if search_string in self.name:
return True
if self.keywords is not None and search_string in self.keywords:
return True
return False
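def _example_package_round_trip(folder_path: str):
    """A minimal, hypothetical sketch: package a plugin folder and read the archive back.
    `folder_path` is a placeholder for a folder containing a plugin definition;
    everything else uses only the classmethods defined above.
    """
    version, tar_stream = PackageVersion.package_folder(
        resource_type='plugin',
        folder_path=folder_path,
    )
    # Rewind the in-memory archive (pack_tar leaves the cursor at the end),
    # then decode it again as if it had been fetched from an index.
    tar_stream.seek(0)
    unpacked = PackageVersion.unpack_tar(tar_file=tar_stream, verify_digest=False)
    return version, unpacked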
| 30.049351
| 91
| 0.579825
|
86ca9dadf359b2fe4f7d075b2721f428668a7b2f
| 16,029
|
py
|
Python
|
autotest/utilities/test_gdal_contour.py
|
chambbj/gdal
|
3d56aecb5b8e9890dae8f560acd099992e707d12
|
[
"MIT"
] | 1
|
2015-02-16T16:51:38.000Z
|
2015-02-16T16:51:38.000Z
|
autotest/utilities/test_gdal_contour.py
|
theduckylittle/gdal
|
61be261cae524582ba28bceebb027cc1e967e0ab
|
[
"MIT"
] | null | null | null |
autotest/utilities/test_gdal_contour.py
|
theduckylittle/gdal
|
61be261cae524582ba28bceebb027cc1e967e0ab
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: gdal_contour testing
# Author: Even Rouault <even dot rouault @ mines-paris dot org>
#
###############################################################################
# Copyright (c) 2009, Even Rouault <even dot rouault @ mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
import os
sys.path.append( '../pymod' )
from osgeo import gdal
from osgeo import ogr
import gdaltest
import ogrtest
import test_cli_utilities
import array
###############################################################################
# Test with -a and -i options
def test_gdal_contour_1():
if test_cli_utilities.get_gdal_contour_path() is None:
return 'skip'
try:
os.remove('tmp/contour.shp')
except:
pass
try:
os.remove('tmp/contour.dbf')
except:
pass
try:
os.remove('tmp/contour.shx')
except:
pass
drv = gdal.GetDriverByName('GTiff')
wkt = 'GEOGCS[\"WGS 84\",DATUM[\"WGS_1984\",SPHEROID[\"WGS 84\",6378137,298.257223563,AUTHORITY[\"EPSG\",\"7030\"]],AUTHORITY[\"EPSG\",\"6326\"]],PRIMEM[\"Greenwich\",0,AUTHORITY[\"EPSG\",\"8901\"]],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9108\"]],AUTHORITY[\"EPSG\",\"4326\"]]'
size = 160
precision = 1. / size
ds = drv.Create('tmp/gdal_contour.tif', size, size, 1)
ds.SetProjection( wkt )
ds.SetGeoTransform( [ 1, precision, 0, 50, 0, -precision ] )
raw_data = array.array('h',[10 for i in range(int(size/2))]).tostring()
for i in range(int(size/2)):
ds.WriteRaster( int(size/4), i+int(size/4), int(size/2), 1, raw_data,
buf_type = gdal.GDT_Int16,
band_list = [1] )
raw_data = array.array('h',[20 for i in range(int(size/2))]).tostring()
for i in range(int(size/4)):
ds.WriteRaster( int(size/4)+int(size/8), i+int(size/4)+int(size/8), int(size/4), 1, raw_data,
buf_type = gdal.GDT_Int16,
band_list = [1] )
raw_data = array.array('h',[25 for i in range(int(size/4))]).tostring()
for i in range(int(size/8)):
ds.WriteRaster( int(size/4)+int(size/8)+int(size/16), i+int(size/4)+int(size/8)+int(size/16), int(size/8), 1, raw_data,
buf_type = gdal.GDT_Int16,
band_list = [1] )
ds = None
(out, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_gdal_contour_path() + ' -a elev -i 10 tmp/gdal_contour.tif tmp/contour.shp')
if not (err is None or err == '') :
gdaltest.post_reason('got error/warning')
print(err)
return 'fail'
ds = ogr.Open('tmp/contour.shp')
expected_envelopes = [ [ 1.25, 1.75, 49.25, 49.75 ],
[ 1.25+0.125, 1.75-0.125, 49.25+0.125, 49.75-0.125 ] ]
expected_height = [ 10, 20 ]
lyr = ds.ExecuteSQL("select * from contour order by elev asc")
if lyr.GetSpatialRef().ExportToWkt().find('GCS_WGS_1984') == -1:
print('Did not get expected spatial ref')
return 'fail'
if lyr.GetFeatureCount() != len(expected_envelopes):
print('Got %d features. Expected %d' % (lyr.GetFeatureCount(), len(expected_envelopes)))
return 'fail'
i = 0
feat = lyr.GetNextFeature()
while feat is not None:
envelope = feat.GetGeometryRef().GetEnvelope()
if feat.GetField('elev') != expected_height[i]:
print('Got %f. Expected %f' % (feat.GetField('elev'), expected_height[i]))
return 'fail'
for j in range(4):
if abs(expected_envelopes[i][j] - envelope[j]) > precision/2*1.001:
print('i=%d, wkt=%s' % (i, feat.GetGeometryRef().ExportToWkt()))
print(feat.GetGeometryRef().GetEnvelope())
print(expected_envelopes[i])
print('%f, %f' % (expected_envelopes[i][j] - envelope[j], precision / 2))
return 'fail'
i = i + 1
feat = lyr.GetNextFeature()
ds.ReleaseResultSet(lyr)
ds.Destroy()
return 'success'
###############################################################################
# Test with -fl option and -3d option
def test_gdal_contour_2():
if test_cli_utilities.get_gdal_contour_path() is None:
return 'skip'
try:
os.remove('tmp/contour.shp')
except:
pass
try:
os.remove('tmp/contour.dbf')
except:
pass
try:
os.remove('tmp/contour.shx')
except:
pass
# put -3d just after -fl to test #2793
gdaltest.runexternal(test_cli_utilities.get_gdal_contour_path() + ' -a elev -fl 10 20 25 -3d tmp/gdal_contour.tif tmp/contour.shp')
size = 160
precision = 1. / size
ds = ogr.Open('tmp/contour.shp')
expected_envelopes = [ [ 1.25, 1.75, 49.25, 49.75 ],
[ 1.25+0.125, 1.75-0.125, 49.25+0.125, 49.75-0.125 ],
[ 1.25+0.125+0.0625, 1.75-0.125-0.0625, 49.25+0.125+0.0625, 49.75-0.125-0.0625 ] ]
expected_height = [ 10, 20, 25 ]
lyr = ds.ExecuteSQL("select * from contour order by elev asc")
if lyr.GetFeatureCount() != len(expected_envelopes):
print('Got %d features. Expected %d' % (lyr.GetFeatureCount(), len(expected_envelopes)))
return 'fail'
i = 0
feat = lyr.GetNextFeature()
while feat is not None:
if feat.GetGeometryRef().GetZ(0) != expected_height[i]:
print('Got %f as z. Expected %f' % (feat.GetGeometryRef().GetZ(0), expected_height[i]))
return 'fail'
envelope = feat.GetGeometryRef().GetEnvelope()
if feat.GetField('elev') != expected_height[i]:
print('Got %f. Expected %f' % (feat.GetField('elev'), expected_height[i]))
return 'fail'
for j in range(4):
if abs(expected_envelopes[i][j] - envelope[j]) > precision/2*1.001:
print('i=%d, wkt=%s' % (i, feat.GetGeometryRef().ExportToWkt()))
print(feat.GetGeometryRef().GetEnvelope())
print(expected_envelopes[i])
print('%f, %f' % (expected_envelopes[i][j] - envelope[j], precision / 2))
return 'fail'
i = i + 1
feat = lyr.GetNextFeature()
ds.ReleaseResultSet(lyr)
ds.Destroy()
return 'success'
###############################################################################
# Test on a real DEM
def test_gdal_contour_3():
if test_cli_utilities.get_gdal_contour_path() is None:
return 'skip'
try:
os.remove('tmp/contour.shp')
except:
pass
try:
os.remove('tmp/contour.dbf')
except:
pass
try:
os.remove('tmp/contour.shx')
except:
pass
# put -3d just after -fl to test #2793
gdaltest.runexternal(test_cli_utilities.get_gdal_contour_path() + ' -a elev -i 50 ../gdrivers/data/n43.dt0 tmp/contour.shp')
ds = ogr.Open('tmp/contour.shp')
lyr = ds.ExecuteSQL("select distinct elev from contour order by elev asc")
expected_heights = [ 100, 150, 200, 250, 300, 350, 400, 450 ]
if lyr.GetFeatureCount() != len(expected_heights):
print('Got %d features. Expected %d' % (lyr.GetFeatureCount(), len(expected_heights)))
return 'fail'
i = 0
feat = lyr.GetNextFeature()
while feat is not None:
if feat.GetField('elev') != expected_heights[i]:
return 'fail'
i = i + 1
feat = lyr.GetNextFeature()
ds.ReleaseResultSet(lyr)
ds.Destroy()
return 'success'
###############################################################################
# Test contour orientation
def test_gdal_contour_4():
if test_cli_utilities.get_gdal_contour_path() is None:
return 'skip'
try:
os.remove('tmp/contour_orientation.shp')
except:
pass
try:
os.remove('tmp/contour_orientation.dbf')
except:
pass
try:
os.remove('tmp/contour_orientation.shx')
except:
pass
drv = gdal.GetDriverByName('GTiff')
wkt = 'GEOGCS[\"WGS 84\",DATUM[\"WGS_1984\",SPHEROID[\"WGS 84\",6378137,298.257223563,AUTHORITY[\"EPSG\",\"7030\"]],AUTHORITY[\"EPSG\",\"6326\"]],PRIMEM[\"Greenwich\",0,AUTHORITY[\"EPSG\",\"8901\"]],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9108\"]],AUTHORITY[\"EPSG\",\"4326\"]]'
size = 160
precision = 1. / size
ds = drv.Create('tmp/gdal_contour_orientation.tif', size, size, 1)
ds.SetProjection( wkt )
ds.SetGeoTransform( [ 1, precision, 0, 50, 0, -precision ] )
# Make the elevation 15 for the whole image
raw_data = array.array('h',[15 for i in range(int(size))]).tostring()
for i in range(int(size)):
ds.WriteRaster( 0, i, int(size), 1, raw_data,
buf_type = gdal.GDT_Int16,
band_list = [1] )
# Create a hill with elevation 25
raw_data = array.array('h',[25 for i in range(2)]).tostring()
for i in range(2):
ds.WriteRaster( int(size/4)+int(size/8)-1, i+int(size/2)-1, 2, 1, raw_data,
buf_type = gdal.GDT_Int16,
band_list = [1] )
# Create a depression with elevation 5
raw_data = array.array('h',[5 for i in range(2)]).tostring()
for i in range(2):
ds.WriteRaster( int(size/2)+int(size/8)-1, i+int(size/2)-1, 2, 1, raw_data,
buf_type = gdal.GDT_Int16,
band_list = [1] )
ds = None
gdaltest.runexternal(test_cli_utilities.get_gdal_contour_path() + ' -a elev -i 10 tmp/gdal_contour_orientation.tif tmp/contour_orientation1.shp')
ds = ogr.Open('tmp/contour_orientation1.shp')
expected_contours = [ 'LINESTRING (1.621875 49.493749999999999,'+
'1.628125 49.493749999999999,'+
'1.63125 49.496875000000003,'+
'1.63125 49.503124999999997,'+
'1.628125 49.50625,'+
'1.621875 49.50625,'+
'1.61875 49.503124999999997,'+
'1.61875 49.496875000000003,'+
'1.621875 49.493749999999999)',
'LINESTRING (1.371875 49.493749999999999,'+
'1.36875 49.496875000000003,'+
'1.36875 49.503124999999997,'+
'1.371875 49.50625,'+
'1.378125 49.50625,'+
'1.38125 49.503124999999997,'+
'1.38125 49.496875000000003,'+
'1.378125 49.493749999999999,'+
'1.371875 49.493749999999999)' ]
expected_elev = [ 10, 20 ]
lyr = ds.ExecuteSQL("select * from contour_orientation1 order by elev asc")
if lyr.GetFeatureCount() != len(expected_contours):
print('Got %d features. Expected %d' % (lyr.GetFeatureCount(), len(expected_contours)))
return 'fail'
i = 0
test_failed = False
feat = lyr.GetNextFeature()
while feat is not None:
expected_geom = ogr.CreateGeometryFromWkt(expected_contours[i])
if feat.GetField('elev') != expected_elev[i]:
print('Got %f. Expected %f' % (feat.GetField('elev'), expected_elev[i]))
return 'fail'
if ogrtest.check_feature_geometry(feat, expected_geom) != 0:
print('Got %s.\nExpected %s' % (feat.GetGeometryRef().ExportToWkt(),expected_contours[i]))
test_failed = True
i = i + 1
feat = lyr.GetNextFeature()
ds.ReleaseResultSet(lyr)
ds.Destroy()
if test_failed:
return 'fail'
else:
return 'success'
###############################################################################
# Test contour orientation
def test_gdal_contour_5():
if test_cli_utilities.get_gdal_contour_path() is None:
return 'skip'
ds = None
gdaltest.runexternal(test_cli_utilities.get_gdal_contour_path() + ' -a elev -i 10 data/contour_orientation.tif tmp/contour_orientation2.shp')
ds = ogr.Open('tmp/contour_orientation2.shp')
expected_contours = [ 'LINESTRING (0 2,'+
'0.5 2.0,'+
'1.5 2.0,'+
'1.954542932445554 2.5,'+
'2.124997615823304 3.5,'+
'1.5 3.954546085074803,'+
'0.5 4.066665649414062,'+
'0.0 4.066665649414062)' ]
expected_elev = [ 140 ]
lyr = ds.ExecuteSQL("select * from contour_orientation2 order by elev asc")
if lyr.GetFeatureCount() != len(expected_contours):
print('Got %d features. Expected %d' % (lyr.GetFeatureCount(), len(expected_contours)))
return 'fail'
i = 0
test_failed = False
feat = lyr.GetNextFeature()
while feat is not None:
expected_geom = ogr.CreateGeometryFromWkt(expected_contours[i])
if feat.GetField('elev') != expected_elev[i]:
print('Got %f. Expected %f' % (feat.GetField('elev'), expected_elev[i]))
return 'fail'
if ogrtest.check_feature_geometry(feat, expected_geom) != 0:
print('Got %s.\nExpected %s' % (feat.GetGeometryRef().ExportToWkt(),expected_contours[i]))
test_failed = True
i = i + 1
feat = lyr.GetNextFeature()
ds.ReleaseResultSet(lyr)
ds.Destroy()
if test_failed:
return 'fail'
else:
return 'success'
###############################################################################
# Cleanup
def test_gdal_contour_cleanup():
if test_cli_utilities.get_gdal_contour_path() is None:
return 'skip'
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('tmp/contour.shp')
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('tmp/contour_orientation1.shp')
ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('tmp/contour_orientation2.shp')
try:
os.remove('tmp/gdal_contour.tif')
os.remove('tmp/gdal_contour_orientation.tif')
except:
pass
return 'success'
gdaltest_list = [
test_gdal_contour_1,
test_gdal_contour_2,
test_gdal_contour_3,
test_gdal_contour_4,
test_gdal_contour_5,
test_gdal_contour_cleanup
]
if __name__ == '__main__':
gdaltest.setup_run( 'test_gdal_contour' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
| 36.346939
| 298
| 0.560671
|
098863f81a077da1881631e071b5bb3bf0695592
| 7,300
|
py
|
Python
|
mastodon/streaming.py
|
TheoLeCalvar/Mastodon.py
|
f352142dfd39cf00fbf8a0b0257f1169daad5947
|
[
"MIT"
] | 1
|
2019-09-07T17:39:08.000Z
|
2019-09-07T17:39:08.000Z
|
mastodon/streaming.py
|
TheoLeCalvar/Mastodon.py
|
f352142dfd39cf00fbf8a0b0257f1169daad5947
|
[
"MIT"
] | 1
|
2019-09-24T21:14:32.000Z
|
2019-09-24T21:14:32.000Z
|
mastodon/streaming.py
|
TheoLeCalvar/Mastodon.py
|
f352142dfd39cf00fbf8a0b0257f1169daad5947
|
[
"MIT"
] | null | null | null |
"""
Handlers for the Streaming API:
https://github.com/tootsuite/mastodon/blob/master/docs/Using-the-API/Streaming-API.md
"""
import json
import six
from mastodon import Mastodon
from mastodon.Mastodon import MastodonMalformedEventError, MastodonNetworkError, MastodonReadTimeout
from requests.exceptions import ChunkedEncodingError, ReadTimeout
class StreamListener(object):
"""Callbacks for the streaming API. Create a subclass, override the on_xxx
methods for the kinds of events you're interested in, then pass an instance
of your subclass to Mastodon.user_stream(), Mastodon.public_stream(), or
Mastodon.hashtag_stream()."""
def on_update(self, status):
"""A new status has appeared! 'status' is the parsed JSON dictionary
describing the status."""
pass
def on_notification(self, notification):
"""A new notification. 'notification' is the parsed JSON dictionary
describing the notification."""
pass
def on_abort(self, err):
"""There was a connection error, read timeout or other error fatal to
the streaming connection. The exception object about to be raised
is passed to this function for reference.
Note that the exception will be raised properly once you return from this
function, so if you are using this handler to reconnect, either never
return or start a thread and then catch and ignore the exception.
"""
pass
def on_delete(self, status_id):
"""A status has been deleted. status_id is the status' integer ID."""
pass
def on_conversation(self, conversation):
"""A direct message (in the direct stream) has been received. conversation
contains the resulting conversation dict."""
pass
def handle_heartbeat(self):
"""The server has sent us a keep-alive message. This callback may be
useful to carry out periodic housekeeping tasks, or just to confirm
that the connection is still open."""
pass
def handle_stream(self, response):
"""
Handles a stream of events from the Mastodon server. When each event
is received, the corresponding .on_[name]() method is called.
response; a requests response object with the open stream for reading.
"""
event = {}
line_buffer = bytearray()
try:
for chunk in response.iter_content(chunk_size = 1):
if chunk:
for chunk_part in chunk:
chunk_part = bytearray([chunk_part])
if chunk_part == b'\n':
try:
line = line_buffer.decode('utf-8')
except UnicodeDecodeError as err:
exception = MastodonMalformedEventError("Malformed UTF-8")
self.on_abort(exception)
six.raise_from(
exception,
err
)
if line == '':
self._dispatch(event)
event = {}
else:
event = self._parse_line(line, event)
line_buffer = bytearray()
else:
line_buffer.extend(chunk_part)
except ChunkedEncodingError as err:
exception = MastodonNetworkError("Server ceased communication.")
self.on_abort(exception)
six.raise_from(
exception,
err
)
except MastodonReadTimeout as err:
            exception = MastodonReadTimeout("Timed out while reading from server.")
self.on_abort(exception)
six.raise_from(
exception,
err
)
def _parse_line(self, line, event):
if line.startswith(':'):
self.handle_heartbeat()
else:
try:
key, value = line.split(': ', 1)
except:
exception = MastodonMalformedEventError("Malformed event.")
self.on_abort(exception)
raise exception
# According to the MDN spec, repeating the 'data' key
# represents a newline(!)
if key in event:
event[key] += '\n' + value
else:
event[key] = value
return event
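    # For example, _parse_line merges the two SSE lines
    #     data: {"id": 1,
    #     data:  "content": "hi"}
    # into event['data'] == '{"id": 1,\n "content": "hi"}', which _dispatch can
    # then hand to json.loads. (Illustrative payload only.)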
def _dispatch(self, event):
try:
name = event['event']
data = event['data']
payload = json.loads(data, object_hook = Mastodon._Mastodon__json_hooks)
except KeyError as err:
exception = MastodonMalformedEventError('Missing field', err.args[0], event)
self.on_abort(exception)
six.raise_from(
exception,
err
)
except ValueError as err:
# py2: plain ValueError
# py3: json.JSONDecodeError, a subclass of ValueError
exception = MastodonMalformedEventError('Bad JSON', data)
self.on_abort(exception)
six.raise_from(
exception,
err
)
handler_name = 'on_' + name
try:
handler = getattr(self, handler_name)
except AttributeError as err:
exception = MastodonMalformedEventError('Bad event type', name)
self.on_abort(exception)
six.raise_from(
exception,
err
)
else:
handler(payload)
class CallbackStreamListener(StreamListener):
"""
Simple callback stream handler class.
Can optionally additionally send local update events to a separate handler.
"""
def __init__(self, update_handler = None, local_update_handler = None, delete_handler = None, notification_handler = None, conversation_handler = None):
super(CallbackStreamListener, self).__init__()
self.update_handler = update_handler
self.local_update_handler = local_update_handler
self.delete_handler = delete_handler
        self.notification_handler = notification_handler
        self.conversation_handler = conversation_handler
def on_update(self, status):
if self.update_handler != None:
self.update_handler(status)
try:
if self.local_update_handler != None and not "@" in status["account"]["acct"]:
self.local_update_handler(status)
except Exception as err:
six.raise_from(
MastodonMalformedEventError('received bad update', status),
err
)
def on_delete(self, deleted_id):
if self.delete_handler != None:
self.delete_handler(deleted_id)
def on_notification(self, notification):
if self.notification_handler != None:
self.notification_handler(notification)
def on_conversation(self, conversation):
if self.conversation_handler != None:
self.conversation_handler(conversation)
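class _ExamplePrintingListener(StreamListener):
    """A minimal, hypothetical StreamListener subclass, as described in its docstring.
    It only prints incoming events; attach it with e.g. api.user_stream(_ExamplePrintingListener()),
    where `api` is assumed to be an authenticated Mastodon client (not defined in this module).
    """
    def on_update(self, status):
        print(status['content'])
    def on_notification(self, notification):
        print(notification['type'])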
| 38.421053
| 156
| 0.570685
|
fb4c7c13695cf33b5d774127eb3d8f79c315d094
| 3,825
|
py
|
Python
|
ceda_cc/unitTestsS2.py
|
markelg/ceda-cc
|
47fa271c04c6fe4dcb10c04bdb389b19a56083f5
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
ceda_cc/unitTestsS2.py
|
markelg/ceda-cc
|
47fa271c04c6fe4dcb10c04bdb389b19a56083f5
|
[
"BSD-3-Clause-Clear"
] | 1
|
2019-07-26T14:45:05.000Z
|
2019-07-26T14:45:05.000Z
|
ceda_cc/unitTestsS2.py
|
agstephens/ceda-cc
|
c364d57562fd47a1a1fae8931bcce970975bc3cb
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import logging, time, os, sys
import utils_c4
import ceda_cc_config.config_c4 as config
from c4_run import fileMetadata, dummy, main
from xceptions import *
from file_utils import installedSupportedNetcdf
#### set up log file ####
tstring2 = '%4.4i%2.2i%2.2i' % time.gmtime()[0:3]
testLogFile = '%s__qclog_%s.txt' % ('unitTestsS2',tstring2)
log = logging.getLogger(testLogFile)
fHdlr = logging.FileHandler(testLogFile,mode='w')
fileFormatter = logging.Formatter('%(message)s')
fHdlr.setFormatter(fileFormatter)
log.addHandler(fHdlr)
log.setLevel(logging.INFO)
try:
fmd = fileMetadata(dummy=True)
fmd.loadNc( '/dummyPath/v1_day_a_b_1990-1991.nc')
except:
print 'Failed to parse a simple dummy file path'
raise baseException( 'Failed to parse a simple dummy file path' )
print 'OK: instantiated fileMetaData and parsed a simple dummy path'
p = dummy()
p.log = log
p.abortMessageCount = -1
p.pcfg = config.projectConfig( "__dummy" )
module = 'checkFileName'
c = utils_c4.checkFileName(parent=p)
fn = 'v1_t1_a_b_20060101-20101231.nc'
testId = '#10.001'
c.check( fn )
if c.errorCount == 0:
print 'OK: [%s] %s: valid file name with project=__dummy' % (module,fn)
else:
print 'Failed [%s] %s: valid file name' % (module,fn)
if sys.version_info >= (2,7):
## monitoting file handles uses a "subprocess" method which is not available in python 2.6
testId = '#11.001'
try:
m = main( args=['-p', '__dummy'], monitorFileHandles=True )
print 'OK: [%s]: dummy run completed without exception' % testId
except:
print 'Failed [%s]: dummy run triggered exception' % testId
raise
raise baseException( 'Failed [%s]: dummy run triggered exception' % testId )
testId = '#11.002'
if m.monitor.fhCountMax < 15:
print 'OK: [%s]: fhCountMax = %s' % ( testId, m.monitor.fhCountMax )
else:
print 'Failed [%s]: fhCountMax = %s' % ( testId, m.monitor.fhCountMax )
testId = '#11.003'
try:
m = main( args=['-p', '__dummy'], abortMessageCount=10 )
print 'Failed [%s]: did not trigger exception' % testId
except:
print 'OK: [%s]: attempt to trigger exception successful' % testId
extras = [
( '/data/work/cmip5/output1/pr_20110323/pr_3hr_HadGEM2-ES_historical_r2i1p1_200501010130-200512302230.nc', 'CMIP5', 0 ),
('/data/work/cmip5/output1/pr_20110323/pr_3hr_HadGEM2-ES_historical_r2i1p1_200001010130-200412302230.nc', 'CMIP5', 0 ) ]
kt = 0
for e in extras:
kt += 1
if os.path.isfile( e[0] ):
if 'cdms2' in installedSupportedNetcdf:
testId = '#20.%3.3i' % kt
m = main( args=['-p', e[1], '-f', e[0], '--force-cdms2','--ld', 'ld_test1' ], abortMessageCount=10 )
if m.ok:
print 'OK: [%s]: successfully checked test file with cdms2' % testId
else:
print 'Failed [%s]: incorrect test results' % testId
testId = '#21.%3.3i' % kt
m = main( args=['-p', e[1], '-f', e[0], '--force-ncq','--ld', 'ld_test2' ], abortMessageCount=10 )
if m.ok:
print 'OK: [%s]: successfully checked test file with ncq3' % testId
else:
print 'Failed [%s]: incorrect test results' % testId
if 'netCDF4' in installedSupportedNetcdf:
testId = '#22.%3.3i' % kt
m = main( args=['-p', e[1], '-f', e[0], '--force-pync4','--ld', 'ld_test3' ], abortMessageCount=10 )
if m.ok:
print 'OK: [%s]: successfully checked test file with python NetCDF4' % testId
else:
print 'Failed [%s]: incorrect test results' % testId
if 'Scientific' in installedSupportedNetcdf:
testId = '#23.%3.3i' % kt
m = main( args=['-p', e[1], '-f', e[0], '--force-scientific','--ld', 'ld_test4' ], abortMessageCount=10 )
if m.ok:
print 'OK: [%s]: successfully checked test file with python Scientific' % testId
else:
print 'Failed [%s]: incorrect test results' % testId
| 34.772727
| 120
| 0.654379
|
74853668e78a54577f2e42f18f05a1e22326e1df
| 583
|
py
|
Python
|
scieio/spectrometry/migrations/0009_auto_20200419_0359.py
|
arnelimperial/scieio
|
279a25766f20d074a3df824c0fbc8b2d8e35f272
|
[
"MIT"
] | null | null | null |
scieio/spectrometry/migrations/0009_auto_20200419_0359.py
|
arnelimperial/scieio
|
279a25766f20d074a3df824c0fbc8b2d8e35f272
|
[
"MIT"
] | 8
|
2021-03-19T01:56:44.000Z
|
2022-03-12T00:24:21.000Z
|
scieio/spectrometry/migrations/0009_auto_20200419_0359.py
|
arnelimperial/scieio
|
279a25766f20d074a3df824c0fbc8b2d8e35f272
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.5 on 2020-04-19 00:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('spectrometry', '0008_auto_20200419_0304'),
]
operations = [
migrations.AlterField(
model_name='gasms',
name='product_code',
field=models.CharField(max_length=15, unique=True),
),
migrations.AlterField(
model_name='liquidms',
name='product_code',
field=models.CharField(max_length=15, unique=True),
),
]
| 24.291667
| 63
| 0.595197
|
783f13e0173ab1561809d43cbe35bf1fdf6966e4
| 4,315
|
py
|
Python
|
edgelm/examples/textless_nlp/gslm/tools/resynthesize_speech.py
|
guotao0628/DeepNet
|
1ae74d8b44d715bf67c7d64a8efafff4b7c7937a
|
[
"MIT"
] | 1
|
2021-11-07T00:30:05.000Z
|
2021-11-07T00:30:05.000Z
|
edgelm/examples/textless_nlp/gslm/tools/resynthesize_speech.py
|
guotao0628/DeepNet
|
1ae74d8b44d715bf67c7d64a8efafff4b7c7937a
|
[
"MIT"
] | null | null | null |
edgelm/examples/textless_nlp/gslm/tools/resynthesize_speech.py
|
guotao0628/DeepNet
|
1ae74d8b44d715bf67c7d64a8efafff4b7c7937a
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import gc
import logging
import os
import joblib
import soundfile as sf
import torch
from examples.textless_nlp.gslm.speech2unit.pretrained.utils import get_feature_reader
from examples.textless_nlp.gslm.unit2speech.tts_data import TacotronInputDataset
from examples.textless_nlp.gslm.unit2speech.utils import (
load_tacotron,
load_waveglow,
synthesize_audio,
)
def get_logger():
log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)
logger = logging.getLogger(__name__)
return logger
def get_parser():
parser = argparse.ArgumentParser(description="GSLM U2S tool")
parser.add_argument(
"--feature_type",
type=str,
choices=["logmel", "hubert", "w2v2", "cpc"],
default=None,
required=True,
help="Acoustic feature type",
)
parser.add_argument(
"--acoustic_model_path",
type=str,
help="Pretrained acoustic model checkpoint",
)
parser.add_argument("--layer", type=int, help="Layer of acoustic model")
parser.add_argument(
"--kmeans_model_path",
type=str,
required=True,
help="K-means model file path to use for inference",
)
parser.add_argument(
"--tts_model_path",
type=str,
help="TTS model file path to use for inference",
)
parser.add_argument(
"--code_dict_path",
type=str,
help="Code dict file path to use for inference",
)
parser.add_argument(
"--waveglow_path",
type=str,
help="Waveglow (vocoder) model file path to use for inference",
)
parser.add_argument("--max_decoder_steps", type=int, default=2000)
parser.add_argument("--denoiser_strength", type=float, default=0.1)
return parser
################################################
def main(args, logger):
# Acoustic Model
logger.info(f"Loading acoustic model from {args.tts_model_path}...")
feature_reader_cls = get_feature_reader(args.feature_type)
reader = feature_reader_cls(
checkpoint_path=args.acoustic_model_path, layer=args.layer
)
# K-means Model
logger.info(f"Loading K-means model from {args.kmeans_model_path} ...")
kmeans_model = joblib.load(open(args.kmeans_model_path, "rb"))
kmeans_model.verbose = False
# TTS Model
logger.info(f"Loading TTS model from {args.tts_model_path}...")
tacotron_model, sample_rate, hparams = load_tacotron(
tacotron_model_path=args.tts_model_path,
max_decoder_steps=args.max_decoder_steps,
)
# Waveglow Model
logger.info(f"Loading Waveglow model from {args.waveglow_path}...")
waveglow, denoiser = load_waveglow(waveglow_path=args.waveglow_path)
# Dataset
if not os.path.exists(hparams.code_dict):
hparams.code_dict = args.code_dict_path
tts_dataset = TacotronInputDataset(hparams)
iters = 0
while True:
in_file_path = input("Input: Enter the full file path of audio file...\n")
out_file_path = input("Output: Enter the full file path of audio file...\n")
feats = reader.get_feats(in_file_path).cpu().numpy()
iters += 1
if iters == 1000:
gc.collect()
torch.cuda.empty_cache()
quantized_units = kmeans_model.predict(feats)
quantized_units_str = " ".join(map(str, quantized_units))
tts_input = tts_dataset.get_tensor(quantized_units_str)
mel, aud, aud_dn, has_eos = synthesize_audio(
tacotron_model,
waveglow,
denoiser,
tts_input.unsqueeze(0),
strength=args.denoiser_strength,
)
sf.write(f"{out_file_path}", aud_dn[0].cpu().float().numpy(), sample_rate)
logger.info("Resynthesis done!\n")
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
logger = get_logger()
logger.info(args)
main(args, logger)
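# A hypothetical invocation sketch (all paths below are placeholders):
#
#   python resynthesize_speech.py \
#       --feature_type hubert \
#       --layer 6 \
#       --acoustic_model_path /path/to/acoustic_checkpoint.pt \
#       --kmeans_model_path /path/to/kmeans.bin \
#       --tts_model_path /path/to/tacotron_checkpoint.pt \
#       --code_dict_path /path/to/code_dict.txt \
#       --waveglow_path /path/to/waveglow.pt
#
# The script then loops, prompting for an input and an output audio path, and
# writes each resynthesized waveform to the requested output file.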
| 32.443609
| 87
| 0.638239
|
e412a783a8f40fa0db336b1df7cd0787e70b688a
| 2,448
|
py
|
Python
|
src/python/src/proteus/websocket.py
|
Xilinx/inference-server
|
7477b7dc420ce4cd0d7e1d9914b71898e97d6814
|
[
"Apache-2.0"
] | 4
|
2021-11-03T21:32:55.000Z
|
2022-02-17T17:13:16.000Z
|
src/python/src/proteus/websocket.py
|
Xilinx/inference-server
|
7477b7dc420ce4cd0d7e1d9914b71898e97d6814
|
[
"Apache-2.0"
] | null | null | null |
src/python/src/proteus/websocket.py
|
Xilinx/inference-server
|
7477b7dc420ce4cd0d7e1d9914b71898e97d6814
|
[
"Apache-2.0"
] | 2
|
2022-03-05T20:01:33.000Z
|
2022-03-25T06:00:35.000Z
|
# Copyright 2021 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
import json
import struct
import websocket
from .predict_api import InferenceRequest
from . import exceptions
class WebsocketOpcodes(Enum):
continuation = 0
text = 1
binary = 2
close = 8
ping = 9
pong = 10
class Client:
def __init__(self, address):
self.ws_addr = "ws://" + address
self.wss_addr = "wss://" + address
self.ws = websocket.WebSocket()
def is_connected(self):
return self.ws.connected
def connect(self, endpoint):
url = self.ws_addr + endpoint
try:
self.ws.connect(url)
except websocket.WebSocketBadStatusException:
raise exceptions.ConnectionError(
f"Connecting to {url} over WS failed. Bad status returned."
)
def infer(self, request):
if not self.ws.connected:
self.connect("/models/infer")
if isinstance(request, InferenceRequest):
request = request.asdict()
self.ws.send(json.dumps(request))
def recv(self):
if self.ws.connected:
resp_opcode, msg = self.ws.recv_data()
# https://websocket-client.readthedocs.io/en/latest/examples.html#receiving-connection-close-status-codes
if resp_opcode == WebsocketOpcodes.close.value:
return int(struct.unpack("!H", msg[0:2])[0])
elif resp_opcode == WebsocketOpcodes.text.value:
return json.loads(msg)
else:
raise exceptions.BadResponseError("Unknown response type in websocket.")
else:
raise exceptions.ConnectionError(
"Recv over websocket failed. Websocket not connected."
)
def close(self):
try:
self.ws.close()
except websocket.WebSocketConnectionClosedException:
pass
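def _example_infer_round_trip(request):
    """A hypothetical sketch of one inference round trip using Client.
    Assumes a server reachable at 127.0.0.1:8998 (placeholder address) and a
    prepared InferenceRequest (or plain dict) passed in as `request`.
    """
    client = Client('127.0.0.1:8998')
    client.infer(request)      # connects to ws://<address>/models/infer on first use
    response = client.recv()   # parsed JSON dict, or an integer close status code
    client.close()
    return response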
| 30.987342
| 117
| 0.64134
|
0a20363a0ef22f982f1e97f54f02099ab9e4b94a
| 516
|
py
|
Python
|
virtual/lib/python3.6/site-packages/demo/demoapp/models.py
|
igihozo-stella/smart-parking
|
92c5dcd3eb08b8fccfddd34bb3291a240c563ec8
|
[
"MIT"
] | null | null | null |
virtual/lib/python3.6/site-packages/demo/demoapp/models.py
|
igihozo-stella/smart-parking
|
92c5dcd3eb08b8fccfddd34bb3291a240c563ec8
|
[
"MIT"
] | null | null | null |
virtual/lib/python3.6/site-packages/demo/demoapp/models.py
|
igihozo-stella/smart-parking
|
92c5dcd3eb08b8fccfddd34bb3291a240c563ec8
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils.translation import ugettext_lazy as _
class MyObject(models.Model):
CHOICES = (
('0', _('Meters')),
('1', _('Milimeters')),
('2', _('Centimeters')),
('3', _('Liters')),
('4', _('Mililiters')),
('5', _('Unit'))
)
name = models.CharField(max_length=200)
quantity = models.FloatField()
measurement_unit = models.CharField(max_length=2, choices=CHOICES)
def __str__(self):
return self.name
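# A hypothetical usage sketch (assumes a configured Django project with this
# app's migrations applied); the field values are placeholders.
#
#   obj = MyObject.objects.create(name='Water', quantity=1.5, measurement_unit='3')
#   obj.get_measurement_unit_display()  # -> 'Liters'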
| 25.8
| 70
| 0.585271
|
d40ac2adf19626e508078dee5e40f0af1c3fcb1f
| 26,689
|
py
|
Python
|
sensortoolkit/plotting/_performance_metrics.py
|
USEPA/sensortoolkit
|
a9da32fd4df492154c6e4cc570011d14e933ee83
|
[
"MIT"
] | 2
|
2022-02-25T21:59:04.000Z
|
2022-03-01T19:37:38.000Z
|
sensortoolkit/plotting/_performance_metrics.py
|
USEPA/sensortoolkit
|
a9da32fd4df492154c6e4cc570011d14e933ee83
|
[
"MIT"
] | null | null | null |
sensortoolkit/plotting/_performance_metrics.py
|
USEPA/sensortoolkit
|
a9da32fd4df492154c6e4cc570011d14e933ee83
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""This module contains a method for displaying air sensor performance using
metrics and target values recommended by U.S. EPA for the evaluation of sensors
measuring either PM2.5 or O3.
================================================================================
@Author:
| Samuel Frederick, NSSC Contractor (ORAU)
| U.S. EPA / ORD / CEMM / AMCD / SFSB
Created:
Mon Jan 27 08:49:12 2020
Last Updated:
Wed Jul 28 14:24:17 2021
"""
import os
import sys
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
import seaborn as sns
from sensortoolkit.datetime_utils import get_todays_date
from sensortoolkit.param import Parameter
register_matplotlib_converters()
def performance_metrics(stats_df, deploy_dict, param=None,
param_averaging=None,font_size=12, path=None,
sensor_name=None, write_to_file=True, **kwargs):
"""Display performance metric results via a set of adjacent subplots
corresponding to each metric. Results are displayed as either dots (if
the number of sensors is less than four) or via boxplots (number of sensors
exceeds 3). Target ranges are indicated by gray shaded regions, and target
goals are indicated by dark gray lines.
Args:
stats_df (pandas dataframe):
A dataframe containing regression statistics (sensor vs. FRM/FEM
reference) at averaging intervals corresponding to the
``param_averaging`` attribute.
deploy_dict (dict):
A dictionary containing descriptive statistics and textual
information about the deployment (testing agency, site, time
period, etc.), sensors tested, and site conditions during the
evaluation.
param (str, optional):
Parameter name to evaluate. Defaults to None.
param_averaging (str, optional):
The measurement averaging intervals commonly utilized for
analyzing data corresponding the selected parameter. Defaults
to None.
font_size (int, optional):
The font size for text displayed in the figure. Defaults to 12.
path (str, optional):
The full directory path to the ``/figures`` subfolder housed
within the user's project path. Defaults to None.
sensor_name (str, optional):
The make and model of the air sensor for which the performance
evaluation figure is being generated. Defaults to None.
write_to_file (bool, optional):
If true, the figure will be written to the /figures/[param] sensor
subdirectory (where 'param' is the name of the parameter being
evaluated). Defaults to True.
**Keyword Arguments:**
:param fill_color:
Tuple with color hex code(s) for the fill color assigned to 1-hour and
24-hour metric value markers. Defaults to ``('#80c5c9', '#4ea1c0')``,
which are light and dark teal hues.
:type fill_color: Two-element tuple
:param str marker:
The shape of the plotting marker for metric values. Matplotlib maintains
a `list of markers <https://matplotlib.org/stable/api/markers_api.html?highlight=marker#module-matplotlib.markers>`_.
Defaults to 'o' (circle).
:param marker_size:
Assign the marker size in points. Defaults to 7.
:type marker_size: int or float
:param marker_border_width:
Set the width of the border surrounding each marker. Defaults to 1.
:type marker_border_width: int or float
:param str mean_marker:
(# sensors > 4 only) The marker indicating the mean value of metric
values. Defaults to ``'d'`` (diamond marker).
:param figure_width:
        The width of the figure in inches. Defaults to 15.7.
:type figure_width: int or float
:param figure_height:
The height of the figure in inches. Defaults to 3.9.
:type figure_height: int or float
:param R^2_ylims:
        The y-limits (ymin, ymax) for the metric subplot.
:type R^2_ylims: Two-element tuple of floats
:param Slope_ylims:
The y-limits (ymin, ymax) for the metric subplot.
:type Slope_ylims: Two-element tuple of floats
:param Intercept_ylims:
The y-limits (ymin, ymax) for the metric subplot.
:type Intercept_ylims: Two-element tuple of floats
:param CV_ylims:
The y-limits (ymin, ymax) for the metric subplot.
:type CV_ylims: Two-element tuple of floats
:param RMSE_ylims:
The y-limits (ymin, ymax) for the metric subplot.
:type RMSE_ylims: Two-element tuple of floats
:param NRMSE_ylims:
The y-limits (ymin, ymax) for the metric subplot.
:type NRMSE_ylims: Two-element tuple of floats
:param SD_ylims:
The y-limits (ymin, ymax) for the metric subplot.
:type SD_ylims: Two-element tuple of floats
:param R^2_hline_dims:
Dimensions for the target goal horizontal line. Tuple containing the
y-coordinate of the target value, x-min (leftmost) coordinate for
drawing horizontal line, and the x-max (rightmost) coordinate for
drawing horizontal line).
:type R^2_hline_dims: Three-element tuple
:param Slope_hline_dims:
Dimensions for the target goal horizontal line. Tuple containing the
y-coordinate of the target value, x-min (leftmost) coordinate for
drawing horizontal line, and the x-max (rightmost) coordinate for
drawing horizontal line).
:type Slope_hline_dims: Three-element tuple
:param Intercept_hline_dims:
Dimensions for the target goal horizontal line. Tuple containing the
y-coordinate of the target value, x-min (leftmost) coordinate for
drawing horizontal line, and the x-max (rightmost) coordinate for
drawing horizontal line).
:type Intercept_hline_dims: Three-element tuple
:param CV_hline_dims:
Dimensions for the target goal horizontal line. Tuple containing the
y-coordinate of the target value, x-min (leftmost) coordinate for
drawing horizontal line, and the x-max (rightmost) coordinate for
drawing horizontal line).
:type CV_hline_dims: Three-element tuple
:param RMSE_hline_dims:
Dimensions for the target goal horizontal line. Tuple containing the
y-coordinate of the target value, x-min (leftmost) coordinate for
drawing horizontal line, and the x-max (rightmost) coordinate for
drawing horizontal line).
:type RMSE_hline_dims: Three-element tuple
:param NRMSE_hline_dims:
Dimensions for the target goal horizontal line. Tuple containing the
y-coordinate of the target value, x-min (leftmost) coordinate for
drawing horizontal line, and the x-max (rightmost) coordinate for
drawing horizontal line).
:type NRMSE_hline_dims: Three-element tuple
:param SD_hline_dims:
Dimensions for the target goal horizontal line. Tuple containing the
y-coordinate of the target value, x-min (leftmost) coordinate for
drawing horizontal line, and the x-max (rightmost) coordinate for
drawing horizontal line).
:type SD_hline_dims: Three-element tuple
:param R^2_box_dims:
Dimensions for a box indicating the metric target range. Four element
tuple containing the x-min (left-most coordinate of the box), y-min
(bottom-most coordinate of the target range box), x-range (the height
of the box, or the difference between the x-min coordinate position
and the x-max coordinate position), and the y-range (the width of the
box, or the difference between the y-min coordinate position and the
y-max coordinate position).
:type R^2_box_dims: Four-element tuple
:param Slope_box_dims:
Dimensions for a box indicating the metric target range. Four element
tuple containing the x-min (left-most coordinate of the box), y-min
(bottom-most coordinate of the target range box), x-range (the height
of the box, or the difference between the x-min coordinate position
and the x-max coordinate position), and the y-range (the width of the
box, or the difference between the y-min coordinate position and the
y-max coordinate position).
:type Slope_box_dims: Four-element tuple
:param Intercept_box_dims:
Dimensions for a box indicating the metric target range. Four element
tuple containing the x-min (left-most coordinate of the box), y-min
(bottom-most coordinate of the target range box), x-range (the height
of the box, or the difference between the x-min coordinate position
and the x-max coordinate position), and the y-range (the width of the
box, or the difference between the y-min coordinate position and the
y-max coordinate position).
:type Intercept_box_dims: Four-element tuple
:param CV_box_dims:
Dimensions for a box indicating the metric target range. Four element
tuple containing the x-min (left-most coordinate of the box), y-min
(bottom-most coordinate of the target range box), x-range (the height
of the box, or the difference between the x-min coordinate position
and the x-max coordinate position), and the y-range (the width of the
box, or the difference between the y-min coordinate position and the
y-max coordinate position).
:type CV_box_dims: Four-element tuple
:param RMSE_box_dims:
Dimensions for a box indicating the metric target range. Four element
tuple containing the x-min (left-most coordinate of the box), y-min
(bottom-most coordinate of the target range box), x-range (the height
of the box, or the difference between the x-min coordinate position
and the x-max coordinate position), and the y-range (the width of the
box, or the difference between the y-min coordinate position and the
y-max coordinate position).
:type RMSE_box_dims: Four-element tuple
:param NRMSE_box_dims:
Dimensions for a box indicating the metric target range. Four element
tuple containing the x-min (left-most coordinate of the box), y-min
(bottom-most coordinate of the target range box), x-range (the height
of the box, or the difference between the x-min coordinate position
and the x-max coordinate position), and the y-range (the width of the
box, or the difference between the y-min coordinate position and the
y-max coordinate position).
:type NRMSE_box_dims: Four-element tuple
:param SD_box_dims:
Dimensions for a box indicating the metric target range. Four element
tuple containing the x-min (left-most coordinate of the box), y-min
(bottom-most coordinate of the target range box), x-range (the height
of the box, or the difference between the x-min coordinate position
and the x-max coordinate position), and the y-range (the width of the
box, or the difference between the y-min coordinate position and the
y-max coordinate position).
:type SD_box_dims: Four-element tuple
:param str hline_color:
Set the color of horizontal lines indicating the target value for metric
subplots. Defaults to #8b8b8b (light gray).
:param str box_facecolor:
Set the color of boxes indicating the target range for each metric
subplot. Defaults to #8b8b8b (light gray).
:param font_scale:
Set the size and scale of the font elements in the figure.
:type font_scale: int or str
:param float fig_wspace:
Modify the width of padding between subplots. Passed to
``Matplotlib.subplots.subplots_adjust()`` ``'wspace'`` argument.
:param float fig_hspace:
Modify the height of padding between subplots. Passed to
``Matplotlib.subplots.subplots_adjust()`` ``'hspace'`` argument.
:param float fig_left:
Modify the left-most bounds of the figure. Passed to
``Matplotlib.subplots.subplots_adjust()`` ``'left'`` argument.
:param float fig_right:
Modify the right-most bounds of the figure. Passed to
``Matplotlib.subplots.subplots_adjust()`` ``'right'`` argument.
:param float fig_top:
Modify the upper-most bounds of the figure. Passed to
``Matplotlib.subplots.subplots_adjust()`` ``'top'`` argument.
:param float fig_bottom:
Modify the lower-most bounds of the figure. Passed to
``Matplotlib.subplots.subplots_adjust()`` ``'bottom'`` argument.
Returns:
None.
"""
sns.set_style('darkgrid')
param_obj = Parameter(param)
targets = param_obj.PerformanceTargets.get_all_metrics()
if any(metric_info == {} for metric_info in targets.values()):
sys.exit('Performance metrics and target values not set for ' + param)
if len(param_obj.averaging) == 2:
PLOT_XMIN = -0.50
PLOT_XMAX = 1.5
if len(param_obj.averaging) == 1:
PLOT_XMIN = -1
PLOT_XMAX = 1
PLOT_XRANGE = PLOT_XMAX - PLOT_XMIN
plotting_dims = {
# Plot y-limits
# y-min, y-max
'ylims': {'R^2': (-0.02, 1.02)},
# Target goal horizontal line
# (y-coord, x-min, x-max)
'hline_dims': {'R^2': (targets['Linearity']['R^2']['goal'],
PLOT_XMIN,
PLOT_XMAX),
'Slope': (targets['Bias']['Slope']['goal'],
PLOT_XMIN,
PLOT_XMAX),
'Intercept': (targets['Bias']['Intercept']['goal'],
PLOT_XMIN,
PLOT_XMAX),
'CV': (targets['Precision']['CV']['goal'],
PLOT_XMIN,
PLOT_XMAX),
'RMSE': (targets['Error']['RMSE']['goal'],
PLOT_XMIN,
PLOT_XMAX),
'NRMSE': (targets['Error']['NRMSE']['goal'],
PLOT_XMIN,
PLOT_XMAX),
'SD': (targets['Precision']['SD']['goal'],
PLOT_XMIN,
PLOT_XMAX)},
# x-min, y-min, x-range, y-range
'box_dims': {'R^2': (PLOT_XMIN,
targets['Linearity']['R^2']['bounds'][0],
PLOT_XRANGE,
targets['Linearity']['R^2']['bounds'][1] -
targets['Linearity']['R^2']['bounds'][0]),
'Slope': (PLOT_XMIN,
targets['Bias']['Slope']['bounds'][0],
PLOT_XRANGE,
targets['Bias']['Slope']['bounds'][1] -
targets['Bias']['Slope']['bounds'][0]),
'Intercept': (PLOT_XMIN,
targets['Bias']['Intercept']['bounds'][0],
PLOT_XRANGE,
targets['Bias']['Intercept']['bounds'][1] -
targets['Bias']['Intercept']['bounds'][0]),
'CV': (PLOT_XMIN,
targets['Precision']['CV']['bounds'][0],
PLOT_XRANGE,
targets['Precision']['CV']['bounds'][1] -
targets['Precision']['CV']['bounds'][0]),
'RMSE': (PLOT_XMIN,
targets['Error']['RMSE']['bounds'][0],
PLOT_XRANGE,
targets['Error']['RMSE']['bounds'][1] -
targets['Error']['RMSE']['bounds'][0]),
'NRMSE': (PLOT_XMIN,
targets['Error']['NRMSE']['bounds'][0],
PLOT_XRANGE,
targets['Error']['NRMSE']['bounds'][1] -
targets['Error']['NRMSE']['bounds'][0]),
'SD': (PLOT_XMIN,
targets['Precision']['SD']['bounds'][0],
PLOT_XRANGE,
targets['Precision']['SD']['bounds'][1] -
targets['Precision']['SD']['bounds'][0])}
}
remove_keys = {}
for category in targets.keys():
for metric in targets[category].keys():
goal = targets[category][metric]['goal']
if goal is None:
remove_keys[category] = metric
for category in remove_keys:
metric = remove_keys[category]
targets[category].pop(metric)
plotting_dims['hline_dims'].pop(metric)
plotting_dims['box_dims'].pop(metric)
cv_vals = {interval: [] for interval in param_averaging}
std_vals = {interval: [] for interval in param_averaging}
rmse_vals = {interval: [] for interval in param_averaging}
nrmse_vals = {interval: [] for interval in param_averaging}
# Extract metric values into metric dictionaries
for group in deploy_dict['Deployment Groups']:
param_stats = deploy_dict['Deployment Groups'][group][param]
for interval in param_averaging:
cv_vals[interval].append(
param_stats['Precision']['cv_' + interval])
std_vals[interval].append(
param_stats['Precision']['std_' + interval])
rmse_vals[interval].append(
param_stats['Error']['rmse_' + interval])
nrmse_vals[interval].append(
param_stats['Error']['nrmse_' + interval])
# Boxplot fill colors
default_fill = ['#80c5c9', '#4ea1c0']
fill_color = kwargs.get('fill_colors', default_fill)
# Marker properties
marker = kwargs.get('marker', 'o')
marker_size = kwargs.get('marker_size', 7)
marker_border_width = kwargs.get('marker_border_width', 1)
mean_marker = kwargs.get('mean_marker', 'd')
# List of metrics to plot
metrics = []
for category in targets:
metrics.extend(targets[category].keys())
metric_names = ['R$^2$', 'Slope', 'Intercept', 'RMSE',
'NRMSE', 'CV', 'SD']
if 'NRMSE' in remove_keys.values():
metric_names.remove('NRMSE')
fig_width = kwargs.get('figure_width', 15.7)
fig_height = kwargs.get('figure_height', 3.9)
fig, axs = plt.subplots(1, len(metrics), figsize=(fig_width, fig_height))
n_sensors = stats_df.where((stats_df['Sensor Name'].notna()) &
(stats_df['R$^2$'].notna())
).Sensor_Number.nunique()
stats_df = stats_df[['R$^2$', 'Slope', 'Intercept', 'Averaging Interval']]
stats_df = stats_df.where(
stats_df['Averaging Interval'].isin(param_averaging))
for ax_idx, metric_name in enumerate(metric_names):
with sns.plotting_context(context="notebook", font_scale=1):
if metric_name in ['R$^2$', 'Slope', 'Intercept']:
axs[ax_idx].set_title(stats_df.columns[ax_idx],
fontsize=font_size)
if n_sensors > 3:
sns.boxplot(x='Averaging Interval', y=metric_name,
data=stats_df,
order=param_averaging,
ax=axs[ax_idx],
palette=fill_color,
showmeans=True,
meanprops={"marker": mean_marker,
"markerfacecolor": "#8b8b8b",
'markeredgecolor': '#6f6f6f'})
else:
sns.swarmplot(x='Averaging Interval', y=metric_name,
data=stats_df,
order=param_averaging,
ax=axs[ax_idx],
palette=fill_color,
marker=marker,
linewidth=marker_border_width,
size=marker_size)
else:
if metric_name == 'CV':
metric_data = cv_vals
if metric_name == 'SD':
metric_data = std_vals
if metric_name == 'RMSE':
metric_data = rmse_vals
if metric_name == 'NRMSE':
metric_data = nrmse_vals
data_df = pd.DataFrame(metric_data).T.reset_index()
data_df.columns = ['Averaging Interval', metric_name]
sns.stripplot(x='Averaging Interval',
y=metric_name,
data=data_df,
order=param_averaging,
ax=axs[ax_idx],
palette=fill_color,
s=marker_size,
marker=marker,
linewidth=marker_border_width,
jitter=False)
if n_sensors == 1 and metric_name in ['CV', 'SD']:
props = dict(boxstyle='round',
facecolor='lightblue',
alpha=0.5)
                    axs[ax_idx].text(0.5, 0.6, r'N/A$^\dagger$',
color = '#1A315C', fontsize=14,
horizontalalignment='center',
verticalalignment='center',
transform=axs[ax_idx].transAxes,
bbox=props)
boxes = []
if metric_name == 'R$^2$':
dim_key = 'R^2'
lower_lim = None
upper_lim = None
if metric_name == 'Slope':
dim_key = 'Slope'
upper_lim = abs(1.5*stats_df[metric_name]).max()
lower_lim = 1.5*stats_df[metric_name].min()
if upper_lim < 2.0 and upper_lim > 0.25:
upper_lim = 2.0
elif upper_lim < 0.25:
upper_lim = 2.5
if lower_lim > 0:
lower_lim = -1*upper_lim + 2
else:
lower_lim = 1.5*lower_lim
if metric_name == 'Intercept':
dim_key = 'Intercept'
upper_lim = abs(1.5*stats_df[metric_name]).max()
if upper_lim < 10:
upper_lim = 10
lower_lim = -1*upper_lim
metric_name = rf'{metric_name} ({param_obj.units})'
if metric_name == 'CV':
dim_key = 'CV'
lower_lim = 0
upper_lim = 1.5*data_df[metric_name].max()
if upper_lim < 50:
upper_lim = 50
if n_sensors == 1:
upper_lim = 60
metric_name = rf'{metric_name} (%)'
if metric_name == 'RMSE':
dim_key = 'RMSE'
upper_lim = 1.5*data_df[metric_name].max()
lower_lim = 0
if upper_lim < 10:
upper_lim = 10
metric_name = rf'{metric_name} ({param_obj.units})'
if metric_name == 'NRMSE':
dim_key = 'NRMSE'
lower_lim = 0
upper_lim = 1.5*data_df[metric_name].max()
if upper_lim < 50:
upper_lim = 50
metric_name = r'NRMSE ($\%$)'
if metric_name == 'SD':
dim_key = 'SD'
lower_lim = 0
upper_lim = 1.5*data_df[metric_name].max()
metric_name = rf'{metric_name} ({param_obj.units})'
if upper_lim < 10:
upper_lim = 10
if n_sensors == 1:
upper_lim = 10
# Get formatting values
ylims = kwargs.get(dim_key + '_ylims',
plotting_dims['ylims'].get(dim_key, (lower_lim, upper_lim)))
hline_dims = kwargs.get(dim_key + '_hline_dims',
plotting_dims.get('hline_dims').get(dim_key))
box_dims = kwargs.get(dim_key + '_box_dims',
plotting_dims.get('box_dims').get(dim_key))
# Assign to local variables
ymin, ymax = ylims
hline_y, hline_xmin, hline_xmax = hline_dims
rec_x0, rec_y0, rec_xspan, rec_yspan = box_dims
axs[ax_idx].set_xlim(PLOT_XMIN, PLOT_XMAX)
axs[ax_idx].hlines(y=hline_y, xmin=hline_xmin,
xmax=hline_xmax, linewidth=1.5,
color=kwargs.get('hline_color', '#8b8b8b'))
target_rec = Rectangle((rec_x0, rec_y0),
rec_xspan, rec_yspan, color='r')
boxes.append(target_rec)
pc = PatchCollection(boxes, alpha=.3,
facecolor=kwargs.get('box_facecolor',
'#8b8b8b'))
axs[ax_idx].add_collection(pc)
axs[ax_idx].set_title(metric_name, fontsize=font_size)
axs[ax_idx].set_ylim(ymin, ymax)
axs[ax_idx].yaxis.set_label_text('')
plt.tight_layout()
sns.set(font_scale=kwargs.get('font_scale', 1))
fig.subplots_adjust(wspace=kwargs.get('fig_wspace', 0.35),
hspace=kwargs.get('fig_hspace', 0.1),
left=kwargs.get('fig_left', 0.03),
right=kwargs.get('fig_right', 0.97),
top=kwargs.get('fig_top', 0.93),
bottom=kwargs.get('fig_bottom', 0.13))
if write_to_file is True:
todays_date = get_todays_date()
fig_path = os.path.join(path, param,
f'{sensor_name}_regression_boxplot_{param}_{todays_date}.png')
plt.savefig(fig_path, dpi=300)
plt.close()
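# A hypothetical call sketch. `stats_df` and `deploy_dict` are assumed to come
# from the sensortoolkit evaluation workflow; the parameter name, averaging
# intervals, figure path, and sensor name below are placeholders.
#
#   performance_metrics(stats_df,
#                       deploy_dict,
#                       param='PM25',
#                       param_averaging=['1-hour', '24-hour'],
#                       path='/path/to/project/figures',
#                       sensor_name='Example_Make_Model',
#                       CV_box_dims=(-0.5, 0, 2.0, 30),  # x-min, y-min, x-range, y-range
#                       write_to_file=False)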
| 45.857388
| 125
| 0.563303
|
b38a5a44f4869eab63cf93c20f50a34b85c06b21
| 2,323
|
py
|
Python
|
saas/backend/biz/org_sync/department_member.py
|
nannan00/bk-iam-saas
|
217600fa6e5fd466fff9c33c20c4dbd7c69f77d9
|
[
"MIT"
] | 7
|
2021-08-13T03:48:16.000Z
|
2021-12-20T15:31:38.000Z
|
saas/backend/biz/org_sync/department_member.py
|
nannan00/bk-iam-saas
|
217600fa6e5fd466fff9c33c20c4dbd7c69f77d9
|
[
"MIT"
] | 456
|
2021-08-16T02:13:57.000Z
|
2022-03-30T10:02:49.000Z
|
saas/backend/biz/org_sync/department_member.py
|
nannan00/bk-iam-saas
|
217600fa6e5fd466fff9c33c20c4dbd7c69f77d9
|
[
"MIT"
] | 17
|
2021-08-10T04:08:46.000Z
|
2022-03-14T14:24:36.000Z
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making BlueKing-IAM available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from backend.apps.organization.models import DepartmentMember
from backend.component import usermgr
from .base import BaseSyncDBService
class DBDepartmentMemberSyncService(BaseSyncDBService):
"""้จ้จๆๅๅๆญฅๆๅก"""
def __init__(self):
"""ๅๅงๆฐๆฎ"""
# ๅๅงๅๆฐๆฎ
self.new_department_members = usermgr.list_department_profile()
self.old_department_members = list(DepartmentMember.objects.all())
def created_handler(self):
"""DB handling for newly created department members."""
old_department_member_set = {(i.department_id, i.user_id) for i in self.old_department_members}
created_department_members = [
DepartmentMember(department_id=i["department_id"], user_id=i["profile_id"])
for i in self.new_department_members
if (i["department_id"], i["profile_id"]) not in old_department_member_set
]
if not created_department_members:
return
DepartmentMember.objects.bulk_create(created_department_members, batch_size=1000)
def deleted_handler(self):
"""DB handling for deleted department members."""
new_department_member_set = {(i["department_id"], i["profile_id"]) for i in self.new_department_members}
deleted_ids = [
i.id for i in self.old_department_members if (i.department_id, i.user_id) not in new_department_member_set
]
if not deleted_ids:
return
DepartmentMember.objects.filter(id__in=deleted_ids).delete()
def sync_to_db(self):
"""Apply the pending changes to the SaaS DB."""
# Create the newly added department members
self.created_handler()
# Remove the deleted department members
self.deleted_handler()
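# The two handlers above boil down to a set difference between the upstream
# member list and the rows already in the DB. A framework-free sketch of that
# pattern (the sample data below is hypothetical, not taken from this repo):
def _diff_members_sketch():
    new_members = [{"department_id": 1, "profile_id": 10},
                   {"department_id": 1, "profile_id": 11}]
    old_members = [(1, 10), (2, 20)]  # (department_id, user_id) pairs already stored
    old_set = set(old_members)
    new_set = {(m["department_id"], m["profile_id"]) for m in new_members}
    to_create = [m for m in new_members
                 if (m["department_id"], m["profile_id"]) not in old_set]
    to_delete = [pair for pair in old_members if pair not in new_set]
    return to_create, to_delete


assert _diff_members_sketch() == ([{"department_id": 1, "profile_id": 11}], [(2, 20)])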
| 40.051724
| 118
| 0.708567
|
3b647797b6e2cd12a478d45b7b2e61bb9ddb0646
| 4,234
|
py
|
Python
|
azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/availability_set_py3.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 1
|
2018-07-23T08:59:24.000Z
|
2018-07-23T08:59:24.000Z
|
azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/availability_set_py3.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 1
|
2018-11-29T14:46:42.000Z
|
2018-11-29T14:46:42.000Z
|
azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/availability_set_py3.py
|
NMijat1024/azure-sdk-for-python
|
c49e1d6d797dceaca81813cafb1a486d67185182
|
[
"MIT"
] | 1
|
2018-08-28T14:36:47.000Z
|
2018-08-28T14:36:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource_py3 import Resource
class AvailabilitySet(Resource):
"""Specifies information about the availability set that the virtual machine
should be assigned to. Virtual machines specified in the same availability
set are allocated to different nodes to maximize availability. For more
information about availability sets, see [Manage the availability of
virtual
machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
<br><br> For more information on Azure planned maintenance, see [Planned
maintenance for virtual machines in
Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
<br><br> Currently, a VM can only be added to availability set at creation
time. An existing VM cannot be added to an availability set.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Required. Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
:param platform_update_domain_count: Update Domain count.
:type platform_update_domain_count: int
:param platform_fault_domain_count: Fault Domain count.
:type platform_fault_domain_count: int
:param virtual_machines: A list of references to all virtual machines in
the availability set.
:type virtual_machines:
list[~azure.mgmt.compute.v2016_04_30_preview.models.SubResource]
:ivar statuses: The resource status information.
:vartype statuses:
list[~azure.mgmt.compute.v2016_04_30_preview.models.InstanceViewStatus]
:param managed: If the availability set supports managed disks.
:type managed: bool
:param sku: Sku of the availability set
:type sku: ~azure.mgmt.compute.v2016_04_30_preview.models.Sku
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'statuses': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'platform_update_domain_count': {'key': 'properties.platformUpdateDomainCount', 'type': 'int'},
'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResource]'},
'statuses': {'key': 'properties.statuses', 'type': '[InstanceViewStatus]'},
'managed': {'key': 'properties.managed', 'type': 'bool'},
'sku': {'key': 'sku', 'type': 'Sku'},
}
def __init__(self, *, location: str, tags=None, platform_update_domain_count: int=None, platform_fault_domain_count: int=None, virtual_machines=None, managed: bool=None, sku=None, **kwargs) -> None:
super(AvailabilitySet, self).__init__(location=location, tags=tags, **kwargs)
self.platform_update_domain_count = platform_update_domain_count
self.platform_fault_domain_count = platform_fault_domain_count
self.virtual_machines = virtual_machines
self.statuses = None
self.managed = managed
self.sku = sku
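# A minimal construction sketch for the model defined above. It assumes this
# SDK package (azure-mgmt-compute, 2016-04-30-preview models) is installed;
# the parameter values are placeholders, not recommendations.
if __name__ == '__main__':
    example = AvailabilitySet(
        location='westus',               # the only required argument
        platform_update_domain_count=5,
        platform_fault_domain_count=3,
        managed=True,                    # availability set backed by managed disks
    )
    print(example.platform_fault_domain_count)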
| 47.044444
| 202
| 0.671233
|
e7e14ac6f01dc9c75ba48972fb685b075d120cd8
| 2,883
|
py
|
Python
|
examples/GeoSchelling/model.py
|
xyudha/mesa-geo
|
a4e446f1f115055095829d1239c45eb31242eeff
|
[
"Apache-2.0"
] | 74
|
2017-09-20T14:12:01.000Z
|
2022-03-23T10:13:17.000Z
|
examples/GeoSchelling/model.py
|
xyudha/mesa-geo
|
a4e446f1f115055095829d1239c45eb31242eeff
|
[
"Apache-2.0"
] | 33
|
2017-09-21T17:57:09.000Z
|
2022-03-28T10:30:50.000Z
|
examples/GeoSchelling/model.py
|
xyudha/mesa-geo
|
a4e446f1f115055095829d1239c45eb31242eeff
|
[
"Apache-2.0"
] | 32
|
2018-04-16T23:39:02.000Z
|
2022-02-09T20:10:35.000Z
|
from mesa.datacollection import DataCollector
from mesa import Model
from mesa.time import RandomActivation
from mesa_geo.geoagent import GeoAgent, AgentCreator
from mesa_geo import GeoSpace
import random
class SchellingAgent(GeoAgent):
"""Schelling segregation agent."""
def __init__(self, unique_id, model, shape, agent_type=None):
"""Create a new Schelling agent.
Args:
unique_id: Unique identifier for the agent.
agent_type: Indicator for the agent's type (minority=1, majority=0)
"""
super().__init__(unique_id, model, shape)
self.atype = agent_type
def step(self):
"""Advance agent one step."""
similar = 0
different = 0
neighbors = self.model.grid.get_neighbors(self)
if neighbors:
for neighbor in neighbors:
if neighbor.atype is None:
continue
elif neighbor.atype == self.atype:
similar += 1
else:
different += 1
# If unhappy, move:
if similar < different:
# Select an empty region
empties = [a for a in self.model.grid.agents if a.atype is None]
# Switch atypes and add/remove from scheduler
new_region = random.choice(empties)
new_region.atype = self.atype
self.model.schedule.add(new_region)
self.atype = None
self.model.schedule.remove(self)
else:
self.model.happy += 1
def __repr__(self):
return "Agent " + str(self.unique_id)
class SchellingModel(Model):
"""Model class for the Schelling segregation model."""
def __init__(self, density, minority_pc):
self.density = density
self.minority_pc = minority_pc
self.schedule = RandomActivation(self)
self.grid = GeoSpace()
self.happy = 0
self.datacollector = DataCollector({"happy": "happy"})
self.running = True
# Set up the grid with patches for every NUTS region
AC = AgentCreator(SchellingAgent, {"model": self})
agents = AC.from_file("nuts_rg_60M_2013_lvl_2.geojson")
self.grid.add_agents(agents)
# Set up agents
for agent in agents:
if random.random() < self.density:
if random.random() < self.minority_pc:
agent.atype = 1
else:
agent.atype = 0
self.schedule.add(agent)
def step(self):
"""Run one step of the model.
If all agents are happy, halt the model.
"""
self.happy = 0 # Reset counter of happy agents
self.schedule.step()
# self.datacollector.collect(self)
if self.happy == self.schedule.get_agent_count():
self.running = False
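# The heart of SchellingAgent.step() is the similar-vs-different comparison.
# A dependency-free sketch of that decision rule (the neighbour types below
# are made-up values, not output from the model):
def _wants_to_move_sketch(own_type, neighbour_types):
    similar = sum(1 for t in neighbour_types if t == own_type)
    different = sum(1 for t in neighbour_types if t is not None and t != own_type)
    return similar < different


assert _wants_to_move_sketch(1, [1, 0, 0, None]) is True
assert _wants_to_move_sketch(0, [0, 0, 1]) is False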
| 31
| 79
| 0.584807
|
9201de17284cb37602664c6b2af1db9a82dd94ba
| 550
|
py
|
Python
|
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-dev/testappauto207-dev-23520
|
e9d91aa3b146f3fccc5b2b705c39dc0c02756774
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-dev/testappauto207-dev-23520
|
e9d91aa3b146f3fccc5b2b705c39dc0c02756774
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-dev/testappauto207-dev-23520
|
e9d91aa3b146f3fccc5b2b705c39dc0c02756774
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "testappauto207-dev-23520.botics.co"
site_params = {
"name": "TestAppAuto207",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| 21.153846
| 61
| 0.663636
|
0823f368fcc63bcbf42f97cfe47442a7c4c400e0
| 4,437
|
py
|
Python
|
scitbx/rigid_body/proto/tst_free_motion.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 155
|
2016-11-23T12:52:16.000Z
|
2022-03-31T15:35:44.000Z
|
scitbx/rigid_body/proto/tst_free_motion.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 590
|
2016-12-10T11:31:18.000Z
|
2022-03-30T23:10:09.000Z
|
scitbx/rigid_body/proto/tst_free_motion.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 115
|
2016-11-15T08:17:28.000Z
|
2022-02-09T15:30:14.000Z
|
from __future__ import absolute_import, division, print_function
from scitbx.rigid_body.proto import free_motion_reference_impl as fmri
from scitbx.rigid_body.proto import featherstone
import scitbx.math
from scitbx.array_family import flex
from scitbx import matrix
from libtbx.test_utils import approx_equal
from libtbx.utils import format_cpu_times, null_out
import sys
from six.moves import range
def exercise_reference_impl_quick():
sites_cart = fmri.create_triangle_with_center_of_mass_at_origin()
assert approx_equal(flex.vec3_double(sites_cart).mean(), (0,0,0))
inertia1 = fmri.body_inertia(sites_cart=sites_cart)
inertia2 = matrix.sym(sym_mat3=scitbx.math.inertia_tensor(
points=flex.vec3_double(sites_cart), pivot=(0,0,0)))
assert approx_equal(inertia1, inertia2)
#
for use_classical_accel in [False, True]:
sim = fmri.simulation()
assert approx_equal(
[sim.e_pot, sim.e_kin_ang, sim.e_kin_lin, sim.e_kin, sim.e_tot],
[0.64030878777041611,
0.012310594130384761, 0.02835, 0.04066059413038476,
0.68096938190080092])
for i in range(100):
sim.dynamics_step(delta_t=0.01, use_classical_accel=use_classical_accel)
expected = [
[0.028505221929112364,
0.091503230553568404, 0.56329655444242244, 0.65479978499599079,
0.6833050069251031],
[0.053276067541032097,
0.091503230553568404, 0.53805622991666513, 0.62955946047023348,
0.68283552801126557]][int(use_classical_accel)]
assert approx_equal(
[sim.e_pot, sim.e_kin_ang, sim.e_kin_lin, sim.e_kin, sim.e_tot],
expected)
def exercise_reference_impl_long(n_dynamics_steps, out):
sim = fmri.simulation()
e_tots = flex.double([sim.e_tot])
print("i_step, [e_pot, e_kin_ang, e_kin_lin, e_kin, e_tot]", file=out)
def show(i_step):
print(i_step, [sim.e_pot, sim.e_kin_ang, sim.e_kin_lin, sim.e_kin, sim.e_tot], file=out)
out.flush()
n_show = max(1, n_dynamics_steps // 10)
for i_step in range(n_dynamics_steps):
sim.dynamics_step(delta_t=0.001)
e_tots.append(sim.e_tot)
if (i_step % n_show == 0):
show(i_step)
show(n_dynamics_steps)
print(file=out)
print("number of dynamics steps:", n_dynamics_steps, file=out)
print("e_tot start:", e_tots[0], file=out)
print(" final:", e_tots[-1], file=out)
print(" min:", flex.min(e_tots), file=out)
print(" max:", flex.max(e_tots), file=out)
print(" max-min:", flex.max(e_tots) - flex.min(e_tots), file=out)
print(file=out)
out.flush()
class featherstone_system_model(object):
def __init__(model, m, I, J):
model.NB = 1
model.pitch = [J]
model.parent =[-1]
model.Xtree = [matrix.identity(n=6)]
model.I = [featherstone.mcI(m, (0,0,0), I)]
class six_dof_joint_euler_params_featherstone(fmri.six_dof_joint_euler_params):
def Xj_S(O, q):
assert q is None
Xj = featherstone.Xrot(O.E) \
* featherstone.Xtrans(O.r) # RBDA Tab. 4.1 footnote
S = None
return Xj, S
def exercise_featherstone_FDab(out):
def check():
model = featherstone_system_model(
m=sim.m,
I=sim.I_F1,
J=six_dof_joint_euler_params_featherstone(qE=sim.J1.qE, qr=sim.J1.qr))
q = [None] # already stored in J1 as qE and qr
qd = [sim.qd]
tau = None
f_ext = [matrix.col((sim.nc_F1, sim.f_F1)).resolve_partitions()]
grav_accn = [0,0,0]
qdd = featherstone.FDab(model, q, qd, tau, f_ext, grav_accn)
if (i_step % 10 == 0):
print("ang acc 3D:", sim.wd_F1.elems, file=out)
print(" 6D:", qdd[0].elems[:3], file=out)
print(file=out)
print("lin acc 3D:", sim.as_F1.elems, file=out)
print(" 6D:", qdd[0].elems[3:], file=out)
print(file=out)
assert approx_equal(qdd[0].elems[:3], sim.wd_F1)
assert approx_equal(qdd[0].elems[3:], sim.as_F1)
sim = fmri.simulation()
for i_step in range(100):
check()
sim.dynamics_step(delta_t=0.1) # large time step to sample
check() # diverse configurations
def run(args):
assert len(args) in [0,1]
if (len(args) == 0):
n_dynamics_steps = 1
out = null_out()
else:
n_dynamics_steps = max(1, int(args[0]))
out = sys.stdout
#
exercise_reference_impl_quick()
exercise_featherstone_FDab(out=out)
exercise_reference_impl_long(n_dynamics_steps=n_dynamics_steps, out=out)
#
print(format_cpu_times())
if (__name__ == "__main__"):
run(sys.argv[1:])
| 35.214286
| 92
| 0.686049
|
a60b51e09c8c41aaf87510cb6180a3b199799efb
| 104
|
py
|
Python
|
config.py
|
duyng404/burogu
|
3752189dfe655ff003fda1437da358992d74ddbb
|
[
"MIT"
] | null | null | null |
config.py
|
duyng404/burogu
|
3752189dfe655ff003fda1437da358992d74ddbb
|
[
"MIT"
] | null | null | null |
config.py
|
duyng404/burogu
|
3752189dfe655ff003fda1437da358992d74ddbb
|
[
"MIT"
] | null | null | null |
WTF_CSRF_ENABLED=True
SECRET_KEY='1a2s3d4f5g6'
CONTENT_DIR='content'
INTRO_LENGTH=50 #words
PER_PAGE=10
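# These settings are typically loaded into a Flask app with from_object();
# a minimal commented sketch (the app module below is hypothetical, not part
# of this repo snapshot):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object('config')
#   assert app.config['PER_PAGE'] == 10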
| 17.333333
| 24
| 0.846154
|
d79ad00d003b2c28bafb7f3fc06652c46fbdc62e
| 14,195
|
py
|
Python
|
flask/ctx.py
|
Aaron1992/flask
|
84efebd6be3809388160e8654d0fccc9ce0b2395
|
[
"BSD-3-Clause"
] | 1
|
2015-11-05T19:46:16.000Z
|
2015-11-05T19:46:16.000Z
|
flask/ctx.py
|
Aaron1992/flask
|
84efebd6be3809388160e8654d0fccc9ce0b2395
|
[
"BSD-3-Clause"
] | null | null | null |
flask/ctx.py
|
Aaron1992/flask
|
84efebd6be3809388160e8654d0fccc9ce0b2395
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
flask.ctx
~~~~~~~~~
Implements the objects required to keep the context.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import sys
from functools import update_wrapper
from werkzeug.exceptions import HTTPException
from .globals import _request_ctx_stack, _app_ctx_stack
from .signals import appcontext_pushed, appcontext_popped
from ._compat import BROKEN_PYPY_CTXMGR_EXIT, reraise
class _AppCtxGlobals(object):
"""A plain object."""
def get(self, name, default=None):
return self.__dict__.get(name, default)
def __contains__(self, item):
return item in self.__dict__
def __iter__(self):
return iter(self.__dict__)
def __repr__(self):
top = _app_ctx_stack.top
if top is not None:
return '<flask.g of %r>' % top.app.name
return object.__repr__(self)
def after_this_request(f):
"""Executes a function after this request. This is useful to modify
response objects. The function is passed the response object and has
to return the same or a new one.
Example::
@app.route('/')
def index():
@after_this_request
def add_header(response):
response.headers['X-Foo'] = 'Parachute'
return response
return 'Hello World!'
This is more useful if a function other than the view function wants to
modify a response. For instance think of a decorator that wants to add
some headers without converting the return value into a response object.
.. versionadded:: 0.9
"""
_request_ctx_stack.top._after_request_functions.append(f)
return f
def copy_current_request_context(f):
"""A helper function that decorates a function to retain the current
request context. This is useful when working with greenlets. The moment
the function is decorated a copy of the request context is created and
then pushed when the function is called.
Example::
import gevent
from flask import copy_current_request_context
@app.route('/')
def index():
@copy_current_request_context
def do_some_work():
# do some work here, it can access flask.request like you
# would otherwise in the view function.
...
gevent.spawn(do_some_work)
return 'Regular response'
.. versionadded:: 0.10
"""
top = _request_ctx_stack.top
if top is None:
raise RuntimeError('This decorator can only be used at local scopes '
'when a request context is on the stack. For instance within '
'view functions.')
reqctx = top.copy()
def wrapper(*args, **kwargs):
with reqctx:
return f(*args, **kwargs)
return update_wrapper(wrapper, f)
def has_request_context():
"""If you have code that wants to test if a request context is there or
not this function can be used. For instance, you may want to take advantage
of request information if the request object is available, but fail
silently if it is unavailable.
::
class User(db.Model):
def __init__(self, username, remote_addr=None):
self.username = username
if remote_addr is None and has_request_context():
remote_addr = request.remote_addr
self.remote_addr = remote_addr
Alternatively you can also just test any of the context bound objects
(such as :class:`request` or :class:`g` for truthiness)::
class User(db.Model):
def __init__(self, username, remote_addr=None):
self.username = username
if remote_addr is None and request:
remote_addr = request.remote_addr
self.remote_addr = remote_addr
.. versionadded:: 0.7
"""
return _request_ctx_stack.top is not None
def has_app_context():
"""Works like :func:`has_request_context` but for the application
context. You can also just do a boolean check on the
:data:`current_app` object instead.
.. versionadded:: 0.9
"""
return _app_ctx_stack.top is not None
class AppContext(object):
"""The application context binds an application object implicitly
to the current thread or greenlet, similar to how the
:class:`RequestContext` binds request information. The application
context is also implicitly created if a request context is created
but the application is not on top of the individual application
context.
"""
def __init__(self, app):
self.app = app
self.url_adapter = app.create_url_adapter(None)
self.g = app.app_ctx_globals_class()
# Like request context, app contexts can be pushed multiple times
# but there a basic "refcount" is enough to track them.
self._refcnt = 0
def push(self):
"""Binds the app context to the current context."""
self._refcnt += 1
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
_app_ctx_stack.push(self)
appcontext_pushed.send(self.app)
def pop(self, exc=None):
"""Pops the app context."""
self._refcnt -= 1
if self._refcnt <= 0:
if exc is None:
exc = sys.exc_info()[1]
self.app.do_teardown_appcontext(exc)
rv = _app_ctx_stack.pop()
assert rv is self, 'Popped wrong app context. (%r instead of %r)' \
% (rv, self)
appcontext_popped.send(self.app)
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_value, tb):
self.pop(exc_value)
if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None:
reraise(exc_type, exc_value, tb)
class RequestContext(object):
"""The request context contains all request relevant information. It is
created at the beginning of the request and pushed to the
`_request_ctx_stack` and removed at the end of it. It will create the
URL adapter and request object for the WSGI environment provided.
Do not attempt to use this class directly, instead use
:meth:`~flask.Flask.test_request_context` and
:meth:`~flask.Flask.request_context` to create this object.
When the request context is popped, it will evaluate all the
functions registered on the application for teardown execution
(:meth:`~flask.Flask.teardown_request`).
The request context is automatically popped at the end of the request
for you. In debug mode the request context is kept around if
exceptions happen so that interactive debuggers have a chance to
introspect the data. With 0.4 this can also be forced for requests
that did not fail and outside of `DEBUG` mode. By setting
``'flask._preserve_context'`` to `True` on the WSGI environment the
context will not pop itself at the end of the request. This is used by
the :meth:`~flask.Flask.test_client` for example to implement the
deferred cleanup functionality.
You might find this helpful for unittests where you need the
information from the context local around for a little longer. Make
sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in
that situation, otherwise your unittests will leak memory.
"""
def __init__(self, app, environ, request=None):
self.app = app
if request is None:
request = app.request_class(environ)
self.request = request
self.url_adapter = app.create_url_adapter(self.request)
self.flashes = None
self.session = None
# Request contexts can be pushed multiple times and interleaved with
# other request contexts. Now only if the last level is popped we
# get rid of them. Additionally if an application context is missing
# one is created implicitly so for each level we add this information
self._implicit_app_ctx_stack = []
# indicator if the context was preserved. Next time another context
# is pushed the preserved context is popped.
self.preserved = False
# remembers the exception for pop if there is one in case the context
# preservation kicks in.
self._preserved_exc = None
# Functions that should be executed after the request on the response
# object. These will be called before the regular "after_request"
# functions.
self._after_request_functions = []
self.match_request()
def _get_g(self):
return _app_ctx_stack.top.g
def _set_g(self, value):
_app_ctx_stack.top.g = value
g = property(_get_g, _set_g)
del _get_g, _set_g
def copy(self):
"""Creates a copy of this request context with the same request object.
This can be used to move a request context to a different greenlet.
Because the actual request object is the same this cannot be used to
move a request context to a different thread unless access to the
request object is locked.
.. versionadded:: 0.10
"""
return self.__class__(self.app,
environ=self.request.environ,
request=self.request
)
def match_request(self):
"""Can be overridden by a subclass to hook into the matching
of the request.
"""
try:
url_rule, self.request.view_args = \
self.url_adapter.match(return_rule=True)
self.request.url_rule = url_rule
except HTTPException as e:
self.request.routing_exception = e
def push(self):
"""Binds the request context to the current context."""
# If an exception occurs in debug mode or if context preservation is
# activated under exception situations exactly one context stays
# on the stack. The rationale is that you want to access that
# information under debug situations. However if someone forgets to
# pop that context again we want to make sure that on the next push
# it's invalidated, otherwise we run at risk that something leaks
# memory. This is usually only a problem in test suites since this
# functionality is not active in production environments.
top = _request_ctx_stack.top
if top is not None and top.preserved:
top.pop(top._preserved_exc)
# Before we push the request context we have to ensure that there
# is an application context.
app_ctx = _app_ctx_stack.top
if app_ctx is None or app_ctx.app != self.app:
app_ctx = self.app.app_context()
app_ctx.push()
self._implicit_app_ctx_stack.append(app_ctx)
else:
self._implicit_app_ctx_stack.append(None)
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
_request_ctx_stack.push(self)
# Open the session at the moment that the request context is
# available. This allows a custom open_session method to use the
# request context (e.g. code that accesses database information
# stored on `g` instead of the appcontext).
self.session = self.app.open_session(self.request)
if self.session is None:
self.session = self.app.make_null_session()
def pop(self, exc=None):
"""Pops the request context and unbinds it by doing that. This will
also trigger the execution of functions registered by the
:meth:`~flask.Flask.teardown_request` decorator.
.. versionchanged:: 0.9
Added the `exc` argument.
"""
app_ctx = self._implicit_app_ctx_stack.pop()
clear_request = False
if not self._implicit_app_ctx_stack:
self.preserved = False
self._preserved_exc = None
if exc is None:
exc = sys.exc_info()[1]
self.app.do_teardown_request(exc)
# If this interpreter supports clearing the exception information
# we do that now. This will only go into effect on Python 2.x,
# on 3.x it disappears automatically at the end of the exception
# stack.
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
request_close = getattr(self.request, 'close', None)
if request_close is not None:
request_close()
clear_request = True
rv = _request_ctx_stack.pop()
assert rv is self, 'Popped wrong request context. (%r instead of %r)' \
% (rv, self)
# get rid of circular dependencies at the end of the request
# so that we don't require the GC to be active.
if clear_request:
rv.request.environ['werkzeug.request'] = None
# Get rid of the app as well if necessary.
if app_ctx is not None:
app_ctx.pop(exc)
def auto_pop(self, exc):
if self.request.environ.get('flask._preserve_context') or \
(exc is not None and self.app.preserve_context_on_exception):
self.preserved = True
self._preserved_exc = exc
else:
self.pop(exc)
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_value, tb):
# do not pop the request stack if we are in debug mode and an
# exception happened. This will allow the debugger to still
# access the request object in the interactive shell. Furthermore
# the context can be force kept alive for the test client.
# See flask.testing for how this works.
self.auto_pop(exc_value)
if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None:
reraise(exc_type, exc_value, tb)
def __repr__(self):
return '<%s \'%s\' [%s] of %s>' % (
self.__class__.__name__,
self.request.url,
self.request.method,
self.app.name,
)
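# A short usage sketch of the two context types implemented above, kept as
# comments since it belongs in application code rather than in this module
# (it assumes a Flask installation and uses only public helpers):
#
#   from flask import Flask, g, request, has_request_context, has_app_context
#
#   app = Flask(__name__)
#
#   with app.app_context():                    # pushes an AppContext
#       g.answer = 42
#       assert has_app_context()
#
#   with app.test_request_context('/?q=1'):    # pushes a RequestContext
#       assert has_request_context()
#       assert request.args['q'] == '1'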
| 35.84596
| 80
| 0.643607
|
e0eb9878780e1e20b1bc4759f18a4effd6dc6e2b
| 880
|
py
|
Python
|
projects/700c/web/views/status.py
|
erik/sketches
|
0a454ada58dee6db576e93cb2216dd750290329e
|
[
"MIT"
] | 1
|
2020-02-11T06:00:11.000Z
|
2020-02-11T06:00:11.000Z
|
projects/700c/web/views/status.py
|
erik/sketches
|
0a454ada58dee6db576e93cb2216dd750290329e
|
[
"MIT"
] | 1
|
2017-09-23T19:41:29.000Z
|
2017-09-25T05:12:38.000Z
|
projects/700c/web/views/status.py
|
erik/sketches
|
0a454ada58dee6db576e93cb2216dd750290329e
|
[
"MIT"
] | null | null | null |
import json
import flask
from flask import request
from web.models import Location, Status, Trip
from web.views.util import requires_login, lookup_request_user
mod = flask.Blueprint('status', __name__)
mod.before_request(lookup_request_user)
@mod.route('/create', methods=['POST'])
@requires_login
def create():
# TODO: real form parsing
location = request.form['location']
if location != '':
location = Location(**json.loads(location))
else:
location = None
active_trip = Trip.get_active(flask.g.user.id)
Status.create(
title=request.form['title'],
body=request.form['body'],
location=location,
user_id=flask.g.user.id,
trip_id=(active_trip and active_trip.id))
return flask.redirect(flask.url_for('general.index'))
@mod.route('/<int:id>/edit', methods=['POST'])
def edit(id):
pass
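# Note on `trip_id=(active_trip and active_trip.id)` above: `and` short-circuits,
# so the expression yields None when there is no active trip and the trip's id
# otherwise. A tiny self-contained illustration (the _Trip class is a stand-in):
class _Trip:
    id = 7


_active = None
assert (_active and _active.id) is None
assert (_Trip() and _Trip().id) == 7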
| 22.564103
| 62
| 0.676136
|
b9ee7fabe0713a1a890f8cee29fb55987e4e1efd
| 3,775
|
py
|
Python
|
datalad_osf/osfclient/osfclient/tests/test_utils.py
|
adswa/datalad-osf-2
|
25988f898ffc6f489c0855933136f39f79cf8c65
|
[
"BSD-3-Clause"
] | 80
|
2017-10-24T05:06:00.000Z
|
2022-02-20T18:44:17.000Z
|
datalad_osf/osfclient/osfclient/tests/test_utils.py
|
adswa/datalad-osf-2
|
25988f898ffc6f489c0855933136f39f79cf8c65
|
[
"BSD-3-Clause"
] | 133
|
2017-03-07T22:36:54.000Z
|
2017-10-24T04:43:57.000Z
|
datalad_osf/osfclient/osfclient/tests/test_utils.py
|
adswa/datalad-osf-2
|
25988f898ffc6f489c0855933136f39f79cf8c65
|
[
"BSD-3-Clause"
] | 37
|
2017-10-26T03:34:17.000Z
|
2021-11-17T05:24:10.000Z
|
from mock import call, patch, Mock
from osfclient.utils import file_empty
from osfclient.utils import norm_remote_path
from osfclient.utils import makedirs
from osfclient.utils import split_storage
def test_default_storage():
store, path = split_storage('foo/bar/baz')
assert store == 'osfstorage'
assert path == 'foo/bar/baz'
store, path = split_storage('/foo/bar/baz')
assert store == 'osfstorage'
assert path == 'foo/bar/baz'
def test_split_storage():
store, path = split_storage('osfstorage/foo/bar/baz')
assert store == 'osfstorage'
assert path == 'foo/bar/baz'
store, path = split_storage('github/foo/bar/baz')
assert store == 'github'
assert path == 'foo/bar/baz'
store, path = split_storage('/github/foo/bar/baz')
assert store == 'github'
assert path == 'foo/bar/baz'
store, path = split_storage('figshare/foo/bar/baz')
assert store == 'figshare'
assert path == 'foo/bar/baz'
store, path = split_storage('/figshare/foo/bar/baz')
assert store == 'figshare'
assert path == 'foo/bar/baz'
store, path = split_storage('googledrive/foo/bar/baz')
assert store == 'googledrive'
assert path == 'foo/bar/baz'
store, path = split_storage('/googledrive/foo/bar/baz')
assert store == 'googledrive'
assert path == 'foo/bar/baz'
def test_norm_remote_path():
path = 'foo/bar/baz.txt'
new_path = norm_remote_path(path)
assert new_path == path
new_path = norm_remote_path('/' + path)
assert new_path == path
@patch('osfclient.utils.os.path')
@patch('osfclient.utils.os.makedirs')
def test_makedirs_py2(mock_makedirs, mock_path):
# pretend to be in python 2 land
# path already exists, expect to call makedirs and that will raise
mock_path.exists.return_value = True
with patch('osfclient.utils.six.PY3', False):
makedirs('/this/path/exists')
expected = [call('/this/path/exists', 511)]
assert expected == mock_makedirs.mock_calls
@patch('osfclient.utils.os.path')
@patch('osfclient.utils.os.makedirs')
def test_makedirs_exist_ok_py2(mock_makedirs, mock_path):
# pretend to be in python 2 land
# path already exists, expect NOT to call makedirs as we set exist_ok
mock_path.exists.return_value = True
with patch('osfclient.utils.six.PY3', False):
makedirs('/this/path/exists', exist_ok=True)
assert not mock_makedirs.called
@patch('osfclient.utils.os.path')
@patch('osfclient.utils.os.makedirs')
def test_makedirs_doesnt_exist_py2(mock_makedirs, mock_path):
# pretend to be in python 2 land
mock_path.exists.return_value = False
with patch('osfclient.utils.six.PY3', False):
makedirs('/this/path/doesnt/exists')
expected = [call('/this/path/doesnt/exists', 511)]
assert expected == mock_makedirs.mock_calls
@patch('osfclient.utils.os.makedirs')
def test_makedirs_py3(mock_makedirs):
# just check stuff gets forwarded
with patch('osfclient.utils.six.PY3', True):
makedirs('/this/path/exists', exist_ok=True)
expected = [call('/this/path/exists', 511, True)]
assert expected == mock_makedirs.mock_calls
def test_empty_file():
fake_fp = Mock()
with patch('osfclient.utils.six.PY2', False):
empty = file_empty(fake_fp)
expected = [call.peek()]
assert expected == fake_fp.mock_calls
# mocks and calls on mocks are always truthy, so this should be False
assert not empty
def test_empty_file_py2():
fake_fp = Mock()
with patch('osfclient.utils.six.PY2', True):
empty = file_empty(fake_fp)
expected = [call.read(), call.seek(0)]
assert expected == fake_fp.mock_calls
# mocks and calls on mocks are always truthy, so this should be False
assert not empty
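# Reminder on the @patch stacking used throughout this module: decorators are
# applied bottom-up, so the patch closest to the function supplies the first
# mock argument. A self-contained illustration (the patched targets here are
# arbitrary stand-ins, not osfclient internals):
from unittest.mock import patch as _patch


@_patch('os.path.exists')   # becomes the second mock argument
@_patch('os.getcwd')        # closest to the function -> first mock argument
def _patch_order_demo(mock_getcwd, mock_exists):
    mock_getcwd.return_value = '/tmp'
    mock_exists.return_value = True
    return mock_getcwd(), mock_exists('anything')


assert _patch_order_demo() == ('/tmp', True)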
| 30.2
| 74
| 0.691656
|
ca0dd48246ac2eddb4b2761232e42480a4158573
| 11,243
|
py
|
Python
|
benepar/base_parser.py
|
icewing1996/self-attentive-parser
|
35ba70951eddefa63a294186077373fbe3e74185
|
[
"MIT"
] | null | null | null |
benepar/base_parser.py
|
icewing1996/self-attentive-parser
|
35ba70951eddefa63a294186077373fbe3e74185
|
[
"MIT"
] | null | null | null |
benepar/base_parser.py
|
icewing1996/self-attentive-parser
|
35ba70951eddefa63a294186077373fbe3e74185
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
import os
import sys
import json
import codecs
from . import chart_decoder
from .downloader import load_model
from .bert_tokenization import BertTokenizer
#%%
IS_PY2 = sys.version_info < (3,0)
if IS_PY2:
STRING_TYPES = (str, unicode)
else:
STRING_TYPES = (str,)
ELMO_START_SENTENCE = 256
ELMO_STOP_SENTENCE = 257
ELMO_START_WORD = 258
ELMO_STOP_WORD = 259
ELMO_CHAR_PAD = 260
PTB_TOKEN_ESCAPE = {u"(": u"-LRB-",
u")": u"-RRB-",
u"{": u"-LCB-",
u"}": u"-RCB-",
u"[": u"-LSB-",
u"]": u"-RSB-"}
BERT_TOKEN_MAPPING = {u"-LRB-": u"(",
u"-RRB-": u")",
u"-LCB-": u"{",
u"-RCB-": u"}",
u"-LSB-": u"[",
u"-RSB-": u"]",
u"``": u'"',
u"''": u'"',
u"`": u"'",
u"\u201c": u'"',
u"\u201d": u'"',
u"\u2018": u"'",
u"\u2019": u"'",
u"\xab": u'"',
u"\xbb": u'"',
u"\u201e": u'"',
u"\u2039": u"'",
u"\u203a": u"'",
}
# Label vocab is made immutable because it is potentially exposed to users
# through the spacy plugin
LABEL_VOCAB = ((),
('S',),
('PP',),
('NP',),
('PRN',),
('VP',),
('ADVP',),
('SBAR', 'S'),
('ADJP',),
('QP',),
('UCP',),
('S', 'VP'),
('SBAR',),
('WHNP',),
('SINV',),
('FRAG',),
('NAC',),
('WHADVP',),
('NP', 'QP'),
('PRT',),
('S', 'PP'),
('S', 'NP'),
('NX',),
('S', 'ADJP'),
('WHPP',),
('SBAR', 'S', 'VP'),
('SBAR', 'SINV'),
('SQ',),
('NP', 'NP'),
('SBARQ',),
('SQ', 'VP'),
('CONJP',),
('ADJP', 'QP'),
('FRAG', 'NP'),
('FRAG', 'ADJP'),
('WHADJP',),
('ADJP', 'ADJP'),
('FRAG', 'PP'),
('S', 'ADVP'),
('FRAG', 'SBAR'),
('PRN', 'S'),
('PRN', 'S', 'VP'),
('INTJ',),
('X',),
('NP', 'NP', 'NP'),
('FRAG', 'S', 'VP'),
('ADVP', 'ADVP'),
('RRC',),
('VP', 'PP'),
('VP', 'VP'),
('SBAR', 'FRAG'),
('ADVP', 'ADJP'),
('LST',),
('NP', 'NP', 'QP'),
('PRN', 'SBAR'),
('VP', 'S', 'VP'),
('S', 'UCP'),
('FRAG', 'WHNP'),
('NP', 'PP'),
('NP', 'SBAR', 'S', 'VP'),
('WHNP', 'QP'),
('VP', 'FRAG', 'ADJP'),
('FRAG', 'WHADVP'),
('NP', 'ADJP'),
('VP', 'SBAR'),
('NP', 'S', 'VP'),
('X', 'PP'),
('S', 'VP', 'VP'),
('S', 'VP', 'ADVP'),
('WHNP', 'WHNP'),
('NX', 'NX'),
('FRAG', 'ADVP'),
('FRAG', 'VP'),
('VP', 'ADVP'),
('SBAR', 'WHNP'),
('FRAG', 'SBARQ'),
('PP', 'PP'),
('PRN', 'PP'),
('VP', 'NP'),
('X', 'NP'),
('PRN', 'SINV'),
('NP', 'SBAR'),
('PP', 'NP'),
('NP', 'INTJ'),
('FRAG', 'INTJ'),
('X', 'VP'),
('PRN', 'NP'),
('FRAG', 'UCP'),
('NP', 'ADVP'),
('SBAR', 'SBARQ'),
('SBAR', 'SBAR', 'S'),
('SBARQ', 'WHADVP'),
('ADVP', 'PRT'),
('UCP', 'ADJP'),
('PRN', 'FRAG', 'WHADJP'),
('FRAG', 'S'),
('S', 'S'),
('FRAG', 'S', 'ADJP'),
('INTJ', 'S'),
('ADJP', 'NP'),
('X', 'ADVP'),
('FRAG', 'WHPP'),
('NP', 'FRAG'),
('NX', 'QP'),
('NP', 'S'),
('SBAR', 'WHADVP'),
('X', 'SBARQ'),
('NP', 'PRN'),
('NX', 'S', 'VP'),
('NX', 'S'),
('UCP', 'PP'),
('RRC', 'VP'),
('ADJP', 'ADVP'))
SENTENCE_MAX_LEN = 300
BERT_MAX_LEN = 512
#%%
class BaseParser(object):
def __init__(self, name, batch_size=64):
self._graph = tf.Graph()
with self._graph.as_default():
if isinstance(name, STRING_TYPES) and '/' not in name:
model = load_model(name)
elif not os.path.exists(name):
raise Exception("Argument is neither a valid module name nor a path to an existing file/folder: {}".format(name))
else:
if not os.path.isdir(name):
with open(name, 'rb') as f:
model = f.read()
else:
model = {}
with open(os.path.join(name, 'meta.json')) as f:
model['meta'] = json.load(f)
with open(os.path.join(name, 'model.pb'), 'rb') as f:
model['model'] = f.read()
with codecs.open(os.path.join(name, 'vocab.txt'), encoding='utf-8') as f:
model['vocab'] = f.read()
if isinstance(model, dict):
graph_def = tf.GraphDef.FromString(model['model'])
else:
graph_def = tf.GraphDef.FromString(model)
tf.import_graph_def(graph_def, name='')
self._sess = tf.Session(graph=self._graph)
if not isinstance(model, dict):
# Older model format (for ELMo-based models)
self._chars = self._graph.get_tensor_by_name('chars:0')
self._charts = self._graph.get_tensor_by_name('charts:0')
self._label_vocab = LABEL_VOCAB
self._language_code = 'en'
self._provides_tags = False
self._make_feed_dict = self._make_feed_dict_elmo
else:
# Newer model format (for BERT-based models)
meta = model['meta']
# Label vocab is made immutable because it is potentially exposed to
# users through the spacy plugin
self._label_vocab = tuple([tuple(label) for label in meta['label_vocab']])
self._language_code = meta['language_code']
self._provides_tags = meta['provides_tags']
self._input_ids = self._graph.get_tensor_by_name('input_ids:0')
self._word_end_mask = self._graph.get_tensor_by_name('word_end_mask:0')
self._charts = self._graph.get_tensor_by_name('charts:0')
if self._provides_tags:
self._tag_vocab = meta['tag_vocab']
self._tags = self._graph.get_tensor_by_name('tags:0')
self._bert_tokenizer = BertTokenizer(
model['vocab'], do_lower_case=meta['bert_do_lower_case'])
self._make_feed_dict = self._make_feed_dict_bert
self.batch_size = batch_size
def _make_feed_dict_elmo(self, sentences):
padded_len = max([len(sentence) + 2 for sentence in sentences])
if padded_len > SENTENCE_MAX_LEN:
raise ValueError("Sentence of length {} exceeds the maximum supported length of {}".format(
padded_len - 2, SENTENCE_MAX_LEN - 2))
all_chars = np.zeros((len(sentences), padded_len, 50), dtype=np.int32)
for snum, sentence in enumerate(sentences):
all_chars[snum, :len(sentence)+2,:] = ELMO_CHAR_PAD
all_chars[snum, 0, 0] = ELMO_START_WORD
all_chars[snum, 0, 1] = ELMO_START_SENTENCE
all_chars[snum, 0, 2] = ELMO_STOP_WORD
for i, word in enumerate(sentence):
word = PTB_TOKEN_ESCAPE.get(word, word)
if IS_PY2:
chars = [ELMO_START_WORD] + [ord(char) for char in word.encode('utf-8', 'ignore')[:(50-2)]] + [ELMO_STOP_WORD]
else:
chars = [ELMO_START_WORD] + list(word.encode('utf-8', 'ignore')[:(50-2)]) + [ELMO_STOP_WORD]
all_chars[snum, i+1, :len(chars)] = chars
all_chars[snum, len(sentence)+1, 0] = ELMO_START_WORD
all_chars[snum, len(sentence)+1, 1] = ELMO_STOP_SENTENCE
all_chars[snum, len(sentence)+1, 2] = ELMO_STOP_WORD
# Add 1; 0 is a reserved value for signaling words past the end of the
# sentence, which we don't have because batch_size=1
all_chars[snum, :len(sentence)+2,:] += 1
return {self._chars: all_chars}
def _make_feed_dict_bert(self, sentences):
all_input_ids = np.zeros((len(sentences), BERT_MAX_LEN), dtype=int)
all_word_end_mask = np.zeros((len(sentences), BERT_MAX_LEN), dtype=int)
subword_max_len = 0
for snum, sentence in enumerate(sentences):
tokens = []
word_end_mask = []
tokens.append(u"[CLS]")
word_end_mask.append(1)
cleaned_words = []
for word in sentence:
word = BERT_TOKEN_MAPPING.get(word, word)
# BERT is pre-trained with a tokenizer that doesn't split off
# n't as its own token
if word == u"n't" and cleaned_words:
cleaned_words[-1] = cleaned_words[-1] + u"n"
word = u"'t"
cleaned_words.append(word)
for word in cleaned_words:
word_tokens = self._bert_tokenizer.tokenize(word)
if not word_tokens:
# The tokenizer used in conjunction with the parser may not
# align with BERT; in particular spaCy will create separate
# tokens for whitespace when there is more than one space in
# a row, and will sometimes separate out characters of
# unicode category Mn (which BERT strips when do_lower_case
# is enabled). Substituting UNK is not strictly correct, but
# it's better than failing to return a valid parse.
word_tokens = ["[UNK]"]
for _ in range(len(word_tokens)):
word_end_mask.append(0)
word_end_mask[-1] = 1
tokens.extend(word_tokens)
tokens.append(u"[SEP]")
word_end_mask.append(1)
input_ids = self._bert_tokenizer.convert_tokens_to_ids(tokens)
if len(sentence) + 2 > SENTENCE_MAX_LEN or len(input_ids) > BERT_MAX_LEN:
raise ValueError("Sentence of length {} is too long to be parsed".format(
len(sentence)))
subword_max_len = max(subword_max_len, len(input_ids))
all_input_ids[snum, :len(input_ids)] = input_ids
all_word_end_mask[snum, :len(word_end_mask)] = word_end_mask
all_input_ids = all_input_ids[:, :subword_max_len]
all_word_end_mask = all_word_end_mask[:, :subword_max_len]
return {
self._input_ids: all_input_ids,
self._word_end_mask: all_word_end_mask
}
def _make_charts_and_tags(self, sentences):
feed_dict = self._make_feed_dict(sentences)
if self._provides_tags:
charts_val, tags_val = self._sess.run((self._charts, self._tags), feed_dict)
else:
charts_val = self._sess.run(self._charts, feed_dict)
for snum, sentence in enumerate(sentences):
chart_size = len(sentence) + 1
chart = charts_val[snum,:chart_size,:chart_size,:]
if self._provides_tags:
tags = tags_val[snum,1:chart_size]
else:
tags = None
yield chart, tags
def _batched_parsed_raw(self, sentence_data_pairs):
batch_sentences = []
batch_data = []
for sentence, datum in sentence_data_pairs:
batch_sentences.append(sentence)
batch_data.append(datum)
if len(batch_sentences) >= self.batch_size:
for (chart_np, tags_np), datum in zip(self._make_charts_and_tags(batch_sentences), batch_data):
yield chart_decoder.decode(chart_np), tags_np, datum
batch_sentences = []
batch_data = []
if batch_sentences:
for (chart_np, tags_np), datum in zip(self._make_charts_and_tags(batch_sentences), batch_data):
yield chart_decoder.decode(chart_np), tags_np, datum
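# The word_end_mask built in _make_feed_dict_bert marks the final sub-word of
# each token with 1 and interior sub-words with 0 (plus 1s for [CLS]/[SEP]).
# A hand-worked, tokenizer-free sketch of that layout (the sub-word split
# below is made up purely for illustration):
def _word_end_mask_sketch(subword_lists):
    mask = [1]                                   # [CLS]
    for pieces in subword_lists:
        mask.extend([0] * (len(pieces) - 1) + [1])
    mask.append(1)                               # [SEP]
    return mask


#   sub-words:  [CLS]   un   ##happy   dog   [SEP]
#   mask:         1      0      1       1      1
assert _word_end_mask_sketch([["un", "##happy"], ["dog"]]) == [1, 0, 1, 1, 1]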
| 31.849858
| 130
| 0.536067
|
a76788e65d728d4a39881ba7fdd1173f91cecb4f
| 130,625
|
py
|
Python
|
netbox/dcim/tests/test_filters.py
|
letic/netbox
|
0930745e16330edf00da081150b079d5ed6ecc02
|
[
"Apache-2.0"
] | 2
|
2021-07-08T03:58:12.000Z
|
2022-02-11T21:50:46.000Z
|
netbox/dcim/tests/test_filters.py
|
letic/netbox
|
0930745e16330edf00da081150b079d5ed6ecc02
|
[
"Apache-2.0"
] | 25
|
2019-09-17T19:40:50.000Z
|
2022-03-11T04:01:55.000Z
|
netbox/dcim/tests/test_filters.py
|
letic/netbox
|
0930745e16330edf00da081150b079d5ed6ecc02
|
[
"Apache-2.0"
] | 1
|
2022-02-11T21:50:58.000Z
|
2022-02-11T21:50:58.000Z
|
from django.contrib.auth.models import User
from django.test import TestCase
from dcim.choices import *
from dcim.filters import *
from dcim.models import (
Cable, ConsolePort, ConsolePortTemplate, ConsoleServerPort, ConsoleServerPortTemplate, Device, DeviceBay,
DeviceBayTemplate, DeviceRole, DeviceType, FrontPort, FrontPortTemplate, Interface, InterfaceTemplate,
InventoryItem, Manufacturer, Platform, PowerFeed, PowerPanel, PowerPort, PowerPortTemplate, PowerOutlet,
PowerOutletTemplate, Rack, RackGroup, RackReservation, RackRole, RearPort, RearPortTemplate, Region, Site,
VirtualChassis,
)
from ipam.models import IPAddress
from tenancy.models import Tenant, TenantGroup
from virtualization.models import Cluster, ClusterType
class RegionTestCase(TestCase):
queryset = Region.objects.all()
filterset = RegionFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1', description='A'),
Region(name='Region 2', slug='region-2', description='B'),
Region(name='Region 3', slug='region-3', description='C'),
)
for region in regions:
region.save()
child_regions = (
Region(name='Region 1A', slug='region-1a', parent=regions[0]),
Region(name='Region 1B', slug='region-1b', parent=regions[0]),
Region(name='Region 2A', slug='region-2a', parent=regions[1]),
Region(name='Region 2B', slug='region-2b', parent=regions[1]),
Region(name='Region 3A', slug='region-3a', parent=regions[2]),
Region(name='Region 3B', slug='region-3b', parent=regions[2]),
)
for region in child_regions:
region.save()
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Region 1', 'Region 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['region-1', 'region-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_parent(self):
parent_regions = Region.objects.filter(parent__isnull=True)[:2]
params = {'parent_id': [parent_regions[0].pk, parent_regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'parent': [parent_regions[0].slug, parent_regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
class SiteTestCase(TestCase):
queryset = Site.objects.all()
filterset = SiteFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
tenant_groups = (
TenantGroup(name='Tenant group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant group 3', slug='tenant-group-3'),
)
for tenantgroup in tenant_groups:
tenantgroup.save()
tenants = (
Tenant(name='Tenant 1', slug='tenant-1', group=tenant_groups[0]),
Tenant(name='Tenant 2', slug='tenant-2', group=tenant_groups[1]),
Tenant(name='Tenant 3', slug='tenant-3', group=tenant_groups[2]),
)
Tenant.objects.bulk_create(tenants)
sites = (
Site(name='Site 1', slug='site-1', region=regions[0], tenant=tenants[0], status=SiteStatusChoices.STATUS_ACTIVE, facility='Facility 1', asn=65001, latitude=10, longitude=10, contact_name='Contact 1', contact_phone='123-555-0001', contact_email='contact1@example.com'),
Site(name='Site 2', slug='site-2', region=regions[1], tenant=tenants[1], status=SiteStatusChoices.STATUS_PLANNED, facility='Facility 2', asn=65002, latitude=20, longitude=20, contact_name='Contact 2', contact_phone='123-555-0002', contact_email='contact2@example.com'),
Site(name='Site 3', slug='site-3', region=regions[2], tenant=tenants[2], status=SiteStatusChoices.STATUS_RETIRED, facility='Facility 3', asn=65003, latitude=30, longitude=30, contact_name='Contact 3', contact_phone='123-555-0003', contact_email='contact3@example.com'),
)
Site.objects.bulk_create(sites)
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Site 1', 'Site 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['site-1', 'site-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_facility(self):
params = {'facility': ['Facility 1', 'Facility 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_asn(self):
params = {'asn': [65001, 65002]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_latitude(self):
params = {'latitude': [10, 20]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_longitude(self):
params = {'longitude': [10, 20]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_contact_name(self):
params = {'contact_name': ['Contact 1', 'Contact 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_contact_phone(self):
params = {'contact_phone': ['123-555-0001', '123-555-0002']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_contact_email(self):
params = {'contact_email': ['contact1@example.com', 'contact2@example.com']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_status(self):
params = {'status': [SiteStatusChoices.STATUS_ACTIVE, SiteStatusChoices.STATUS_PLANNED]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_tenant(self):
tenants = Tenant.objects.all()[:2]
params = {'tenant_id': [tenants[0].pk, tenants[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'tenant': [tenants[0].slug, tenants[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_tenant_group(self):
tenant_groups = TenantGroup.objects.all()[:2]
params = {'tenant_group_id': [tenant_groups[0].pk, tenant_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'tenant_group': [tenant_groups[0].slug, tenant_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class RackGroupTestCase(TestCase):
queryset = RackGroup.objects.all()
filterset = RackGroupFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
sites = (
Site(name='Site 1', slug='site-1', region=regions[0]),
Site(name='Site 2', slug='site-2', region=regions[1]),
Site(name='Site 3', slug='site-3', region=regions[2]),
)
Site.objects.bulk_create(sites)
parent_rack_groups = (
RackGroup(name='Parent Rack Group 1', slug='parent-rack-group-1', site=sites[0]),
RackGroup(name='Parent Rack Group 2', slug='parent-rack-group-2', site=sites[1]),
RackGroup(name='Parent Rack Group 3', slug='parent-rack-group-3', site=sites[2]),
)
for rackgroup in parent_rack_groups:
rackgroup.save()
rack_groups = (
RackGroup(name='Rack Group 1', slug='rack-group-1', site=sites[0], parent=parent_rack_groups[0], description='A'),
RackGroup(name='Rack Group 2', slug='rack-group-2', site=sites[1], parent=parent_rack_groups[1], description='B'),
RackGroup(name='Rack Group 3', slug='rack-group-3', site=sites[2], parent=parent_rack_groups[2], description='C'),
)
for rackgroup in rack_groups:
rackgroup.save()
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Rack Group 1', 'Rack Group 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['rack-group-1', 'rack-group-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_parent(self):
parent_groups = RackGroup.objects.filter(name__startswith='Parent')[:2]
params = {'parent_id': [parent_groups[0].pk, parent_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'parent': [parent_groups[0].slug, parent_groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class RackRoleTestCase(TestCase):
queryset = RackRole.objects.all()
filterset = RackRoleFilterSet
@classmethod
def setUpTestData(cls):
rack_roles = (
RackRole(name='Rack Role 1', slug='rack-role-1', color='ff0000'),
RackRole(name='Rack Role 2', slug='rack-role-2', color='00ff00'),
RackRole(name='Rack Role 3', slug='rack-role-3', color='0000ff'),
)
RackRole.objects.bulk_create(rack_roles)
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Rack Role 1', 'Rack Role 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['rack-role-1', 'rack-role-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_color(self):
params = {'color': ['ff0000', '00ff00']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
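# Every test in this module follows the same shape: build a filter-parameter
# dict and assert how many rows survive the filterset. A framework-free sketch
# of that idea (plain dicts stand in for model instances; this is not NetBox code):
def _filter_count_sketch(items, **params):
    return sum(1 for item in items
               if all(item.get(field) in allowed for field, allowed in params.items()))


_rows = [{'name': 'Rack Role 1'}, {'name': 'Rack Role 2'}, {'name': 'Rack Role 3'}]
assert _filter_count_sketch(_rows, name=['Rack Role 1', 'Rack Role 2']) == 2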
class RackTestCase(TestCase):
queryset = Rack.objects.all()
filterset = RackFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
sites = (
Site(name='Site 1', slug='site-1', region=regions[0]),
Site(name='Site 2', slug='site-2', region=regions[1]),
Site(name='Site 3', slug='site-3', region=regions[2]),
)
Site.objects.bulk_create(sites)
rack_groups = (
RackGroup(name='Rack Group 1', slug='rack-group-1', site=sites[0]),
RackGroup(name='Rack Group 2', slug='rack-group-2', site=sites[1]),
RackGroup(name='Rack Group 3', slug='rack-group-3', site=sites[2]),
)
for rackgroup in rack_groups:
rackgroup.save()
rack_roles = (
RackRole(name='Rack Role 1', slug='rack-role-1'),
RackRole(name='Rack Role 2', slug='rack-role-2'),
RackRole(name='Rack Role 3', slug='rack-role-3'),
)
RackRole.objects.bulk_create(rack_roles)
tenant_groups = (
TenantGroup(name='Tenant group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant group 3', slug='tenant-group-3'),
)
for tenantgroup in tenant_groups:
tenantgroup.save()
tenants = (
Tenant(name='Tenant 1', slug='tenant-1', group=tenant_groups[0]),
Tenant(name='Tenant 2', slug='tenant-2', group=tenant_groups[1]),
Tenant(name='Tenant 3', slug='tenant-3', group=tenant_groups[2]),
)
Tenant.objects.bulk_create(tenants)
racks = (
Rack(name='Rack 1', facility_id='rack-1', site=sites[0], group=rack_groups[0], tenant=tenants[0], status=RackStatusChoices.STATUS_ACTIVE, role=rack_roles[0], serial='ABC', asset_tag='1001', type=RackTypeChoices.TYPE_2POST, width=RackWidthChoices.WIDTH_19IN, u_height=42, desc_units=False, outer_width=100, outer_depth=100, outer_unit=RackDimensionUnitChoices.UNIT_MILLIMETER),
Rack(name='Rack 2', facility_id='rack-2', site=sites[1], group=rack_groups[1], tenant=tenants[1], status=RackStatusChoices.STATUS_PLANNED, role=rack_roles[1], serial='DEF', asset_tag='1002', type=RackTypeChoices.TYPE_4POST, width=RackWidthChoices.WIDTH_21IN, u_height=43, desc_units=False, outer_width=200, outer_depth=200, outer_unit=RackDimensionUnitChoices.UNIT_MILLIMETER),
Rack(name='Rack 3', facility_id='rack-3', site=sites[2], group=rack_groups[2], tenant=tenants[2], status=RackStatusChoices.STATUS_RESERVED, role=rack_roles[2], serial='GHI', asset_tag='1003', type=RackTypeChoices.TYPE_CABINET, width=RackWidthChoices.WIDTH_23IN, u_height=44, desc_units=True, outer_width=300, outer_depth=300, outer_unit=RackDimensionUnitChoices.UNIT_INCH),
)
Rack.objects.bulk_create(racks)
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Rack 1', 'Rack 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_facility_id(self):
params = {'facility_id': ['rack-1', 'rack-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_asset_tag(self):
params = {'asset_tag': ['1001', '1002']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_type(self):
params = {'type': [RackTypeChoices.TYPE_2POST, RackTypeChoices.TYPE_4POST]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_width(self):
params = {'width': [RackWidthChoices.WIDTH_19IN, RackWidthChoices.WIDTH_21IN]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_u_height(self):
params = {'u_height': [42, 43]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_desc_units(self):
params = {'desc_units': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
params = {'desc_units': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_outer_width(self):
params = {'outer_width': [100, 200]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_outer_depth(self):
params = {'outer_depth': [100, 200]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_outer_unit(self):
self.assertEqual(Rack.objects.filter(outer_unit__isnull=False).count(), 3)
params = {'outer_unit': RackDimensionUnitChoices.UNIT_MILLIMETER}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_group(self):
groups = RackGroup.objects.all()[:2]
params = {'group_id': [groups[0].pk, groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'group': [groups[0].slug, groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_status(self):
params = {'status': [RackStatusChoices.STATUS_ACTIVE, RackStatusChoices.STATUS_PLANNED]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_role(self):
roles = RackRole.objects.all()[:2]
params = {'role_id': [roles[0].pk, roles[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'role': [roles[0].slug, roles[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_serial(self):
params = {'serial': 'ABC'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
params = {'serial': 'abc'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_tenant(self):
tenants = Tenant.objects.all()[:2]
params = {'tenant_id': [tenants[0].pk, tenants[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'tenant': [tenants[0].slug, tenants[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_tenant_group(self):
tenant_groups = TenantGroup.objects.all()[:2]
params = {'tenant_group_id': [tenant_groups[0].pk, tenant_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'tenant_group': [tenant_groups[0].slug, tenant_groups[1].slug]}
        self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)


class RackReservationTestCase(TestCase):
queryset = RackReservation.objects.all()
filterset = RackReservationFilterSet
@classmethod
def setUpTestData(cls):
sites = (
Site(name='Site 1', slug='site-1'),
Site(name='Site 2', slug='site-2'),
Site(name='Site 3', slug='site-3'),
)
Site.objects.bulk_create(sites)
rack_groups = (
RackGroup(name='Rack Group 1', slug='rack-group-1', site=sites[0]),
RackGroup(name='Rack Group 2', slug='rack-group-2', site=sites[1]),
RackGroup(name='Rack Group 3', slug='rack-group-3', site=sites[2]),
)
for rackgroup in rack_groups:
rackgroup.save()
racks = (
Rack(name='Rack 1', site=sites[0], group=rack_groups[0]),
Rack(name='Rack 2', site=sites[1], group=rack_groups[1]),
Rack(name='Rack 3', site=sites[2], group=rack_groups[2]),
)
Rack.objects.bulk_create(racks)
users = (
User(username='User 1'),
User(username='User 2'),
User(username='User 3'),
)
User.objects.bulk_create(users)
tenant_groups = (
TenantGroup(name='Tenant group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant group 3', slug='tenant-group-3'),
)
for tenantgroup in tenant_groups:
tenantgroup.save()
tenants = (
Tenant(name='Tenant 1', slug='tenant-1', group=tenant_groups[0]),
Tenant(name='Tenant 2', slug='tenant-2', group=tenant_groups[1]),
Tenant(name='Tenant 3', slug='tenant-3', group=tenant_groups[2]),
)
Tenant.objects.bulk_create(tenants)
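        # One reservation per rack, each owned by a different user and tenant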
reservations = (
RackReservation(rack=racks[0], units=[1, 2, 3], user=users[0], tenant=tenants[0]),
RackReservation(rack=racks[1], units=[4, 5, 6], user=users[1], tenant=tenants[1]),
RackReservation(rack=racks[2], units=[7, 8, 9], user=users[2], tenant=tenants[2]),
)
RackReservation.objects.bulk_create(reservations)
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_group(self):
groups = RackGroup.objects.all()[:2]
params = {'group_id': [groups[0].pk, groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'group': [groups[0].slug, groups[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_user(self):
users = User.objects.all()[:2]
params = {'user_id': [users[0].pk, users[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'user': [users[0].username, users[1].username]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_tenant(self):
tenants = Tenant.objects.all()[:2]
params = {'tenant_id': [tenants[0].pk, tenants[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'tenant': [tenants[0].slug, tenants[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_tenant_group(self):
tenant_groups = TenantGroup.objects.all()[:2]
params = {'tenant_group_id': [tenant_groups[0].pk, tenant_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'tenant_group': [tenant_groups[0].slug, tenant_groups[1].slug]}
        self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)


class ManufacturerTestCase(TestCase):
queryset = Manufacturer.objects.all()
filterset = ManufacturerFilterSet
@classmethod
def setUpTestData(cls):
manufacturers = (
Manufacturer(name='Manufacturer 1', slug='manufacturer-1', description='A'),
Manufacturer(name='Manufacturer 2', slug='manufacturer-2', description='B'),
Manufacturer(name='Manufacturer 3', slug='manufacturer-3', description='C'),
)
Manufacturer.objects.bulk_create(manufacturers)
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Manufacturer 1', 'Manufacturer 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['manufacturer-1', 'manufacturer-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['A', 'B']}
        self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)


class DeviceTypeTestCase(TestCase):
queryset = DeviceType.objects.all()
filterset = DeviceTypeFilterSet
@classmethod
def setUpTestData(cls):
manufacturers = (
Manufacturer(name='Manufacturer 1', slug='manufacturer-1'),
Manufacturer(name='Manufacturer 2', slug='manufacturer-2'),
Manufacturer(name='Manufacturer 3', slug='manufacturer-3'),
)
Manufacturer.objects.bulk_create(manufacturers)
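        # Three device types varying in u_height, depth, and subdevice role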
device_types = (
DeviceType(manufacturer=manufacturers[0], model='Model 1', slug='model-1', part_number='Part Number 1', u_height=1, is_full_depth=True),
DeviceType(manufacturer=manufacturers[1], model='Model 2', slug='model-2', part_number='Part Number 2', u_height=2, is_full_depth=True, subdevice_role=SubdeviceRoleChoices.ROLE_PARENT),
DeviceType(manufacturer=manufacturers[2], model='Model 3', slug='model-3', part_number='Part Number 3', u_height=3, is_full_depth=False, subdevice_role=SubdeviceRoleChoices.ROLE_CHILD),
)
DeviceType.objects.bulk_create(device_types)
# Add component templates for filtering
ConsolePortTemplate.objects.bulk_create((
ConsolePortTemplate(device_type=device_types[0], name='Console Port 1'),
ConsolePortTemplate(device_type=device_types[1], name='Console Port 2'),
))
ConsoleServerPortTemplate.objects.bulk_create((
ConsoleServerPortTemplate(device_type=device_types[0], name='Console Server Port 1'),
ConsoleServerPortTemplate(device_type=device_types[1], name='Console Server Port 2'),
))
PowerPortTemplate.objects.bulk_create((
PowerPortTemplate(device_type=device_types[0], name='Power Port 1'),
PowerPortTemplate(device_type=device_types[1], name='Power Port 2'),
))
PowerOutletTemplate.objects.bulk_create((
PowerOutletTemplate(device_type=device_types[0], name='Power Outlet 1'),
PowerOutletTemplate(device_type=device_types[1], name='Power Outlet 2'),
))
InterfaceTemplate.objects.bulk_create((
InterfaceTemplate(device_type=device_types[0], name='Interface 1'),
InterfaceTemplate(device_type=device_types[1], name='Interface 2'),
))
rear_ports = (
RearPortTemplate(device_type=device_types[0], name='Rear Port 1', type=PortTypeChoices.TYPE_8P8C),
RearPortTemplate(device_type=device_types[1], name='Rear Port 2', type=PortTypeChoices.TYPE_8P8C),
)
RearPortTemplate.objects.bulk_create(rear_ports)
FrontPortTemplate.objects.bulk_create((
FrontPortTemplate(device_type=device_types[0], name='Front Port 1', type=PortTypeChoices.TYPE_8P8C, rear_port=rear_ports[0]),
FrontPortTemplate(device_type=device_types[1], name='Front Port 2', type=PortTypeChoices.TYPE_8P8C, rear_port=rear_ports[1]),
))
DeviceBayTemplate.objects.bulk_create((
DeviceBayTemplate(device_type=device_types[0], name='Device Bay 1'),
DeviceBayTemplate(device_type=device_types[1], name='Device Bay 2'),
))
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_model(self):
params = {'model': ['Model 1', 'Model 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['model-1', 'model-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_part_number(self):
params = {'part_number': ['Part Number 1', 'Part Number 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_u_height(self):
params = {'u_height': [1, 2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_is_full_depth(self):
params = {'is_full_depth': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'is_full_depth': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_subdevice_role(self):
params = {'subdevice_role': SubdeviceRoleChoices.ROLE_PARENT}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_manufacturer(self):
manufacturers = Manufacturer.objects.all()[:2]
params = {'manufacturer_id': [manufacturers[0].pk, manufacturers[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'manufacturer': [manufacturers[0].slug, manufacturers[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_console_ports(self):
params = {'console_ports': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'console_ports': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_console_server_ports(self):
params = {'console_server_ports': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'console_server_ports': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_power_ports(self):
params = {'power_ports': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'power_ports': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_power_outlets(self):
params = {'power_outlets': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'power_outlets': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_interfaces(self):
params = {'interfaces': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'interfaces': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_pass_through_ports(self):
params = {'pass_through_ports': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'pass_through_ports': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_device_bays(self):
params = {'device_bays': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device_bays': 'false'}
        self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)


class ConsolePortTemplateTestCase(TestCase):
queryset = ConsolePortTemplate.objects.all()
filterset = ConsolePortTemplateFilterSet
@classmethod
def setUpTestData(cls):
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_types = (
DeviceType(manufacturer=manufacturer, model='Model 1', slug='model-1'),
DeviceType(manufacturer=manufacturer, model='Model 2', slug='model-2'),
DeviceType(manufacturer=manufacturer, model='Model 3', slug='model-3'),
)
DeviceType.objects.bulk_create(device_types)
ConsolePortTemplate.objects.bulk_create((
ConsolePortTemplate(device_type=device_types[0], name='Console Port 1'),
ConsolePortTemplate(device_type=device_types[1], name='Console Port 2'),
ConsolePortTemplate(device_type=device_types[2], name='Console Port 3'),
))
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Console Port 1', 'Console Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicetype_id(self):
device_types = DeviceType.objects.all()[:2]
params = {'devicetype_id': [device_types[0].pk, device_types[1].pk]}
        self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)


class ConsoleServerPortTemplateTestCase(TestCase):
queryset = ConsoleServerPortTemplate.objects.all()
filterset = ConsoleServerPortTemplateFilterSet
@classmethod
def setUpTestData(cls):
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_types = (
DeviceType(manufacturer=manufacturer, model='Model 1', slug='model-1'),
DeviceType(manufacturer=manufacturer, model='Model 2', slug='model-2'),
DeviceType(manufacturer=manufacturer, model='Model 3', slug='model-3'),
)
DeviceType.objects.bulk_create(device_types)
ConsoleServerPortTemplate.objects.bulk_create((
ConsoleServerPortTemplate(device_type=device_types[0], name='Console Server Port 1'),
ConsoleServerPortTemplate(device_type=device_types[1], name='Console Server Port 2'),
ConsoleServerPortTemplate(device_type=device_types[2], name='Console Server Port 3'),
))
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Console Server Port 1', 'Console Server Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicetype_id(self):
device_types = DeviceType.objects.all()[:2]
params = {'devicetype_id': [device_types[0].pk, device_types[1].pk]}
        self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)


class PowerPortTemplateTestCase(TestCase):
queryset = PowerPortTemplate.objects.all()
filterset = PowerPortTemplateFilterSet
@classmethod
def setUpTestData(cls):
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_types = (
DeviceType(manufacturer=manufacturer, model='Model 1', slug='model-1'),
DeviceType(manufacturer=manufacturer, model='Model 2', slug='model-2'),
DeviceType(manufacturer=manufacturer, model='Model 3', slug='model-3'),
)
DeviceType.objects.bulk_create(device_types)
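        # Power port templates with distinct maximum and allocated draw values for the numeric filters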
PowerPortTemplate.objects.bulk_create((
PowerPortTemplate(device_type=device_types[0], name='Power Port 1', maximum_draw=100, allocated_draw=50),
PowerPortTemplate(device_type=device_types[1], name='Power Port 2', maximum_draw=200, allocated_draw=100),
PowerPortTemplate(device_type=device_types[2], name='Power Port 3', maximum_draw=300, allocated_draw=150),
))
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Power Port 1', 'Power Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicetype_id(self):
device_types = DeviceType.objects.all()[:2]
params = {'devicetype_id': [device_types[0].pk, device_types[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_maximum_draw(self):
params = {'maximum_draw': [100, 200]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_allocated_draw(self):
params = {'allocated_draw': [50, 100]}
        self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)


class PowerOutletTemplateTestCase(TestCase):
queryset = PowerOutletTemplate.objects.all()
filterset = PowerOutletTemplateFilterSet
@classmethod
def setUpTestData(cls):
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_types = (
DeviceType(manufacturer=manufacturer, model='Model 1', slug='model-1'),
DeviceType(manufacturer=manufacturer, model='Model 2', slug='model-2'),
DeviceType(manufacturer=manufacturer, model='Model 3', slug='model-3'),
)
DeviceType.objects.bulk_create(device_types)
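        # One power outlet template per device type, each on a different feed leg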
PowerOutletTemplate.objects.bulk_create((
PowerOutletTemplate(device_type=device_types[0], name='Power Outlet 1', feed_leg=PowerOutletFeedLegChoices.FEED_LEG_A),
PowerOutletTemplate(device_type=device_types[1], name='Power Outlet 2', feed_leg=PowerOutletFeedLegChoices.FEED_LEG_B),
PowerOutletTemplate(device_type=device_types[2], name='Power Outlet 3', feed_leg=PowerOutletFeedLegChoices.FEED_LEG_C),
))
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Power Outlet 1', 'Power Outlet 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicetype_id(self):
device_types = DeviceType.objects.all()[:2]
params = {'devicetype_id': [device_types[0].pk, device_types[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_feed_leg(self):
# TODO: Support filtering for multiple values
params = {'feed_leg': PowerOutletFeedLegChoices.FEED_LEG_A}
        self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)


class InterfaceTemplateTestCase(TestCase):
queryset = InterfaceTemplate.objects.all()
filterset = InterfaceTemplateFilterSet
@classmethod
def setUpTestData(cls):
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_types = (
DeviceType(manufacturer=manufacturer, model='Model 1', slug='model-1'),
DeviceType(manufacturer=manufacturer, model='Model 2', slug='model-2'),
DeviceType(manufacturer=manufacturer, model='Model 3', slug='model-3'),
)
DeviceType.objects.bulk_create(device_types)
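        # Interface templates with distinct types; only the first is management-only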
InterfaceTemplate.objects.bulk_create((
InterfaceTemplate(device_type=device_types[0], name='Interface 1', type=InterfaceTypeChoices.TYPE_1GE_FIXED, mgmt_only=True),
InterfaceTemplate(device_type=device_types[1], name='Interface 2', type=InterfaceTypeChoices.TYPE_1GE_GBIC, mgmt_only=False),
InterfaceTemplate(device_type=device_types[2], name='Interface 3', type=InterfaceTypeChoices.TYPE_1GE_SFP, mgmt_only=False),
))
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Interface 1', 'Interface 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicetype_id(self):
device_types = DeviceType.objects.all()[:2]
params = {'devicetype_id': [device_types[0].pk, device_types[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_type(self):
# TODO: Support filtering for multiple values
params = {'type': InterfaceTypeChoices.TYPE_1GE_FIXED}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_mgmt_only(self):
params = {'mgmt_only': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
params = {'mgmt_only': 'false'}
        self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)


class FrontPortTemplateTestCase(TestCase):
queryset = FrontPortTemplate.objects.all()
filterset = FrontPortTemplateFilterSet
@classmethod
def setUpTestData(cls):
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_types = (
DeviceType(manufacturer=manufacturer, model='Model 1', slug='model-1'),
DeviceType(manufacturer=manufacturer, model='Model 2', slug='model-2'),
DeviceType(manufacturer=manufacturer, model='Model 3', slug='model-3'),
)
DeviceType.objects.bulk_create(device_types)
rear_ports = (
RearPortTemplate(device_type=device_types[0], name='Rear Port 1', type=PortTypeChoices.TYPE_8P8C),
RearPortTemplate(device_type=device_types[1], name='Rear Port 2', type=PortTypeChoices.TYPE_8P8C),
RearPortTemplate(device_type=device_types[2], name='Rear Port 3', type=PortTypeChoices.TYPE_8P8C),
)
RearPortTemplate.objects.bulk_create(rear_ports)
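        # Each front port template is bound to the rear port template on the same device type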
FrontPortTemplate.objects.bulk_create((
FrontPortTemplate(device_type=device_types[0], name='Front Port 1', rear_port=rear_ports[0], type=PortTypeChoices.TYPE_8P8C),
FrontPortTemplate(device_type=device_types[1], name='Front Port 2', rear_port=rear_ports[1], type=PortTypeChoices.TYPE_110_PUNCH),
FrontPortTemplate(device_type=device_types[2], name='Front Port 3', rear_port=rear_ports[2], type=PortTypeChoices.TYPE_BNC),
))
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Front Port 1', 'Front Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicetype_id(self):
device_types = DeviceType.objects.all()[:2]
params = {'devicetype_id': [device_types[0].pk, device_types[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_type(self):
# TODO: Support filtering for multiple values
params = {'type': PortTypeChoices.TYPE_8P8C}
        self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)


class RearPortTemplateTestCase(TestCase):
queryset = RearPortTemplate.objects.all()
filterset = RearPortTemplateFilterSet
@classmethod
def setUpTestData(cls):
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_types = (
DeviceType(manufacturer=manufacturer, model='Model 1', slug='model-1'),
DeviceType(manufacturer=manufacturer, model='Model 2', slug='model-2'),
DeviceType(manufacturer=manufacturer, model='Model 3', slug='model-3'),
)
DeviceType.objects.bulk_create(device_types)
RearPortTemplate.objects.bulk_create((
RearPortTemplate(device_type=device_types[0], name='Rear Port 1', type=PortTypeChoices.TYPE_8P8C, positions=1),
RearPortTemplate(device_type=device_types[1], name='Rear Port 2', type=PortTypeChoices.TYPE_110_PUNCH, positions=2),
RearPortTemplate(device_type=device_types[2], name='Rear Port 3', type=PortTypeChoices.TYPE_BNC, positions=3),
))
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Rear Port 1', 'Rear Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicetype_id(self):
device_types = DeviceType.objects.all()[:2]
params = {'devicetype_id': [device_types[0].pk, device_types[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_type(self):
# TODO: Support filtering for multiple values
params = {'type': PortTypeChoices.TYPE_8P8C}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_positions(self):
params = {'positions': [1, 2]}
        self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)


class DeviceBayTemplateTestCase(TestCase):
queryset = DeviceBayTemplate.objects.all()
filterset = DeviceBayTemplateFilterSet
@classmethod
def setUpTestData(cls):
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_types = (
DeviceType(manufacturer=manufacturer, model='Model 1', slug='model-1'),
DeviceType(manufacturer=manufacturer, model='Model 2', slug='model-2'),
DeviceType(manufacturer=manufacturer, model='Model 3', slug='model-3'),
)
DeviceType.objects.bulk_create(device_types)
DeviceBayTemplate.objects.bulk_create((
DeviceBayTemplate(device_type=device_types[0], name='Device Bay 1'),
DeviceBayTemplate(device_type=device_types[1], name='Device Bay 2'),
DeviceBayTemplate(device_type=device_types[2], name='Device Bay 3'),
))
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Device Bay 1', 'Device Bay 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicetype_id(self):
device_types = DeviceType.objects.all()[:2]
params = {'devicetype_id': [device_types[0].pk, device_types[1].pk]}
        self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)


class DeviceRoleTestCase(TestCase):
queryset = DeviceRole.objects.all()
filterset = DeviceRoleFilterSet
@classmethod
def setUpTestData(cls):
device_roles = (
DeviceRole(name='Device Role 1', slug='device-role-1', color='ff0000', vm_role=True),
DeviceRole(name='Device Role 2', slug='device-role-2', color='00ff00', vm_role=True),
DeviceRole(name='Device Role 3', slug='device-role-3', color='0000ff', vm_role=False),
)
DeviceRole.objects.bulk_create(device_roles)
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Device Role 1', 'Device Role 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['device-role-1', 'device-role-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_color(self):
params = {'color': ['ff0000', '00ff00']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_vm_role(self):
params = {'vm_role': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'vm_role': 'false'}
        self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)


class PlatformTestCase(TestCase):
queryset = Platform.objects.all()
filterset = PlatformFilterSet
@classmethod
def setUpTestData(cls):
manufacturers = (
Manufacturer(name='Manufacturer 1', slug='manufacturer-1'),
Manufacturer(name='Manufacturer 2', slug='manufacturer-2'),
Manufacturer(name='Manufacturer 3', slug='manufacturer-3'),
)
Manufacturer.objects.bulk_create(manufacturers)
platforms = (
Platform(name='Platform 1', slug='platform-1', manufacturer=manufacturers[0], napalm_driver='driver-1', description='A'),
Platform(name='Platform 2', slug='platform-2', manufacturer=manufacturers[1], napalm_driver='driver-2', description='B'),
Platform(name='Platform 3', slug='platform-3', manufacturer=manufacturers[2], napalm_driver='driver-3', description='C'),
)
Platform.objects.bulk_create(platforms)
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Platform 1', 'Platform 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['platform-1', 'platform-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['A', 'B']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_napalm_driver(self):
params = {'napalm_driver': ['driver-1', 'driver-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_manufacturer(self):
manufacturers = Manufacturer.objects.all()[:2]
params = {'manufacturer_id': [manufacturers[0].pk, manufacturers[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'manufacturer': [manufacturers[0].slug, manufacturers[1].slug]}
        self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)


class DeviceTestCase(TestCase):
queryset = Device.objects.all()
filterset = DeviceFilterSet
@classmethod
def setUpTestData(cls):
manufacturers = (
Manufacturer(name='Manufacturer 1', slug='manufacturer-1'),
Manufacturer(name='Manufacturer 2', slug='manufacturer-2'),
Manufacturer(name='Manufacturer 3', slug='manufacturer-3'),
)
Manufacturer.objects.bulk_create(manufacturers)
device_types = (
DeviceType(manufacturer=manufacturers[0], model='Model 1', slug='model-1', is_full_depth=True),
DeviceType(manufacturer=manufacturers[1], model='Model 2', slug='model-2', is_full_depth=True),
DeviceType(manufacturer=manufacturers[2], model='Model 3', slug='model-3', is_full_depth=False),
)
DeviceType.objects.bulk_create(device_types)
device_roles = (
DeviceRole(name='Device Role 1', slug='device-role-1'),
DeviceRole(name='Device Role 2', slug='device-role-2'),
DeviceRole(name='Device Role 3', slug='device-role-3'),
)
DeviceRole.objects.bulk_create(device_roles)
platforms = (
Platform(name='Platform 1', slug='platform-1'),
Platform(name='Platform 2', slug='platform-2'),
Platform(name='Platform 3', slug='platform-3'),
)
Platform.objects.bulk_create(platforms)
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
sites = (
Site(name='Site 1', slug='site-1', region=regions[0]),
Site(name='Site 2', slug='site-2', region=regions[1]),
Site(name='Site 3', slug='site-3', region=regions[2]),
)
Site.objects.bulk_create(sites)
rack_groups = (
RackGroup(name='Rack Group 1', slug='rack-group-1', site=sites[0]),
RackGroup(name='Rack Group 2', slug='rack-group-2', site=sites[1]),
RackGroup(name='Rack Group 3', slug='rack-group-3', site=sites[2]),
)
for rackgroup in rack_groups:
rackgroup.save()
racks = (
Rack(name='Rack 1', site=sites[0], group=rack_groups[0]),
Rack(name='Rack 2', site=sites[1], group=rack_groups[1]),
Rack(name='Rack 3', site=sites[2], group=rack_groups[2]),
)
Rack.objects.bulk_create(racks)
cluster_type = ClusterType.objects.create(name='Cluster Type 1', slug='cluster-type-1')
clusters = (
Cluster(name='Cluster 1', type=cluster_type),
Cluster(name='Cluster 2', type=cluster_type),
Cluster(name='Cluster 3', type=cluster_type),
)
Cluster.objects.bulk_create(clusters)
tenant_groups = (
TenantGroup(name='Tenant group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant group 3', slug='tenant-group-3'),
)
for tenantgroup in tenant_groups:
tenantgroup.save()
tenants = (
Tenant(name='Tenant 1', slug='tenant-1', group=tenant_groups[0]),
Tenant(name='Tenant 2', slug='tenant-2', group=tenant_groups[1]),
Tenant(name='Tenant 3', slug='tenant-3', group=tenant_groups[2]),
)
Tenant.objects.bulk_create(tenants)
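        # Three devices, each with a distinct type, role, platform, tenant, site, rack, status, and cluster; only the first has local context data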
devices = (
Device(name='Device 1', device_type=device_types[0], device_role=device_roles[0], platform=platforms[0], tenant=tenants[0], serial='ABC', asset_tag='1001', site=sites[0], rack=racks[0], position=1, face=DeviceFaceChoices.FACE_FRONT, status=DeviceStatusChoices.STATUS_ACTIVE, cluster=clusters[0], local_context_data={"foo": 123}),
Device(name='Device 2', device_type=device_types[1], device_role=device_roles[1], platform=platforms[1], tenant=tenants[1], serial='DEF', asset_tag='1002', site=sites[1], rack=racks[1], position=2, face=DeviceFaceChoices.FACE_FRONT, status=DeviceStatusChoices.STATUS_STAGED, cluster=clusters[1]),
Device(name='Device 3', device_type=device_types[2], device_role=device_roles[2], platform=platforms[2], tenant=tenants[2], serial='GHI', asset_tag='1003', site=sites[2], rack=racks[2], position=3, face=DeviceFaceChoices.FACE_REAR, status=DeviceStatusChoices.STATUS_FAILED, cluster=clusters[2]),
)
Device.objects.bulk_create(devices)
# Add components for filtering
ConsolePort.objects.bulk_create((
ConsolePort(device=devices[0], name='Console Port 1'),
ConsolePort(device=devices[1], name='Console Port 2'),
))
ConsoleServerPort.objects.bulk_create((
ConsoleServerPort(device=devices[0], name='Console Server Port 1'),
ConsoleServerPort(device=devices[1], name='Console Server Port 2'),
))
PowerPort.objects.bulk_create((
PowerPort(device=devices[0], name='Power Port 1'),
PowerPort(device=devices[1], name='Power Port 2'),
))
PowerOutlet.objects.bulk_create((
PowerOutlet(device=devices[0], name='Power Outlet 1'),
PowerOutlet(device=devices[1], name='Power Outlet 2'),
))
interfaces = (
Interface(device=devices[0], name='Interface 1', mac_address='00-00-00-00-00-01'),
Interface(device=devices[1], name='Interface 2', mac_address='00-00-00-00-00-02'),
)
Interface.objects.bulk_create(interfaces)
rear_ports = (
RearPort(device=devices[0], name='Rear Port 1', type=PortTypeChoices.TYPE_8P8C),
RearPort(device=devices[1], name='Rear Port 2', type=PortTypeChoices.TYPE_8P8C),
)
RearPort.objects.bulk_create(rear_ports)
FrontPort.objects.bulk_create((
FrontPort(device=devices[0], name='Front Port 1', type=PortTypeChoices.TYPE_8P8C, rear_port=rear_ports[0]),
FrontPort(device=devices[1], name='Front Port 2', type=PortTypeChoices.TYPE_8P8C, rear_port=rear_ports[1]),
))
DeviceBay.objects.bulk_create((
DeviceBay(device=devices[0], name='Device Bay 1'),
DeviceBay(device=devices[1], name='Device Bay 2'),
))
# Assign primary IPs for filtering
ipaddresses = (
IPAddress(address='192.0.2.1/24', assigned_object=interfaces[0]),
IPAddress(address='192.0.2.2/24', assigned_object=interfaces[1]),
)
IPAddress.objects.bulk_create(ipaddresses)
Device.objects.filter(pk=devices[0].pk).update(primary_ip4=ipaddresses[0])
Device.objects.filter(pk=devices[1].pk).update(primary_ip4=ipaddresses[1])
# VirtualChassis assignment for filtering
virtual_chassis = VirtualChassis.objects.create(master=devices[0])
Device.objects.filter(pk=devices[0].pk).update(virtual_chassis=virtual_chassis, vc_position=1, vc_priority=1)
Device.objects.filter(pk=devices[1].pk).update(virtual_chassis=virtual_chassis, vc_position=2, vc_priority=2)
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Device 1', 'Device 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_asset_tag(self):
params = {'asset_tag': ['1001', '1002']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_face(self):
params = {'face': DeviceFaceChoices.FACE_FRONT}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_position(self):
params = {'position': [1, 2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_vc_position(self):
params = {'vc_position': [1, 2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_vc_priority(self):
params = {'vc_priority': [1, 2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_manufacturer(self):
manufacturers = Manufacturer.objects.all()[:2]
params = {'manufacturer_id': [manufacturers[0].pk, manufacturers[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'manufacturer': [manufacturers[0].slug, manufacturers[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicetype(self):
device_types = DeviceType.objects.all()[:2]
params = {'device_type_id': [device_types[0].pk, device_types[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_devicerole(self):
device_roles = DeviceRole.objects.all()[:2]
params = {'role_id': [device_roles[0].pk, device_roles[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'role': [device_roles[0].slug, device_roles[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_platform(self):
platforms = Platform.objects.all()[:2]
params = {'platform_id': [platforms[0].pk, platforms[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'platform': [platforms[0].slug, platforms[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_rackgroup(self):
rack_groups = RackGroup.objects.all()[:2]
params = {'rack_group_id': [rack_groups[0].pk, rack_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_rack(self):
racks = Rack.objects.all()[:2]
params = {'rack_id': [racks[0].pk, racks[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_cluster(self):
clusters = Cluster.objects.all()[:2]
params = {'cluster_id': [clusters[0].pk, clusters[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_model(self):
params = {'model': ['model-1', 'model-2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_status(self):
params = {'status': [DeviceStatusChoices.STATUS_ACTIVE, DeviceStatusChoices.STATUS_STAGED]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_is_full_depth(self):
params = {'is_full_depth': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'is_full_depth': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_mac_address(self):
params = {'mac_address': ['00-00-00-00-00-01', '00-00-00-00-00-02']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_serial(self):
params = {'serial': 'ABC'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
params = {'serial': 'abc'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_has_primary_ip(self):
params = {'has_primary_ip': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'has_primary_ip': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_virtual_chassis_id(self):
params = {'virtual_chassis_id': [VirtualChassis.objects.first().pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_virtual_chassis_member(self):
params = {'virtual_chassis_member': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'virtual_chassis_member': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_console_ports(self):
params = {'console_ports': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'console_ports': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_console_server_ports(self):
params = {'console_server_ports': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'console_server_ports': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_power_ports(self):
params = {'power_ports': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'power_ports': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_power_outlets(self):
params = {'power_outlets': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'power_outlets': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_interfaces(self):
params = {'interfaces': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'interfaces': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_pass_through_ports(self):
params = {'pass_through_ports': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'pass_through_ports': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_device_bays(self):
params = {'device_bays': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device_bays': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_local_context_data(self):
params = {'local_context_data': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
params = {'local_context_data': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_tenant(self):
tenants = Tenant.objects.all()[:2]
params = {'tenant_id': [tenants[0].pk, tenants[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'tenant': [tenants[0].slug, tenants[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_tenant_group(self):
tenant_groups = TenantGroup.objects.all()[:2]
params = {'tenant_group_id': [tenant_groups[0].pk, tenant_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'tenant_group': [tenant_groups[0].slug, tenant_groups[1].slug]}
        self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)


class ConsolePortTestCase(TestCase):
queryset = ConsolePort.objects.all()
filterset = ConsolePortFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
sites = Site.objects.bulk_create((
Site(name='Site 1', slug='site-1', region=regions[0]),
Site(name='Site 2', slug='site-2', region=regions[1]),
Site(name='Site 3', slug='site-3', region=regions[2]),
Site(name='Site X', slug='site-x'),
))
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0]),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[1]),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[2]),
Device(name=None, device_type=device_type, device_role=device_role, site=sites[3]), # For cable connections
)
Device.objects.bulk_create(devices)
console_server_ports = (
ConsoleServerPort(device=devices[3], name='Console Server Port 1'),
ConsoleServerPort(device=devices[3], name='Console Server Port 2'),
)
ConsoleServerPort.objects.bulk_create(console_server_ports)
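        # Console ports under test; the first two are cabled to the peer device below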
console_ports = (
ConsolePort(device=devices[0], name='Console Port 1', description='First'),
ConsolePort(device=devices[1], name='Console Port 2', description='Second'),
ConsolePort(device=devices[2], name='Console Port 3', description='Third'),
)
ConsolePort.objects.bulk_create(console_ports)
# Cables
Cable(termination_a=console_ports[0], termination_b=console_server_ports[0]).save()
Cable(termination_a=console_ports[1], termination_b=console_server_ports[1]).save()
# Third port is not connected
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Console Port 1', 'Console Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['First', 'Second']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_connected(self):
params = {'connected': True}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'connected': False}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_device(self):
devices = Device.objects.all()[:2]
params = {'device_id': [devices[0].pk, devices[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device': [devices[0].name, devices[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_cabled(self):
params = {'cabled': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'cabled': 'false'}
        self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)


class ConsoleServerPortTestCase(TestCase):
queryset = ConsoleServerPort.objects.all()
filterset = ConsoleServerPortFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
sites = Site.objects.bulk_create((
Site(name='Site 1', slug='site-1', region=regions[0]),
Site(name='Site 2', slug='site-2', region=regions[1]),
Site(name='Site 3', slug='site-3', region=regions[2]),
Site(name='Site X', slug='site-x'),
))
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0]),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[1]),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[2]),
Device(name=None, device_type=device_type, device_role=device_role, site=sites[3]), # For cable connections
)
Device.objects.bulk_create(devices)
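        # Peer console ports on the unnamed device, used only as cable terminations (their names mirror the ports under test)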
console_ports = (
ConsolePort(device=devices[3], name='Console Server Port 1'),
ConsolePort(device=devices[3], name='Console Server Port 2'),
)
ConsolePort.objects.bulk_create(console_ports)
console_server_ports = (
ConsoleServerPort(device=devices[0], name='Console Server Port 1', description='First'),
ConsoleServerPort(device=devices[1], name='Console Server Port 2', description='Second'),
ConsoleServerPort(device=devices[2], name='Console Server Port 3', description='Third'),
)
ConsoleServerPort.objects.bulk_create(console_server_ports)
# Cables
Cable(termination_a=console_server_ports[0], termination_b=console_ports[0]).save()
Cable(termination_a=console_server_ports[1], termination_b=console_ports[1]).save()
# Third port is not connected
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Console Server Port 1', 'Console Server Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['First', 'Second']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_connected(self):
params = {'connected': True}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'connected': False}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_device(self):
devices = Device.objects.all()[:2]
params = {'device_id': [devices[0].pk, devices[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device': [devices[0].name, devices[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_cabled(self):
params = {'cabled': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'cabled': 'false'}
        self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)


class PowerPortTestCase(TestCase):
queryset = PowerPort.objects.all()
filterset = PowerPortFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
sites = Site.objects.bulk_create((
Site(name='Site 1', slug='site-1', region=regions[0]),
Site(name='Site 2', slug='site-2', region=regions[1]),
Site(name='Site 3', slug='site-3', region=regions[2]),
Site(name='Site X', slug='site-x'),
))
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0]),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[1]),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[2]),
Device(name=None, device_type=device_type, device_role=device_role, site=sites[3]), # For cable connections
)
Device.objects.bulk_create(devices)
power_outlets = (
PowerOutlet(device=devices[3], name='Power Outlet 1'),
PowerOutlet(device=devices[3], name='Power Outlet 2'),
)
PowerOutlet.objects.bulk_create(power_outlets)
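        # Power ports under test with distinct draw values; the first two are cabled to the outlets above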
power_ports = (
PowerPort(device=devices[0], name='Power Port 1', maximum_draw=100, allocated_draw=50, description='First'),
PowerPort(device=devices[1], name='Power Port 2', maximum_draw=200, allocated_draw=100, description='Second'),
PowerPort(device=devices[2], name='Power Port 3', maximum_draw=300, allocated_draw=150, description='Third'),
)
PowerPort.objects.bulk_create(power_ports)
# Cables
Cable(termination_a=power_ports[0], termination_b=power_outlets[0]).save()
Cable(termination_a=power_ports[1], termination_b=power_outlets[1]).save()
# Third port is not connected
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Power Port 1', 'Power Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['First', 'Second']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_maximum_draw(self):
params = {'maximum_draw': [100, 200]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_allocated_draw(self):
params = {'allocated_draw': [50, 100]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_connected(self):
params = {'connected': True}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'connected': False}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_device(self):
devices = Device.objects.all()[:2]
params = {'device_id': [devices[0].pk, devices[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device': [devices[0].name, devices[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_cabled(self):
params = {'cabled': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'cabled': 'false'}
        self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)


class PowerOutletTestCase(TestCase):
queryset = PowerOutlet.objects.all()
filterset = PowerOutletFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
sites = Site.objects.bulk_create((
Site(name='Site 1', slug='site-1', region=regions[0]),
Site(name='Site 2', slug='site-2', region=regions[1]),
Site(name='Site 3', slug='site-3', region=regions[2]),
Site(name='Site X', slug='site-x'),
))
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0]),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[1]),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[2]),
Device(name=None, device_type=device_type, device_role=device_role, site=sites[3]), # For cable connections
)
Device.objects.bulk_create(devices)
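        # Peer power ports on the unnamed device, used only as cable terminations (their names mirror the outlets under test)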
power_ports = (
PowerPort(device=devices[3], name='Power Outlet 1'),
PowerPort(device=devices[3], name='Power Outlet 2'),
)
PowerPort.objects.bulk_create(power_ports)
power_outlets = (
PowerOutlet(device=devices[0], name='Power Outlet 1', feed_leg=PowerOutletFeedLegChoices.FEED_LEG_A, description='First'),
PowerOutlet(device=devices[1], name='Power Outlet 2', feed_leg=PowerOutletFeedLegChoices.FEED_LEG_B, description='Second'),
PowerOutlet(device=devices[2], name='Power Outlet 3', feed_leg=PowerOutletFeedLegChoices.FEED_LEG_C, description='Third'),
)
PowerOutlet.objects.bulk_create(power_outlets)
# Cables
Cable(termination_a=power_outlets[0], termination_b=power_ports[0]).save()
Cable(termination_a=power_outlets[1], termination_b=power_ports[1]).save()
# Third port is not connected
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Power Outlet 1', 'Power Outlet 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['First', 'Second']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_feed_leg(self):
# TODO: Support filtering for multiple values
params = {'feed_leg': PowerOutletFeedLegChoices.FEED_LEG_A}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_connected(self):
params = {'connected': True}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'connected': False}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_device(self):
devices = Device.objects.all()[:2]
params = {'device_id': [devices[0].pk, devices[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device': [devices[0].name, devices[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_cabled(self):
params = {'cabled': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'cabled': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
class InterfaceTestCase(TestCase):
queryset = Interface.objects.all()
filterset = InterfaceFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
sites = Site.objects.bulk_create((
Site(name='Site 1', slug='site-1', region=regions[0]),
Site(name='Site 2', slug='site-2', region=regions[1]),
Site(name='Site 3', slug='site-3', region=regions[2]),
Site(name='Site X', slug='site-x'),
))
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0]),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[1]),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[2]),
Device(name=None, device_type=device_type, device_role=device_role, site=sites[3]), # For cable connections
)
Device.objects.bulk_create(devices)
interfaces = (
Interface(device=devices[0], name='Interface 1', type=InterfaceTypeChoices.TYPE_1GE_SFP, enabled=True, mgmt_only=True, mtu=100, mode=InterfaceModeChoices.MODE_ACCESS, mac_address='00-00-00-00-00-01', description='First'),
Interface(device=devices[1], name='Interface 2', type=InterfaceTypeChoices.TYPE_1GE_GBIC, enabled=True, mgmt_only=True, mtu=200, mode=InterfaceModeChoices.MODE_TAGGED, mac_address='00-00-00-00-00-02', description='Second'),
Interface(device=devices[2], name='Interface 3', type=InterfaceTypeChoices.TYPE_1GE_FIXED, enabled=False, mgmt_only=False, mtu=300, mode=InterfaceModeChoices.MODE_TAGGED_ALL, mac_address='00-00-00-00-00-03', description='Third'),
Interface(device=devices[3], name='Interface 4', type=InterfaceTypeChoices.TYPE_OTHER, enabled=True, mgmt_only=True),
Interface(device=devices[3], name='Interface 5', type=InterfaceTypeChoices.TYPE_OTHER, enabled=True, mgmt_only=True),
Interface(device=devices[3], name='Interface 6', type=InterfaceTypeChoices.TYPE_OTHER, enabled=False, mgmt_only=False),
)
Interface.objects.bulk_create(interfaces)
# Cables
Cable(termination_a=interfaces[0], termination_b=interfaces[3]).save()
Cable(termination_a=interfaces[1], termination_b=interfaces[4]).save()
# Third pair is not connected
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Interface 1', 'Interface 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_connected(self):
params = {'connected': True}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'connected': False}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_enabled(self):
params = {'enabled': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'enabled': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_mtu(self):
params = {'mtu': [100, 200]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_mgmt_only(self):
params = {'mgmt_only': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'mgmt_only': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_mode(self):
params = {'mode': InterfaceModeChoices.MODE_ACCESS}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_description(self):
params = {'description': ['First', 'Second']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_device(self):
devices = Device.objects.all()[:2]
params = {'device_id': [devices[0].pk, devices[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device': [devices[0].name, devices[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_cabled(self):
params = {'cabled': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'cabled': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_kind(self):
params = {'kind': 'physical'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 6)
params = {'kind': 'virtual'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 0)
def test_mac_address(self):
params = {'mac_address': ['00-00-00-00-00-01', '00-00-00-00-00-02']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_type(self):
params = {'type': [InterfaceTypeChoices.TYPE_1GE_FIXED, InterfaceTypeChoices.TYPE_1GE_GBIC]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class FrontPortTestCase(TestCase):
queryset = FrontPort.objects.all()
filterset = FrontPortFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
sites = Site.objects.bulk_create((
Site(name='Site 1', slug='site-1', region=regions[0]),
Site(name='Site 2', slug='site-2', region=regions[1]),
Site(name='Site 3', slug='site-3', region=regions[2]),
Site(name='Site X', slug='site-x'),
))
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0]),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[1]),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[2]),
Device(name=None, device_type=device_type, device_role=device_role, site=sites[3]), # For cable connections
)
Device.objects.bulk_create(devices)
rear_ports = (
RearPort(device=devices[0], name='Rear Port 1', type=PortTypeChoices.TYPE_8P8C, positions=6),
RearPort(device=devices[1], name='Rear Port 2', type=PortTypeChoices.TYPE_8P8C, positions=6),
RearPort(device=devices[2], name='Rear Port 3', type=PortTypeChoices.TYPE_8P8C, positions=6),
RearPort(device=devices[3], name='Rear Port 4', type=PortTypeChoices.TYPE_8P8C, positions=6),
RearPort(device=devices[3], name='Rear Port 5', type=PortTypeChoices.TYPE_8P8C, positions=6),
RearPort(device=devices[3], name='Rear Port 6', type=PortTypeChoices.TYPE_8P8C, positions=6),
)
RearPort.objects.bulk_create(rear_ports)
front_ports = (
FrontPort(device=devices[0], name='Front Port 1', type=PortTypeChoices.TYPE_8P8C, rear_port=rear_ports[0], rear_port_position=1, description='First'),
FrontPort(device=devices[1], name='Front Port 2', type=PortTypeChoices.TYPE_110_PUNCH, rear_port=rear_ports[1], rear_port_position=2, description='Second'),
FrontPort(device=devices[2], name='Front Port 3', type=PortTypeChoices.TYPE_BNC, rear_port=rear_ports[2], rear_port_position=3, description='Third'),
FrontPort(device=devices[3], name='Front Port 4', type=PortTypeChoices.TYPE_FC, rear_port=rear_ports[3], rear_port_position=1),
FrontPort(device=devices[3], name='Front Port 5', type=PortTypeChoices.TYPE_FC, rear_port=rear_ports[4], rear_port_position=1),
FrontPort(device=devices[3], name='Front Port 6', type=PortTypeChoices.TYPE_FC, rear_port=rear_ports[5], rear_port_position=1),
)
FrontPort.objects.bulk_create(front_ports)
# Cables
Cable(termination_a=front_ports[0], termination_b=front_ports[3]).save()
Cable(termination_a=front_ports[1], termination_b=front_ports[4]).save()
# Third port is not connected
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Front Port 1', 'Front Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_type(self):
# TODO: Test for multiple values
params = {'type': PortTypeChoices.TYPE_8P8C}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_description(self):
params = {'description': ['First', 'Second']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_device(self):
devices = Device.objects.all()[:2]
params = {'device_id': [devices[0].pk, devices[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device': [devices[0].name, devices[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_cabled(self):
params = {'cabled': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'cabled': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class RearPortTestCase(TestCase):
queryset = RearPort.objects.all()
filterset = RearPortFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
sites = Site.objects.bulk_create((
Site(name='Site 1', slug='site-1', region=regions[0]),
Site(name='Site 2', slug='site-2', region=regions[1]),
Site(name='Site 3', slug='site-3', region=regions[2]),
Site(name='Site X', slug='site-x'),
))
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0]),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[1]),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[2]),
Device(name=None, device_type=device_type, device_role=device_role, site=sites[3]), # For cable connections
)
Device.objects.bulk_create(devices)
rear_ports = (
RearPort(device=devices[0], name='Rear Port 1', type=PortTypeChoices.TYPE_8P8C, positions=1, description='First'),
RearPort(device=devices[1], name='Rear Port 2', type=PortTypeChoices.TYPE_110_PUNCH, positions=2, description='Second'),
RearPort(device=devices[2], name='Rear Port 3', type=PortTypeChoices.TYPE_BNC, positions=3, description='Third'),
RearPort(device=devices[3], name='Rear Port 4', type=PortTypeChoices.TYPE_FC, positions=4),
RearPort(device=devices[3], name='Rear Port 5', type=PortTypeChoices.TYPE_FC, positions=5),
RearPort(device=devices[3], name='Rear Port 6', type=PortTypeChoices.TYPE_FC, positions=6),
)
RearPort.objects.bulk_create(rear_ports)
# Cables
Cable(termination_a=rear_ports[0], termination_b=rear_ports[3]).save()
Cable(termination_a=rear_ports[1], termination_b=rear_ports[4]).save()
# Third port is not connected
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Rear Port 1', 'Rear Port 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_type(self):
# TODO: Test for multiple values
params = {'type': PortTypeChoices.TYPE_8P8C}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_positions(self):
params = {'positions': [1, 2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['First', 'Second']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_device(self):
devices = Device.objects.all()[:2]
params = {'device_id': [devices[0].pk, devices[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device': [devices[0].name, devices[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_cabled(self):
params = {'cabled': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'cabled': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class DeviceBayTestCase(TestCase):
queryset = DeviceBay.objects.all()
filterset = DeviceBayFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
sites = Site.objects.bulk_create((
Site(name='Site 1', slug='site-1', region=regions[0]),
Site(name='Site 2', slug='site-2', region=regions[1]),
Site(name='Site 3', slug='site-3', region=regions[2]),
Site(name='Site X', slug='site-x'),
))
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0]),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[1]),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[2]),
)
Device.objects.bulk_create(devices)
device_bays = (
DeviceBay(device=devices[0], name='Device Bay 1', description='First'),
DeviceBay(device=devices[1], name='Device Bay 2', description='Second'),
DeviceBay(device=devices[2], name='Device Bay 3', description='Third'),
)
DeviceBay.objects.bulk_create(device_bays)
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Device Bay 1', 'Device Bay 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_description(self):
params = {'description': ['First', 'Second']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_device(self):
devices = Device.objects.all()[:2]
params = {'device_id': [devices[0].pk, devices[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device': [devices[0].name, devices[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class InventoryItemTestCase(TestCase):
queryset = InventoryItem.objects.all()
filterset = InventoryItemFilterSet
@classmethod
def setUpTestData(cls):
manufacturers = (
Manufacturer(name='Manufacturer 1', slug='manufacturer-1'),
Manufacturer(name='Manufacturer 2', slug='manufacturer-2'),
Manufacturer(name='Manufacturer 3', slug='manufacturer-3'),
)
Manufacturer.objects.bulk_create(manufacturers)
device_type = DeviceType.objects.create(manufacturer=manufacturers[0], model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
sites = (
Site(name='Site 1', slug='site-1', region=regions[0]),
Site(name='Site 2', slug='site-2', region=regions[1]),
Site(name='Site 3', slug='site-3', region=regions[2]),
)
Site.objects.bulk_create(sites)
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0]),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[1]),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[2]),
)
Device.objects.bulk_create(devices)
inventory_items = (
InventoryItem(device=devices[0], manufacturer=manufacturers[0], name='Inventory Item 1', part_id='1001', serial='ABC', asset_tag='1001', discovered=True, description='First'),
InventoryItem(device=devices[1], manufacturer=manufacturers[1], name='Inventory Item 2', part_id='1002', serial='DEF', asset_tag='1002', discovered=True, description='Second'),
InventoryItem(device=devices[2], manufacturer=manufacturers[2], name='Inventory Item 3', part_id='1003', serial='GHI', asset_tag='1003', discovered=False, description='Third'),
)
for i in inventory_items:
i.save()
child_inventory_items = (
InventoryItem(device=devices[0], name='Inventory Item 1A', parent=inventory_items[0]),
InventoryItem(device=devices[1], name='Inventory Item 2A', parent=inventory_items[1]),
InventoryItem(device=devices[2], name='Inventory Item 3A', parent=inventory_items[2]),
)
for i in child_inventory_items:
i.save()
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Inventory Item 1', 'Inventory Item 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_part_id(self):
params = {'part_id': ['1001', '1002']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_asset_tag(self):
params = {'asset_tag': ['1001', '1002']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_discovered(self):
# TODO: Fix boolean value
params = {'discovered': True}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'discovered': False}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_device(self):
# TODO: Allow multiple values
device = Device.objects.first()
params = {'device_id': device.pk}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'device': device.name}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_parent_id(self):
parent_items = InventoryItem.objects.filter(parent__isnull=True)[:2]
params = {'parent_id': [parent_items[0].pk, parent_items[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_manufacturer(self):
manufacturers = Manufacturer.objects.all()[:2]
params = {'manufacturer_id': [manufacturers[0].pk, manufacturers[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'manufacturer': [manufacturers[0].slug, manufacturers[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_serial(self):
params = {'serial': 'ABC'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
params = {'serial': 'abc'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
class VirtualChassisTestCase(TestCase):
queryset = VirtualChassis.objects.all()
filterset = VirtualChassisFilterSet
@classmethod
def setUpTestData(cls):
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
sites = (
Site(name='Site 1', slug='site-1', region=regions[0]),
Site(name='Site 2', slug='site-2', region=regions[1]),
Site(name='Site 3', slug='site-3', region=regions[2]),
)
Site.objects.bulk_create(sites)
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0], vc_position=1),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[0], vc_position=2),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[1], vc_position=1),
Device(name='Device 4', device_type=device_type, device_role=device_role, site=sites[1], vc_position=2),
Device(name='Device 5', device_type=device_type, device_role=device_role, site=sites[2], vc_position=1),
Device(name='Device 6', device_type=device_type, device_role=device_role, site=sites[2], vc_position=2),
)
Device.objects.bulk_create(devices)
virtual_chassis = (
VirtualChassis(name='VC 1', master=devices[0], domain='Domain 1'),
VirtualChassis(name='VC 2', master=devices[2], domain='Domain 2'),
VirtualChassis(name='VC 3', master=devices[4], domain='Domain 3'),
)
VirtualChassis.objects.bulk_create(virtual_chassis)
Device.objects.filter(pk=devices[1].pk).update(virtual_chassis=virtual_chassis[0])
Device.objects.filter(pk=devices[3].pk).update(virtual_chassis=virtual_chassis[1])
Device.objects.filter(pk=devices[5].pk).update(virtual_chassis=virtual_chassis[2])
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_domain(self):
params = {'domain': ['Domain 1', 'Domain 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_master(self):
masters = Device.objects.all()
params = {'master_id': [masters[0].pk, masters[2].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'master': [masters[0].name, masters[2].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['VC 1', 'VC 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class CableTestCase(TestCase):
queryset = Cable.objects.all()
filterset = CableFilterSet
@classmethod
def setUpTestData(cls):
sites = (
Site(name='Site 1', slug='site-1'),
Site(name='Site 2', slug='site-2'),
Site(name='Site 3', slug='site-3'),
)
Site.objects.bulk_create(sites)
tenants = (
Tenant(name='Tenant 1', slug='tenant-1'),
Tenant(name='Tenant 2', slug='tenant-2'),
)
Tenant.objects.bulk_create(tenants)
racks = (
Rack(name='Rack 1', site=sites[0]),
Rack(name='Rack 2', site=sites[1]),
Rack(name='Rack 3', site=sites[2]),
)
Rack.objects.bulk_create(racks)
manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model 1', slug='model-1')
device_role = DeviceRole.objects.create(name='Device Role 1', slug='device-role-1')
devices = (
Device(name='Device 1', device_type=device_type, device_role=device_role, site=sites[0], rack=racks[0], position=1, tenant=tenants[0]),
Device(name='Device 2', device_type=device_type, device_role=device_role, site=sites[0], rack=racks[0], position=2, tenant=tenants[0]),
Device(name='Device 3', device_type=device_type, device_role=device_role, site=sites[1], rack=racks[1], position=1, tenant=tenants[1]),
Device(name='Device 4', device_type=device_type, device_role=device_role, site=sites[1], rack=racks[1], position=2),
Device(name='Device 5', device_type=device_type, device_role=device_role, site=sites[2], rack=racks[2], position=1),
Device(name='Device 6', device_type=device_type, device_role=device_role, site=sites[2], rack=racks[2], position=2),
)
Device.objects.bulk_create(devices)
interfaces = (
Interface(device=devices[0], name='Interface 1', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[0], name='Interface 2', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[1], name='Interface 3', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[1], name='Interface 4', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[2], name='Interface 5', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[2], name='Interface 6', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[3], name='Interface 7', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[3], name='Interface 8', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[4], name='Interface 9', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[4], name='Interface 10', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[5], name='Interface 11', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
Interface(device=devices[5], name='Interface 12', type=InterfaceTypeChoices.TYPE_1GE_FIXED),
)
Interface.objects.bulk_create(interfaces)
# Cables
Cable(termination_a=interfaces[1], termination_b=interfaces[2], label='Cable 1', type=CableTypeChoices.TYPE_CAT3, status=CableStatusChoices.STATUS_CONNECTED, color='aa1409', length=10, length_unit=CableLengthUnitChoices.UNIT_FOOT).save()
Cable(termination_a=interfaces[3], termination_b=interfaces[4], label='Cable 2', type=CableTypeChoices.TYPE_CAT3, status=CableStatusChoices.STATUS_CONNECTED, color='aa1409', length=20, length_unit=CableLengthUnitChoices.UNIT_FOOT).save()
Cable(termination_a=interfaces[5], termination_b=interfaces[6], label='Cable 3', type=CableTypeChoices.TYPE_CAT5E, status=CableStatusChoices.STATUS_CONNECTED, color='f44336', length=30, length_unit=CableLengthUnitChoices.UNIT_FOOT).save()
Cable(termination_a=interfaces[7], termination_b=interfaces[8], label='Cable 4', type=CableTypeChoices.TYPE_CAT5E, status=CableStatusChoices.STATUS_PLANNED, color='f44336', length=40, length_unit=CableLengthUnitChoices.UNIT_FOOT).save()
Cable(termination_a=interfaces[9], termination_b=interfaces[10], label='Cable 5', type=CableTypeChoices.TYPE_CAT6, status=CableStatusChoices.STATUS_PLANNED, color='e91e63', length=10, length_unit=CableLengthUnitChoices.UNIT_METER).save()
Cable(termination_a=interfaces[11], termination_b=interfaces[0], label='Cable 6', type=CableTypeChoices.TYPE_CAT6, status=CableStatusChoices.STATUS_PLANNED, color='e91e63', length=20, length_unit=CableLengthUnitChoices.UNIT_METER).save()
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_label(self):
params = {'label': ['Cable 1', 'Cable 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_length(self):
params = {'length': [10, 20]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_length_unit(self):
params = {'length_unit': CableLengthUnitChoices.UNIT_FOOT}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_type(self):
params = {'type': [CableTypeChoices.TYPE_CAT3, CableTypeChoices.TYPE_CAT5E]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_status(self):
params = {'status': [CableStatusChoices.STATUS_CONNECTED]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 3)
params = {'status': [CableStatusChoices.STATUS_PLANNED]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 3)
def test_color(self):
params = {'color': ['aa1409', 'f44336']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
def test_device(self):
devices = Device.objects.all()[:2]
params = {'device_id': [devices[0].pk, devices[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 3)
params = {'device': [devices[0].name, devices[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 3)
def test_rack(self):
racks = Rack.objects.all()[:2]
params = {'rack_id': [racks[0].pk, racks[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 5)
params = {'rack': [racks[0].name, racks[1].name]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 5)
def test_site(self):
site = Site.objects.all()[:2]
params = {'site_id': [site[0].pk, site[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 5)
params = {'site': [site[0].slug, site[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 5)
def test_tenant(self):
tenant = Tenant.objects.all()[:2]
params = {'tenant_id': [tenant[0].pk, tenant[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
params = {'tenant': [tenant[0].slug, tenant[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 4)
class PowerPanelTestCase(TestCase):
queryset = PowerPanel.objects.all()
filterset = PowerPanelFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
sites = (
Site(name='Site 1', slug='site-1', region=regions[0]),
Site(name='Site 2', slug='site-2', region=regions[1]),
Site(name='Site 3', slug='site-3', region=regions[2]),
)
Site.objects.bulk_create(sites)
rack_groups = (
RackGroup(name='Rack Group 1', slug='rack-group-1', site=sites[0]),
RackGroup(name='Rack Group 2', slug='rack-group-2', site=sites[1]),
RackGroup(name='Rack Group 3', slug='rack-group-3', site=sites[2]),
)
for rackgroup in rack_groups:
rackgroup.save()
power_panels = (
PowerPanel(name='Power Panel 1', site=sites[0], rack_group=rack_groups[0]),
PowerPanel(name='Power Panel 2', site=sites[1], rack_group=rack_groups[1]),
PowerPanel(name='Power Panel 3', site=sites[2], rack_group=rack_groups[2]),
)
PowerPanel.objects.bulk_create(power_panels)
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Power Panel 1', 'Power Panel 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_rack_group(self):
rack_groups = RackGroup.objects.all()[:2]
params = {'rack_group_id': [rack_groups[0].pk, rack_groups[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
class PowerFeedTestCase(TestCase):
queryset = PowerFeed.objects.all()
filterset = PowerFeedFilterSet
@classmethod
def setUpTestData(cls):
regions = (
Region(name='Region 1', slug='region-1'),
Region(name='Region 2', slug='region-2'),
Region(name='Region 3', slug='region-3'),
)
for region in regions:
region.save()
sites = (
Site(name='Site 1', slug='site-1', region=regions[0]),
Site(name='Site 2', slug='site-2', region=regions[1]),
Site(name='Site 3', slug='site-3', region=regions[2]),
)
Site.objects.bulk_create(sites)
racks = (
Rack(name='Rack 1', site=sites[0]),
Rack(name='Rack 2', site=sites[1]),
Rack(name='Rack 3', site=sites[2]),
)
Rack.objects.bulk_create(racks)
power_panels = (
PowerPanel(name='Power Panel 1', site=sites[0]),
PowerPanel(name='Power Panel 2', site=sites[1]),
PowerPanel(name='Power Panel 3', site=sites[2]),
)
PowerPanel.objects.bulk_create(power_panels)
power_feeds = (
PowerFeed(power_panel=power_panels[0], rack=racks[0], name='Power Feed 1', status=PowerFeedStatusChoices.STATUS_ACTIVE, type=PowerFeedTypeChoices.TYPE_PRIMARY, supply=PowerFeedSupplyChoices.SUPPLY_AC, phase=PowerFeedPhaseChoices.PHASE_3PHASE, voltage=100, amperage=100, max_utilization=10),
PowerFeed(power_panel=power_panels[1], rack=racks[1], name='Power Feed 2', status=PowerFeedStatusChoices.STATUS_FAILED, type=PowerFeedTypeChoices.TYPE_PRIMARY, supply=PowerFeedSupplyChoices.SUPPLY_AC, phase=PowerFeedPhaseChoices.PHASE_3PHASE, voltage=200, amperage=200, max_utilization=20),
PowerFeed(power_panel=power_panels[2], rack=racks[2], name='Power Feed 3', status=PowerFeedStatusChoices.STATUS_OFFLINE, type=PowerFeedTypeChoices.TYPE_REDUNDANT, supply=PowerFeedSupplyChoices.SUPPLY_DC, phase=PowerFeedPhaseChoices.PHASE_SINGLE, voltage=300, amperage=300, max_utilization=30),
)
PowerFeed.objects.bulk_create(power_feeds)
manufacturer = Manufacturer.objects.create(name='Manufacturer', slug='manufacturer')
device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Model', slug='model')
device_role = DeviceRole.objects.create(name='Device Role', slug='device-role')
device = Device.objects.create(name='Device', device_type=device_type, device_role=device_role, site=sites[0])
power_ports = [
PowerPort(device=device, name='Power Port 1'),
PowerPort(device=device, name='Power Port 2'),
]
PowerPort.objects.bulk_create(power_ports)
Cable(termination_a=power_feeds[0], termination_b=power_ports[0]).save()
Cable(termination_a=power_feeds[1], termination_b=power_ports[1]).save()
def test_id(self):
params = {'id': self.queryset.values_list('pk', flat=True)[:2]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Power Feed 1', 'Power Feed 2']}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_status(self):
# TODO: Test for multiple values
params = {'status': PowerFeedStatusChoices.STATUS_ACTIVE}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_type(self):
params = {'type': PowerFeedTypeChoices.TYPE_PRIMARY}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_supply(self):
params = {'supply': PowerFeedSupplyChoices.SUPPLY_AC}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_phase(self):
params = {'phase': PowerFeedPhaseChoices.PHASE_3PHASE}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_voltage(self):
params = {'voltage': [100, 200]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_amperage(self):
params = {'amperage': [100, 200]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_max_utilization(self):
params = {'max_utilization': [10, 20]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_region(self):
regions = Region.objects.all()[:2]
params = {'region_id': [regions[0].pk, regions[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'region': [regions[0].slug, regions[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_site(self):
sites = Site.objects.all()[:2]
params = {'site_id': [sites[0].pk, sites[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'site': [sites[0].slug, sites[1].slug]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_power_panel_id(self):
power_panels = PowerPanel.objects.all()[:2]
params = {'power_panel_id': [power_panels[0].pk, power_panels[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_rack_id(self):
racks = Rack.objects.all()[:2]
params = {'rack_id': [racks[0].pk, racks[1].pk]}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
def test_cabled(self):
params = {'cabled': 'true'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'cabled': 'false'}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
def test_connected(self):
params = {'connected': True}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
params = {'connected': False}
self.assertEqual(self.filterset(params, self.queryset).qs.count(), 1)
# TODO: Connection filters
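# Every test above repeats a single assertion pattern; a condensed, hypothetical helper
# expressing it (it assumes only the `filterset` and `queryset` TestCase attributes
# already used throughout this module) could look like:
def assert_filter_count(testcase, params, expected):
    """Apply testcase.filterset(params, testcase.queryset) and check the filtered row count."""
    testcase.assertEqual(testcase.filterset(params, testcase.queryset).qs.count(), expected)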
| 47.242315
| 389
| 0.644685
|
ba5c05eb5d92898744a457c38f4d57748d954efa
| 1,870
|
py
|
Python
|
nebulae/astrobase/hangar/ImgGen/resgan.py
|
SeriaZheng/Nebulae
|
27a0b78560e66f6ad6540ee7e04c29e03e632bbd
|
[
"MIT"
] | 2
|
2021-12-24T08:28:44.000Z
|
2022-01-05T09:03:32.000Z
|
nebulae/astrobase/hangar/ImgGen/resgan.py
|
SeriaZheng/Nebulae
|
27a0b78560e66f6ad6540ee7e04c29e03e632bbd
|
[
"MIT"
] | null | null | null |
nebulae/astrobase/hangar/ImgGen/resgan.py
|
SeriaZheng/Nebulae
|
27a0b78560e66f6ad6540ee7e04c29e03e632bbd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
garage
Created by Seria at 03/01/2019 8:32 PM
Email: zzqsummerai@yeah.net
_ooOoo_
o888888888o
o88`_ . _`88o
(| 0 0 |)
                     O \   =   / O
                  _____/`-----'\_____
                .'   \||  _ _  ||/   `.
| _ ||| | ||| _ |
| | \\ // | |
| | \-----/ | |
\ .\ ___/- -\___ /. /
,--- / ___\<|>/___ \ ---,
| |: \ \ / / :| |
            `\--\_    -. ___ .-    _/--/'
=========== \__ NOBUG __/ ===========
'''
# -*- coding:utf-8 -*-
from ... import dock
from .architect import ResG, ResD, BN
class Discriminator(dock.Craft):
def __init__(self, in_shape, base_chs, norm_fn, attention, spec_norm, w_init, scope='DSC'):
super(Discriminator, self).__init__(scope)
H, W, C = in_shape
min_size = min(H, W)
factor = {128: base_chs / 16,
256: base_chs / 64}
self.backbone = ResD(in_shape, base_chs, norm_fn, attention, spec_norm, w_init)
if spec_norm:
self.cls = dock.SN(dock.Dense(int(H * W * factor[min_size]), 1))
else:
self.cls = dock.Dense(int(H * W * factor[min_size]), 1)
def run(self, x):
x = self.backbone(x)
self['out'] = self.cls(x)
return self['out']
class ResGAN(dock.Craft):
def __init__(self, in_shape, latent_dim=128, base_chs=64, norm_fn=BN, attention=False,
spec_norm=False, w_init=dock.XavierNorm(), scope='RESGAN'):
super(ResGAN, self).__init__(scope)
self.G = ResG(in_shape, latent_dim, base_chs, norm_fn, attention, spec_norm, w_init)
self.D = Discriminator(in_shape, base_chs, norm_fn, attention, spec_norm, w_init)
def run(self):
pass
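# A minimal, hypothetical usage sketch: it exercises only the constructor signature
# defined above and assumes the nebulae dock framework and the ResG/ResD architectures
# from .architect are importable in the running environment; it is not a training loop.
if __name__ == '__main__':
    # in_shape is (H, W, C); min(H, W) must be 128 or 256 to index the Discriminator's factor table
    gan = ResGAN(in_shape=(128, 128, 3), latent_dim=128, base_chs=64)
    generator, discriminator = gan.G, gan.D  # sub-crafts built in __init__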
| 32.241379
| 95
| 0.481283
|
ccec35b07ac355abec681a1cdb6c77b7f6e47ca1
| 2,982
|
py
|
Python
|
tests/profile/simple_program_fork.py
|
zhammer/dd-trace-py
|
4c30f6e36bfa34a63cd9b6884677c977f76d2a01
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/profile/simple_program_fork.py
|
zhammer/dd-trace-py
|
4c30f6e36bfa34a63cd9b6884677c977f76d2a01
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/profile/simple_program_fork.py
|
zhammer/dd-trace-py
|
4c30f6e36bfa34a63cd9b6884677c977f76d2a01
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
import os
import sys
import threading
import ddtrace.profile.auto
import ddtrace.profile.bootstrap
import ddtrace.profile.profiler
from ddtrace.profile.collector import memory
from ddtrace.profile.collector import stack
from ddtrace.profile.collector import threading as cthreading
def _allocate_mem():
# Do some serious memory allocations!
for x in range(5000000):
object()
lock = threading.Lock()
lock.acquire()
test_lock_name = "simple_program_fork.py:19"
_ = _allocate_mem()
assert ddtrace.profile.bootstrap.profiler.status == ddtrace.profile.profiler.ProfilerStatus.RUNNING
parent_recorder = list(ddtrace.profile.bootstrap.profiler.recorders)[0]
child_pid = os.fork()
if child_pid == 0:
# Child
    # This is the first thing done on Python 3.7 and later, so mimic it here
if sys.version_info[:2] < (3, 7):
ddtrace.profile.auto.start_profiler()
recorder = list(ddtrace.profile.bootstrap.profiler.recorders)[0]
assert recorder is not parent_recorder
release_events_nb = len(recorder.events[cthreading.LockReleaseEvent])
# Release it
lock.release()
# We don't track it
assert test_lock_name not in set(e.lock_name for e in recorder.events[cthreading.LockReleaseEvent])
assert release_events_nb == len(recorder.events[cthreading.LockReleaseEvent])
# We track this one though
lock = threading.Lock()
test_lock_name = "simple_program_fork.py:50"
assert test_lock_name not in set(e.lock_name for e in recorder.events[cthreading.LockAcquireEvent])
lock.acquire()
assert test_lock_name in set(e.lock_name for e in recorder.events[cthreading.LockAcquireEvent])
assert test_lock_name not in set(e.lock_name for e in recorder.events[cthreading.LockReleaseEvent])
lock.release()
assert test_lock_name in set(e.lock_name for e in recorder.events[cthreading.LockReleaseEvent])
    # Make sure our copy of the parent recorder does not receive it, since the parent profiler has been stopped
assert test_lock_name not in set(e.lock_name for e in parent_recorder.events[cthreading.LockAcquireEvent])
assert test_lock_name not in set(e.lock_name for e in parent_recorder.events[cthreading.LockReleaseEvent])
_ = _allocate_mem()
if sys.version_info[0] >= 3:
assert recorder.events[memory.MemorySampleEvent]
assert recorder.events[stack.StackSampleEvent]
assert recorder.events[cthreading.LockAcquireEvent]
else:
recorder = list(ddtrace.profile.bootstrap.profiler.recorders)[0]
assert recorder is parent_recorder
assert test_lock_name not in set(e.lock_name for e in recorder.events[cthreading.LockReleaseEvent])
lock.release()
assert test_lock_name in set(e.lock_name for e in recorder.events[cthreading.LockReleaseEvent])
assert ddtrace.profile.bootstrap.profiler.status == ddtrace.profile.profiler.ProfilerStatus.RUNNING
print(child_pid)
pid, status = os.waitpid(child_pid, 0)
sys.exit(os.WEXITSTATUS(status))
| 38.230769
| 111
| 0.765594
|
4eb47770005be9f00c1695b77beab2803dd1dfbf
| 1,396
|
py
|
Python
|
mysite/urls.py
|
gnidoc327/django_web_dev_bookmark
|
128003c4d94ebb896138c0878ecc8c0270481bc4
|
[
"MIT"
] | null | null | null |
mysite/urls.py
|
gnidoc327/django_web_dev_bookmark
|
128003c4d94ebb896138c0878ecc8c0270481bc4
|
[
"MIT"
] | null | null | null |
mysite/urls.py
|
gnidoc327/django_web_dev_bookmark
|
128003c4d94ebb896138c0878ecc8c0270481bc4
|
[
"MIT"
] | null | null | null |
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# from bookmark.views import * - no need to import each view one by one
# from bookmark.views import BookmarkLV, BookmarkDV
# url(regex, view, kwargs=None, name=None, prefix='')
from mysite.view import HomeView
urlpatterns = [
    # include - used to pull in and reuse a URLconf defined elsewhere (a sketch of such a URLconf follows after this list)
url(r'^admin/', include(admin.site.urls)),
url(r'^$', HomeView.as_view(), name='home'),
    url(r'^bookmark/', include('bookmark.urls', namespace='bookmark')),  # added
    url(r'^blog/', include('blog.urls', namespace='blog')),  # added
# Class-based views for Bookmark app
# url(r'^bookmark/$', BookmarkLV.as_view(), name='index'),
# url(r'^bookmark/(?P<pk>\d+)/$', BookmarkDV.as_view(), name='detail'),
]
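# A hypothetical sketch of the separate bookmark/urls.py file referenced by the
# namespaced include above, reusing the class-based views commented out earlier and
# declaring app_name so that namespace='bookmark' resolves. This is the content of a
# separate file, not a continuation of mysite/urls.py:
from django.conf.urls import url
from bookmark.views import BookmarkLV, BookmarkDV

app_name = 'bookmark'
urlpatterns = [
    url(r'^$', BookmarkLV.as_view(), name='index'),
    url(r'^(?P<pk>\d+)/$', BookmarkDV.as_view(), name='detail'),
]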
| 37.72973
| 79
| 0.682665
|
a2c78b923c9ca7f55160d2da0b04b38fe207b849
| 1,853
|
py
|
Python
|
setup.py
|
kanutsanun-b/pressure2qnh
|
8283d25b1bf697a6f85ae4948fc609de1ea214c4
|
[
"MIT"
] | null | null | null |
setup.py
|
kanutsanun-b/pressure2qnh
|
8283d25b1bf697a6f85ae4948fc609de1ea214c4
|
[
"MIT"
] | null | null | null |
setup.py
|
kanutsanun-b/pressure2qnh
|
8283d25b1bf697a6f85ae4948fc609de1ea214c4
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
import os.path
setup(
name = 'pressure2qnh', # How you named your package folder (MyLib)
  packages = ['pressure2qnh'],   # Choose the same as "name"
version = '1.0.0', # Start with a small number and increase it with every change you make
  license='MIT',        # Choose a license from here: https://help.github.com/articles/licensing-a-repository
description = 'Correcting station pressure to qnh', # Give a short description about your library
  long_description='please read: https://github.com/kanutsanun-b/pressure2qnh',
author = 'Kanutsanun Bouking', # Type in your name
author_email = 'kanutsanun.b@gmail.com', # Type in your E-Mail
url = 'https://github.com/kanutsanun-b/pressure2qnh', # Provide either the link to your github or to your website
download_url = 'https://github.com/kanutsanun-b/pressure2qnh/archive/1.0.0.zip', # I explain this later on
keywords = ['pressure', 'qnh', 'Raspberry Pi','kanutsanun bouking'], # Keywords that define your package best
install_requires=[
'numpy'
],
classifiers=[
    'Development Status :: 3 - Alpha',      # Choose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
    'Intended Audience :: Developers',      # Define that your audience is developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License', # Again, pick a license
    'Programming Language :: Python :: 3',      # Specify which Python versions you want to support
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
| 57.90625
| 147
| 0.658931
|