hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8c34dd2058c81b02d9bdb503007afab45b651249 | 2,785 | py | Python | word2vec.py | SilhouettesForYou/Word2vec | 57c22f29e319362f6a63eb9d5caaa7445ff7aa5a | [
"MIT"
] | 1 | 2017-01-17T07:41:11.000Z | 2017-01-17T07:41:11.000Z | word2vec.py | SilhouettesForYou/Word2vec | 57c22f29e319362f6a63eb9d5caaa7445ff7aa5a | [
"MIT"
] | null | null | null | word2vec.py | SilhouettesForYou/Word2vec | 57c22f29e319362f6a63eb9d5caaa7445ff7aa5a | [
"MIT"
] | 1 | 2020-12-24T04:14:50.000Z | 2020-12-24T04:14:50.000Z | from __future__ import print_function
import math
import tensorflow as tf
from sklearn.manifold import TSNE
from word2vec_input import *
from word2vec_plot import *
dataset_path = 'dataset/'
dataset = 'text8.zip'
vocabulary_size = 50000
batch_size = 128
embedding_size = 128
skip_window = 1
num_skips = 2
num_sampled = 64
num_steps = 100001
num_points = 400
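# Brief notes on the skip-gram hyperparameters above (terminology follows the standard
# TensorFlow word2vec tutorial this script appears to mirror):
# skip_window = 1  -> look at 1 word of context on each side of the target word
# num_skips   = 2  -> reuse each target word twice when generating (target, context) pairs
# num_sampled = 64 -> number of negative classes drawn by sampled_softmax_loss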
def run(param):
# Building my graph
graph = tf.Graph()
with graph.as_default():
# Input data
train_dataset = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
# Variables
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
softmax_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size], stddev=1.0/math.sqrt(embedding_size)))
softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Model
embed = tf.nn.embedding_lookup(embeddings, train_dataset)
# Loss
loss = tf.reduce_mean(tf.nn.sampled_softmax_loss(softmax_weights, softmax_biases, embed, train_labels, num_sampled, vocabulary_size))
# Optimizer
optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
# Normalizing the final embeddings
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
# Creating saver to write embeddings
saver = tf.train.Saver()
# Getting dataset
words = read_data(dataset_path, dataset)
data, count, dictionary, reverse_dictionary = build_dataset(words, vocabulary_size)
if param == 'training':
# Training word embeddings
with tf.Session(graph=graph) as sess:
# Initializing all variables
init = tf.initialize_all_variables()
sess.run(init)
print('Graph Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_data, batch_labels = generate_batch(data, batch_size, num_skips, skip_window)
feed_dict = {train_dataset: batch_data, train_labels: batch_labels}
_, l = sess.run([optimizer, loss], feed_dict=feed_dict)
average_loss += l
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
print('Average loss at step %d: %f' % (step, average_loss))
saver.save(sess, 'dataset/embeddings')
else:
# Visualizing word embeddings
with tf.Session(graph=graph) as sess:
saver.restore(sess, 'dataset/embeddings')
print('Embeddings restored')
final_embeddings = sess.run(normalized_embeddings)
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
two_d_embeddings = tsne.fit_transform(final_embeddings[1:num_points+1, :])
words = [reverse_dictionary[i] for i in xrange(1, num_points+1)]
plot(two_d_embeddings, words)
plt.show()
if __name__ == '__main__':
run('training')
run('visualization')
| 27.85 | 135 | 0.735368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 416 | 0.149372 |
8c35b9a559288af50f129e1366cee6c00d138f00 | 621 | py | Python | citeyoursoftware/main.py | rodluger/citeyoursoftware | 8bccb33a268653a7267163ccdc67df72e191b388 | [
"MIT"
] | 3 | 2021-11-12T17:17:00.000Z | 2021-11-12T20:30:41.000Z | citeyoursoftware/main.py | rodluger/citeyoursoftware | 8bccb33a268653a7267163ccdc67df72e191b388 | [
"MIT"
] | null | null | null | citeyoursoftware/main.py | rodluger/citeyoursoftware | 8bccb33a268653a7267163ccdc67df72e191b388 | [
"MIT"
] | null | null | null | from .packages import get_packages
from .pypi import get_pypi_bib
def get_bibliography(
env_file="environment.yml", env_path=None, exclude=["python"]
):
# Get all user-listed packages w/ channels & exact versions
packages = get_packages(env_file=env_file, env_path=env_path)
# Try to find BibTeX entries for all packages
for name in packages:
packages[name]["bib"] = []
version = packages[name]["version"]
channel = packages[name]["channel"]
if channel == "pypi":
packages[name]["bib"] += get_pypi_bib(name, version)
# TODO!!!
return packages | 27 | 65 | 0.652174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 172 | 0.276973 |
8c361112a2676ad3ae3884f67f664ad4e61ad19c | 179 | py | Python | Exercises/folhaDePagamento.py | JeffersonOliveira/Exercises--OO2-with-Python3 | 4c9022a06ea933cd83f30b8b60c2e947e0a3250e | [
"MIT"
] | null | null | null | Exercises/folhaDePagamento.py | JeffersonOliveira/Exercises--OO2-with-Python3 | 4c9022a06ea933cd83f30b8b60c2e947e0a3250e | [
"MIT"
] | null | null | null | Exercises/folhaDePagamento.py | JeffersonOliveira/Exercises--OO2-with-Python3 | 4c9022a06ea933cd83f30b8b60c2e947e0a3250e | [
"MIT"
] | null | null | null | class FolhaDePagamento:
@staticmethod
def log():
return f'This is just some log.'
#folha = FolhaDePagamento()
#print(folha.log())
print(FolhaDePagamento.log()) | 17.9 | 41 | 0.681564 | 100 | 0.555556 | 0 | 0 | 71 | 0.394444 | 0 | 0 | 73 | 0.405556 |
8c37000baf6496730973b65b17d03f9252cd094e | 3,305 | py | Python | pytorch-edu/torch_learning.py | kedaduck/Python-Projects | 1fe41789d216d6c791587696a5ba67990479d5c2 | [
"Apache-2.0"
] | null | null | null | pytorch-edu/torch_learning.py | kedaduck/Python-Projects | 1fe41789d216d6c791587696a5ba67990479d5c2 | [
"Apache-2.0"
] | null | null | null | pytorch-edu/torch_learning.py | kedaduck/Python-Projects | 1fe41789d216d6c791587696a5ba67990479d5c2 | [
"Apache-2.0"
] | null | null | null | import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
print("Python Version:", torch.__version__)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(4*4*50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 4*4*50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
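# Shape arithmetic behind the 4*4*50 flatten above, assuming 28x28 single-channel input
# (MNIST/FashionMNIST): conv1 (5x5, stride 1, no padding) 28 -> 24, max-pool 24 -> 12,
# conv2 (5x5) 12 -> 8, max-pool 8 -> 4, leaving 50 channels of 4x4 maps = 4*4*50 features.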
def train(model, device, train_dataloader, optimizer, epoch):
model.train()
for idx, (data, target) in enumerate(train_dataloader):
data, target = data.to(device), target.to(device)
pred = model(data)
loss = F.nll_loss(pred, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if idx % 100 == 0:
print("Train Epoch: {}, iteration: {}, Loss: {}".format(
epoch, idx, loss.item()))
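# Note: the model's forward pass ends in F.log_softmax, so pairing it with F.nll_loss here
# is equivalent to training with a standard cross-entropy loss.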
def test(model, device, test_dataloader):
model.eval()
total_loss = 0.
correct = 0.
with torch.no_grad():
for idx, (data, target) in enumerate(test_dataloader):
data, target = data.to(device), target.to(device)
output = model(data)
total_loss += F.nll_loss(output, target, reduction="sum").item()
pred = output.argmax(dim=1)
correct += pred.eq(target.view_as(pred)).sum().item()
total_loss /= len(test_dataloader.dataset)
acc = correct / len(test_dataloader.dataset) * 100
print("Test loss: {}, Accuracy: {}".format(total_loss, acc))
mnist_data = datasets.MNIST("./mnist_data", train=True, download=True,
transform = transforms.Compose([
transforms.ToTensor(),
]))
# print(mnist_data)
# print(mnist_data[233][0].shape)
data = [d[0].data.cpu().numpy() for d in mnist_data]
np.mean(data)
np.std(data)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = 32
train_dataloader = torch.utils.data.DataLoader(
datasets.FashionMNIST("./fashion_mnist_data", train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, num_workers=1, pin_memory=True
)
test_dataloader = torch.utils.data.DataLoader(
datasets.FashionMNIST("./fashion_mnist_data", train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, num_workers=1, pin_memory=True
)
lr = 0.01
momentum = 0.5
model = Net().to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum)
num_epochs = 2
for epoch in range(num_epochs):
train(model, device, train_dataloader, optimizer, epoch)
test(model, device, test_dataloader)
torch.save(model.state_dict(), "fashion_mnist_cnn.pt")
| 32.087379 | 77 | 0.621785 | 539 | 0.163086 | 0 | 0 | 0 | 0 | 0 | 0 | 236 | 0.071407 |
8c372a7bab044ef6bbb830a527f234fab884969a | 4,680 | py | Python | utils.py | aimir-lab/hebbian-learning-cnn | ddaf3a66c1c374960dc680f671e64f3f20387590 | [
"MIT"
] | 18 | 2019-09-13T10:19:11.000Z | 2021-11-13T22:05:06.000Z | utils.py | GabrieleLagani/HebbianLearningThesis | 0f98f7a3e380e55c9fca6340f4fb0cc5f24917d8 | [
"MIT"
] | null | null | null | utils.py | GabrieleLagani/HebbianLearningThesis | 0f98f7a3e380e55c9fca6340f4fb0cc5f24917d8 | [
"MIT"
] | 5 | 2019-11-24T08:16:14.000Z | 2021-02-15T11:41:18.000Z | import os
import csv
import matplotlib.pyplot as plt
import torch
import params as P
# Compute the shape of the output of the convolutional layers of a network. This is useful to correctly set the size of
# successive FC layers
def get_conv_output_shape(net):
training = net.training
net.eval()
# In order to compute the shape of the output of the network convolutional layers, we can feed the network with
# a simulated input and return the resulting output shape
with torch.no_grad(): res = tuple(net.get_conv_output(torch.ones(1, *net.input_shape))[net.CONV_OUTPUT].size())[1:]
net.train(training)
return res
# Compute the shape of the output feature map from any layer of a network. This is useful to correctly set the size of
# the layers of successive network branches
def get_output_fmap_shape(net, output_layer):
training = net.training
net.eval()
# In order to compute the shape of the output of the network convolutional layers, we can feed the network with
# a simulated input and return the resulting output shape
with torch.no_grad(): res = tuple(net(torch.ones(1, *net.input_shape, device=P.DEVICE))[output_layer].size())[1:]
net.train(training)
return res
# Convert tensor shape to total tensor size
def shape2size(shape):
size = 1
for s in shape: size *= s
return size
# Convert dense-encoded vector to one-hot encoded
def dense2onehot(tensor, n=P.NUM_CLASSES):
return torch.zeros(tensor.size(0), n, device=tensor.device).scatter_(1, tensor.unsqueeze(1), 1)
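# Worked example (hypothetical values) of the scatter_ call above:
# dense2onehot(torch.tensor([2, 0]), n=3) -> tensor([[0., 0., 1.], [1., 0., 0.]])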
# Save a dictionary (e.g. representing a trained model) in the specified path
def save_dict(d, path):
os.makedirs(os.path.dirname(path), exist_ok=True)
torch.save(d, path)
# Load a dictionary (e.g. representing a traied model) from the specified path
def load_dict(path):
d = None
try: d = torch.load(path, map_location='cpu')
except: pass
return d
# Return formatted string with time information
def format_time(seconds):
seconds = int(seconds)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return str(hours) + "h " + str(minutes) + "m " + str(seconds) + "s"
# Print information on the training progress
def print_train_progress(current_epoch, total_epochs, elapsed_time, best_acc, best_epoch):
print("\nEPOCH " + str(current_epoch) + "/" + str(total_epochs))
elapsed_epochs = current_epoch - 1
if elapsed_epochs == 0:
elapsed_time_str = "-"
avg_epoch_duration_str = "-"
exp_remaining_time_str = "-"
else:
avg_epoch_duration = elapsed_time / elapsed_epochs
remaining_epochs = total_epochs - elapsed_epochs
elapsed_time_str = format_time(elapsed_time)
avg_epoch_duration_str = format_time(avg_epoch_duration)
exp_remaining_time_str = format_time(remaining_epochs * avg_epoch_duration)
print("Elapsed time: " + elapsed_time_str)
print("Average epoch duration: " + avg_epoch_duration_str)
print("Expected remaining time: " + exp_remaining_time_str)
print("Top accuracy so far: {:.2f}%".format(best_acc * 100) + ", at epoch: " + str(best_epoch))
# Save a figure showing train and validation error statistics in the specified file
def save_figure(train_acc_data, val_acc_data, path):
graph = plt.axes(xlabel='Epoch', ylabel='Accuracy')
graph.plot(range(1, len(train_acc_data)+1), train_acc_data, label='Train Acc.')
graph.plot(range(1, len(val_acc_data)+1), val_acc_data, label='Val. Acc.')
graph.grid(True)
graph.legend()
os.makedirs(os.path.dirname(path), exist_ok=True)
graph.get_figure().savefig(path, bbox_inches='tight')
graph.get_figure().clear()
plt.close(graph.get_figure())
# Function to print a grid of images (e.g. representing learned kernels)
def plot_grid(tensor, path, num_rows=8, num_cols=12):
#tensor = torch.sigmoid((tensor-tensor.mean())/tensor.std()).permute(0, 2, 3, 1).cpu().detach().numpy()
tensor = ((tensor - tensor.min())/(tensor.max() - tensor.min())).permute(0, 2, 3, 1).cpu().detach().numpy()
fig = plt.figure()
for i in range(tensor.shape[0]):
ax1 = fig.add_subplot(num_rows,num_cols,i+1)
ax1.imshow(tensor[i])
ax1.axis('off')
ax1.set_xticklabels([])
ax1.set_yticklabels([])
plt.subplots_adjust(wspace=0.1, hspace=0.1)
fig.savefig(path, bbox_inches='tight')
plt.close(fig)
# Add an entry containing the seed of a training iteration and the test accuracy of the corresponding model to a csv file
def update_csv(iter_id, accuracy, path):
d = {'iter_id': 'accuracy'}
try:
with open(path, 'r') as csv_file:
reader = csv.reader(csv_file)
d = dict(reader)
except: pass
d[iter_id] = accuracy
try:
with open(path, mode='w', newline='') as csv_file:
writer = csv.writer(csv_file)
for k, v in d.items(): writer.writerow([k, v])
except: pass
| 38.04878 | 121 | 0.738248 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,592 | 0.340171 |
8c386151db19fdbccad2e5d5ef88164358f52445 | 181 | py | Python | Chapter 01/Chap01_Example1.92.py | Anancha/Programming-Techniques-using-Python | e80c329d2a27383909d358741a5cab03cb22fd8b | [
"MIT"
] | null | null | null | Chapter 01/Chap01_Example1.92.py | Anancha/Programming-Techniques-using-Python | e80c329d2a27383909d358741a5cab03cb22fd8b | [
"MIT"
] | null | null | null | Chapter 01/Chap01_Example1.92.py | Anancha/Programming-Techniques-using-Python | e80c329d2a27383909d358741a5cab03cb22fd8b | [
"MIT"
] | null | null | null | # reading 2 numbers from the keyboard and printing maximum value
r = int(input("Enter the first number: "))
s = int(input("Enter the second number: "))
x = r if r>s else s
print(x)
| 30.166667 | 64 | 0.701657 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.646409 |
8c39b0ac142ab65ce1b2aa686569be1655292566 | 63,854 | py | Python | training/model.py | jim-schwoebel/allie | d85db041b91c81dfb3fd1a4d719b5aaaf3b6697e | [
"Apache-2.0"
] | 87 | 2020-08-07T09:05:11.000Z | 2022-01-24T00:48:22.000Z | training/model.py | jim-schwoebel/allie | d85db041b91c81dfb3fd1a4d719b5aaaf3b6697e | [
"Apache-2.0"
] | 87 | 2020-08-07T19:12:10.000Z | 2022-02-08T14:46:34.000Z | training/model.py | jim-schwoebel/allie | d85db041b91c81dfb3fd1a4d719b5aaaf3b6697e | [
"Apache-2.0"
] | 25 | 2020-08-07T20:03:08.000Z | 2022-03-16T07:33:25.000Z | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
This is Allie's modeling API to help build classification or regression models.
All you need to do is run the model.py script and you will be guided through the
modeling process.
Usage: python3 model.py
Alternative CLI Usage: python3 model.py audio 2 c gender males females
- audio = audio file type
- 2 = 2 classes
- c = classification (r for regression)
- gender = common name of model
- males = first class
- females = second class [and so on for N classes]
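A regression run, judging from the argument parsing in this script, drops the file type and class
count and instead passes the regression flag, the spreadsheet, and the target column directly,
e.g. (hypothetical file and column names): python3 model.py r ages.csv age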
For additional documentation, check out
https://github.com/jim-schwoebel/allie/tree/master/training
'''
###############################################################
## IMPORT STATEMENTS ##
###############################################################
import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, psutil, platform
from pyfiglet import Figlet
f=Figlet(font='doh')
print(f.renderText('Allie'))
f=Figlet(font='doom')
import pandas as pd
import matplotlib.pyplot as plt
###############################################################
## CREATE HELPER FUNCTIONS ##
###############################################################
def most_common(lst):
'''
get most common item in a list
'''
return max(set(lst), key=lst.count)
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
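# Quick sanity-check example of prev_dir (hypothetical path):
# prev_dir('/home/user/allie/training') returns '/home/user/allie'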
def get_folders(listdir):
folders=list()
for i in range(len(listdir)):
if listdir[i].find('.') < 0:
folders.append(listdir[i])
return folders
def classifyfolder(listdir):
filetypes=list()
for i in range(len(listdir)):
if listdir[i].endswith(('.mp3', '.wav')):
filetypes.append('audio')
elif listdir[i].endswith(('.png', '.jpg')):
filetypes.append('image')
elif listdir[i].endswith(('.txt')):
filetypes.append('text')
elif listdir[i].endswith(('.mp4', '.avi')):
filetypes.append('video')
elif listdir[i].endswith(('.csv')):
filetypes.append('csv')
counts={'audio': filetypes.count('audio'),
'image': filetypes.count('image'),
'text': filetypes.count('text'),
'video': filetypes.count('video'),
'csv': filetypes.count('csv')}
# get back the type of folder (main file type)
countlist=list(counts)
countvalues=list(counts.values())
maxvalue=max(countvalues)
maxind=countvalues.index(maxvalue)
return countlist[maxind]
def pull_element(mylist, element):
pull_=list()
for i in range(len(mylist)):
pull_.append(mylist[i][element])
return pull_
def convert_csv(X_train, y_train, labels, mtype, classes):
'''
Take in an array of features and labels and output a
pandas DataFrame for easy .CSV export and model training.
This is important to make sure all machine learning training sessions
use the same dataset (so they can be benchmarked appropriately).
'''
# from pandas merging guide https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html
feature_list=labels
data=list()
for i in tqdm(range(len(X_train)), desc='converting csv...'):
newlist=list()
for j in range(len(X_train[i])):
newlist.append([X_train[i][j]])
temp=pd.DataFrame(dict(zip(feature_list,newlist)), index=[i])
# print(temp)
data.append(temp)
data = pd.concat(data)
if mtype == 'c':
data['class_']=y_train
elif mtype == 'r':
if len(classes) == 1:
data[classes[0]]=y_train
else:
for j in range(len(classes)):
newy=pull_element(y_train, j)
data[classes[j]]=newy
data=pd.DataFrame(data, columns = list(data))
# print this because in pretty much every case you will write the .CSV file afterwards
print('writing csv file...')
return data
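# Minimal, hypothetical usage sketch of convert_csv with toy values, mirroring how the
# training/testing .CSV files are written later in this script:
# X = [[0.1, 0.2], [0.3, 0.4]]  # two samples, two features
# y = [0, 1]  # dense class labels
# df = convert_csv(X, y, ['f1', 'f2'], 'c', ['males', 'females'])
# df.to_csv('example_all.csv', index=False)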
def device_info():
cpu_data={'memory':psutil.virtual_memory(),
'cpu percent':psutil.cpu_percent(),
'cpu times':psutil.cpu_times(),
'cpu count':psutil.cpu_count(),
'cpu stats':psutil.cpu_stats(),
'cpu swap':psutil.swap_memory(),
'partitions':psutil.disk_partitions(),
'disk usage':psutil.disk_usage('/'),
'disk io counters':psutil.disk_io_counters(),
'battery':psutil.sensors_battery(),
'boot time':psutil.boot_time(),
}
data={'time':datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
'timezone':time.tzname,
'operating system': platform.system(),
'os release':platform.release(),
'os version':platform.version(),
'cpu data':cpu_data,
'space left': list(psutil.disk_usage('/'))[2]/1000000000}
return data
def get_metrics(clf, problemtype, mtype, default_training_script, common_name, X_test, y_test, classes, modelname, settings, model_session, transformer_name, created_csv_files, test_data, model_start_time):
'''
get the metrics associated iwth a classification and regression problem
and output a .JSON file with the training session.
'''
metrics_=dict()
y_true=y_test
if default_training_script not in ['autogluon', 'autokeras', 'autopytorch', 'alphapy', 'atm', 'keras', 'devol', 'ludwig', 'safe', 'neuraxle']:
y_pred=clf.predict(X_test)
elif default_training_script=='alphapy':
# go to the right folder
curdir=os.getcwd()
print(os.listdir())
os.chdir(common_name+'_alphapy_session')
alphapy_dir=os.getcwd()
os.chdir('input')
os.rename('test.csv', 'predict.csv')
os.chdir(alphapy_dir)
os.system('alphapy --predict')
os.chdir('output')
listdir=os.listdir()
for k in range(len(listdir)):
if listdir[k].startswith('predictions'):
csvfile=listdir[k]
y_pred=pd.read_csv(csvfile)['prediction']
os.chdir(curdir)
elif default_training_script == 'autogluon':
from autogluon import TabularPrediction as task
test_data=test_data.drop(labels=['class'],axis=1)
y_pred=clf.predict(test_data)
elif default_training_script == 'autokeras':
y_pred=clf.predict(X_test).flatten()
elif default_training_script == 'autopytorch':
y_pred=clf.predict(X_test).flatten()
elif default_training_script == 'atm':
curdir=os.getcwd()
os.chdir('atm_temp')
data = pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
y_pred = clf.predict(data)
os.chdir(curdir)
elif default_training_script == 'ludwig':
data=pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
pred=clf.predict(data)['class__predictions']
y_pred=np.array(list(pred), dtype=np.int64)
elif default_training_script == 'devol':
X_test=X_test.reshape(X_test.shape+ (1,)+ (1,))
y_pred=clf.predict_classes(X_test).flatten()
elif default_training_script=='keras':
if mtype == 'c':
y_pred=clf.predict_classes(X_test).flatten()
elif mtype == 'r':
y_pred=clf.predict(X_test).flatten()
elif default_training_script=='neuraxle':
y_pred=clf.transform(X_test)
elif default_training_script=='safe':
# have to make into a pandas dataframe
test_data=pd.read_csv('test.csv').drop(columns=['class_'], axis=1)
y_pred=clf.predict(test_data)
print(y_pred)
# get classification or regression metrics
if mtype in ['c', 'classification']:
# now get all classification metrics
mtype='classification'
metrics_['accuracy']=metrics.accuracy_score(y_true, y_pred)
metrics_['balanced_accuracy']=metrics.balanced_accuracy_score(y_true, y_pred)
try:
metrics_['precision']=metrics.precision_score(y_true, y_pred)
except:
metrics_['precision']='n/a'
try:
metrics_['recall']=metrics.recall_score(y_true, y_pred)
except:
metrics_['recall']='n/a'
try:
metrics_['f1_score']=metrics.f1_score(y_true, y_pred, pos_label=1)
except:
metrics_['f1_score']='n/a'
try:
metrics_['f1_micro']=metrics.f1_score(y_true, y_pred, average='micro')
except:
metrics_['f1_micro']='n/a'
try:
metrics_['f1_macro']=metrics.f1_score(y_true, y_pred, average='macro')
except:
metrics_['f1_macro']='n/a'
try:
metrics_['roc_auc']=metrics.roc_auc_score(y_true, y_pred)
except:
metrics_['roc_auc']='n/a'
try:
metrics_['roc_auc_micro']=metrics.roc_auc_score(y_true, y_pred, average='micro')
except:
metrics_['roc_auc_micro']='n/a'
try:
metrics_['roc_auc_macro']=metrics.roc_auc_score(y_true, y_pred, average='macro')
except:
metrics_['roc_auc_macro']='n/a'
metrics_['confusion_matrix']=metrics.confusion_matrix(y_true, y_pred).tolist()
metrics_['classification_report']=metrics.classification_report(y_true, y_pred, target_names=classes)
plot_confusion_matrix(np.array(metrics_['confusion_matrix']), classes)
try:
# predict_proba only works for log loss and modified Huber loss.
# https://stackoverflow.com/questions/47788981/sgdclassifier-with-predict-proba
try:
y_probas = clf.predict_proba(X_test)[:, 1]
except:
try:
y_probas = clf.decision_function(X_test)[:, 1]
except:
print('error making y_probas')
plot_roc_curve(y_test, [y_probas], [default_training_script])
except:
print('error plotting ROC curve')
print('predict_proba only works for log loss and modified Huber loss.')
elif mtype in ['r', 'regression']:
# now get all regression metrics
mtype='regression'
metrics_['mean_absolute_error'] = metrics.mean_absolute_error(y_true, y_pred)
metrics_['mean_squared_error'] = metrics.mean_squared_error(y_true, y_pred)
metrics_['median_absolute_error'] = metrics.median_absolute_error(y_true, y_pred)
metrics_['r2_score'] = metrics.r2_score(y_true, y_pred)
plot_regressor(clf, classes, X_test, y_test)
data={'sample type': problemtype,
'training time': time.time()-model_start_time,
'created date': str(datetime.datetime.now()),
'device info': device_info(),
'session id': model_session,
'classes': classes,
'problem type': mtype,
'model name': modelname,
'model type': default_training_script,
'metrics': metrics_,
'settings': settings,
'transformer name': transformer_name,
'training data': created_csv_files,
'sample X_test': X_test[0].tolist(),
'sample y_test': y_test[0].tolist()}
if modelname.endswith('.pickle'):
jsonfilename=modelname[0:-7]+'.json'
elif modelname.endswith('.h5'):
jsonfilename=modelname[0:-3]+'.json'
else:
jsonfilename=modelname+'.json'
jsonfile=open(jsonfilename,'w')
json.dump(data,jsonfile)
jsonfile.close()
# also output requirements.txt for reproducibilty purposes
curdir=os.getcwd()
basedir=prev_dir(curdir)
os.chdir(basedir)
os.system('pip3 freeze -> requirements.txt')
# FUTURE - add in optional copy of cleaning, augmentation, and feature libraries contextually
# try:
# shutil.copytree(prev_dir(prev_dir(basedir))+'/features', basedir+'/features')
# except:
# print('error copying features')
# try:
# shutil.copytree(prev_dir(prev_dir(basedir))+'/cleaning', basedir+'/cleaning')
# except:
# print('error copying cleaning techniques')
# shutil.copytree(prev_dir(prev_dir(basedir))+'/augmentation', basedir+'/augmentation')
# except:
# print('error copying augmentation techniques')
os.chdir(curdir)
def plot_roc_curve(y_test, probs, clf_names):
'''
This function plots an ROC curve with the appropriate
list of classifiers.
'''
cycol = itertools.cycle('bgrcmyk')
for i in range(len(probs)):
print(y_test)
print(probs[i])
try:
fper, tper, thresholds = roc_curve(y_test, probs[i])
plt.plot(fper, tper, color=next(cycol), label=clf_names[i]+' = %s'%(str(round(metrics.auc(fper, tper), 3))))
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
except:
print('passing %s'%(clf_names[i]))
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend()
plt.tight_layout()
plt.savefig('roc_curve.png')
plt.close()
def plot_confusion_matrix(cm, classes, normalize=True, title='Confusion matrix', cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("\nNormalized confusion matrix")
else:
print('\nConfusion matrix, without normalization')
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.savefig('confusion_matrix.png')
plt.close()
def plot_regressor(regressor, classes, X_test, y_test):
'''
plot regression models with a bar chart.
'''
try:
y_pred = regressor.predict(X_test)
# plot the first 25 records
if len(classes) == 2:
df = pd.DataFrame({'Actual': y_test.flatten(), 'Predicted': y_pred.flatten()})
df1 = df.head(25)
df1.plot(kind='bar',figsize=(16,10))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.tight_layout()
plt.savefig('bar_graph_predictions.png')
plt.close()
# plot a straight line on the data
plt.scatter(X_test, y_test, color='gray')
plt.plot(X_test, y_pred, color='red', linewidth=2)
plt.tight_layout()
plt.savefig('straight_line_predictions.png')
plt.close()
else:
# multi-dimensional generalization
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df1 = df.head(25)
df1.plot(kind='bar',figsize=(10,8))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.tight_layout()
plt.savefig('bar_graph_predictions.png')
plt.close()
except:
print('error plotting regressor')
def pursue_modeling(mtype, model_dir, problemtype, default_training_script,common_name_model):
'''
simple script to decide whether or not to continue modeling the data.
'''
try:
model_listdir=os.listdir(model_dir+'/'+problemtype+'_models')
except:
model_listdir=list()
# note that these are tpot definitions
model_exists=False
if default_training_script == 'tpot':
if common_name_model + '_classifier' in model_listdir and mtype == 'c':
model_exists=True
elif common_name_model +'_regression' in model_listdir and mtype == 'r':
model_exists=True
else:
# only look for naming conflicts with TPOT for now, can expand into the future.
model_exists=False
return model_exists, model_listdir
def get_csvfiles(listdir):
csvfiles=list()
for i in range(len(listdir)):
if listdir[i].endswith('.csv'):
csvfiles.append(listdir[i])
return csvfiles
###############################################################
## LOADING SETTINGS ##
###############################################################
# load the default feature set
cur_dir = os.getcwd()
prevdir= prev_dir(cur_dir)
sys.path.append(prevdir+'/train_dir')
settings=json.load(open(prevdir+'/settings.json'))
# get all the default feature arrays
default_audio_features=settings['default_audio_features']
default_text_features=settings['default_text_features']
default_image_features=settings['default_image_features']
default_video_features=settings['default_video_features']
default_csv_features=settings['default_csv_features']
create_csv=settings['create_csv']
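# For reference, a minimal hypothetical settings.json covering the keys this script reads
# might look like the sketch below (feature/scaler/reducer/selector names are illustrative
# only; the Allie repository documentation is the authoritative schema):
# {
#   "default_audio_features": ["librosa_features"], "default_text_features": ["nltk_features"],
#   "default_image_features": ["image_features"], "default_video_features": ["video_features"],
#   "default_csv_features": ["csv_features"], "create_csv": true,
#   "clean_data": false, "augment_data": false, "balance_data": true,
#   "remove_outliers": true, "default_outlier_detector": ["isolationforest"],
#   "test_size": 0.25, "scale_features": true, "default_scaler": ["standard_scaler"],
#   "reduce_dimensions": false, "default_dimensionality_reducer": ["pca"],
#   "select_features": false, "default_feature_selector": ["rfe"],
#   "visualize_data": false
# }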
# prepare training and testing data (should have been already featurized) - # of classes/folders
os.chdir(prevdir+'/train_dir')
data_dir=os.getcwd()
listdir=os.listdir()
folders=get_folders(listdir)
csvfiles=get_csvfiles(listdir)
# now assess folders by content type
data=dict()
for i in range(len(folders)):
os.chdir(folders[i])
listdir=os.listdir()
filetype=classifyfolder(listdir)
data[folders[i]]=filetype
os.chdir(data_dir)
###############################################################
## INITIALIZE CLASSES ##
###############################################################
# get all information from sys.argv, and if not,
# go through asking user for the proper parameters
try:
problemtype=sys.argv[1]
mtype=sys.argv[3]
if mtype == 'c':
classnum=sys.argv[2]
common_name=sys.argv[4]
classes=list()
for i in range(int(classnum)):
classes.append(sys.argv[i+5])
else:
classnum=1
problemtype='csv'
mtype=sys.argv[1]
csvfile=sys.argv[2]
classes=[sys.argv[3]]
common_name=csvfile[0:-4]
except:
# now ask user what type of problem they are trying to solve
mtype=input('is this a classification (c) or regression (r) problem? \n')
while mtype not in ['c','r']:
print('input not recognized...')
mtype=input('is this a classification (c) or regression (r) problem? \n')
if mtype == 'c':
problemtype=input('what problem are you solving? (1-audio, 2-text, 3-image, 4-video, 5-csv)\n')
while problemtype not in ['1','2','3','4','5']:
print('answer not recognized...')
problemtype=input('what problem are you solving? (1-audio, 2-text, 3-image, 4-video, 5-csv)\n')
if problemtype=='1':
problemtype='audio'
elif problemtype=='2':
problemtype='text'
elif problemtype=='3':
problemtype='image'
elif problemtype=='4':
problemtype='video'
elif problemtype=='5':
problemtype='csv'
if problemtype != 'csv':
print('\n OK cool, we got you modeling %s files \n'%(problemtype))
count=0
availableclasses=list()
for i in range(len(folders)):
if data[folders[i]]==problemtype:
availableclasses.append(folders[i])
count=count+1
classnum=input('how many classes would you like to model? (%s available) \n'%(str(count)))
print('these are the available classes: ')
print(availableclasses)
# get all if all (good for many classes)
classes=list()
if classnum=='all':
for i in range(len(availableclasses)):
classes.append(availableclasses[i])
else:
stillavailable=list()
for i in range(int(classnum)):
class_=input('what is class #%s \n'%(str(i+1)))
while class_ not in availableclasses and class_ not in '' or class_ in classes:
print('\n')
print('------------------ERROR------------------')
print('the input class does not exist (for %s files).'%(problemtype))
print('these are the available classes: ')
if len(stillavailable)==0:
print(availableclasses)
else:
print(stillavailable)
print('------------------------------------')
class_=input('what is class #%s \n'%(str(i+1)))
for j in range(len(availableclasses)):
stillavailable=list()
if availableclasses[j] not in classes:
stillavailable.append(availableclasses[j])
if class_ == '':
class_=stillavailable[0]
classes.append(class_)
elif problemtype == 'csv':
print('\n OK cool, we got you modeling %s files \n'%(problemtype))
print('csv file options are: %s \n'%(csvfiles))
csvfile=input('which csvfile would you like to use for classification? \n')
g=pd.read_csv(csvfile)
columns=list(g)
print('potential targets include: %s'%(columns))
target=input('what target would you like to use? \n')
csv_labels=g[target]
csv_features=g.drop([target], axis=1)
elif mtype =='r':
# for regression problems we need a target column to predict / classes from a .CSV
problemtype='csv'
# assumes the .CSV file is in the train dir
os.chdir(prevdir+'/train_dir')
listdir=os.listdir()
csvfiles=list()
for i in range(len(listdir)):
if listdir[i].endswith('.csv'):
csvfiles.append(listdir[i])
csvfile=input('what is the name of the spreadsheet (in ./train_dir) used for prediction? \n\n available: %s\n\n'%(str(csvfiles)))
while csvfile not in csvfiles:
print('answer not recognized...')
csvfile=input('what is the name of the spreadsheet (in ./train_dir) used for prediction? \n\n available: %s\n\n'%(str(csvfiles)))
# the available classes are only the numeric columns from the spreadsheet
data = pd.read_csv(csvfile)
columns = list(data)
availableclasses=list()
for i in range(len(columns)):
# look at filetype extension in each column
coldata=data[columns[i]]
sampletypes=list()
for j in range(len(coldata)):
try:
values=float(coldata[j])
sampletypes.append('numerical')
except:
if coldata[j].endswith('.wav'):
sampletypes.append('audio')
elif coldata[j].endswith('.txt'):
sampletypes.append('text')
elif coldata[j].endswith('.png'):
sampletypes.append('image')
elif coldata[j].endswith('.mp4'):
sampletypes.append('video')
else:
sampletypes.append('other')
coltype=most_common(sampletypes)
# correct the other category if needed
if coltype == 'other':
# if coltype.endswith('.csv'):
# coltype='csv'
if len(set(list(coldata))) < 10:
coltype='categorical'
else:
# 10 or more unique values: interpret this column as free typed text input
coltype='typedtext'
if coltype == 'numerical':
availableclasses.append(columns[i])
if len(availableclasses) > 0:
classnum=input('how many classes would you like to model? (%s available) \n'%(str(len(availableclasses))))
print('these are the available classes: %s'%(str(availableclasses)))
classes=list()
stillavailable=list()
for i in range(int(classnum)):
class_=input('what is class #%s \n'%(str(i+1)))
while class_ not in availableclasses and class_ not in '' or class_ in classes:
print('\n')
print('------------------ERROR------------------')
print('the input class does not exist (for %s files).'%(problemtype))
print('these are the available classes: ')
if len(stillavailable)==0:
print(availableclasses)
else:
print(stillavailable)
print('------------------------------------')
class_=input('what is class #%s \n'%(str(i+1)))
for j in range(len(availableclasses)):
stillavailable=list()
if availableclasses[j] not in classes:
stillavailable.append(availableclasses[j])
if class_ == '':
class_=stillavailable[0]
classes.append(class_)
else:
print('no classes available... ending session')
sys.exit()
common_name=input('what is the 1-word common name for the problem you are working on? (e.g. gender for male/female classification) \n')
###############################################################
## UPGRADE MODULES / LOAD MODULES ##
###############################################################
print('-----------------------------------')
print(' LOADING MODULES ')
print('-----------------------------------')
# upgrade to have the proper scikit-learn version later
os.chdir(cur_dir)
os.system('python3 upgrade.py')
import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import numpy as np
from sklearn import metrics
from sklearn.metrics import roc_curve
###############################################################
## CLEAN THE DATA ##
###############################################################
clean_data=settings['clean_data']
clean_dir=prevdir+'/cleaning'
if clean_data == True and mtype == 'c':
# only pursue augmentation strategies on directories of files and classification problems
print('-----------------------------------')
print(f.renderText('CLEANING DATA'))
print('-----------------------------------')
for i in range(len(classes)):
if problemtype == 'audio':
# clean audio via default_audio_cleaners
os.chdir(clean_dir+'/audio_cleaning')
elif problemtype == 'text':
# clean text via default_text_cleaners
os.chdir(clean_dir+'/text_cleaning')
elif problemtype == 'image':
# clean images via default_image_cleaners
os.chdir(clean_dir+'/image_cleaning')
elif problemtype == 'video':
# clean video via default_video_cleaners
os.chdir(clean_dir+'/video_cleaning')
elif problemtype == 'csv':
# clean .CSV via default_csv_cleaners
os.chdir(clean_dir+'/csv_cleaning')
os.system('python3 clean.py "%s"'%(data_dir+'/'+classes[i]))
elif clean_data == True and mtype == 'r':
for i in range(len(classes)):
if problemtype == 'csv':
# clean .CSV via default_csv_cleaners
os.chdir(clean_dir+'/csv_cleaning')
os.system('python3 clean.py "%s"'%(data_dir+'/'+classes[i]))
###############################################################
## AUGMENT THE DATA ##
###############################################################
augment_data=settings['augment_data']
augment_dir=prevdir+'/augmentation'
if augment_data == True and mtype == 'c':
# only pursue augmentation strategies on directories of files and classification problems
print('-----------------------------------')
print(f.renderText('AUGMENTING DATA'))
print('-----------------------------------')
for i in range(len(classes)):
if problemtype == 'audio':
# augment audio via default_audio_augmenters
os.chdir(augment_dir+'/audio_augmentation')
elif problemtype == 'text':
# augment text via default_text_augmenters
os.chdir(augment_dir+'/text_augmentation')
elif problemtype == 'image':
# augment images via default_image_augmenters
os.chdir(augment_dir+'/image_augmentation')
elif problemtype == 'video':
# augment video via default_video_augmenters
os.chdir(augment_dir+'/video_augmentation')
elif problemtype == 'csv':
# augment .CSV via default_csv_augmenters
os.chdir(augment_dir+'/csv_augmentation')
os.system('python3 augment.py "%s"'%(data_dir+'/'+classes[i]))
elif augment_data == True and mtype == 'r':
for i in range(len(classes)):
if problemtype == 'csv':
# featurize .CSV via default_csv_augmenters
os.chdir(augment_dir+'/csv_augmentation')
os.system('python3 augment.py "%s"'%(data_dir+'/'+classes[i]))
###############################################################
## FEATURIZE FILES ##
###############################################################
# now featurize each class (in proper folder)
if mtype == 'c':
data={}
print('-----------------------------------')
print(f.renderText('FEATURIZING DATA'))
print('-----------------------------------')
if problemtype == 'csv':
# csv features should have already been defined
# need to separate into number of unique classes
csv_labels=g[target]
csv_features=g.drop([target], axis=1)
csv_feature_labels=list(csv_features)
classes=list(set(list(csv_labels)))
for i in range(len(classes)):
class_type = classes[i]
feature_list=list()
label_list=list()
for i in range(len(csv_features)):
if csv_labels[i] == class_type:
feature_list.append(list(csv_features.iloc[i,:]))
label_list.append(csv_feature_labels)
data[class_type]=feature_list
else:
#
for i in range(len(classes)):
class_type=classes[i]
if problemtype == 'audio':
# featurize audio
os.chdir(prevdir+'/features/audio_features')
default_features=default_audio_features
elif problemtype == 'text':
# featurize text
os.chdir(prevdir+'/features/text_features')
default_features=default_text_features
elif problemtype == 'image':
# featurize images
os.chdir(prevdir+'/features/image_features')
default_features=default_image_features
elif problemtype == 'video':
# featurize video
os.chdir(prevdir+'/features/video_features')
default_features=default_video_features
print('-----------------------------------')
print(' FEATURIZING %s'%(classes[i].upper()))
print('-----------------------------------')
os.system('python3 featurize.py "%s"'%(data_dir+'/'+classes[i]))
os.chdir(data_dir+'/'+classes[i])
# load audio features
listdir=os.listdir()
feature_list=list()
label_list=list()
for j in range(len(listdir)):
if listdir[j][-5:]=='.json':
try:
g=json.load(open(listdir[j]))
# consolidate all features into one array (if featurizing with multiple featurizers)
default_feature=list()
default_label=list()
for k in range(len(default_features)):
default_feature=default_feature+g['features'][problemtype][default_features[k]]['features']
default_label=default_label+g['features'][problemtype][default_features[k]]['labels']
feature_list.append(default_feature)
label_list.append(default_label)
except:
print('ERROR - skipping ' + listdir[j])
data[class_type]=feature_list
elif mtype == 'r':
# featurize .CSV
os.chdir(prevdir+'/features/csv_features')
output_file=str(uuid.uuid1())+'.csv'
os.system('python3 featurize_csv_regression.py -i "%s" -o "%s" -t "%s"'%(prevdir+'/train_dir/'+csvfile, prevdir+'/train_dir/'+output_file, classes[0]))
csvfile=output_file
default_features=['csv_regression']
###############################################################
## GENERATE TRAINING DATA ##
###############################################################
print('-----------------------------------')
print(f.renderText('CREATING TRAINING DATA'))
print('-----------------------------------')
# perform class balance such that both classes have the same number
# of members (true by default, but can also be false)
os.chdir(prevdir+'/training/')
model_dir=prevdir+'/models'
balance=settings['balance_data']
remove_outliers=settings['remove_outliers']
outlier_types=settings['default_outlier_detector']
if mtype == 'c':
if problemtype != 'csv':
jsonfile=''
for i in range(len(classes)):
if i==0:
jsonfile=classes[i]
else:
jsonfile=jsonfile+'_'+classes[i]
jsonfile=jsonfile+'.json'
#try:
g=data
alldata=list()
labels=list()
lengths=list()
# check to see all classes are same length and reshape if necessary
for i in range(len(classes)):
class_=g[classes[i]]
lengths.append(len(class_))
lengths=np.array(lengths)
minlength=np.amin(lengths)
# now load all the classes
for i in range(len(classes)):
class_=g[classes[i]]
random.shuffle(class_)
# only balance if specified in settings
if balance==True:
if len(class_) > minlength:
print('%s greater than minlength (%s) by %s, equalizing...'%(classes[i], str(minlength), str(len(class_)-minlength)))
class_=class_[0:minlength]
for j in range(len(class_)):
alldata.append(class_[j])
labels.append(i)
# load features file and get feature labels by loading in classes
labels_dir=prevdir+'/train_dir/'+classes[0]
os.chdir(labels_dir)
listdir=os.listdir()
features_file=''
for i in range(len(listdir)):
if listdir[i].endswith('.json'):
features_file=listdir[i]
labels_=list()
for i in range(len(default_features)):
tlabel=json.load(open(features_file))['features'][problemtype][default_features[i]]['labels']
labels_=labels_+tlabel
elif problemtype == 'csv':
# format data appropriately
jsonfile=target+'.json'
#try:
g=data
alldata=list()
labels=list()
lengths=list()
# check to see all classes are same length and reshape if necessary
for i in range(len(classes)):
class_=g[classes[i]]
lengths.append(len(class_))
lengths=np.array(lengths)
minlength=np.amin(lengths)
# now load all the classes
for i in range(len(classes)):
class_=g[classes[i]]
random.shuffle(class_)
# only balance if specified in settings
if balance==True:
if len(class_) > minlength:
print('%s greater than minlength (%s) by %s, equalizing...'%(classes[i], str(minlength), str(len(class_)-minlength)))
class_=class_[0:minlength]
for j in range(len(class_)):
alldata.append(class_[j])
labels.append(i)
# load features file and get feature labels by loading in classes
labels_=csv_feature_labels
elif mtype == 'r':
regression_data=pd.read_csv(prevdir+'/train_dir/'+csvfile)
print(csvfile)
# get features and labels
features_=regression_data.drop(columns=classes, axis=1)
labels_=list(features_)
labels_csv=regression_data.drop(columns=list(features_), axis=1)
# iterate through each column and make into proper features and labels
features=list()
labels=list()
# testing
# print(len(features_))
# print(len(labels_))
# print(features_)
# print(labels_)
# print(features_.iloc[0,:])
# print(labels_.iloc[0,:])
# get features and labels
for i in range(len(features_)):
features.append(list(features_.iloc[i,:]))
labels.append(list(labels_csv.iloc[i,:]))
# convert to name alldata just to be consistent
alldata=features
# print(alldata[0])
# print(labels[0])
# print(labels_)
os.chdir(model_dir)
# get the split from the settings.json
try:
test_size=settings['test_size']
except:
test_size=0.25
# error checking around lengths of arrays and deleting as necessary
lengths=list()
for i in range(len(alldata)):
lengths.append(len(alldata[i]))
# CLEAN IF DIMENSIONS DO NOT MATCH!!
maxval=max(lengths)
minval=min(lengths)
delete_ind=list()
inds=list()
alldata=np.array(alldata)
labels=np.array(labels)
if maxval != minval:
if lengths.count(maxval) > lengths.count(minval):
for i in range(len(lengths)):
# this means that additional column has been removed
if lengths[i] == minval:
delete_ind.append(i)
elif lengths.count(maxval) < lengths.count(minval):
for i in range(len(lengths)):
# this means that additional column has been added
if lengths[i] == maxval:
delete_ind.append(i)
print('DELETING THESE INDICES: %s'%(str(delete_ind)))
print(alldata.shape)
print(labels.shape)
alldata=np.delete(alldata, tuple(delete_ind), axis=0)
labels=np.delete(labels, tuple(delete_ind))
print(alldata.shape)
print(labels.shape)
# # now see if any element in the array is a NaN and do not include if so in alldata or labels
# for i in range(len(alldata)):
# try:
# array_has_nan = list(np.isnan(np.array(alldata[i]))).count(True)
# array_has_string=list(np.char.isnumeric(np.array(alldata[i]))).count(False)
# except:
# array_has_string=1
# if array_has_nan > 0 or array_has_string > 0:
# inds.append(i)
# print(alldata[i])
# if len(inds) > 0:
# print('DELETING THESE INDICES: %s'%(str(inds)))
# alldata=np.delete(alldata, tuple(inds))
# labels=np.delete(labels, tuple(inds))
# REMOVE OUTLIERS IF SETTING IS TRUE
alldata=np.array(alldata)
labels=np.array(labels)
if remove_outliers==True:
print('-----------------------------------')
print(' REMOVING OUTLIERS')
print('-----------------------------------')
for i in range(len(outlier_types)):
outlier_type=outlier_types[i]
if outlier_type =='isolationforest':
from sklearn.ensemble import IsolationForest
clf = IsolationForest(random_state=0).fit(alldata)
y_pred = clf.predict(alldata)
inlier_ind=list(np.where(y_pred==1))
outlier_ind=list(np.where(y_pred==-1))
y_pred = y_pred.tolist()
print(type(y_pred))
print(type(y_pred[0]))
n_inliers = y_pred.count(1)
n_outliers = y_pred.count(-1)
print(n_inliers)
print(n_outliers)
# shape before
print(alldata.shape)
print(labels.shape)
# delete outliers
alldata=np.delete(alldata, tuple(outlier_ind), axis=0)
labels=np.delete(labels, tuple(outlier_ind))
print(alldata.shape)
print(labels.shape)
elif outlier_type=='zscore':
os.system('pip3 install statsmodels==0.11.1')
from scipy import stats
from statsmodels.formula.api import ols
# https://towardsdatascience.com/ways-to-detect-and-remove-the-outliers-404d16608dba
z = np.abs(stats.zscore(alldata))
# print(z)
threshold = 3
inds=list(set(np.where(z>threshold)[0]))
print(len(inds))
print(tuple(inds))
print(alldata.shape)
print('-->')
alldata = np.delete(alldata, tuple(inds), axis=0)
print(alldata.shape)
labels = np.delete(labels, tuple(inds))
print(len(alldata))
print(len(labels))
# rebalance data to all be the same length
newlabels=list(labels)
outlier_class=list()
for i in range(len(classes)):
outlier_class.append(newlabels.count(i))
lengths=np.array(outlier_class)
minlength=np.amin(outlier_class)
# now load all the classes
for i in range(len(classes)):
# only balance if specified in settings
if balance==True:
count2=newlabels.count(i)
while count2 > minlength:
count2=newlabels.count(i)
print('%s greater than minlength (%s) by %s, equalizing...'%(classes[i], str(minlength), str(count2-minlength)))
ind=list(labels).index(i)
alldata=np.delete(alldata, tuple([ind]), axis=0)
labels=np.delete(labels, tuple([ind]))
newlabels=list(labels)
alldata=list(alldata)
labels=list(labels)
# split the data
X_train, X_test, y_train, y_test = train_test_split(alldata, labels, test_size=test_size)
# convert everything to numpy arrays (for testing later)
X_train=np.array(X_train)
X_test=np.array(X_test)
y_train=np.array(y_train)
y_test=np.array(y_test)
# create list of created csv files
created_csv_files=list()
# create training and testing datasets and save to a .CSV file for archive purposes
# this ensures that all machine learning training methods use the same training data
basefile=common_name
temp_listdir=os.listdir()
if create_csv == True:
try:
print(basefile+'_all.csv'.upper())
if basefile+'_all.csv' not in temp_listdir:
all_data = convert_csv(alldata, labels, labels_, mtype, classes)
all_data.to_csv(basefile+'_all.csv',index=False)
created_csv_files.append(basefile+'_all.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_all.csv'))
try:
print(basefile+'_train.csv'.upper())
if basefile+'_train.csv' not in temp_listdir:
train_data= convert_csv(X_train, y_train, labels_, mtype, classes)
train_data.to_csv(basefile+'_train.csv',index=False)
created_csv_files.append(basefile+'_train.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_train.csv'))
try:
print(basefile+'_test.csv'.upper())
if basefile+'_test.csv' not in temp_listdir:
test_data= convert_csv(X_test, y_test, labels_, mtype, classes)
test_data.to_csv(basefile+'_test.csv',index=False)
created_csv_files.append(basefile+'_test.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_test.csv'))
############################################################
## DATA TRANSFORMATION ##
############################################################
'''
Scale features via scalers, dimensionality reduction techniques,
and feature selection strategies per the settings.json document.
'''
preprocess_dir=prevdir+'/preprocessing'
os.chdir(preprocess_dir)
# get all the important settings for the transformations
scale_features=settings['scale_features']
reduce_dimensions=settings['reduce_dimensions']
select_features=settings['select_features']
default_scalers=settings['default_scaler']
default_reducers=settings['default_dimensionality_reducer']
default_selectors=settings['default_feature_selector']
# get command for terminal
transform_command=''
if problemtype == 'csv' and mtype == 'c':
transform_command=transform_command+' "'+'Class'+'"'
else:
for i in range(len(classes)):
transform_command=transform_command+' "'+classes[i]+'"'
# get filename / create a unique file name
if mtype=='r':
t_filename='r_'+common_name
elif mtype=='c':
t_filename='c_'+common_name
# only add names in if True
if scale_features == True:
for i in range(len(default_scalers)):
t_filename=t_filename+'_'+default_scalers[i]
if reduce_dimensions == True:
for i in range(len(default_reducers)):
t_filename=t_filename+'_'+default_reducers[i]
if select_features == True:
for i in range(len(default_selectors)):
t_filename=t_filename+'_'+default_selectors[i]
transform_file=t_filename+'.pickle'
if scale_features == True or reduce_dimensions == True or select_features == True:
print('----------------------------------')
print(f.renderText('TRANSFORMING DATA'))
print('----------------------------------')
# go to proper transformer directory
try:
os.chdir(problemtype+'_transformer')
except:
os.mkdir(problemtype+'_transformer')
os.chdir(problemtype+'_transformer')
# train transformer if it doesn't already exist
os.system('pip3 install scikit-learn==0.22.2.post1')
if transform_file in os.listdir():
# remove file if in listdir to avoid conflicts with naming
os.remove(transform_file)
print('making transformer...')
alldata=np.asarray(alldata)
labels=np.asarray(labels)
os.chdir(preprocess_dir)
if mtype == 'c':
print('python3 transform.py "%s" "%s" "%s" %s'%(problemtype, 'c', common_name, transform_command))
os.system('python3 transform.py "%s" "%s" "%s" %s'%(problemtype, 'c', common_name, transform_command))
os.chdir(problemtype+'_transformer')
print(transform_file)
transform_model=pickle.load(open(transform_file,'rb'))
alldata=transform_model.transform(np.array(alldata))
elif mtype == 'r':
command='python3 transform.py "%s" "%s" "%s" "%s" "%s" "%s"'%('csv', 'r', classes[0], csvfile, prevdir+'/train_dir/', common_name)
print(command)
os.system(command)
os.chdir(problemtype+'_transformer')
transform_model=pickle.load(open(transform_file,'rb'))
alldata=transform_model.transform(alldata)
os.chdir(preprocess_dir)
os.system('python3 load_transformer.py "%s" "%s"'%(problemtype, transform_file))
# now make new files as .CSV
os.chdir(model_dir)
# split the data
X_train, X_test, y_train, y_test = train_test_split(alldata, labels, test_size=test_size)
# convert to numpy arrays
X_train=np.array(X_train)
X_test=np.array(X_test)
y_train=np.array(y_train)
y_test=np.array(y_test)
# get new labels_ array
labels_=list()
for i in range(len(alldata[0].tolist())):
labels_.append('transformed_feature_%s'%(str(i)))
# now create transformed excel sheets
temp_listdir=os.listdir()
if create_csv == True:
try:
print(basefile+'_all_transformed.csv'.upper())
if basefile+'_all_transformed.csv' not in temp_listdir:
all_data = convert_csv(alldata, labels, labels_, mtype, classes)
all_data.to_csv(basefile+'_all_transformed.csv',index=False)
created_csv_files.append(basefile+'_all_transformed.csv')
except:
		print('error exporting data into CSV file %s'%(basefile+'_all_transformed.csv'))
try:
		print((basefile+'_train_transformed.csv').upper())
if basefile+'_train_transformed.csv' not in temp_listdir:
train_data= convert_csv(X_train, y_train, labels_, mtype, classes)
train_data.to_csv(basefile+'_train_transformed.csv',index=False)
created_csv_files.append(basefile+'_train_transformed.csv')
except:
		print('error exporting data into CSV file %s'%(basefile+'_train_transformed.csv'))
try:
		print((basefile+'_test_transformed.csv').upper())
if basefile+'_test_transformed.csv' not in temp_listdir:
test_data= convert_csv(X_test, y_test, labels_, mtype, classes)
test_data.to_csv(basefile+'_test_transformed.csv',index=False)
created_csv_files.append(basefile+'_test_transformed.csv')
except:
		print('error exporting data into CSV file %s'%(basefile+'_test_transformed.csv'))
else:
# make a transform model == '' so that later during model training this can be skipped
transform_model=''
############################################################
## VISUALIZE DATA ##
############################################################
visualize_data=settings['visualize_data']
visual_dir=prevdir+'/visualize'
model_session=str(uuid.uuid1())
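# model_session is a unique folder name for this run, e.g. (illustrative) '1b4db7eb-4057-11ec-8d3d-0242ac130003'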
os.chdir(visual_dir)
if visualize_data == True and mtype == 'c':
print('----------------------------------')
print(f.renderText('VISUALIZING DATA'))
print('----------------------------------')
command='python3 visualize.py %s'%(problemtype)
for i in range(len(classes)):
command=command+' "'+classes[i]+'"'
os.system(command)
# restructure the visualization directory
os.chdir(visual_dir+'/visualization_session')
os.mkdir('visualizations')
vizdir=os.getcwd()
# move directories so that visualization is separate from main model directory
shutil.move(vizdir+'/clustering', vizdir+'/visualizations/clustering')
shutil.move(vizdir+'/feature_ranking', vizdir+'/visualizations/feature_ranking')
shutil.move(vizdir+'/model_selection', vizdir+'/visualizations/model_selection')
	# go back to main directory
os.chdir(visual_dir)
	# now copy the visualization session directory over into the model session directory
try:
shutil.copytree(visual_dir+'/visualization_session', model_dir+'/'+model_session)
except:
shutil.rmtree(model_dir+'/'+model_session)
shutil.copytree(visual_dir+'/visualization_session', model_dir+'/'+model_session)
# copy over settings.json
shutil.copy(prevdir+'/settings.json',model_dir+'/%s/settings.json'%(model_session))
else:
# make a model session for next section if it doesn't exist from visualization directory
os.chdir(model_dir)
try:
os.mkdir(model_session)
except:
shutil.rmtree(model_session)
os.mkdir(model_session)
# copy over settings.json
shutil.copy(prevdir+'/settings.json', model_dir+'/%s/settings.json'%(model_session))
############################################################
## TRAIN THE MODEL ##
############################################################
'''
Now we can train the machine learning model via the default_training script(s).
Note that you can specify multiple training scripts; they will be run consecutively
on the same dataset.
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#
# Here is what all the variables below mean:
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#
# alldata = list of features in an array for model training
# [[39.0, 112.15384615384616, 70.98195453650514, 248.0, 14.0, 103.0, 143.5546875...],
...
[39.0, 112.15384615384616, 70.98195453650514, 248.0, 14.0, 103.0, 143.5546875,...]]
# labels = list of labels in an array for model training
# ['males','females',...,'males','females']
# mtype = classification or regression problem?
# 'c' --> classification
# 'r' --> regression
# jsonfile = filename of the .JSON document separating classes
# males_females.json
# problemtype = type of problem selected
# 'audio' --> audio files
# 'image' --> images files
# 'text' --> text files
# 'video' --> video files
# 'csv' --> csv files
# default_featurenames = default feature array(s) to use for modeling
# ['librosa_features']
# settings = overall settings currently used for model training
# output of the settings.json document
-----
# transform_model = transformer model if applicable
# useful for data transformation as part of the model initialization process (if pickle file)
# uses scikit-learn pipeline
# X_train, X_test, y_train, y_test
# training datasets used in the .CSV documents
# also can use pandas dataframe if applicable (loading in the model dir)
'''
print('----------------------------------')
print(f.renderText('MODELING DATA'))
print('----------------------------------')
# get defaults
default_training_scripts=settings['default_training_script']
model_compress=settings['model_compress']
default_featurenames=''
if problemtype != 'csv' and mtype == 'c':
for i in range(len(default_features)):
if i ==0:
default_featurenames=default_features[i]
else:
default_featurenames=default_featurenames+'_|_'+default_features[i]
else:
default_featurenames='csv_classification'
# just move all created .csv files into model_session directory
os.chdir(model_dir)
os.chdir(model_session)
os.mkdir('data')
for i in range(len(created_csv_files)):
shutil.move(model_dir+'/'+created_csv_files[i], os.getcwd()+'/data/'+created_csv_files[i])
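# each train_* module called below exposes a trainer with (roughly) the same signature and returns modelname, modeldir, files, e.g.:
# 	modelname, modeldir, files = tk.train_keras(X_train, X_test, y_train, y_test, mtype, common_name_model,
# 		problemtype, classes, default_featurenames, transform_model, settings, model_session)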
# initialize i (for tqdm) and go through all model training scripts
i=0
for i in tqdm(range(len(default_training_scripts)), desc=default_training_scripts[i]):
try:
model_start_time=time.time()
# go to model directory
os.chdir(model_dir)
# get common name and default training script to select proper model trainer
default_training_script=default_training_scripts[i]
common_name_model=common_name+'_'+default_training_script
model_exists, model_listdir = pursue_modeling(mtype, model_dir, problemtype, default_training_script, common_name_model)
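		# model_exists is True when a model with this name already sits in the <problemtype>_models folder; model_listdir is that folder's contents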
if model_exists == False:
print('----------------------------------')
print(' .... training %s '%(default_training_script.upper()))
print('----------------------------------')
if default_training_script=='adanet':
print('Adanet training is coming soon! Please use a different model setting for now.')
# import train_adanet as ta
# ta.train_adanet(mtype, classes, jsonfile, alldata, labels, feature_labels, problemtype, default_featurenames)
elif default_training_script=='alphapy':
import train_alphapy as talpy
modelname, modeldir, files=talpy.train_alphapy(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='atm':
import train_atm as tatm
modelname, modeldir, files=tatm.train_atm(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autobazaar':
import train_autobazaar as autobzr
modelname, modeldir, files=autobzr.train_autobazaar(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autogbt':
import train_autogbt as tautogbt
modelname, modeldir, files=tautogbt.train_autogbt(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autogluon':
import train_autogluon as tautg
modelname, modeldir, files, test_data=tautg.train_autogluon(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autokaggle':
import train_autokaggle as autokag
modelname, modeldir, files=autokag.train_autokaggle(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autokeras':
import train_autokeras as autokeras_
modelname, modeldir, files=autokeras_.train_autokeras(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='automl':
import train_automl as auto_ml
modelname, modeldir, files=auto_ml.train_automl(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autosklearn':
print('Autosklearn training is unstable! Please use a different model setting for now.')
# import train_autosklearn as taskl
# taskl.train_autosklearn(alldata, labels, mtype, jsonfile, problemtype, default_featurenames)
elif default_training_script=='autopytorch':
import train_autopytorch as autotorch_
modelname, modeldir, files=autotorch_.train_autopytorch(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='btb':
import train_btb as tbtb
modelname, modeldir, files=tbtb.train_btb(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='cvopt':
import train_cvopt as tcvopt
modelname, modeldir, files = tcvopt.train_cvopt(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='devol':
import train_devol as td
modelname, modeldir, files=td.train_devol(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='gama':
import train_gama as tgama
modelname, modeldir, files=tgama.train_gama(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='gentun':
import train_gentun as tgentun
modelname, modeldir, files=tgentun.train_gentun(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='hyperband':
import train_hyperband as thband
modelname, modeldir, files = thband.train_hyperband(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='hypsklearn':
import train_hypsklearn as th
modelname, modeldir, files=th.train_hypsklearn(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='hungabunga':
import train_hungabunga as thung
modelname, modeldir, files=thung.train_hungabunga(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='imbalance':
import train_imbalance as timb
modelname, modeldir, files=timb.train_imbalance(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='keras':
import train_keras as tk
modelname, modeldir, files=tk.train_keras(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='ludwig':
import train_ludwig as tl
modelname, modeldir, files=tl.train_ludwig(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='mlblocks':
import train_mlblocks as mlb
modelname, modeldir, files=mlb.train_mlblocks(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='mlbox':
import train_mlbox as mlbox_
modelname, modeldir, files=mlbox_.train_mlbox(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='neuraxle':
if mtype=='c':
print('Neuraxle does not support classification at this time. Please use a different model training script')
break
else:
import train_neuraxle as tneuraxle
modelname, modeldir, files=tneuraxle.train_neuraxle(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='plda':
print('PLDA training is unstable! Please use a different model setting for now.')
# import train_pLDA as tp
# tp.train_pLDA(alldata,labels)
elif default_training_script=='pytorch':
import train_pytorch as t_pytorch
modelname, modeldir, files = t_pytorch.train_pytorch(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='safe':
import train_safe as tsafe
modelname, modeldir, files=tsafe.train_safe(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='scsr':
import train_scsr as scsr
if mtype == 'c':
modelname, modeldir, files=scsr.train_sc(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,minlength)
elif mtype == 'r':
modelname, modeldir, files=scsr.train_sr(X_train,X_test,y_train,y_test,common_name_model,problemtype,classes,default_featurenames,transform_model,model_dir,settings)
elif default_training_script=='tpot':
import train_TPOT as tt
modelname, modeldir, files=tt.train_TPOT(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
############################################################
## CALCULATE METRICS / PLOT ROC CURVE ##
############################################################
if modelname.endswith('.pickle'):
foldername=modelname[0:-7]
elif modelname.endswith('.h5'):
foldername=modelname[0:-3]
else:
foldername=common_name_model
# copy the folder in case there are multiple models being trained
try:
shutil.copytree(model_session, foldername)
except:
shutil.rmtree(foldername)
shutil.copytree(model_session, foldername)
cur_dir2=os.getcwd()
os.chdir(foldername)
os.mkdir('model')
os.chdir('model')
model_dir_temp=os.getcwd()
# dump transform model to the models directory if necessary
if transform_model == '':
transformer_name=''
else:
				# dump the transform model into the current working directory
transformer_name=modelname.split('.')[0]+'_transform.pickle'
tmodel=open(transformer_name,'wb')
pickle.dump(transform_model, tmodel)
tmodel.close()
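				# the dumped transformer can later be reloaded and applied before prediction, e.g.:
				# 	transform_model = pickle.load(open(transformer_name, 'rb'))
				# 	features = transform_model.transform(features)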
# move all supplementary files into model folder
for j in range(len(files)):
shutil.move(modeldir+'/'+files[j], model_dir_temp+'/'+files[j])
# load model for getting metrics
if default_training_script not in ['alphapy', 'atm', 'autokeras', 'autopytorch', 'ludwig', 'keras', 'devol']:
loadmodel=open(modelname, 'rb')
clf=pickle.load(loadmodel)
loadmodel.close()
elif default_training_script == 'atm':
from atm import Model
clf=Model.load(modelname)
elif default_training_script == 'autokeras':
import tensorflow as tf
import autokeras as ak
clf = pickle.load(open(modelname, 'rb'))
elif default_training_script=='autopytorch':
import torch
clf=torch.load(modelname)
elif default_training_script == 'ludwig':
from ludwig.api import LudwigModel
clf=LudwigModel.load('ludwig_files/experiment_run/model/')
elif default_training_script in ['devol', 'keras']:
from keras.models import load_model
clf = load_model(modelname)
else:
clf=''
# create test_data variable for anything other than autogluon
if default_training_script != 'autogluon':
test_data=''
# now make main .JSON file for the session summary with metrics
get_metrics(clf, problemtype, mtype, default_training_script, common_name, X_test, y_test, classes, modelname, settings, model_session, transformer_name, created_csv_files, test_data, model_start_time)
# now move to the proper models directory
os.chdir(model_dir)
os.system('python3 create_readme.py "%s"'%(os.getcwd()+'/'+foldername))
try:
os.chdir(problemtype+'_models')
except:
os.mkdir(problemtype+'_models')
os.chdir(problemtype+'_models')
shutil.move(model_dir+'/'+foldername, os.getcwd()+'/'+foldername)
############################################################
## COMPRESS MODELS ##
############################################################
if model_compress == True:
print(f.renderText('COMPRESSING MODEL'))
# now compress the model according to model type
if default_training_script in ['hypsklearn', 'scsr', 'tpot']:
# all .pickle files and can compress via scikit-small-ensemble
from sklearn.externals import joblib
# open up model
loadmodel=open(modelname, 'rb')
model = pickle.load(loadmodel)
loadmodel.close()
# compress - from 0 to 9. Higher value means more compression, but also slower read and write times.
# Using a value of 3 is often a good compromise.
joblib.dump(model, modelname[0:-7]+'_compressed.joblib',compress=3)
# can now load compressed models as such
# thenewmodel=joblib.load(modelname[0:-7]+'_compressed.joblib')
				# leads to up to ~10x reduction in model size and roughly 3x faster loading (e.g. 0.72 sec -> 0.23 sec)
				# note: this may not work if the scikit-learn / Python versions differ between the saving and loading environments.
elif default_training_script in ['devol', 'keras']:
# can compress with keras_compressor
import logging
from keras.models import load_model
from keras_compressor.compressor import compress
logging.basicConfig(
level=logging.INFO,
)
try:
print('compressing model!!')
model = load_model(modelname)
model = compress(model, 7e-1)
model.save(modelname[0:-3]+'_compressed.h5')
except:
print('error compressing model!!')
else:
# for everything else, we can compress pocketflow models in the future.
print('We cannot currently compress %s models. We are working on this!! \n\n The model will remain uncompressed for now'%(default_training_script))
else:
if mtype == 'r':
print('SKIPPING MODELTYPE - %s already exists in the %s folder: %s'%(common_name_model+'_regression', problemtype+'_models', str(model_listdir)))
elif mtype == 'c':
print('SKIPPING MODELTYPE - %s already exists in the %s folder: %s'%(common_name_model+'_classifier', problemtype+'_models', str(model_listdir)))
############################################################
## PRODUCTIONIZING MODELS ##
############################################################
# TO BE COMPLETED IN THE FUTURE!
except:
print('ERROR - error in modeling session')
| 36.973943 | 206 | 0.674915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25,223 | 0.39501 |
8c3b859ba3434a9c4221dd64e613da84a1bae1bc | 2,896 | py | Python | migrations/versions/a2c88ed3a94a_.py | crossgovernmentservices/csd_notes | 0d69d8cad86446327c6bcadc03f7192e7d7cfb71 | [
"MIT"
] | null | null | null | migrations/versions/a2c88ed3a94a_.py | crossgovernmentservices/csd_notes | 0d69d8cad86446327c6bcadc03f7192e7d7cfb71 | [
"MIT"
] | null | null | null | migrations/versions/a2c88ed3a94a_.py | crossgovernmentservices/csd_notes | 0d69d8cad86446327c6bcadc03f7192e7d7cfb71 | [
"MIT"
] | null | null | null | """empty message
Revision ID: a2c88ed3a94a
Revises: None
Create Date: 2016-04-27 16:54:34.185442
"""
# revision identifiers, used by Alembic.
revision = 'a2c88ed3a94a'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('role',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=True),
sa.Column('description', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id', name=op.f('pk_role')),
sa.UniqueConstraint('name', name=op.f('uq_role_name'))
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=255), nullable=True),
sa.Column('password', sa.String(), nullable=True),
sa.Column('full_name', sa.String(), nullable=True),
sa.Column('inbox_email', sa.String(length=255), nullable=True),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('confirmed_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id', name=op.f('pk_user')),
sa.UniqueConstraint('email', name=op.f('uq_user_email')),
sa.UniqueConstraint('inbox_email', name=op.f('uq_user_inbox_email'))
)
op.create_table('note',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('content', sa.Text(), nullable=True),
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('updated', sa.DateTime(), nullable=True),
sa.Column('is_email', sa.Boolean(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_note_user_id_user')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_note'))
)
op.create_table('user_roles',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['role.id'], name=op.f('fk_user_roles_role_id_role')),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_user_roles_user_id_user'))
)
op.create_table('note_history',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('note_id', sa.Integer(), nullable=True),
sa.Column('version', sa.Integer(), nullable=True),
sa.Column('content', sa.Text(), nullable=True),
sa.Column('created', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['note_id'], ['note.id'], name=op.f('fk_note_history_note_id_note')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_note_history'))
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('note_history')
op.drop_table('user_roles')
op.drop_table('note')
op.drop_table('user')
op.drop_table('role')
### end Alembic commands ###
| 39.135135 | 97 | 0.66989 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 932 | 0.321823 |
8c3bdf4d9c2e832816c1db1b0691647abd8a828a | 2,201 | py | Python | opus/opus_server.py | chezecz/research-project | 1a9c236398dc6b68967af0847b0461b390e760bd | [
"MIT"
] | null | null | null | opus/opus_server.py | chezecz/research-project | 1a9c236398dc6b68967af0847b0461b390e760bd | [
"MIT"
] | null | null | null | opus/opus_server.py | chezecz/research-project | 1a9c236398dc6b68967af0847b0461b390e760bd | [
"MIT"
] | null | null | null | import asyncio
import zlib
import queue
import threading
import audioop
from google.cloud import speech
from opuslib import Decoder
from config.config import Config
from config.config import Server
from config.config import Opus
buffer = queue.Queue()
buffer_response = queue.Queue()
dec = Decoder(Opus.rate, Opus.channels)
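# chunks() below drains the shared audio buffer; it stops after ~1 second without new packets, which ends one streaming request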
def chunks():
while True:
try:
            yield buffer.get(timeout=1)
except queue.Empty:
break
def get_transcription():
while True:
generator = chunks()
client = speech.SpeechClient()
config = speech.types.RecognitionConfig(
encoding=Config.encoding,
language_code=Config.language,
sample_rate_hertz=Opus.rate
)
        config = speech.types.StreamingRecognitionConfig(config=config, interim_results=True)
requests = (speech.types.StreamingRecognizeRequest(audio_content=chunk) for chunk in generator)
results = client.streaming_recognize(config, requests)
for result in results:
print(result)
for data in result.results:
for parts in data.alternatives:
buffer_response.put(parts.transcript)
def activate_job():
background = threading.Thread(target=get_transcription, args=())
background.daemon = True
background.start()
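# activate_job() runs the streaming recognizer in a daemon thread so the UDP server below stays responsive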
class EchoServerProtocol(asyncio.DatagramProtocol):
def connection_made(self, transport):
self.transport = transport
def datagram_received(self, data, addr):
message = dec.decode(zlib.decompress(data), Opus.chunk)
buffer.put(message)
if buffer_response.empty():
self.transport.sendto(b'', addr)
else:
self.transport.sendto(buffer_response.get().encode(), addr)
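        # reply protocol: an empty datagram means "no transcript ready yet", otherwise the latest transcript text is sent back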
def run_server():
loop = asyncio.get_event_loop()
listen = loop.create_datagram_endpoint(
EchoServerProtocol, local_addr=(Server.host, Server.port))
transport, protocol = loop.run_until_complete(listen)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
transport.close()
loop.close()
if __name__ == '__main__':
activate_job()
run_server() | 28.217949 | 103 | 0.671967 | 433 | 0.196729 | 130 | 0.059064 | 0 | 0 | 0 | 0 | 13 | 0.005906 |
8c3d3f0d60b1efa71f513e96282456978c895ca2 | 9,423 | py | Python | Artesian/_Query/VersionedQuery.py | ARKlab/Artesian.SDK-Python | 79b54ad00526f5a75c400422fd1c0c8532b67436 | [
"MIT"
] | 2 | 2022-02-21T17:03:04.000Z | 2022-02-24T17:14:02.000Z | Artesian/_Query/VersionedQuery.py | ARKlab/Artesian.SDK-Python | 79b54ad00526f5a75c400422fd1c0c8532b67436 | [
"MIT"
] | 2 | 2020-02-06T10:03:35.000Z | 2022-03-01T09:39:54.000Z | Artesian/_Query/VersionedQuery.py | ARKlab/Artesian.SDK-Python | 79b54ad00526f5a75c400422fd1c0c8532b67436 | [
"MIT"
] | 1 | 2019-08-01T06:20:58.000Z | 2019-08-01T06:20:58.000Z | from Artesian._Query.Query import _Query
from Artesian._Query.QueryParameters.VersionedQueryParameters import VersionedQueryParameters
from Artesian._Query.Config.ExtractionRangeConfig import ExtractionRangeConfig
from Artesian._Query.Config.VersionSelectionType import VersionSelectionType
from Artesian._Configuration.DefaultPartitionStrategy import DefaultPartitionStrategy
from Artesian._Query.Config.Granularity import Granularity
import urllib
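# Illustrative fluent usage (the query-service factory name and the market data id are assumptions, not part of this file):
# 	data = qs.createVersioned() \
# 		.forMarketData([100000001]) \
# 		.inGranularity(Granularity.DAY) \
# 		.forMUV() \
# 		.inAbsoluteDateRange("2020-01-01", "2020-02-01") \
# 		.execute()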
class _VersionedQuery(_Query):
__routePrefix = "vts"
def __init__(self, client, requestExecutor, partitionStrategy):
queryParameters = VersionedQueryParameters(None,ExtractionRangeConfig(), None, None, None, None, None, None, None)
_Query.__init__(self, client, requestExecutor, queryParameters)
        self.__partition = partitionStrategy
def forMarketData(self, ids):
super()._forMarketData(ids)
return self
def forFilterId(self, filterId):
super()._forFilterId(filterId)
return self
def inTimeZone(self, tz):
super()._inTimezone(tz)
return self
def inAbsoluteDateRange(self, start, end):
super()._inAbsoluteDateRange(start, end)
return self
def inRelativePeriodRange(self, pStart, pEnd=None):
super()._inRelativePeriodRange(pStart, pEnd)
return self
def inRelativePeriod(self, extractionPeriod):
super()._inRelativePeriod(extractionPeriod)
return self
def inRelativeInterval(self, relativeInterval):
super()._inRelativeInterval(relativeInterval)
return self
def withTimeTransform(self, tr):
self._queryParameters.transformId = tr
return self
def inGranularity(self, granularity):
self._queryParameters.granularity = granularity
return self
def forMUV(self):
self._queryParameters.versionSelectionType = VersionSelectionType.MUV
return self
def forLastOfDays(self, start, end=None):
self._queryParameters.versionSelectionType = VersionSelectionType.LAST_OF_DAYS
if(start.startswith("P")):
if(end is None):
self._queryParameters.versionSelectionConfig.versionsRange.period = start
else:
self._queryParameters.versionSelectionConfig.versionsRange.periodFrom = start
self._queryParameters.versionSelectionConfig.versionsRange.periodTo = end
else:
self._queryParameters.versionSelectionConfig.versionsRange.dateStart = start
self._queryParameters.versionSelectionConfig.versionsRange.dateEnd = end
return self
def forLastOfMonths(self, start, end=None):
self._queryParameters.versionSelectionType = VersionSelectionType.LAST_OF_MONTHS
if(start.startswith("P")):
if(end is None):
self._queryParameters.versionSelectionConfig.versionsRange.period = start
else:
self._queryParameters.versionSelectionConfig.versionsRange.periodFrom = start
self._queryParameters.versionSelectionConfig.versionsRange.periodTo = end
else:
self._queryParameters.versionSelectionConfig.versionsRange.dateStart = start
self._queryParameters.versionSelectionConfig.versionsRange.dateEnd = end
return self
def forLastNVersions(self, lastN):
self._queryParameters.versionSelectionType = VersionSelectionType.LASTN
self._queryParameters.versionSelectionConfig.lastN = lastN
return self
def forVersion(self, version):
self._queryParameters.versionSelectionType = VersionSelectionType.VERSION
self._queryParameters.versionSelectionConfig.version = version
return self
def forMostRecent(self, start, end=None):
self._queryParameters.versionSelectionType = VersionSelectionType.MOST_RECENT
if(start.startswith("P")):
if(end is None):
self._queryParameters.versionSelectionConfig.versionsRange.period = start
else:
self._queryParameters.versionSelectionConfig.versionsRange.periodFrom = start
self._queryParameters.versionSelectionConfig.versionsRange.periodTo = end
else:
self._queryParameters.versionSelectionConfig.versionsRange.dateStart = start
self._queryParameters.versionSelectionConfig.versionsRange.dateEnd = end
return self
def withFillNull(self):
self._queryParameters.fill = NullFillStategy()
return self
def withFillNone(self):
self._queryParameters.fill = NoFillStategy()
return self
def withFillLatestValue(self, period):
self._queryParameters.fill = FillLatestStategy(period)
return self
def withFillCustomValue(self, val):
self._queryParameters.fill = FillCustomStategy(val)
return self
def execute(self):
urls = self.__buildRequest()
return super()._exec(urls)
def executeAsync(self):
urls = self.__buildRequest()
return super()._execAsync(urls)
def __buildRequest(self):
self.__validateQuery()
qps = self.__partition.Partitionversioned([self._queryParameters])
urls = []
for qp in qps:
url = f"/{self.__routePrefix}/{self.__buildVersionRoute()}/{self.__getGranularityPath(qp.granularity)}/{super()._buildExtractionRangeRoute(qp)}?_=1"
if not (qp.ids is None):
sep = ","
ids= sep.join(map(str,qp.ids))
enc = urllib.parse.quote_plus(ids)
url = url + "&id=" + enc
if not (qp.filterId is None):
url = url + "&filterId=" + qp.filterId
if not (qp.timezone is None):
url = url + "&tz=" + qp.timezone
if not (qp.transformId is None):
url = url + "&tr=" + qp.transformId
if not (qp.fill is None):
url = url + "&" + qp.fill.getUrlParams()
urls.append(url)
return urls
def __validateQuery(self):
super()._validateQuery()
if (self._queryParameters.granularity is None):
raise Exception("Extraction granularity must be provided. Use .InGranularity() argument takes a granularity type")
if (self._queryParameters.versionSelectionType is None):
raise Exception("Version selection must be provided. Provide a version to query. eg .ForLastOfDays() arguments take a date range , period or period range")
def __buildVersionRoute(self):
switcher = {
VersionSelectionType.LASTN: f"Last{self._queryParameters.versionSelectionConfig.lastN}",
VersionSelectionType.MUV: f"MUV",
VersionSelectionType.LAST_OF_DAYS: f"LastOfDays/" + self.__buildVersionRange(),
VersionSelectionType.LAST_OF_MONTHS: f"LastOfMonths/" + self.__buildVersionRange(),
VersionSelectionType.MOST_RECENT: f"MostRecent/" + self.__buildVersionRange(),
VersionSelectionType.VERSION: f"Version/{self._queryParameters.versionSelectionConfig.version}"
}
vr = switcher.get(self._queryParameters.versionSelectionType, "VType")
if vr == "VType" :
raise Exception("Not supported VersionType")
return vr
def __buildVersionRange(self):
vr=""
if (self._queryParameters.versionSelectionConfig.versionsRange.dateStart is not None) and (self._queryParameters.versionSelectionConfig.versionsRange.dateEnd is not None):
vr = f"{self._queryParameters.versionSelectionConfig.versionsRange.dateStart}/{self._queryParameters.versionSelectionConfig.versionsRange.dateEnd}"
elif (self._queryParameters.versionSelectionConfig.versionsRange.period is not None):
vr = f"{self._queryParameters.versionSelectionConfig.versionsRange.period}"
elif (self._queryParameters.versionSelectionConfig.versionsRange.periodFrom is not None) and (self._queryParameters.versionSelectionConfig.versionsRange.periodTo is not None):
vr = f"{self._queryParameters.versionSelectionConfig.versionsRange.dateStart}/{self._queryParameters.versionSelectionConfig.versionsRange.dateEnd}"
return vr
def __getGranularityPath(self,granularity):
switcher = {
Granularity.DAY: "Day",
Granularity.FIFTEEN_MINUTE: "FifteenMinute",
Granularity.HOUR: "Hour" ,
Granularity.MINUTE: "Minute",
Granularity.MONTH: "Month",
Granularity.QUARTER: "Quarter",
Granularity.TEN_MINUTE: "TenMinute",
Granularity.THIRTY_MINUTE: "ThirtyMinute",
Granularity.WEEK: "Week",
Granularity.YEAR: "Year",
}
vr = switcher.get(granularity, "VGran")
if vr == "VGran" :
raise Exception("Not supported Granularity")
return vr
class NullFillStategy:
def getUrlParams(self):
return "fillerK=Null"
class NoFillStategy:
def getUrlParams(self):
return "fillerK=NoFill"
class FillLatestStategy:
def __init__(self, period):
self.period = period
def getUrlParams(self):
return f"fillerK=LatestValidValue&fillerP={self.period}"
class FillCustomStategy:
def __init__(self, val):
self.val = val
def getUrlParams(self):
return f"fillerK=CustomValue&fillerDV={self.val}" | 48.823834 | 184 | 0.685875 | 8,964 | 0.951289 | 0 | 0 | 0 | 0 | 0 | 0 | 1,247 | 0.132336 |
8c3d87d2655207c96ef42a62d50d37072057f3ba | 2,061 | py | Python | start.py | JoshuaMcroberts/DeliveryDilemmaLite | daac96edc140c82695b7d4103b1ecddf2742d1e6 | [
"MIT"
] | null | null | null | start.py | JoshuaMcroberts/DeliveryDilemmaLite | daac96edc140c82695b7d4103b1ecddf2742d1e6 | [
"MIT"
] | null | null | null | start.py | JoshuaMcroberts/DeliveryDilemmaLite | daac96edc140c82695b7d4103b1ecddf2742d1e6 | [
"MIT"
] | null | null | null | from libraries import *
from text import *
from game import *
from reception import recep
# DISPLAY HELP TEXT
def help_text():
clear_screen()
print_tab("Help text will go here!")
# DISPLAY ABOUT TEXT
def cred_text():
clear_screen()
print_tab(pr_colour("l_green","-- CREDITS --"))
print_tab("Intro Story Reviewers - C. Cadden, J. Harrower, S. Kavuri ")
print_tab("Receptionsist Name - S. Kavuri")
print_tab("Alpha Testers - R. McRoberts, D. McRoberts, A. McRoberts")
print_tab("Beta Testers - J. Smyth, N. Smyth")
print_tab("User Testers - P. Shields, N. Scott-Murphy")
# DISPLAY ASCII ART
def game_intro():
clear_screen()
# ascii_del_dil()
print(pr_colour("l_blue","\n\tWelcome to Delviery Dilemma"))
s_pause()
# DISPLAYS GAME OVER ASCII ART
def game_over():
ascii_game_over()
# GAME FUNCTION
def new_game():
clear_screen()
game = N_game()
game.enter_name()
game.set_courier()
game.create_char()
pc = game.get_character()
cour = game.get_courier()
pause()
act_1_intro(cour, pc)
recep(game)
game_over()
def menu():
ext = False
while not ext:
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- MAIN MENU --") + "\n")
print_tab("[1] Start\n")
print_tab("[2] Help\n")
print_tab("[3] Credits\n")
print_tab("[4] Exit\n")
try:
main_op = int(input("\tEnter Option: "))
except:
main_op = 10
if main_op == 1:
new_game()
elif main_op == 2:
help_text()
pause()
elif main_op == 3:
cred_text()
pause()
elif main_op == 4:
print("")
print_tab(pr_colour("l_orange","Bye Bye\n"))
ext = True
else:
print_tab("Select a Number from 1-4")
pause()
# MAIN FUNCTION
def main():
game_intro()
menu()
if __name__ == "__main__":
main()
| 22.16129 | 81 | 0.554585 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 642 | 0.311499 |
8c3e02647638f0368a8afcd89e3da556e43acf60 | 449 | py | Python | apps/store/urls.py | Quanfita/QTechCode | 78a3cac617a63bd46272461e6b1e89411e2fb130 | [
"MIT"
] | null | null | null | apps/store/urls.py | Quanfita/QTechCode | 78a3cac617a63bd46272461e6b1e89411e2fb130 | [
"MIT"
] | 9 | 2022-01-16T04:23:33.000Z | 2022-03-31T20:39:58.000Z | apps/store/urls.py | Quanfita/QTechCode | 78a3cac617a63bd46272461e6b1e89411e2fb130 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.urls import path
# from .views import goview
from .views import IndexView, DetailView, PayView, CallbackView, DeliverView
urlpatterns = [
    path('', IndexView.as_view(), name='index'), # home page, natural ordering
path('goods/<slug:slug>/', DetailView.as_view(), name='detail'),
path('pay/', PayView, name='pay'),
path('callback/', CallbackView, name='callback'),
path('deliver/', DeliverView, name='deliver'),
] | 37.416667 | 76 | 0.665924 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.347732 |
8c3e725f70992e52faf86f3d51a34179d2a39e52 | 853 | py | Python | day3/test_crossed_wires_part2.py | capsulecorplab/adventofcode2019 | a1a27f37dde23662bdca6950680d159a42035c36 | [
"MIT"
] | null | null | null | day3/test_crossed_wires_part2.py | capsulecorplab/adventofcode2019 | a1a27f37dde23662bdca6950680d159a42035c36 | [
"MIT"
] | null | null | null | day3/test_crossed_wires_part2.py | capsulecorplab/adventofcode2019 | a1a27f37dde23662bdca6950680d159a42035c36 | [
"MIT"
] | null | null | null | from crossed_wires import FuelManagementSystem
import pytest
class Test1:
@pytest.fixture
def fms(self):
return FuelManagementSystem("R8,U5,L5,D3", "U7,R6,D4,L4")
def test_steps_combined_min(self, fms):
assert fms.steps_combined_min() == 30
class Test2:
@pytest.fixture
def fms(self):
return FuelManagementSystem(
"R75,D30,R83,U83,L12,D49,R71,U7,L72", "U62,R66,U55,R34,D71,R55,D58,R83"
)
def test_steps_combined_min(self, fms):
assert fms.steps_combined_min() == 610
class Test3:
@pytest.fixture
def fms(self):
return FuelManagementSystem(
"R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51",
"U98,R91,D20,R16,D67,R40,U7,R15,U6,R7",
)
def test_steps_combined_min(self, fms):
assert fms.steps_combined_min() == 410
| 24.371429 | 83 | 0.640094 | 783 | 0.917937 | 0 | 0 | 457 | 0.535756 | 0 | 0 | 178 | 0.208675 |
8c3ed5966f5972419e367c22f46baad392ad9753 | 8,958 | py | Python | pomdp.py | gongjue/pocm | 1f8ae819aaa7fa5f25878a0662a23cb457c1180b | [
"MIT"
] | null | null | null | pomdp.py | gongjue/pocm | 1f8ae819aaa7fa5f25878a0662a23cb457c1180b | [
"MIT"
] | null | null | null | pomdp.py | gongjue/pocm | 1f8ae819aaa7fa5f25878a0662a23cb457c1180b | [
"MIT"
] | null | null | null | import numpy as np
import cvxpy as cvx
import util
def set_contains_array(S, a):
"""
:param S: list of np.ndarray
:param a: np.ndarray
:return: contains, 0 or 1
"""
contains = 0
for b in S:
if not (a - b).any(): # if a contained in S
contains = 1
return contains
def set_sum_two(A, B):
"""
:param A: list of np.ndarray
:param B: list of np.ndarray
:return: list of np.ndarray
"""
C = []
for a in A:
for b in B:
if not set_contains_array(C, a + b):
C.append(a + b)
return C
def set_sum_list(Omega):
"""
Set sum of multiple set of np.ndarray
:param Omega: list of list of np.ndarray
:return: list of np.ndarray
"""
S = Omega[0]
# print 'len(Omega) =', len(Omega)
# print 0, 'S =', S
for i in range(1, len(Omega)):
# print i, 'Omega[i] =',Omega[i]
S = set_sum_two(S, Omega[i])
# print i, 'S =', S
return S
def pointwise_dominate(w, U):
"""
    Test if w is point-wise dominated by some u in U
:param w: np.ndarray
:param U: list of np.ndarray
:return:
"""
for u in U:
if np.all(w < u):
return True
return False
def lp_dominate(w, U):
"""
Computes the belief in which w improves U the most.
With LP in White & Clark
:param w: np.ndarray
:param U: list of np.ndarray
:return: b if d >= 0 else None
"""
# print("LP dominate")
if len(U) == 0:
return w
S = len(w)
d = cvx.Variable()
b = cvx.Variable(S)
objective = cvx.Maximize(d)
# print("U", U)
constraints = [b.T*(w-u) >= d for u in U] + [np.sum(b) == 1]
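    # i.e. find a belief b (summing to 1) that maximizes the margin d by which w beats every u in U;
    # w is worth keeping only if the optimal d is non-negative (w wins at some belief)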
prob = cvx.Problem(objective, constraints)
result = prob.solve()
# print("d =", d.value)
if d.value >= 0:
return np.ravel(b.value)
else:
return None
def dec_dominate(w, U):
"""
Computes the belief in which w improves U the most.
With Bender's decomposition (Walraven & Spaan, 2017)
:param w: np.ndarray
:param U: list of np.ndarray
:return: b if d >= 0 else None
"""
if len(U) == 0:
return w
S = len(w)
d = cvx.Variable()
b = cvx.Variable(S)
objective = cvx.Maximize(d)
# print("U", U)
constraints = [np.sum(b) == 1]
b_ = np.random.random(S)
b_ = b_ / np.sum(b_)
U_ = []
while 1:
_b = b_
u_ = U[np.argmin([np.dot((w - U[i]), _b) for i in range(len(U))])]
constraints += [d <= b.T*(w-u_)]
U_.append(u_)
prob = cvx.Problem(objective, constraints)
_ = prob.solve()
b_ = np.ravel(b.value)
if not (b_ - _b).any():
break
if d.value >= 0:
return _b
else:
return None
def lex_less(u, w):
if w is None:
return False
for i in range(len(u)):
if u[i] > w[i]:
return False
return True
def best_point(b, U):
# print("Find best")
_max = -np.inf
w = None
for i in range(len(U)):
u = U[i]
# print("b", b)
# print("u", u)
x = np.dot(b, u)
# print("x", x)
if x > _max or (x == _max and lex_less(u, U[w])):
w = i
_max = x
# print("max", _max)
return w
def prune(W, A=None):
# print("prune", W)
D, E = [], []
while len(W) > 0:
w = W[-1]
if pointwise_dominate(w, D):
W.pop()
else:
# b = lp_dominate(w, D)
b = dec_dominate(w, D)
if b is None:
W.pop()
else:
i = best_point(b, W)
D.append(W[i])
if A is not None:
E.append(A[i])
W.pop(i)
if A is not None:
return D, E
else:
return D
def set_union(V):
V_ = []
for v in V:
V_ += v
return V_
class POMDP:
def __init__(self, P=None, Z=None, R=None, g=None, alpha=1.0):
self.P = P # m x n x n: a(t)->s(t)->s(t+1)
self.Z = Z # m x n x k: a(t)->s(t+1)->o(t+1)
self.R = R # m x n x n: a(t)->s(t+1)->s(t+1)
self.g = g # n x 1: s(T)
self.alpha = alpha # discount factor
self.nActions = self.Z.shape[0] # m
self.nStates = self.Z.shape[1] # n
self.nLevels = self.Z.shape[2] # k
if g is None:
self.g = np.zeros(self.nStates)
# print self.nActions, self.nStates, self.nLevels
def update_belief(self, b, a, o):
p = self.Z[a, :, o] * self.P[a].T.dot(b)
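        # unnormalized Bayes update: p(s') = Z[a, s', o] * sum_s P[a, s, s'] * b(s)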
return p / p.sum()
def monahan_enumeration(self, V):
"""construct the set of Omega
:param V: input list of alpha vectors
"""
V_, A_ = [], []
for a in range(self.nActions):
# print("Action", a)
Va = []
_r = np.sum(self.P[a] * self.R[a], axis=1) / self.nLevels
# print("_r:", _r)
for z in range(self.nLevels):
# print("Obs", z)
Vaz = [_r + self.alpha * (self.Z[a,:,z] * v).dot(self.P[a]) for v in V]
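            # one-step backup for (action a, observation z): the per-observation reward share _r plus the
            # discounted, transition- and observation-weighted value of each alpha-vector v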
# print("Vaz", Vaz)
if len(Va) > 0:
Va = prune(set_sum_two(Va, Vaz)) # incremental pruning
else:
Va = Vaz
A_ += [a for _ in Va]
V_ += Va
V_, A_ = prune(V_, A_)
return V_, A_
def transition(self, a, s):
return np.random.choice(self.nStates, p=self.P[a, s])
def emmission(self, a, s):
return np.random.choice(self.nStates, p=self.Z[a, s])
@staticmethod
def optimal_action(b, V, A):
assert len(V) == len(A)
values = [np.dot(b, v) for v in V]
opt_idx = np.argmax(values)
return A[opt_idx], V[opt_idx]
def solve(self, T):
V = self.g
Values = [None for _ in range(T)] + [[self.g]]
Actions = [None for _ in range(T)]
for t in range(T):
V, A = self.monahan_enumeration(V)
Values[T-1-t] = V
Actions[T-1-t] = A
return Values, Actions
def plan(self, T, initial_belief=None, perform=False):
V = self.g
if initial_belief is None:
initial_belief = np.ones(self.nStates) / self.nStates
b = initial_belief
Values = [None for _ in range(T)] + [[self.g]]
Actions = [None for _ in range(T)]
for t in range(T):
V, A = self.monahan_enumeration(V)
Values[T - 1 - t] = V
Actions[T - 1 - t] = A
a0, v0 = self.optimal_action(b, Values[0], Actions[0])
if not perform:
return a0, v0
s = np.random.choice(self.nStates, p=b)
actions, states, observations, reward = [], [], [], 0.0
for t in range(T):
a, v = self.optimal_action(b, Values[t], Actions[t])
# print('a', a)
# print('v', v)
_s = s
s = self.transition(a, s)
o = self.transition(a, s)
b = self.update_belief(b, a, o)
states.append(_s)
actions.append(s)
observations.append(o)
reward += self.R[a, _s, s] * self.alpha ** t
return a0, v0, actions, states, observations, reward
def test_pomdp(nActions, nStates, nLevels, alpha):
# P = np.array([
# [[0.25, 0.75], [0.6 , 0.4 ]],
# [[0.5 , 0.5 ], [0.7 , 0.3 ]]])
# Z = np.array([
# [[0.55, 0.45], [0.3 , 0.7 ]],
# [[0.65, 0.35], [0.25, 0.75]]])
# R = np.array([
# [[2., 2. ], [ 0., 0.]],
# [[3., 3. ], [-1., -1.]]])
# g = np.array([2., -1.])
P = util.normalize(np.random.random(size=(nActions, nStates, nStates)), axis=2)
Z = util.normalize(np.random.random(size=(nActions, nStates, nLevels)), axis=2)
R = util.normalize(np.random.random(size=(nActions, nStates, nStates)), axis=2)
g = util.normalize(np.random.random(size=(nStates)), axis=0)
pomdp = POMDP(P, Z, R, g, alpha)
T = 10
V = pomdp.g
a0, v0 = pomdp.plan(T, initial_belief=None, perform=False)
# a0, v0, actions, states, observations, reward = pomdp.plan(T, initial_belief=None, perform=True)
# print('a0 =', a0, 'v0 =', v0)
# print('actions:', actions)
# print('states:', states)
# print('observations:', observations)
# print('reward:', reward)
# for t in range(T):
# print("Iteration", t+1)
# V, A = pomdp.monahan_enumeration(V)
# for v, a in zip(V, A):
# print(v, a)
if __name__ == "__main__":
# import timeit
# print(timeit.timeit("main()"))
import time
for s in range(123, 133):
start_time = time.time()
np.random.seed(s)
print("===== SEED %d =====" %(s))
test_pomdp(nActions=2, nStates=3, nLevels=3, alpha=0.9975)
end_time = time.time()
print(end_time - start_time)
| 28.169811 | 102 | 0.493414 | 3,392 | 0.378656 | 0 | 0 | 195 | 0.021768 | 0 | 0 | 2,346 | 0.261889 |
8c3ff9e8f67f58e9956e1fd3e9e7faf6decba1b4 | 1,105 | py | Python | main.py | bernatfogarasi/lempel-ziv-compression | 5c739c1bcbc405e6977e991c51fb48d687780d54 | [
"MIT"
] | null | null | null | main.py | bernatfogarasi/lempel-ziv-compression | 5c739c1bcbc405e6977e991c51fb48d687780d54 | [
"MIT"
] | null | null | null | main.py | bernatfogarasi/lempel-ziv-compression | 5c739c1bcbc405e6977e991c51fb48d687780d54 | [
"MIT"
] | null | null | null | def main():
STRING = "aababbabbaaba"
compressed = compress(STRING)
print(compressed)
decompressed = decompress(compressed)
print(decompressed)
def compress(string):
encode = {} # string -> code
known = ""
count = 0
result = []
for letter in string:
if known + letter in encode:
known += letter
else:
count += 1
encode[known + letter] = count
result.append([encode[known] if known else 0, letter])
known = ""
if known:
result.append([encode[known], ""])
return result
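# e.g. for the STRING used in main(): compress("aababbabbaaba") == [[0, 'a'], [1, 'b'], [2, 'b'], [3, 'a'], [2, 'a']]
# (each pair is [dictionary index of the longest already-known prefix, 0 meaning empty, followed by the next literal])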
def decompress(compressed):
string = ""
decode = {} # code -> string
known = ""
count = 0
for code, new in compressed:
if not code:
count += 1
decode[count] = new
string += new
elif not new:
string += decode[code]
else:
count += 1
known = decode[code]
decode[count] = known + new
string += known + new
return string
if __name__ == "__main__":
main()
| 22.55102 | 66 | 0.513122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.060633 |
8c40196b9c28971e7054c903a03e0bb918a945dd | 9,448 | py | Python | scripts/process_gh_mapping.py | ptrebert/reference-data | 7bca069b8995660252d4f601976f9f7abaaf063b | [
"MIT"
] | null | null | null | scripts/process_gh_mapping.py | ptrebert/reference-data | 7bca069b8995660252d4f601976f9f7abaaf063b | [
"MIT"
] | null | null | null | scripts/process_gh_mapping.py | ptrebert/reference-data | 7bca069b8995660252d4f601976f9f7abaaf063b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
import os as os
import sys as sys
import traceback as trb
import argparse as argp
import csv as csv
import functools as fnt
import collections as col
import multiprocessing as mp
import numpy as np
import pandas as pd
import intervaltree as ivt
def parse_command_line():
"""
:return:
"""
parser = argp.ArgumentParser()
parser.add_argument('--input', '-i', type=str, dest='inputfile')
parser.add_argument('--cons', '-c', type=str, dest='conservation')
parser.add_argument('--direction', '-d', type=str, choices=['target', 'query'], dest='direction')
parser.add_argument('--chromosomes', '-chr', type=str, dest='chromosomes')
parser.add_argument('--workers', '-w', type=int, default=4, dest='workers')
parser.add_argument('--output', '-o', type=str, dest='outputfile')
args = parser.parse_args()
return args
def compute_weights(cons, enh):
"""
:param enh:
:param cons:
:return:
"""
s, e = enh['start'], enh['end']
enh['ftcons_enh_abs_min'] = np.round(cons[s:e].min(), 2)
enh['ftcons_enh_abs_max'] = np.round(cons[s:e].max(), 2)
enh['ftcons_enh_abs_mean'] = np.round(cons[s:e].mean(), 2)
enh['ftcons_enh_abs_median'] = np.round(cons[s:e].median(), 2)
total_score = sum([float(s) for s in enh['assoc_score'].split(',')])
enh['weight'] = 1000 - np.round(float(enh['enhancer_score']) * total_score, 2)
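    # lower weight = stronger candidate: 1000 minus (enhancer score x summed gene-association scores);
    # the weight is later used as a tie-breaker when pruning overlapping enhancers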
return enh
def process_mapped_enhancer(params):
"""
:param params:
:return:
"""
inputfile, consfile, chrom = params
with pd.HDFStore(consfile, 'r') as hdf:
cons_scores = hdf[chrom]
comp_wt = fnt.partial(compute_weights, cons_scores)
header = ['chrom', 'start', 'end', 'GHid', 'enhancer_score', 'is_elite', 'cluster_id',
'name', 'symbol', 'assoc_score', 'enh_gene_dist']
regions = []
with open(inputfile, 'r', newline='') as infile:
rows = csv.DictReader(infile, delimiter='\t', fieldnames=header)
for r in rows:
if r['chrom'] == chrom:
r['start'] = int(r['start'])
r['end'] = int(r['end'])
l = r['end'] - r['start']
if l < 2:
continue
r = comp_wt(r)
regions.append(comp_wt(r))
ivtree = ivt.IntervalTree()
for r in regions:
ivtree[r['start']:r['end']] = r['GHid'], r['ftcons_enh_abs_mean'], r['weight']
regions = sorted(regions, key=lambda d: d['ftcons_enh_abs_mean'])
blacklist = set()
whitelist = set()
for item in regions:
if item['GHid'] in blacklist:
continue
overlaps = ivtree[item['start']:item['end']]
if len(overlaps) == 1:
whitelist.add(overlaps.pop().data[0])
elif len(overlaps) > 1:
overlaps = [o for o in sorted(overlaps, key=lambda i: (i.data[1], i.data[2])) if o.data[0] not in blacklist]
whitelist.add(overlaps[0].data[0])
[blacklist.add(o.data[0]) for o in overlaps[1:]]
else:
raise AssertionError('No self-overlap in tree: {}'.format(item))
regions = sorted([r for r in regions if r['GHid'] in whitelist], key=lambda x: (x['start'], x['end']))
return regions
def process_target_enhancer(args):
"""
:param args:
:return:
"""
with pd.HDFStore(args.conservation, 'r') as hdf:
chroms = [k.strip('/') for k in hdf.keys()]
params = [(args.inputfile, args.conservation, c) for c in chroms]
header = ['chrom', 'start', 'end', 'GHid', 'enhancer_score', 'is_elite',
'name', 'symbol', 'assoc_score', 'ftcons_enh_abs_mean',
'ftcons_enh_abs_median', 'ftcons_enh_abs_min', 'ftcons_enh_abs_max']
with mp.Pool(args.workers) as pool:
res = pool.imap_unordered(process_mapped_enhancer, params)
outbuffer = []
for regions in res:
outbuffer.extend(regions)
outbuffer = sorted(outbuffer, key=lambda d: (d['chrom'], d['start'], d['end']))
with open(args.outputfile, 'w') as out:
_ = out.write('#')
writer = csv.DictWriter(out, fieldnames=header, delimiter='\t', extrasaction='ignore')
writer.writeheader()
writer.writerows(outbuffer)
return
def process_annotated_enhancer(params):
"""
:param params:
:return:
"""
enh_file, chrom = params
header = ['chrom', 'start', 'end', 'GHid', 'enhancer_score', 'is_elite',
'ftcons_enh_abs_mean', 'ftcons_enh_abs_median',
'ftcons_enh_abs_min', 'ftcons_enh_abs_max']
enh_collect = col.defaultdict(list)
with open(enh_file, 'r') as infile:
rows = csv.DictReader(infile, delimiter='\t', fieldnames=header)
for row in rows:
if row['chrom'] == chrom:
row['start'] = int(row['start'])
row['end'] = int(row['end'])
row['enhancer_score'] = float(row['enhancer_score'])
row['ftcons_enh_abs_mean'] = float(row['ftcons_enh_abs_mean'])
enh_collect[row['GHid']].append(row)
enh_collect = merge_split_enhancers(enh_collect)
ivtree = ivt.IntervalTree()
for r in enh_collect:
ivtree[r['start']:r['end']] = r['GHid'], r['ftcons_enh_abs_mean'], r['ftcons_enh_abs_min']
enh_collect = sorted(enh_collect, key=lambda d: d['ftcons_enh_abs_mean'])
blacklist = set()
whitelist = set()
for item in enh_collect:
ghid = item['GHid']
if ghid in blacklist or ghid in whitelist:
continue
overlaps = ivtree[item['start']:item['end']]
if len(overlaps) == 1:
# that is: only self overlap
whitelist.add(ghid)
continue
elif len(overlaps) > 1:
if any([o.data[0] in whitelist for o in overlaps if o.data[0] != ghid]):
# region overlaps with a whitelist region -> blacklist
blacklist.add(ghid)
continue
overlaps = [o for o in sorted(overlaps, key=lambda i: (i.data[1], i.data[2])) if o.data[0] not in blacklist]
if overlaps[0].data[0] == ghid:
# the query region has highest conservation
# others can safely be blacklisted
whitelist.add(ghid)
[blacklist.add(o.data[0]) for o in overlaps[1:]]
else:
# another region is selected; could be that among
# the remaining regions, others might also be feasible
blacklist.add(ghid)
whitelist.add(overlaps[0].data[0])
else:
raise AssertionError('No self-overlap in tree: {}'.format(item))
enh_collect = sorted([r for r in enh_collect if r['GHid'] in whitelist], key=lambda x: (x['start'], x['end']))
return enh_collect
def merge_split_enhancers(collector):
"""
:param collector:
:return:
"""
mrg_collect = []
for ghid, splits in collector.items():
if len(splits) == 1:
mrg_collect.append(splits[0])
continue
c = 1
splits = sorted(splits, key=lambda d: (d['start'], d['end']))
s, e = splits[0]['start'], splits[1]['end']
for idx, entry in enumerate(splits[:-1]):
if splits[idx+1]['start'] <= e + 100:
s = min(s, splits[idx+1]['start'])
e = max(e, splits[idx+1]['end'])
else:
new_enh = dict(splits[0])
new_enh['GHid'] = new_enh['GHid'] + '-{}-{}'.format(new_enh['chrom'].strip('chr'), c)
new_enh['start'] = s
new_enh['end'] = e
mrg_collect.append(new_enh)
c += 1
s, e = splits[idx+1]['start'], splits[idx+1]['end']
new_enh = dict(splits[0])
new_enh['GHid'] = new_enh['GHid'] + '-{}-{}'.format(new_enh['chrom'].strip('chr'), c)
new_enh['start'] = s
new_enh['end'] = e
mrg_collect.append(new_enh)
mrg_collect = [m for m in mrg_collect if m['end'] - m['start'] > 49]
return mrg_collect
def process_query_enhancer(args):
"""
:param args:
:return:
"""
with open(args.chromosomes, 'r') as infile:
chroms = [l.split()[0].strip() for l in infile.readlines()]
header = ['chrom', 'start', 'end', 'GHid', 'enhancer_score', 'is_elite',
'ftcons_enh_abs_mean', 'ftcons_enh_abs_median',
'ftcons_enh_abs_min', 'ftcons_enh_abs_max']
params = [(args.inputfile, c) for c in chroms]
with mp.Pool(args.workers) as pool:
res = pool.imap_unordered(process_annotated_enhancer, params)
outbuffer = []
for regions in res:
outbuffer.extend(regions)
outbuffer = sorted(outbuffer, key=lambda d: (d['chrom'], d['start'], d['end']))
with open(args.outputfile, 'w') as out:
_ = out.write('#')
writer = csv.DictWriter(out, fieldnames=header, delimiter='\t', extrasaction='ignore')
writer.writeheader()
writer.writerows(outbuffer)
return
if __name__ == '__main__':
try:
args = parse_command_line()
if args.direction == 'target':
process_target_enhancer(args)
else:
process_query_enhancer(args)
except Exception as err:
trb.print_exc()
raise err
else:
sys.exit(0)
| 37.943775 | 120 | 0.572714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,149 | 0.227456 |
8c413a0d2f5f6ff5e14c6c6a30c555a0cca041dd | 3,720 | py | Python | tests/test_save_load.py | dev-rinchin/RePlay | 85f0a17af73868a8284e06a7688845f072edee15 | [
"Apache-2.0"
] | 63 | 2021-09-03T19:09:09.000Z | 2022-03-31T12:35:35.000Z | tests/test_save_load.py | dev-rinchin/RePlay | 85f0a17af73868a8284e06a7688845f072edee15 | [
"Apache-2.0"
] | 63 | 2021-09-03T19:06:31.000Z | 2022-03-30T10:06:03.000Z | tests/test_save_load.py | dev-rinchin/RePlay | 85f0a17af73868a8284e06a7688845f072edee15 | [
"Apache-2.0"
] | 2 | 2021-12-23T16:57:33.000Z | 2022-02-22T07:54:03.000Z | # pylint: disable-all
from os.path import dirname, join
import pytest
import pandas as pd
from implicit.als import AlternatingLeastSquares
from pyspark.sql import functions as sf
import replay
from replay.model_handler import save, load
from replay.models import *
from tests.utils import sparkDataFrameEqual, long_log_with_features, spark
@pytest.fixture
def user_features(spark):
return spark.createDataFrame(
[("u1", 20.0, -3.0, 1), ("u2", 30.0, 4.0, 0), ("u3", 40.0, 0.0, 1)]
).toDF("user_id", "age", "mood", "gender")
@pytest.fixture
def df():
folder = dirname(replay.__file__)
return pd.read_csv(
join(folder, "../experiments/data/ml1m_ratings.dat"),
sep="\t",
names=["user_id", "item_id", "relevance", "timestamp"],
).head(1000)
@pytest.mark.parametrize(
"recommender",
[
ALSWrap,
ADMMSLIM,
KNN,
MultVAE,
NeuroMF,
PopRec,
SLIM,
UserPopRec,
LightFMWrap,
],
)
def test_equal_preds(long_log_with_features, recommender, tmp_path):
path = (tmp_path / "test").resolve()
model = recommender()
model.fit(long_log_with_features)
base_pred = model.predict(long_log_with_features, 5)
save(model, path)
m = load(path)
new_pred = m.predict(long_log_with_features, 5)
sparkDataFrameEqual(base_pred, new_pred)
def test_random(long_log_with_features, tmp_path):
path = (tmp_path / "random").resolve()
model = RandomRec(seed=1)
model.fit(long_log_with_features)
base_pred = model.predict(long_log_with_features, 5)
save(model, path)
m = load(path)
new_pred = m.predict(long_log_with_features, 5)
sparkDataFrameEqual(base_pred, new_pred)
def test_rules(df, tmp_path):
path = (tmp_path / "rules").resolve()
model = AssociationRulesItemRec()
model.fit(df)
base_pred = model.get_nearest_items(["i1"], 5, metric="lift")
save(model, path)
m = load(path)
new_pred = m.get_nearest_items(["i1"], 5, metric="lift")
sparkDataFrameEqual(base_pred, new_pred)
def test_word(df, tmp_path):
path = (tmp_path / "word").resolve()
model = Word2VecRec()
model.fit(df)
base_pred = model.predict(df, 5)
save(model, path)
m = load(path)
new_pred = m.predict(df, 5)
sparkDataFrameEqual(base_pred, new_pred)
def test_implicit(long_log_with_features, tmp_path):
path = (tmp_path / "implicit").resolve()
model = ImplicitWrap(AlternatingLeastSquares())
model.fit(long_log_with_features)
base_pred = model.predict(long_log_with_features, 5)
save(model, path)
m = load(path)
new_pred = m.predict(long_log_with_features, 5)
sparkDataFrameEqual(base_pred, new_pred)
def test_cluster(long_log_with_features, user_features, tmp_path):
path = (tmp_path / "cluster").resolve()
model = ClusterRec()
model.fit(long_log_with_features, user_features)
base_pred = model.predict(user_features, 5)
save(model, path)
m = load(path)
new_pred = m.predict(user_features, 5)
sparkDataFrameEqual(base_pred, new_pred)
def test_wilson(long_log_with_features, tmp_path):
path = (tmp_path / "wilson").resolve()
model = Wilson()
df = long_log_with_features.withColumn(
"relevance", (sf.col("relevance") > 3).cast("integer")
)
model.fit(df)
base_pred = model.predict(df, 5)
save(model, path)
m = load(path)
new_pred = m.predict(df, 5)
sparkDataFrameEqual(base_pred, new_pred)
def test_study(df, tmp_path):
path = (tmp_path / "study").resolve()
model = PopRec()
model.study = 80083
model.fit(df)
save(model, path)
m = load(path)
assert m.study == model.study
| 27.555556 | 75 | 0.672312 | 0 | 0 | 0 | 0 | 1,027 | 0.276075 | 0 | 0 | 268 | 0.072043 |
8c41961e1c04c3e4784f76d511a5d3c4a5d1d393 | 599 | py | Python | scripts/total_damage.py | Masanori-Suzu1024/mypkg | 1117d744fefd27caab9ff78d589335525cd0611e | [
"BSD-3-Clause"
] | null | null | null | scripts/total_damage.py | Masanori-Suzu1024/mypkg | 1117d744fefd27caab9ff78d589335525cd0611e | [
"BSD-3-Clause"
] | null | null | null | scripts/total_damage.py | Masanori-Suzu1024/mypkg | 1117d744fefd27caab9ff78d589335525cd0611e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# BSD 3-Clause "New" or "Revised" License
# Copyright (c) 2021, Masanori-Suzu1024 RyuichiUeda
# All rights reserved.
# Genshin is a copyrighted work of miHoYo co., Ltd
import rospy
from std_msgs.msg import Int32
n = 0
def cb(message):
global n
n = message.data
if __name__== '__main__':
rospy.init_node('twice')
sub = rospy.Subscriber('count_up', Int32, cb)
pub = rospy.Publisher('twice', Int32, queue_size=10)
rate = rospy.Rate(1)
a = 0
while not rospy.is_shutdown():
a = a + n
pub.publish(a)
rate.sleep()
| 21.392857 | 56 | 0.639399 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 222 | 0.370618 |
8c41abbedc0d7fa2c291694a56f40b82034bd4ff | 113 | py | Python | download-deveres/para-execicios-curso-em-video/exe046.py | Hugo-Oliveira-RDO11/meus-deveres | b5e41015e2cb95946262678e82197e5f47d56271 | [
"MIT"
] | null | null | null | download-deveres/para-execicios-curso-em-video/exe046.py | Hugo-Oliveira-RDO11/meus-deveres | b5e41015e2cb95946262678e82197e5f47d56271 | [
"MIT"
] | null | null | null | download-deveres/para-execicios-curso-em-video/exe046.py | Hugo-Oliveira-RDO11/meus-deveres | b5e41015e2cb95946262678e82197e5f47d56271 | [
"MIT"
] | null | null | null | from time import sleep
for c in range(10, -1, -1):
print(c)
sleep(1)
print('BOMMMMMMMMM\nE ANO NOVO!!!')
| 18.833333 | 35 | 0.628319 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.247788 |
8c42c6c20316fe5c20318f93d3a8f8fb011455d2 | 2,257 | py | Python | sdk/automation/azure-mgmt-automation/azure/mgmt/automation/models/credential.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/automation/azure-mgmt-automation/azure/mgmt/automation/models/credential.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/automation/azure-mgmt-automation/azure/mgmt/automation/models/credential.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_resource import ProxyResource
class Credential(ProxyResource):
"""Definition of the credential.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar user_name: Gets the user name of the credential.
:vartype user_name: str
:ivar creation_time: Gets the creation time.
:vartype creation_time: datetime
:ivar last_modified_time: Gets the last modified time.
:vartype last_modified_time: datetime
:param description: Gets or sets the description.
:type description: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'user_name': {'readonly': True},
'creation_time': {'readonly': True},
'last_modified_time': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'user_name': {'key': 'properties.userName', 'type': 'str'},
'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'},
'last_modified_time': {'key': 'properties.lastModifiedTime', 'type': 'iso-8601'},
'description': {'key': 'properties.description', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Credential, self).__init__(**kwargs)
self.user_name = None
self.creation_time = None
self.last_modified_time = None
self.description = kwargs.get('description', None)
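# Minimal usage sketch (assumed): 'description' is the only caller-settable field;
# the read-only properties (id, name, type, user_name, timestamps) are populated by
# the service when a credential is returned from the API.
#   cred = Credential(description='Run-as credential for the automation account')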
| 36.403226 | 89 | 0.597696 | 1,738 | 0.770049 | 0 | 0 | 0 | 0 | 0 | 0 | 1,612 | 0.714222 |
8c4438cccb5b2cadc74299fb1db0d3d594a9e00e | 4,107 | py | Python | prime/postrefine/mod_partiality.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | prime/postrefine/mod_partiality.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | prime/postrefine/mod_partiality.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import division
from cctbx.array_family import flex
from scitbx.matrix import sqr, col
from cctbx.crystal_orientation import crystal_orientation, basis_type
import math
import numpy as np
class partiality_handler(object):
"""
mod_partiality:
1. Calculate partiality for given
miller indices, crystal orientation, unit cell, wavelength.
  2. Calculate spot centroid delta distance
"""
def __init__(self):
"""
    Initialize parameters
"""
def calc_full_refl(self, I_o_p_set, sin_theta_over_lambda_sq_set,
G, B, p_set, rs_set, flag_volume_correction=True):
I_o_full_set = I_o_p_set/(G * flex.exp(-2*B*sin_theta_over_lambda_sq_set) * p_set)
return I_o_full_set
def calc_spot_radius(self, a_star_matrix, miller_indices, wavelength):
#calculate spot_radius based on rms delta_S for all spots
S0 = -1*col((0,0,1./wavelength))
sd_array = a_star_matrix.elems * miller_indices.as_vec3_double() + S0.elems
rh_set = sd_array.norms() - (1/wavelength)
return rh_set.standard_deviation_of_the_sample()
def voigt(self, x, sig, nu):
if nu < 0:
nu = 0
elif nu > 1:
nu = 1
f1 = nu * math.sqrt(math.log(2)/math.pi) * flex.exp(-4*math.log(2)*((x/sig)**2)) * (1/abs(sig))
f2 = (1-nu)/(math.pi*abs(sig)*(1+(4*((x/sig)**2))))
f3 = ((nu * math.sqrt(math.log(2)/math.pi))/abs(sig)) + ((1-nu)/(math.pi*abs(sig)))
svx = (f1 + f2)/f3
return svx
def lognpdf(self, x, FWHM, zero):
#find sig from root of this function
zero = np.abs(zero)
sig_range = np.arange(50)/100
t = sig_range * math.sqrt(math.log(4))
sig_set = np.array([sig_range[np.argmin(np.abs(( fwhm - (zero * (np.exp(t) - np.exp(-1*t))) )))] for fwhm in FWHM])
#calc x0
x0 = math.log(zero) + sig_set**2
g = 1/( sig_set * math.sqrt(2*math.pi) * np.exp(x0-((sig_set**2)/2)) )
#calc lognpdf
X = zero - x
f1 = 1/( X * sig_set * math.sqrt(2*math.pi) )
f2 = np.exp( -1 * (np.log(X)-x0)**2 / (2*(sig_set**2)) )
svx = flex.double(f1 * f2 / g)
return svx
def calc_partiality_anisotropy_set(self, my_uc, rotx, roty, miller_indices,
ry, rz, r0, re, nu,
bragg_angle_set, alpha_angle_set, wavelength, crystal_init_orientation,
spot_pred_x_mm_set, spot_pred_y_mm_set, detector_distance_mm,
partiality_model, flag_beam_divergence):
#use III.4 in Winkler et al 1979 (A35; P901) for set of miller indices
O = sqr(my_uc.orthogonalization_matrix()).transpose()
R = sqr(crystal_init_orientation.crystal_rotation_matrix()).transpose()
CO = crystal_orientation(O*R, basis_type.direct)
CO_rotate = CO.rotate_thru((1,0,0), rotx
).rotate_thru((0,1,0), roty)
A_star = sqr(CO_rotate.reciprocal_matrix())
S0 = -1*col((0,0,1./wavelength))
    #calculate rs
rs_set = r0 + (re * flex.tan(bragg_angle_set))
if flag_beam_divergence:
rs_set += ((ry * flex.cos(alpha_angle_set))**2 + (rz * flex.sin(alpha_angle_set))**2)**(1/2)
#calculate rh
x = A_star.elems * miller_indices.as_vec3_double()
sd_array = x + S0.elems
rh_set = sd_array.norms() - (1/wavelength)
#calculate partiality
if partiality_model == "Lorentzian":
partiality_set = ((rs_set**2)/((2*(rh_set**2))+(rs_set**2)))
elif partiality_model == "Voigt":
partiality_set = self.voigt(rh_set, rs_set, nu)
elif partiality_model == "Lognormal":
partiality_set = self.lognpdf(rh_set, rs_set, nu)
#calculate delta_xy
if sum(spot_pred_y_mm_set) == 0:
#hack for dials integration - spot_pred_x_mm_set is s1 * to be fixed *
delta_xy_set = (spot_pred_x_mm_set - sd_array).norms()
else:
d_ratio = -detector_distance_mm/sd_array.parts()[2]
calc_xy_array = flex.vec3_double(sd_array.parts()[0]*d_ratio, \
sd_array.parts()[1]*d_ratio, flex.double([0]*len(d_ratio)))
pred_xy_array = flex.vec3_double(spot_pred_x_mm_set, spot_pred_y_mm_set, flex.double([0]*len(d_ratio)))
delta_xy_set = (pred_xy_array - calc_xy_array).norms()
return partiality_set, delta_xy_set, rs_set, rh_set
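# Rough numerical sketch (assumed values) of the Lorentzian partiality model used
# above, p = rs**2 / (2*rh**2 + rs**2): a reflection on the Ewald sphere (rh = 0)
# gives p = 1, while rh = rs already drops it to 1/3, so reflections far from the
# sphere relative to their reciprocal-space spot size contribute little intensity.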
| 41.908163 | 119 | 0.665206 | 3,901 | 0.949842 | 0 | 0 | 0 | 0 | 0 | 0 | 554 | 0.134892 |
8c444c182cfcbcbf6fc1fbb3f34c210eb99835c2 | 476 | py | Python | python/large-class/1_extract-class.py | mario21ic/refactoring-guru | a28730ebbcf54363cb98e921820198b50fc3204f | [
"MIT"
] | 1 | 2020-03-31T00:57:39.000Z | 2020-03-31T00:57:39.000Z | python/large-class/1_extract-class.py | mario21ic/refactoring-guru | a28730ebbcf54363cb98e921820198b50fc3204f | [
"MIT"
] | null | null | null | python/large-class/1_extract-class.py | mario21ic/refactoring-guru | a28730ebbcf54363cb98e921820198b50fc3204f | [
"MIT"
] | null | null | null | # When one class does the work of two, awkwardness results.
class Person:
def __init__(self, name, office_area_code, office_number):
self.name = name
self.office_area_code = office_area_code
self.office_number = office_number
def telephone_number(self):
return "%d-%d" % (self.office_area_code, self.office_number)
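# A possible "Extract Class" refactoring (sketch, not part of the original example):
# move the telephone details into their own class so Person keeps one responsibility.
class TelephoneNumber:
    def __init__(self, area_code, number):
        self.area_code = area_code
        self.number = number
    def to_string(self):
        return "%d-%d" % (self.area_code, self.number)
# Person would then hold a TelephoneNumber and delegate telephone_number() to it.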
if __name__=="__main__":
p = Person("Mario", 51, 966296636)
print(p.name)
print(p.telephone_number()) | 29.75 | 68 | 0.680672 | 299 | 0.628151 | 0 | 0 | 0 | 0 | 0 | 0 | 83 | 0.17437 |
8c460136129c9f2340cb374d68977dc7a50439ea | 1,205 | py | Python | utils.py | btq/Seph_scrape | 441cdb342b5a3e1ef75f5f0861b34c26ee02012e | [
"MIT"
] | 1 | 2019-04-22T04:05:37.000Z | 2019-04-22T04:05:37.000Z | utils.py | btq/Seph_scrape | 441cdb342b5a3e1ef75f5f0861b34c26ee02012e | [
"MIT"
] | null | null | null | utils.py | btq/Seph_scrape | 441cdb342b5a3e1ef75f5f0861b34c26ee02012e | [
"MIT"
] | null | null | null | '''
every module in the system must use the following import:
from utils import log
'''
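# For example (sketch), a sibling module in this project would do:
#   from utils import log
#   log.warning('scrape failed')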
import os
import sys
import re
import logging
from subprocess import Popen, PIPE
from configparser import ConfigParser
#log_format = '%(asctime)s %(levelname)-8s [%(filename)s,%(lineno)d] %(message)s'
#logging.basicConfig(level=logging.DEBUG, format=log_format)
log = logging.getLogger('scraper')
CSS = '''
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 3px 20px;
}
.bold {
font-weight: bold;
}
.red {
color: red;
}
.fail {
font-style: italic;
font-weight: bold;
color: red;
}
'''
def error(msg):
log.error(msg + '. Exiting ...')
sys.exit(1)
'''
def config(filename='database.ini', section='postgresql'):
# create a parser
parser = ConfigParser()
# read config file
parser.read(filename)
# get section, default to postgresql
db = {}
if parser.has_section(section):
params = parser.items(section)
for param in params:
db[param[0]] = param[1]
else:
raise Exception('Section {0} not found in the {1} file'.format(section, filename))
return db
'''
| 17.214286 | 90 | 0.635685 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 988 | 0.819917 |
8c4814a2ff66ec07a5796cfd425daedb33196a98 | 1,855 | py | Python | pcompile/tests/test_items.py | cb01/pcompile | d04bffc481291ca9586db94ce2ca9be6d8534e24 | [
"BSD-3-Clause"
] | 1 | 2016-12-04T02:46:17.000Z | 2016-12-04T02:46:17.000Z | pcompile/tests/test_items.py | cb01/pcompile | d04bffc481291ca9586db94ce2ca9be6d8534e24 | [
"BSD-3-Clause"
] | null | null | null | pcompile/tests/test_items.py | cb01/pcompile | d04bffc481291ca9586db94ce2ca9be6d8534e24 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from pcompile.items import NameRegistry, Items, min_container_type
from pcompile.tests.test_context import TestContext
from pcompile import ureg
import json
from pcompile.solution import Solution
class TestNameRegistry(unittest.TestCase):
def test_init(self):
nr = NameRegistry()
def test_generate_name(self):
name = NameRegistry().generate()
def test_new(self):
name = NameRegistry().new()
class TestItems(unittest.TestCase):
def test_init(self):
items = Items()
def test_find(self):
tc = TestContext()
items = Items()
well = items.allocate(tc.env, '96-pcr')
assert isinstance(well, dict)
# When more than 96 wells are allocated, a second plate is allocated,
# but not before.
for i in range(1,97):
items.allocate(tc.env, '96-pcr')
assert len(items.containers) == 2
# Check that the objects were reffed in the process of being
# allocated.
assert len(tc.env.protocol.as_dict()['refs'].keys()) == 2
class TestHelpers(unittest.TestCase):
def test_min_container_type(self):
assert min_container_type(1*ureg.microliter) == "96-pcr"
assert min_container_type(1000*ureg.microliter) == "micro-1.5"
'''
from pcompile.solution import Component
from pcompile.items import ContainerPool, Container
class TestContainer(unittest.TestCase):
def test_something(self):
ct1 = Container(name='happy', ctype='96-pcr')
ct2 = Container(name='lucky', ctype='384-pcr')
ct3 = Container(name='sparkles', ctype='1.5-micro')
cp = ContainerPool(containers=[ct1, ct2, ct3])
assert cp.find('sparkles').ctype == '1.5-micro'
'''
if __name__ == '__main__':
unittest.main()
| 22.901235 | 77 | 0.653908 | 1,076 | 0.580054 | 0 | 0 | 0 | 0 | 0 | 0 | 708 | 0.381671 |
8c49489abc57b62bc37cd45f69e770e72a7d07cf | 2,805 | py | Python | SC101/SC101_week2/zonegraphics.py | ariel98po/SC101-projects | abc6e1672ca2e1c594e274945ec851f4e3a587ef | [
"MIT"
] | null | null | null | SC101/SC101_week2/zonegraphics.py | ariel98po/SC101-projects | abc6e1672ca2e1c594e274945ec851f4e3a587ef | [
"MIT"
] | null | null | null | SC101/SC101_week2/zonegraphics.py | ariel98po/SC101-projects | abc6e1672ca2e1c594e274945ec851f4e3a587ef | [
"MIT"
] | null | null | null | from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GOval, GRect
from campy.gui.events.mouse import onmouseclicked
import random
WINDOW_WIDTH = 600
WINDOW_HEIGHT = 400
ZONE_WIDTH = 100
ZONE_HEIGHT = 100
BALL_RADIUS = 15
MAX_SPEED = 6
MIN_Y_SPEED = 2
class ZoneGraphics:
def __init__(self, window_width=WINDOW_WIDTH, window_height=WINDOW_HEIGHT,
zone_width=ZONE_WIDTH, zone_height=ZONE_HEIGHT, ball_radius=BALL_RADIUS):
# Create window
self.window = GWindow(window_width, window_height, title='Zone Game')
# Create zone
self.zone = GRect(zone_width, zone_height, x=(window_width - zone_width) / 2,
y=(window_height - zone_height) / 2)
self.zone.color = 'blue'
self.window.add(self.zone)
# Create ball and initialize velocity/position
self.ball = GOval(2 * ball_radius, 2 * ball_radius)
self.ball.filled = True
self.ball.fill_color = 'salmon'
self.dx = 0
self.dy = 0
self.reset_ball()
# Initialize mouse listeners
onmouseclicked(self.handle_click)
# Set ball position at random inside the window
def set_ball_position(self):
self.ball.x = random.randint(0, self.window.width - self.ball.width)
self.ball.y = random.randint(0, self.window.height - self.ball.height)
def set_ball_velocity(self):
self.dx = random.randint(0, MAX_SPEED)
if random.random() > 0.5:
self.dx = -self.dx
self.dy = random.randint(MIN_Y_SPEED, MAX_SPEED)
if random.random() > 0.5:
self.dy = -self.dy
def reset_ball(self):
self.set_ball_position()
while self.ball_in_zone():
self.set_ball_position()
self.set_ball_velocity()
self.window.add(self.ball)
def move_ball(self):
self.ball.move(self.dx, self.dy)
def handle_wall_collisions(self):
if self.ball.x + self.ball.width >= self.window.width or self.ball.x <= 0:
self.dx = -self.dx
if self.ball.y + self.ball.height >= self.window.height or self.ball.y <= 0:
self.dy = -self.dy
def ball_in_zone(self):
zone_left_side = self.zone.x
zone_right_side = self.zone.x + self.zone.width
ball_x_in_zone = zone_left_side <= self.ball.x <= zone_right_side - self.ball.width
zone_top_side = self.zone.y
zone_bottom_side = self.zone.y + self.zone.height
ball_y_in_zone = zone_top_side <= self.ball.y <= zone_bottom_side - self.ball.height
return ball_x_in_zone and ball_y_in_zone
def handle_click(self, event):
obj = self.window.get_object_at(event.x, event.y)
if self.ball == obj:
self.reset_ball()
| 32.616279 | 92 | 0.642068 | 2,524 | 0.899822 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.062032 |
8c49da13dca709daa5a8e6c290b24925848f9676 | 1,790 | py | Python | database_replication/python/mock_ripper.py | ryland-e-atkins/complexa | fa5ce6227f45227cdd6b27b5d63edd000c760e84 | [
"MIT"
] | null | null | null | database_replication/python/mock_ripper.py | ryland-e-atkins/complexa | fa5ce6227f45227cdd6b27b5d63edd000c760e84 | [
"MIT"
] | null | null | null | database_replication/python/mock_ripper.py | ryland-e-atkins/complexa | fa5ce6227f45227cdd6b27b5d63edd000c760e84 | [
"MIT"
] | null | null | null | # This module is used to combine and remove duplicates from raw mockaroo data
from subprocess import call
from util import *
# def generateCleanFiles():
# """
# DEPRECATED
# """
# filePrefix = 'mockaroo/mock_data_raw/'
# fileNames = [
# filePrefix + 'brandName/brandName1.csv',
# filePrefix + 'brandName/brandName2.csv',
# filePrefix + 'brandName/brandName3.csv',
# filePrefix + 'brandName/brandName4.csv',
# filePrefix + 'brandName/brandName5.csv']
# names = cleanList(readMultipleFilesIntoList(fileNames))
# words = cleanList(splitWords(names))
# nameFileName = 'mockaroo/mock_data_clean/brandName/brandNames.csv'
# wordFileName = 'mockaroo/mock_data_clean/brandName/brandWords.csv'
# print("Name write: {0}".format(writeListToFile(names,nameFileName)))
# print("Word write: {0}".format(writeListToFile(words,wordFileName)))
def generateFile(modifier, numFiles):
#mkdir(modifier)
filePrefix = 'mockaroo_data/raw/'+modifier+'/'
fileNames = [filePrefix + modifier + str(x) + '.csv' for x in range(1,numFiles+1)]
outFileName = 'mockaroo_data/cln/'+modifier+'/'+modifier+'.csv'
items = cleanList(readMultipleFilesIntoList(fileNames))
print("Write to {1} successful: {0}".format(writeListToFile(items,outFileName), outFileName))
return
def mkdir(dirName):
"""
    Create the raw and clean data directories for the given name if they do not already exist.
"""
raw = 'mockaroo_data/raw/' + dirName
cln = 'mockaroo_data/cln/' + dirName
call(['mkdir', raw])
call(['mkdir', cln])
return
def main():
modifier = 'buzzword'
numFiles = 10
#mkdir(modifier)
generateFile(modifier, numFiles)
if __name__ == "__main__":
main() | 30.862069 | 98 | 0.644693 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,109 | 0.619553 |
8c49e119bd276ab8615c3a7f51eff37f3b42ea1c | 100 | py | Python | eval/scripts/__init__.py | mbatchkarov/dc_evaluation | 83fad49d597952957420b23c0d39c264c581e750 | [
"BSD-3-Clause"
] | null | null | null | eval/scripts/__init__.py | mbatchkarov/dc_evaluation | 83fad49d597952957420b23c0d39c264c581e750 | [
"BSD-3-Clause"
] | null | null | null | eval/scripts/__init__.py | mbatchkarov/dc_evaluation | 83fad49d597952957420b23c0d39c264c581e750 | [
"BSD-3-Clause"
] | null | null | null | __author__ = 'mmb28'
import sys
sys.path.append('.')
sys.path.append('..')
sys.path.append('../..') | 16.666667 | 24 | 0.63 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.21 |
8c4a0be60d8dc4aa18a2eafcee63ae8c7af73a85 | 268 | py | Python | LeetcodeAlgorithms/598. Range Addition II/range-addition-ii.py | Fenghuapiao/PyLeetcode | d804a62643fe935eb61808196a2c093ea9583654 | [
"MIT"
] | 3 | 2019-08-20T06:54:38.000Z | 2022-01-07T12:56:46.000Z | LeetcodeAlgorithms/598. Range Addition II/range-addition-ii.py | Fenghuapiao/PyLeetcode | d804a62643fe935eb61808196a2c093ea9583654 | [
"MIT"
] | null | null | null | LeetcodeAlgorithms/598. Range Addition II/range-addition-ii.py | Fenghuapiao/PyLeetcode | d804a62643fe935eb61808196a2c093ea9583654 | [
"MIT"
] | 2 | 2018-06-07T02:56:39.000Z | 2018-08-01T15:27:55.000Z | import operator
from functools import reduce  # reduce is not a builtin on Python 3
class Solution(object):
def maxCount(self, m, n, ops):
"""
:type m: int
:type n: int
:type ops: List[List[int]]
:rtype: int
"""
return reduce(operator.mul, map(min, zip(*ops + [[m,n]])))
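# Worked check (hypothetical input): m = 3, n = 3, ops = [[2, 2], [3, 3]].
# zip(*ops + [[3, 3]]) transposes to [(2, 3, 3), (2, 3, 3)], the column-wise minima
# are (2, 2), and their product 4 counts the cells incremented by every operation.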
| 26.8 | 67 | 0.440299 | 254 | 0.947761 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.436567 |
8c4a5299dfc921ff77539a9f3fcaeb8f4f33ab3c | 2,186 | py | Python | test/test3.py | v-smwang/AI-NLP-Tutorial | 3dbfdc7e19a025e00febab97f4948da8a3710f34 | [
"Apache-2.0"
] | null | null | null | test/test3.py | v-smwang/AI-NLP-Tutorial | 3dbfdc7e19a025e00febab97f4948da8a3710f34 | [
"Apache-2.0"
] | null | null | null | test/test3.py | v-smwang/AI-NLP-Tutorial | 3dbfdc7e19a025e00febab97f4948da8a3710f34 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# @author : wanglei
# @date : 2021/2/19 1:47 PM
# @description :
import numpy as np
"""
Perceptron object
"""
class Perceptron(object):
"""
    Initialization method of the perceptron.
    eta: learning rate
    n_iter: number of training passes (iterations)
"""
def __init__(self, eta=0.01, n_iter=10):
self.eta = eta
self.n_iter = n_iter
"""
    Training method of the model.
    shape[0] returns the number of rows of the matrix.
    shape[1] returns the number of columns of the matrix.
    In this example X.shape[1] = 2.
    np.zeros(1 + X.shape[1]) is a length-3 array (1 row, 3 columns) of zeros.
"""
def fit(self, X, y):
        self.w_ = np.zeros(1 + X.shape[1]) # initialize the weight/threshold vector with zeros
        self.errors_ = [] # records the number of misclassified samples in each full pass
        for _ in range(self.n_iter): # repeat the pass over the whole sample set several times
            errors = 0 # number of misclassified samples in the current pass
            for xi, target in zip(X, y): # iterate over the samples and their true labels
                update = self.eta * (
                    target - self.predict(xi)) # true label minus prediction: 0 means correct; otherwise scale by the learning rate to get this step's weight/threshold update
                self.w_[1:] += update * xi # if the prediction was correct, update is 0 and the weights stay unchanged; otherwise adjust them
                self.w_[0] += update # if the prediction was correct, update is 0 and the threshold stays unchanged; otherwise adjust it
                errors += int(update != 0.0) # count one error for every misclassification
            self.errors_.append(errors) # after the whole pass, store this pass's error count in errors_
return self
"""
    Method that combines a sample's feature values.
    X=array([[1,2,3,4],[5,6,7,8],...])
    self.w_[1:]=array([0,0,0,0])
    Per the API: dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
    np.dot(X,self.w_[1:])=array([[0],[0],...]) (multiply each feature by its weight and sum the products per sample)
    self.w_[0]=array([[0]]) holds the threshold
"""
def net_input(self, X):
return np.dot(X, self.w_[1:]) + self.w_[0]
"""
    Method that outputs the prediction for a single sample.
    numpy.where(condition[, x, y])
    works like a ternary operator: where the condition holds it returns x, otherwise y
"""
def predict(self, X):
return np.where(self.net_input(X) >= 0.0, 1, -1)
import pandas as pd
"""
Load the data source
"""
df = pd.read_csv("/Users/a1/Downloads/iris.data", header=None)
print(df.tail()) # print the last few rows
y = df.iloc[0:100, 4].values # take column 4 (the class label) of the first 100 rows, which cover exactly two classes
print(y)
y = np.where(y == 'Iris-setosa', -1, 1) # replace the text labels with numeric labels for the two classes
X = df.iloc[0:100, [0, 2]].values # take columns 0 and 2 of the first 100 rows, i.e. sepal length and petal length
print(X)
"""
Train the model and inspect the number of misclassifications in each training iteration
"""
ppn= Perceptron(eta=0.1, n_iter=10)
ppn.fit(X,y) | 26.658537 | 117 | 0.580512 | 2,440 | 0.766332 | 0 | 0 | 0 | 0 | 0 | 0 | 2,115 | 0.664259 |
8c4a69db0d9fe3cc86c43f136e4fb4d819ce00e6 | 3,982 | py | Python | python/depthcharge/arch/arm.py | youssefms/depthcharge | 51744abd4c92c8f49a47900cb02a3652744a4083 | [
"BSD-3-Clause"
] | null | null | null | python/depthcharge/arch/arm.py | youssefms/depthcharge | 51744abd4c92c8f49a47900cb02a3652744a4083 | [
"BSD-3-Clause"
] | null | null | null | python/depthcharge/arch/arm.py | youssefms/depthcharge | 51744abd4c92c8f49a47900cb02a3652744a4083 | [
"BSD-3-Clause"
] | null | null | null | # SPDX-License-Identifier: BSD-3-Clause
# Depthcharge: <https://github.com/nccgroup/depthcharge>
"""
ARM 32-bit support
"""
import os
import re
from .arch import Architecture
class ARM(Architecture):
"""
ARMv7 (or earlier) target information - 32-bit little-endian
"""
_desc = 'ARM 32-bit, little-endian'
_alignment = 4
_word_size = 4
_phys_size = 4
_word_mask = 0xffffffff
_endianness = 'little'
_supports_64bit_data = False
# ident values used by RETURN_REGISTER payload
_regs = {
'r0': {'ident': 0x61},
'r1': {'ident': 0x62},
'r2': {'ident': 0x63},
'r3': {'ident': 0x64},
'r4': {'ident': 0x65},
'r5': {'ident': 0x66},
'r6': {'ident': 0x67},
'r7': {'ident': 0x68},
'r8': {'ident': 0x69},
'r9': {'ident': 0x6a, 'gd': True, 'alias': 'sb'},
'r10': {'ident': 0x6b},
'r11': {'ident': 0x6c, 'alias': 'fp'},
'r12': {'ident': 0x6d, 'alias': 'ip'},
'r13': {'ident': 0x6e, 'alias': 'sp'},
'r14': {'ident': 0x6f, 'alias': 'lr'},
'r15': {'ident': 0x70, 'alias': 'pc'},
}
_DA_ENTRY = re.compile(r"""
(?P<name>[a-zA-Z][a-zA-Z0-9]+)
\s?:\s?
(\[<)?
(?P<value>[0-9a-fA-F]{8})
(>\])?
""", re.VERBOSE)
@classmethod
def parse_data_abort(cls, text: str) -> dict:
"""
Parse ARM data abort output formatted as follows and return each field in a dict.
00000001:data abort
pc : [<8f7d8858>] lr : [<8f7d8801>]
reloc pc : [<17835858>] lr : [<17835801>]
sp : 8ed99718 ip : 00000000 fp : 00000001
r10: 00000001 r9 : 8eda2ea8 r8 : 00000001
r7 : 00000000 r6 : 00000004 r5 : 00000004 r4 : 00000001
r3 : 8ed9972c r2 : 020200b4 r1 : 8ed994ec r0 : 00000009
Flags: nZCv IRQs off FIQs off Mode SVC_32
Code: 2800f915 f04fd0cf e7ce30ff d10a2d04 (2000f8d8)
"""
ret = {}
for line in text.splitlines():
line = line.strip()
if line.startswith('Flags:'):
ret['flags'] = {}
for field in line.split(' '):
name, value = field.split(' ')
name = name.replace('Flags:', 'Asserted')
ret['flags'][name] = value
continue
elif line.startswith('Code:'):
code = line.split()
instructions = []
for instruction in code[1:]:
try:
instruction = instruction.replace('(', '').replace(')', '').strip()
instruction = int(instruction, 16)
instruction = instruction.to_bytes(cls.word_size, byteorder=cls.endianness)
instructions.append(instruction)
except ValueError as e:
msg = 'Invalid instruction or parse error: ' + str(e)
raise ValueError(msg)
ret['code'] = instructions
else:
if line.startswith('reloc '):
pfx = 'reloc '
line = line[len(pfx):]
else:
pfx = ''
for match in cls._DA_ENTRY.finditer(line):
regname, _ = cls.register(match.group('name'))
name = pfx + regname
value = match.group('value')
regs = ret.get('registers', {})
try:
regs[name] = int(value, 16)
except ValueError:
regs[name] = value
ret['registers'] = regs
if not ret:
msg = 'No data abort content found in the following text:' + os.linesep
msg += text
raise ValueError(msg)
return ret
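    # Minimal usage sketch (assumed): feed a captured data-abort dump to the parser
    # and read the recovered values back, e.g.
    #   info = ARM.parse_data_abort(console_output)
    #   print(hex(info['registers']['pc']), info['flags'], len(info['code']))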
| 32.373984 | 99 | 0.467353 | 3,802 | 0.954797 | 0 | 0 | 2,647 | 0.664741 | 0 | 0 | 1,497 | 0.375942 |
8c4acb5101a7563401d0c3e1503f91e74c7f281d | 392 | py | Python | server/ffstore/ErrorInfo.py | AsherYang/ThreeLine | 351dc8bfd1c0a536ffbf36ce8b1af953cc71f93a | [
"Apache-2.0"
] | 1 | 2017-05-02T10:02:28.000Z | 2017-05-02T10:02:28.000Z | server/ffstore/ErrorInfo.py | AsherYang/ThreeLine | 351dc8bfd1c0a536ffbf36ce8b1af953cc71f93a | [
"Apache-2.0"
] | null | null | null | server/ffstore/ErrorInfo.py | AsherYang/ThreeLine | 351dc8bfd1c0a536ffbf36ce8b1af953cc71f93a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
class OpenError(StandardError):
def __init__(self, error_code, error, error_info):
self.error_code = error_code
self.error = error
self.error_info = error_info
StandardError.__init__(self, error)
def __str__(self):
return 'Error: %s: %s, request: %s' % (self.error_code, self.error, self.error_info) | 30.153846 | 92 | 0.645408 | 348 | 0.887755 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.173469 |
8c4acfc3788d8bcf11dd124ebca2c1f4e65a2595 | 1,355 | py | Python | gdpr_search.py | thebirdsbeak/gdpr_search | 32eb19454589158dfc1d04eae6f627cab7e7b7b8 | [
"Unlicense"
] | null | null | null | gdpr_search.py | thebirdsbeak/gdpr_search | 32eb19454589158dfc1d04eae6f627cab7e7b7b8 | [
"Unlicense"
] | null | null | null | gdpr_search.py | thebirdsbeak/gdpr_search | 32eb19454589158dfc1d04eae6f627cab7e7b7b8 | [
"Unlicense"
] | null | null | null | import requests
import json
def search_article(search_art, j):
''' Searches GDPR by Article number '''
for line in j:
if search_art == line['article']:
print("\n{}({})\n{}".format(line['article'], line['num'], line['text']))
def search_text(search_term, j):
''' Searches GDPR by exact match '''
result_string = ""
for line in j:
if search_term.upper() in line['text'].upper():
if line['section'] != "Recitals":
result_string += "Chapter {}\n{}\n{}\n{}({})\n{}\n\n".format(line['chapter'], line['section'], line['subtitle'], line['article'], line['num'], line['text'])
else:
result_string += "Recital\n{}\n\n".format(line['text'])
print(result_string)
gdpr_text = requests.get("http://enceladus.world/gdpr_json")
j = gdpr_text.json()
while True:
search_term = input("\nSearch or [num] > ")
if search_term:
try:
if search_term.upper() == "Q":
quit()
except TypeError:
print("Type error - please use valid string")
break
if search_term.startswith("["):
search_art = search_term.replace("[", "").replace("]", "")
search_article(search_art, j)
else:
search_text(search_term, j)
| 29.456522 | 172 | 0.538745 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 362 | 0.267159 |
8c4b12f8f94b053d751d83aa1276be7fe1d62081 | 1,024 | py | Python | 191225/Python/solution.py | ktaletsk/daily-problem | 62ea445d499b8c5211eba635373102627e1d2710 | [
"MIT"
] | 1 | 2020-01-20T20:31:44.000Z | 2020-01-20T20:31:44.000Z | 191225/Python/solution.py | ktaletsk/daily-problem | 62ea445d499b8c5211eba635373102627e1d2710 | [
"MIT"
] | null | null | null | 191225/Python/solution.py | ktaletsk/daily-problem | 62ea445d499b8c5211eba635373102627e1d2710 | [
"MIT"
] | null | null | null | from collections import defaultdict
import copy
def get_next(current, d, finish):
flag=False
if len(d[current])==1:
if d[current][0]==finish and len(d.keys())==1:
flag= True
else:
new_d = copy.deepcopy(d)
new_current = d[current][0]
new_d.pop(current)
flag=get_next(new_current, new_d, finish)
elif len(d[current])>1:
for index, c in enumerate(d[current]):
new_d = copy.deepcopy(d)
new_d[current].pop(index)
new_current = c
flag=get_next(new_current, new_d, finish)
return flag
def can_get_chained(input):
words = [(word[0], word[-1]) for word in input]
d = defaultdict(list)
for k, v in words:
d[k].append(v)
#Start with any word
start = list(d.items())[0][0]
return get_next(start, d, start)
def main():
can_get_chained(['eggs', 'karat', 'apple', 'snack', 'tuna'])
if __name__== "__main__":
main() | 26.25641 | 64 | 0.556641 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 0.061523 |
8c4ef8962b42025bd9bfb340fd7c52343c5849b0 | 15,018 | py | Python | QtQmlViewport/Viewport.py | maxlem/pioneer.common.gui | e04ae85c9ffa87666090fa33285fd03c0c768ef3 | [
"BSD-3-Clause"
] | null | null | null | QtQmlViewport/Viewport.py | maxlem/pioneer.common.gui | e04ae85c9ffa87666090fa33285fd03c0c768ef3 | [
"BSD-3-Clause"
] | null | null | null | QtQmlViewport/Viewport.py | maxlem/pioneer.common.gui | e04ae85c9ffa87666090fa33285fd03c0c768ef3 | [
"BSD-3-Clause"
] | null | null | null | from QtQmlViewport import InFboRenderer, utils, Product, CustomActors
from QtQmlViewport.Actors import Actors, Renderable
from QtQmlViewport.Camera import Camera
from QtQmlViewport.Geometry import Geometry, BVH
from PyQt5.QtQuick import QQuickFramebufferObject
from PyQt5.QtGui import QMatrix4x4, QVector3D, QColor, qRgba
from PyQt5.QtCore import Qt, QRectF, QTimer, pyqtSlot as Slot
import numpy as np
import math, traceback, os, warnings
class NothingToPickException(Exception):
def __init__(self, world_origin, world_direction):
super().__init__('Nothing to pick')
self.world_origin, self.world_direction = world_origin, world_direction
class Viewport( QQuickFramebufferObject ):
def __init__( self, parent=None ):
super(Viewport, self).__init__( parent )
self.setAcceptedMouseButtons(Qt.AllButtons)
self.setAcceptHoverEvents(True)
self.renderer = None
self.camera = Camera()
self._mouse_start = None
self._start_eye = None
self._start_up = None
self._debug_actors = Actors()
self.render_to_texture_attachment = -1
self._hovered = None
self._clicked = None
Product.RWProperty(vars(), bool, 'debug', False)
Product.RWProperty(vars(), Camera, 'camera', None)
Product.RWProperty(vars(), QColor, 'backgroundColor', QColor(qRgba(1,1,1,1)))
Product.RWProperty(vars(), Actors, 'actors', None)
Product.RWProperty(vars(), Renderable, 'selected', None)
def aspect_ratio(self):
return self.width()/self.height()
def view_matrix(self):
return self.camera.view_matrix()
def perspective_matrix(self):
if self.camera.perspective_override is None:
p = QMatrix4x4()
p.perspective(self.camera.vfov, self.aspect_ratio(), self.camera.near, self.camera.far)
return p
else:
return self.camera.perspective_override
def orthographic_matrix(self):
p = QMatrix4x4()
p.ortho(0, self.width(), 0, self.height(), self.camera.near, self.camera.far)
return p
def pick_helper(self, clicked_x, clicked_y):
# http://schabby.de/picking-opengl-ray-tracing/
aspect_ratio = self.aspect_ratio()
cam_origin = self.camera.eye
cam_direction = (self.camera.center - cam_origin).normalized()
# The coordinate system we chose has x pointing right, y pointing down, z pointing into the screen
# in screen coordinates, the vertical axis points down, this coincides with our 'y' axis.
v = -self.camera.up # our y axis points down
# in screen coordinates, the horizontal axis points right, this coincides with our x axis
h = QVector3D.crossProduct(cam_direction, self.camera.up).normalized() # cam_direction points into the screen
        # in InFboRenderer::render(), we use Viewport::perspective_matrix(), where self.camera.vfov is used
# as QMatrix4x4::perspective()'s verticalAngle parameter, so near clipping plane's vertical scale is given by:
v_scale = math.tan( math.radians(self.camera.vfov) / 2 ) * self.camera.near
h_scale = v_scale * aspect_ratio
# translate mouse coordinates so that the origin lies in the center
# of the viewport (screen coordinates origin is top, left)
x = clicked_x - self.width() / 2
y = clicked_y - self.height() / 2
# scale mouse coordinates so that half the view port width and height
# becomes 1 (to be coherent with v_scale, which takes half of fov)
x /= (self.width() / 2)
y /= (self.height() / 2)
# the picking ray origin: corresponds to the intersection of picking ray with
# near plane (we don't want to pick actors behind the near plane)
world_origin = cam_origin + cam_direction * self.camera.near + h * h_scale * x + v * v_scale * y
# the picking ray direction
world_direction = (world_origin - cam_origin).normalized()
return v,h,world_origin,world_direction
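    # Worked example (assumed numbers): with vfov = 90 deg, near = 1.0 and a square
    # viewport, v_scale = h_scale = tan(45 deg) * 1.0 = 1.0, so a click in the
    # top-right corner maps to x = +1, y = -1 and the ray origin lands one unit to
    # the right of and one unit above the point where the view axis meets the near plane.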
def pick(self, clicked_x, clicked_y, modifiers = None):
v, h, world_origin, world_direction = self.pick_helper(clicked_x, clicked_y)
min_t = float("inf")
min_result = None
for actor in self.renderer.sorted_actors:
if actor._geometry and actor.pickable:
if actor._geometry.indices is not None\
and actor._geometry.attribs.vertices is not None:
bvh = actor._geometry.goc_bvh()
if bvh is None:
continue
bvh.update()
if bvh.bvh is None:
continue
local_origin, local_direction = self.to_local(world_origin, world_direction, actor)
local_origin_np, local_direction_np = utils.to_numpy(local_origin), utils.to_numpy(local_direction)
# try to intersect the actor's geometry!
if bvh.primitiveType == BVH.PrimitiveType.TRIANGLES or bvh.primitiveType == BVH.PrimitiveType.LINES:
ids, tuvs = bvh.bvh.intersect_ray(local_origin_np, local_direction_np, True)
if ids.size > 0:
actor_min_t = tuvs[:,0].min()
if actor_min_t < min_t:
min_t = actor_min_t
min_result = (actor, ids, tuvs, world_origin, world_direction, local_origin, local_direction)
elif bvh.primitiveType == BVH.PrimitiveType.POINTS:
object_id, distance, t = bvh.bvh.ray_distance(local_origin_np, local_direction_np)
real_distance = math.sqrt(t**2 + distance**2)
if real_distance < min_t:
min_t = real_distance
min_result = (actor, bvh.indices.ndarray[object_id, None], np.array([[t, distance, real_distance]]), world_origin, world_direction, local_origin, local_direction)
if self.debug and modifiers is not None and bool(modifiers & Qt.ShiftModifier):
np_origin = utils.to_numpy(world_origin)
np_v = utils.to_numpy(v)
np_h = utils.to_numpy(h)
self._debug_actors.clearActors()
a = self._debug_actors.addActor(CustomActors.arrow(np_origin, np_origin + np_h, 0.1, QColor('red')))
b = self._debug_actors.addActor(CustomActors.arrow(np_origin, np_origin + np_v, 0.1, QColor('green')))
c = self._debug_actors.addActor(CustomActors.arrow(np_origin, np_origin + utils.to_numpy(world_direction) * (min_t if min_t != float("inf") else 500.0), 0.1, QColor('magenta')))
a.pickable = False
b.pickable = False
c.pickable = False
if min_result is not None:
return min_result
raise NothingToPickException(world_origin, world_direction)
def to_local(self, world_origin, world_direction, actor):
# bring back the actor at the origin
if not "transform" in actor.bo_actor:
actor.update()
print(actor.dirty)
m = actor.bo_actor["transform"]
m_inv = m.inverted()[0]
# bring the ray in the actor's referential
local_origin, local_direction = m_inv.map(world_origin), m_inv.mapVector(world_direction)
return local_origin, local_direction
def mouseDoubleClickEvent(self, event):
btns = event.buttons()
if btns & Qt.MidButton or (btns & Qt.LeftButton and event.modifiers() & Qt.ControlModifier) :
try:
_, _, tuvs, world_origin, world_direction, _, _ = self.pick(event.localPos().x(), event.localPos().y())
p = world_origin + world_direction * tuvs[0,0]
self.setCameraRotationCenter(world_origin, p)
except NothingToPickException:
pass
except:
traceback.print_exc()
@Slot(QVector3D, QVector3D)
def setCameraRotationCenter(self, eye, center):
# centers camera on selected point
self.camera.center = center
self.camera.eye = eye
self.update()
def signal_helper(self, signal, event, ids, tuvs, world_origin, world_direction, local_origin, local_direction):
if len(ids) > 0:
signal.emit(ids[0], QVector3D(tuvs[0,0], tuvs[0,1], tuvs[0,2]), world_origin, world_direction, local_origin, local_direction, utils.QObject_to_dict(event), self)
def mousePressEvent(self, event):
"""
Called by the Qt libraries whenever the window receives a mouse click.
"""
self._mouse_start = (event.localPos().x(), event.localPos().y())
self._start_eye = self.camera.eye
self._start_up = self.camera.up
self._start_center = self.camera.center
if event.buttons() & Qt.LeftButton:
try:
actor, ids, tuvs, world_origin, world_direction, local_origin, local_direction = self.pick(event.localPos().x(), event.localPos().y(), event.modifiers())
if actor.clickable:
self.signal_helper(actor.click, event, ids, tuvs, world_origin, world_direction, local_origin, local_direction)
self._clicked = actor
self._clicked.destroyed.connect(self.clearClicked)
if actor.selectable:
self.selected = actor
self.selected.selected = True
except NothingToPickException as e:
if self.selected is not None:
self.selected.selected = False
self.selected = None
except:
traceback.print_exc()
event.setAccepted(True)
def mouseMoveEvent(self, event):
"""
Called by the Qt libraries whenever the window receives a mouse
move/drag event.
"""
btns = event.buttons()
x, y = event.localPos().x(), event.localPos().y()
x_0, y_0 = self._mouse_start
dx, dy = (x - x_0, y - y_0)
h_width = self.width()/2
h_height = self.height()/2
if btns & Qt.LeftButton:
if self._clicked:
_, _, world_origin, world_direction = self.pick_helper(event.pos().x(), event.pos().y())
self._clicked.move.emit(world_origin, world_direction, utils.QObject_to_dict(event), self)
else:
# we want half a screen movement rotates the camera 90deg:
self.camera.pan_tilt(self._start_eye, self._start_up, 90.0 * dx/h_width, 90.0 * dy/h_height)
elif btns & Qt.MidButton:
self.camera.translate(self._start_eye, self._start_center, -dx/h_width, dy/h_height)
elif btns & (Qt.RightButton):
self.camera.roll(self._start_eye, self._start_up, -90.0 * dy/h_width)
# re-draw at next timer tick
self.update()
def mouseReleaseEvent(self, event):
if self._clicked is not None:
_, _, world_origin, world_direction = self.pick_helper(event.pos().x(), event.pos().y())
self._clicked.release.emit(world_origin, world_direction, utils.QObject_to_dict(event), self)
self._clicked.destroyed.disconnect(self.clearClicked)
self._clicked = None
def wheelEvent(self, event):
"""
Called by the Qt libraries whenever the window receives a mouse wheel change.
"""
delta = event.angleDelta().y()
# move in look direction of camera
# note: this will only do something for non-orthographic projection
front = self.camera.center - self.camera.eye
if event.modifiers() & Qt.ShiftModifier:
factor = (5*120)
else:
factor = (5*12)
d = front.normalized() * delta/factor
self.camera.eye -= d
self.camera.center -= d
# re-paint at the next timer tick
self.update()
@Slot()
def clearHovered(self):
self._hovered = None
@Slot()
def clearClicked(self):
self._clicked = None
def hoverMoveEvent(self, event):
try:
actor, ids, tuvs, world_origin, world_direction, local_origin, local_direction = self.pick(event.pos().x(), event.pos().y(), event.modifiers())
if actor == self._hovered:
self.signal_helper(actor.hoverMove, event, ids, tuvs, world_origin, world_direction, local_origin, local_direction)
else:
if self._hovered is not None: #release the previous pick
self._hovered.hoverLeave.emit(world_origin, world_direction, utils.QObject_to_dict(event), self)
self._hovered.mouseOver = False
if actor.hoverable:
self._hovered = actor
self._hovered.destroyed.connect(self.clearHovered)
self.signal_helper(actor.hoverEnter, event, ids, tuvs, world_origin, world_direction, local_origin, local_direction)
actor.mouseOver = True
except NothingToPickException as e:
if self._hovered is not None:
self._hovered.hoverLeave.emit(e.world_origin, e.world_direction, utils.QObject_to_dict(event), self)
self._hovered.mouseOver = False
self._hovered.destroyed.disconnect(self.clearHovered)
self._hovered = None
pass
except:
traceback.print_exc()
def createRenderer( self ):
self.renderer = InFboRenderer.InFboRenderer()
self.timer = QTimer()
self.timer.timeout.connect(self.harvest_updates)
self.timer.start(0) #will be called after each event loop
return self.renderer
def set_render_to_texture_attachment(self, attachment):
if "QSG_RENDER_LOOP" in os.environ:
if os.environ['QSG_RENDER_LOOP'] != "basic":
warnings.warn("Error: multithreaded rendering enabled, please set os.environ['QSG_RENDER_LOOP'] = 'basic' before any Qt call")
if self.render_to_texture_attachment != attachment:
self.render_to_texture_attachment = attachment
self.update()
def get_render_to_texture_array(self):
if self.renderer is not None:
return self.renderer.render_to_texture_array
return None
def harvest_updates(self):
self.update()
# TODO: to prevent clogging 1 CPU, we should do something like this
# if self.renderer is not None:
# for actor in self.renderer.sorted_actors:
# if actor.dirty:
# self.update()
# return | 40.15508 | 213 | 0.612665 | 14,572 | 0.970302 | 0 | 0 | 338 | 0.022506 | 0 | 0 | 2,254 | 0.150087 |
8c4f66064e678f01aa0db0a69b31d14b2f75ea3f | 575 | py | Python | examples/example_utils.py | rmorshea/purly | 0d07d6d7636fd81d9c1c14e2df6a32fc28b325f7 | [
"MIT"
] | 2 | 2018-08-18T05:39:24.000Z | 2018-08-21T19:02:16.000Z | examples/example_utils.py | rmorshea/purly | 0d07d6d7636fd81d9c1c14e2df6a32fc28b325f7 | [
"MIT"
] | 2 | 2018-07-27T07:14:19.000Z | 2018-07-27T07:17:06.000Z | examples/example_utils.py | rmorshea/purly | 0d07d6d7636fd81d9c1c14e2df6a32fc28b325f7 | [
"MIT"
] | null | null | null | import os
def localhost(protocol, port=8000):
"""Returns the host URL.
    When examples run on mybinder.org the host is not simply "localhost" or
    "127.0.0.1"; instead the URL is routed through the ``nbserverproxy`` proxy.
"""
if 'JUPYTERHUB_OAUTH_CALLBACK_URL' in os.environ:
protocol += 's'
form = protocol + '://hub.mybinder.org%s/proxy/%s'
auth = os.environ['JUPYTERHUB_OAUTH_CALLBACK_URL'].rsplit('/', 1)[0]
return form % (auth, port)
else:
form = protocol + '://127.0.0.1:%s'
return form % port
| 31.944444 | 79 | 0.62087 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 309 | 0.537391 |
8c4fb46daef9f00f54d178b3d13b1196f12048c9 | 473 | py | Python | pyroomacoustics/experimental/__init__.py | oleg-alexandrov/pyroomacoustics | 4681b3cec21e09c54be50b2ee835115bcbc1d298 | [
"MIT"
] | 1 | 2020-02-14T22:32:55.000Z | 2020-02-14T22:32:55.000Z | pyroomacoustics/experimental/__init__.py | Bar-BY/pyroomacoustics | 45b45febdf93340a55a719942f2daa9efbef9960 | [
"MIT"
] | null | null | null | pyroomacoustics/experimental/__init__.py | Bar-BY/pyroomacoustics | 45b45febdf93340a55a719942f2daa9efbef9960 | [
"MIT"
] | 1 | 2021-01-14T08:42:47.000Z | 2021-01-14T08:42:47.000Z | """
Experimental
============
A bunch of routines useful when doing measurements and experiments.
"""
__all__ = [
"measure_ir",
"physics",
"point_cloud",
"delay_calibration",
"deconvolution",
"localization",
"signals",
"rt60",
]
from .deconvolution import *
from .delay_calibration import *
from .localization import *
from .measure_ir import *
from .physics import *
from .point_cloud import *
from .rt60 import *
from .signals import *
| 17.518519 | 67 | 0.672304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 199 | 0.420719 |
8c507b40e4711ff99a8dd46ed57dd876787b8095 | 1,441 | py | Python | tekstovni_vmesnik.py | tjazerzen/Vislice-vaje2021 | a22f3e0c61f69dfe878d98f0538b44fbd7b4d0e1 | [
"MIT"
] | null | null | null | tekstovni_vmesnik.py | tjazerzen/Vislice-vaje2021 | a22f3e0c61f69dfe878d98f0538b44fbd7b4d0e1 | [
"MIT"
] | 2 | 2021-04-19T15:52:45.000Z | 2021-04-19T16:20:11.000Z | tekstovni_vmesnik.py | tjazerzen/Vislice-vaje2021 | a22f3e0c61f69dfe878d98f0538b44fbd7b4d0e1 | [
"MIT"
] | null | null | null | import model
def izpis_igre(igra):
return (
f"Igraš igro vislic:\n" +
f"Narobe ugibane črke so: {igra.nepravilni_ugibi()}\n" +
f"Trenutno stanje besede: {igra.pravilni_del_gesla()}\n"
)
def izpis_poraza(igra):
return (
f"Izgubil si. Več sreče prihodnjič.\n" +
f"Narobe si uganil: {igra.nepravilni_ugibi()}\n" +
f"Pravilno si uganil: {igra.pravilni_del_gesla()}\n"
f"Pravilno geslo je bilo: {igra.geslo}\n"
)
def izpis_zmage(igra):
return (
f"Zmagal si. Bravo!\n" +
f"Narobe si uganil: {igra.nepravilni_ugibi()}\n" +
f"Pravilno si uganil: {igra.pravilni_del_gesla()}\n"
f"Pravilno geslo je bilo: {igra.geslo}\n"
)
def se_enkrat():
vnos = input("vnesi X, če želiš igrati še enkrat, in Y, če ne. ")
if vnos == "X":
return True
elif vnos == "Y":
return False
else:
print("Niste vnesli ne X ne Y. Vnesite še enkrat :) ")
return se_enkrat()
def pozeni_vmesnik():
igra = model.nova_igra(model.bazen_besed)
while True:
if igra.zmaga():
print(izpis_zmage(igra))
elif igra.poraz():
print(izpis_poraza(igra))
else:
print(izpis_igre(igra))
vnos = input("Vnesi novo črko: ")
igra.ugibaj(vnos)
se_enkrat_bool = se_enkrat()
if se_enkrat_bool:
pozeni_vmesnik()
pozeni_vmesnik() | 25.280702 | 69 | 0.582929 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 610 | 0.419821 |
8c50e921602b656a4f5e46e589c187aad8d34c49 | 2,050 | py | Python | seoaudit/__main__.py | Guber/seoaudit | e38bc453629643f0282cdf9324e4f1db81f57f7f | [
"Apache-2.0"
] | 7 | 2019-12-10T17:05:14.000Z | 2020-11-10T10:10:45.000Z | seoaudit/__main__.py | Guber/seoaudit | e38bc453629643f0282cdf9324e4f1db81f57f7f | [
"Apache-2.0"
] | 3 | 2020-10-23T09:19:19.000Z | 2021-12-13T20:28:03.000Z | seoaudit/__main__.py | Guber/seoaudit | e38bc453629643f0282cdf9324e4f1db81f57f7f | [
"Apache-2.0"
] | null | null | null | import argparse
from seoaudit.analyzer.site_parser import SiteParser, LXMLPageParser
from seoaudit.analyzer.seo_auditor import SEOAuditor
def main():
"""The main routine."""
parser = argparse.ArgumentParser(description='Run SEO checks on a set of urls')
parser.add_argument('-u', '--url', action='append', help='<Required> Url to parse', required=True)
parser.add_argument('-c', '--config', help='Python config file to use (without file extension)')
parser.add_argument('-s', '--sitemap', help='Sitemap location', default=None)
parser.add_argument('-p', '--parse', action="store_true", help='Parse sitemap urls', default=False)
args = parser.parse_args()
# Load the configuration file
if args.config:
import importlib
import os
try:
module_name = args.config
            module_file_path = os.path.join(os.getcwd(), module_name + ".py")
module_spec = importlib.util.spec_from_file_location(
module_name, module_file_path)
cfg = importlib.util.module_from_spec(module_spec)
module_spec.loader.exec_module(cfg)
print("Using config file {}".format(args.config))
except ImportError as err:
print('Error:', err)
else:
import seoaudit.config as cfg
print("Using default conf file")
# site_parser = SiteParser(url, SeleniumPageParser(url))
print("Starting Site Parser...")
site_parser = SiteParser(args.url[0], LXMLPageParser(args.url[0]), urls=args.url, sitemap_link=args.sitemap,
parse_sitemap_urls=args.parse)
# initiate auditer object
print("Starting SEO Auditor...")
print("-----------------------")
auditer = SEOAuditor(args.url[0], site_parser, cfg.page_tests, cfg.element_tests)
auditer.run_checks_for_site()
print("-----------------------")
print("SEO Auditor finished.")
print("Results stored in: {}".format(auditer.result_filename))
if __name__ == "__main__":
main()
| 35.964912 | 112 | 0.641951 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 575 | 0.280488 |
8c528606a3a84a2bbb3d2d39a85c6d83188013fe | 4,305 | py | Python | .circleci/scripts/chlogger.py | hackaugusto/scenario-player | 0701bb986f47e1ec4a4fb7a469157826da1993e2 | [
"MIT"
] | null | null | null | .circleci/scripts/chlogger.py | hackaugusto/scenario-player | 0701bb986f47e1ec4a4fb7a469157826da1993e2 | [
"MIT"
] | null | null | null | .circleci/scripts/chlogger.py | hackaugusto/scenario-player | 0701bb986f47e1ec4a4fb7a469157826da1993e2 | [
"MIT"
] | null | null | null | import pathlib
import re
import subprocess
from typing import List, Tuple
from constants import PROJECT_GIT_DIR, CURRENT_BRANCH, COMMIT_PATTERN, COMMIT_TYPE
def read_git_commit_history_since_tag(
tag
) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]], List[Tuple[str, str]]]:
"""Return a list of all git commit titles since the given `tag`.
    If `tag` is not given, we'll use the previous tag and compare the log up
    to the current tag.
The commits are returned as three lists:
1. feature commits
2. bugfix commits
3. hotfix commits
"""
completed_process = subprocess.run(
f"git --git-dir={PROJECT_GIT_DIR} log {tag}..master --format=%s".split(" "),
check=True,
stdout=subprocess.PIPE,
)
titles = completed_process.stdout.decode("UTF-8").split("\n")
# The body of a commit may include newline characters, so we need to specify
# a custom separator to indicate the end of the commit body.
separator = "<><><>"
completed_process = subprocess.run(
f"git --git-dir={PROJECT_GIT_DIR} log {tag}..master --format=%b{separator}".split(" "),
check=True,
stdout=subprocess.PIPE,
)
bodies = completed_process.stdout.decode("UTF-8").split(separator)
assert len(titles) == len(bodies)
pattern = re.compile(COMMIT_PATTERN)
feats, fixes, hotfixes = [], [], []
for title, body in zip(titles, bodies):
match = pattern.match(title)
if not match:
continue
commit_type = match.groupdict()["TYPE"]
if commit_type == "FEAT":
feats.append((title, body))
elif commit_type == "FIX":
fixes.append((title, body))
elif commit_type == "HOTFIX":
hotfixes.append((title, body))
else:
print(f"No type found, skipping commit '{title}'..")
return feats, fixes, hotfixes
def format_commits(commits: List[Tuple[str, str]]) -> List[str]:
"""Format the given commits for writing to the Changelog.
The expected input Tuple[str, str] format is:
([(FEAT|FIX|HOTFIX)-#123] <Subject>, <Optional body with further details on the commit.>)
The output format is as follows::
r'- #123 <Subject>\n <Optional body with further details on the commit.>\n'
Newlines in the body are honored, and each line indented by 4 spaces automatically.
TODO: Duplicate Issues should share a single Changelog Entry.
"""
if not commits:
return []
pattern = re.compile(COMMIT_PATTERN)
formatted = set()
for title, body in commits:
match = pattern.match(title)
issue, subject = match.groupdict()["ISSUE"], match.groupdict()["SUBJECT"]
entry = f"- {issue} {subject}\n"
if body:
# Make sure the body is indented by 8 spaces.
formatted_body = " ".join(body.split("\n"))
entry += f"{formatted_body}\n"
formatted.add(entry)
return sorted(formatted)
def update_chlog(
tag: str,
feats: List[str],
fixes: List[str],
hotfixes: List[str],
chlog_path: pathlib.Path = pathlib.Path("CHANGELOG.rst"),
):
try:
history = chlog_path.read_text()
except FileNotFoundError:
print("No Changelog file found - creating a new one.")
history = ""
chlog_entry = f"RELEASE {tag}\n=============\n\n"
if feats:
feats = "\n".join(feats)
chlog_entry += f"Features\n--------\n{feats}\n"""
if fixes:
fixes = "\n".join(fixes)
chlog_entry += f"Fixes\n-----\n{fixes}\n"
if hotfixes:
hotfixes = "\n".join(hotfixes)
chlog_entry += f"Hotfixes\n--------\n{hotfixes}\n"
chlog_path.write_text(f"{chlog_entry}\n{history}")
def make_chlog(chlog_path, new_version):
feats, fixes, hotfixes = read_git_commit_history_since_tag(new_version)
update_chlog(
"0.4.0", format_commits(feats), format_commits(fixes), format_commits(hotfixes), chlog_path
)
subprocess.run(f"git --git-dir={PROJECT_GIT_DIR} add {chlog_path}".split(" "), check=True)
subprocess.run(
f"git --git-dir={PROJECT_GIT_DIR} commit {chlog_path} -m".split(" ") + ['"Update Changelog."'],
check=True
)
| 31.195652 | 103 | 0.617189 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,671 | 0.388153 |
8c549c95fea72c967f3e44d56696e639a9c44785 | 12,339 | py | Python | bayesian_deep_learning/libs/distribution_shift_generator.py | mandt-lab/variational-beam-search | 61f217ed6ac6fdda0123f2b3bda37fa42fb4b4c2 | [
"MIT"
] | 1 | 2022-03-16T09:50:10.000Z | 2022-03-16T09:50:10.000Z | bayesian_deep_learning/libs/distribution_shift_generator.py | mandt-lab/variational-beam-search | 61f217ed6ac6fdda0123f2b3bda37fa42fb4b4c2 | [
"MIT"
] | null | null | null | bayesian_deep_learning/libs/distribution_shift_generator.py | mandt-lab/variational-beam-search | 61f217ed6ac6fdda0123f2b3bda37fa42fb4b4c2 | [
"MIT"
] | null | null | null | import struct
import sys
import pickle
import abc
import gzip
from copy import deepcopy
import numpy as np
import cv2
from albumentations import ShiftScaleRotate, ElasticTransform, HorizontalFlip
from albumentations import VerticalFlip, Compose
# if we want to apply deterministic elastic transformations, use the following
# API and set the numpy random number generator
from albumentations.augmentations.functional import elastic_transform
import tensorflow as tf
import random
class LongShiftScaleRotateTransformedGenerator(abc.ABC):
"""Abstract class with sequential transformation declared.
    After it has been initialized with a specific dataset, it can generate
    batches of examples with the declared transformations applied.
"""
@property
@abc.abstractmethod
def X_train(self):
pass
@property
@abc.abstractmethod
def Y_train(self):
pass
@property
@abc.abstractmethod
def X_test(self):
pass
@property
@abc.abstractmethod
def Y_test(self):
pass
@property
@abc.abstractmethod
def out_dim(self):
# Total number of unique classes
pass
def __init__(self, rng=None, changerate=1, max_iter=10, task_size=2048,
validation=False):
self.max_iter = max_iter
self.cur_iter = 0
if rng is None:
rng = np.random.RandomState(1234)
self.rng = rng
self.validation = validation
change_pos = list(range(0, max_iter+1, changerate))
change_pos = change_pos[1:]
# at these indices the dataset will be permuted
self.switch_points = [j for j in change_pos if j <= self.max_iter]
self.tasks_to_test = [0] + self.switch_points
self.examples_per_iter = task_size # 1 # 2048 # 10000 # 2048 # 1024
# for demonstrations
scale_limits, rotate_limits, shift_limits = [], [], []
# First task is without transformations
self.transformers = [None]
for i, _ in enumerate(self.switch_points):
scale_limit = self.rng.normal(0, 0.3)
# rotate_limit = self.rng.uniform(-180, 180) # -180~180
            rotate_limit = self.rng.normal(0, 10) # mostly within -30~30 (3 sigma)
shift_limit = self.rng.choice([-1, 1]) * self.rng.beta(1, 10)
scale_limits.append(scale_limit)
rotate_limits.append(rotate_limit)
shift_limits.append(shift_limit)
ssr = ShiftScaleRotate(
shift_limit=(shift_limit, shift_limit),
scale_limit=(scale_limit, scale_limit),
rotate_limit=(rotate_limit, rotate_limit),
border_mode=cv2.BORDER_CONSTANT,
value=0.0,
p=1.0,
)
pipe = ssr
self.transformers.append(pipe)
# First task is (unpermuted) MNIST, subsequent tasks are random
# permutations of pixels
self.perm_indices = [list(range(self.X_train.shape[1]))]
for i, _ in enumerate(self.switch_points):
perm_inds = list(range(self.X_train.shape[1]))
self.rng.shuffle(perm_inds)
self.perm_indices.append(perm_inds)
# make sure they are different permutations
assert(len(set(tuple(perm_inds) for perm_inds in self.perm_indices))
== len(self.perm_indices))
self.idx_map = {}
self.batch_indices = []
last_switch_point = 0
for i, switch_point in enumerate((self.switch_points
+ [self.max_iter])):
batch_inds = list(range(self.X_train.shape[0]))
self.rng.shuffle(batch_inds)
batch_inds = np.tile(batch_inds, 2) # for repetition
for j in range(last_switch_point, switch_point):
self.idx_map[j] = i
# deal with repetition
lbd = (j-last_switch_point)*self.examples_per_iter
ubd = (j-last_switch_point+1)*self.examples_per_iter
redundant_len = ((lbd//self.X_train.shape[0])
* self.X_train.shape[0])
# update lower and upper bound
lbd = lbd - redundant_len
ubd = ubd - redundant_len
self.batch_indices.append(batch_inds[lbd:ubd])
last_switch_point = switch_point
# np.save('./transform_params.npy',
# np.asarray([scale_limits, rotate_limits, shift_limits]))
def get_dims(self):
# Get data input and output dimensions
return self.X_train.shape[1], self.out_dim
def transform(self, transformer, images):
'''
Parameters:
transformer - transformation taken from `albumentations'
images - numpy array of shape (?, height*width) and assume
height==width for MNIST
'''
if transformer is None:
# do not transform
return images
else:
res_images = []
for image in images:
image = transformer(image=image)['image']
res_images.append(image)
return np.asarray(res_images)
def next_task(self):
if self.cur_iter >= self.max_iter:
raise Exception('Number of tasks exceeded!')
else:
transformer = self.transformers[self.idx_map[self.cur_iter]]
batch_inds = self.batch_indices[self.cur_iter]
# Retrieve train data
next_x_train = self.transform(
transformer,
deepcopy(self.X_train[batch_inds, ...])
)
next_y_train = self.Y_train[batch_inds]
# Retrieve test data
next_x_test = self.transform(
transformer,
deepcopy(self.X_test)
)
next_y_test = self.Y_test
if self.validation:
# use first 5000 images as validation set
next_x_test = next_x_test[:5000]
next_y_test = next_y_test[:5000]
print("Use first 5000 test images as validation set.")
else:
next_x_test = next_x_test[5000:]
next_y_test = next_y_test[5000:]
self.cur_iter += 1
return next_x_train, next_y_train, next_x_test, next_y_test
def reset(self):
self.cur_iter = 0
class LongElasticTransformedGenerator(abc.ABC):
"""Abstract class with sequential transformation declared.
After initiated with specific dataset, it can generates batches of examples
with declared transformations.
"""
@property
@abc.abstractmethod
def X_train(self):
pass
@property
@abc.abstractmethod
def Y_train(self):
pass
@property
@abc.abstractmethod
def X_test(self):
pass
@property
@abc.abstractmethod
def Y_test(self):
pass
@property
@abc.abstractmethod
def out_dim(self):
# Total number of unique classes
pass
def __init__(self, rng=None, changerate=1, max_iter=10, task_size=2048):
self.max_iter = max_iter
self.cur_iter = 0
if rng is None:
rng = np.random.RandomState(1234)
self.rng = rng
change_pos = list(range(0, max_iter+1, changerate))
change_pos = change_pos[1:]
# at these indices the dataset will be permuted
self.switch_points = [j for j in change_pos if j <= self.max_iter]
self.tasks_to_test = [0] + self.switch_points
self.examples_per_iter = task_size # 1 # 2048 # 10000 # 2048 # 1024
# First task is without transformations
self.transformer_rng_seeds = [None]
for i, switch_id in enumerate(self.switch_points):
# use the step as seed
self.transformer_rng_seeds.append(switch_id)
np.save('./transform_seeds.npy', self.transformer_rng_seeds)
self.idx_map = {}
self.batch_indices = []
last_switch_point = 0
for i, switch_point in enumerate((self.switch_points
+ [self.max_iter])):
batch_inds = list(range(self.X_train.shape[0]))
self.rng.shuffle(batch_inds)
batch_inds = np.tile(batch_inds, 2) # for repetition
for j in range(last_switch_point, switch_point):
self.idx_map[j] = i
# deal with repetition
lbd = (j-last_switch_point)*self.examples_per_iter
ubd = (j-last_switch_point+1)*self.examples_per_iter
redundant_len = ((lbd//self.X_train.shape[0])
* self.X_train.shape[0])
# update lower and upper bound
lbd = lbd - redundant_len
ubd = ubd - redundant_len
self.batch_indices.append(batch_inds[lbd:ubd])
last_switch_point = switch_point
def get_dims(self):
# Get data input and output dimensions
return self.X_train.shape[1], self.out_dim
def transform(self, rng_seed, images):
'''
Parameters:
rng_seed - seed for numpy.random.RandomState(). It ensures all
images use the same deterministic transformation.
images - numpy array of shape (?, height*width) and assume
height==width for MNIST
'''
if rng_seed is None:
# do not transform
return images
else:
res_images = []
for image in images:
# reset to enable deterministic behaviour
self.rng.seed(rng_seed)
image = elastic_transform(
image,
sigma=4,
alpha=34,
alpha_affine=1,
random_state=self.rng
)
res_images.append(image)
return np.asarray(res_images)
def next_task(self):
if self.cur_iter >= self.max_iter:
raise Exception('Number of tasks exceeded!')
else:
rng_seed = self.transformer_rng_seeds[self.idx_map[self.cur_iter]]
batch_inds = self.batch_indices[self.cur_iter]
# Retrieve train data
next_x_train = self.transform(
rng_seed,
deepcopy(self.X_train[batch_inds, ...])
)
next_y_train = self.Y_train[batch_inds]
# Retrieve test data
next_x_test = self.transform(
rng_seed,
deepcopy(self.X_test)
)
next_y_test = self.Y_test
self.cur_iter += 1
return next_x_train, next_y_train, next_x_test, next_y_test
def reset(self):
self.cur_iter = 0
class LongTransformedCifar10Generator(LongShiftScaleRotateTransformedGenerator):
# load data
(x_train, y_train), (x_test, y_test) = \
tf.keras.datasets.cifar10.load_data()
x_train = x_train.astype('float32')
y_train = np.squeeze(y_train)
x_test = x_test.astype('float32')
y_test = np.squeeze(y_test)
x_train /= 255
x_test /= 255
# Define train and test data
X_train = x_train
Y_train = y_train
X_test = x_test
Y_test = y_test
# Total number of unique classes
out_dim = 10
def __init__(self, rng=None, changerate=1, max_iter=10, task_size=2048,
validation=False):
super().__init__(rng, changerate, max_iter, task_size, validation)
class LongTransformedSvhnGenerator(LongShiftScaleRotateTransformedGenerator):
# load data
(x_train, y_train), (x_test, y_test) = np.load("./dataset/svhn.npy",
allow_pickle=True)
x_train = x_train.astype('float32')
y_train = np.squeeze(y_train)
x_test = x_test.astype('float32')
y_test = np.squeeze(y_test)
x_train /= 255
x_test /= 255
# Define train and test data
X_train = x_train
Y_train = y_train
X_test = x_test
Y_test = y_test
# Total number of unique classes
out_dim = 10
def __init__(self, rng=None, changerate=1, max_iter=10, task_size=2048,
validation=False):
super().__init__(rng, changerate, max_iter, task_size, validation)
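# A minimal usage sketch (assumed workflow, not part of the original module):
#
#     gen = LongTransformedCifar10Generator(changerate=5, max_iter=20, task_size=1024)
#     in_dim, out_dim = gen.get_dims()
#     for t in range(gen.max_iter):
#         x_tr, y_tr, x_te, y_te = gen.next_task()
#         ...  # train and evaluate on the current (possibly shifted) task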
| 33.529891 | 80 | 0.591863 | 11,847 | 0.960126 | 0 | 0 | 768 | 0.062242 | 0 | 0 | 2,366 | 0.19175 |
8c54ad4c8119847e922beb1d17182b424e160a27 | 819 | py | Python | Pre-Interview Challenges/camelcase.py | Wryhder/solve-with-code | 0fd1ef4f1c46ad89d68a667b3aaa6b98c69da266 | [
"MIT"
] | null | null | null | Pre-Interview Challenges/camelcase.py | Wryhder/solve-with-code | 0fd1ef4f1c46ad89d68a667b3aaa6b98c69da266 | [
"MIT"
] | null | null | null | Pre-Interview Challenges/camelcase.py | Wryhder/solve-with-code | 0fd1ef4f1c46ad89d68a667b3aaa6b98c69da266 | [
"MIT"
] | null | null | null | # Andela
"""
Problem Statement:
Write a function called camelCase that takes a string containing a Python-like variable name,
e.g. is_prime and turns it into the corresponding Java-like camel-case variable name, i.e. isPrime.
"""
def camelCase(python_var_name):
"""
This function takes a string containing a Python-like variable name
e.g. is_prime and turns it into the corresponding Java-like camel-case variable name,
i.e. isPrime.
"""
some_list = python_var_name.split("_")
java_version = ""
for word in some_list:
if word == some_list[0] or word.isdigit() == True:
java_version += word
continue
title_case = word.title()
java_version += title_case
return java_version
# test -- expected output: isPrime1Her
print(camelCase("is_prime1her"))
| 27.3 | 99 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 455 | 0.555556 |
8c54d1bb115f902bf84d6ec2fd66b3e1daa99799 | 3,248 | py | Python | neighborapp/models.py | Maureen-1998DEV/watch_Hood | 44b644dc8a5c4dfbea7a1e90ac7fe79c5dbf9abb | [
"MIT"
] | null | null | null | neighborapp/models.py | Maureen-1998DEV/watch_Hood | 44b644dc8a5c4dfbea7a1e90ac7fe79c5dbf9abb | [
"MIT"
] | null | null | null | neighborapp/models.py | Maureen-1998DEV/watch_Hood | 44b644dc8a5c4dfbea7a1e90ac7fe79c5dbf9abb | [
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from cloudinary.models import CloudinaryField
# Create your models here.
class Neighborhood(models.Model):
name = models.CharField(max_length = 50)
location = models.ForeignKey('Location',on_delete = models.CASCADE,null = True)
admin = models.ForeignKey(User,on_delete = models.CASCADE)
occupants = models.IntegerField(null=True)
def __str__(self):
return self.name
def create_neighborhood(self):
self.save()
def delete_neighborhood(self):
self.delete()
@classmethod
def find_neighborhood(cls,neigborhood_id):
neighborhood = cls.objects.get(id = neigborhood_id)
return neighborhood
def update_neighborhood(self):
self.save()
def update_occupants(self):
self.occupants += 1
self.save()
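# A minimal usage sketch (illustrative only; `some_user` is a hypothetical User instance):
#
#     hood = Neighborhood.objects.create(name="Kilimani", admin=some_user, occupants=0)
#     hood.update_occupants()                      # increments the count and saves
#     found = Neighborhood.find_neighborhood(hood.id)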
class UserProfile(models.Model):
user = models.ForeignKey(User,on_delete = models.CASCADE,related_name = 'profile')
first_name = models.CharField(max_length = 50,null=True)
last_name = models.CharField(max_length = 50,null=True)
bio = models.TextField(null=True)
neighborhood = models.ForeignKey(Neighborhood,on_delete = models.CASCADE,null=True)
email = models.EmailField(max_length = 60,null=True)
profile_pic = CloudinaryField('profile/')
pub_date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.user.username
class Business(models.Model):
name = models.CharField(max_length = 60)
user = models.ForeignKey(User,on_delete = models.CASCADE,related_name = 'business_user')
description = models.CharField(max_length = 150,null=True)
neighborhood = models.ForeignKey(Neighborhood,on_delete = models.CASCADE,related_name = 'business_neighbourhood')
category = models.ForeignKey('Category',on_delete = models.CASCADE,null=True)
email = models.EmailField(max_length = 60)
def __str__(self):
return self.name
def create_business(self):
self.save()
def delete_business(self):
self.delete()
@classmethod
def find_business(cls,business_id):
business = Business.objects.get(id = business_id)
return business
def update_business(self):
self.save()
class Post(models.Model):
title = models.CharField(max_length = 50)
content = models.TextField()
user = models.ForeignKey(User,on_delete = models.CASCADE)
neighborhood = models.ForeignKey(Neighborhood,on_delete = models.CASCADE)
type = models.CharField(max_length = 50,null=True)
pub_date = models.DateTimeField(auto_now_add=True,null=True)
def __str__(self):
return self.title
class Comment(models.Model):
comment = models.CharField(max_length = 300)
posted_on = models.DateTimeField(auto_now=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
def save_comment(self):
self.save()
def delete_comment(self):
self.delete()
class Location(models.Model):
name = models.CharField(max_length = 40)
def __str__(self):
return self.name
class Category(models.Model):
name = models.CharField(max_length = 40)
def __str__(self):
return self.name | 30.933333 | 117 | 0.703818 | 3,088 | 0.950739 | 0 | 0 | 281 | 0.086515 | 0 | 0 | 104 | 0.03202 |
8c59187ba750abcf04c6147479de2bce3f2491de | 751 | py | Python | oidc_provider/migrations/0029_auto_20190606_1218.py | omunozn/django-oidc-provider | 6f4822b56637aaa7ece92324123c26a36061d73a | [
"MIT"
] | 2 | 2018-10-05T01:17:57.000Z | 2020-10-07T21:07:20.000Z | oidc_provider/migrations/0029_auto_20190606_1218.py | omunozn/django-oidc-provider | 6f4822b56637aaa7ece92324123c26a36061d73a | [
"MIT"
] | null | null | null | oidc_provider/migrations/0029_auto_20190606_1218.py | omunozn/django-oidc-provider | 6f4822b56637aaa7ece92324123c26a36061d73a | [
"MIT"
] | 4 | 2018-10-30T14:47:12.000Z | 2020-05-06T19:11:55.000Z | # Generated by Django 2.2.2 on 2019-06-06 12:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('oidc_provider', '0028_auto_20190502_1654'),
]
operations = [
migrations.AddField(
model_name='code',
name='ae',
field=models.SmallIntegerField(null=True, verbose_name='AE'),
),
migrations.AddField(
model_name='token',
name='ae',
field=models.SmallIntegerField(null=True, verbose_name='AE'),
),
migrations.AddField(
model_name='userconsent',
name='ae',
field=models.SmallIntegerField(null=True, verbose_name='AE'),
),
]
| 25.896552 | 73 | 0.573901 | 658 | 0.876165 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.182423 |
8c5aabce04e618fe212d87594fcc8b26717a506f | 806 | py | Python | infra/lib/functions/hitcounter/update/adapters.py | haandol/aws-observability-example | 562e43cca1bd7e1488144856167d4b3f10986424 | [
"MIT"
] | null | null | null | infra/lib/functions/hitcounter/update/adapters.py | haandol/aws-observability-example | 562e43cca1bd7e1488144856167d4b3f10986424 | [
"MIT"
] | null | null | null | infra/lib/functions/hitcounter/update/adapters.py | haandol/aws-observability-example | 562e43cca1bd7e1488144856167d4b3f10986424 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from typing import Protocol, Callable
from aws_lambda_powertools import Tracer
tracer = Tracer()
class UpdateTable(Protocol):
update_item: Callable
class UpdateAdapter(ABC):
@abstractmethod
def update(self, path: str) -> int:
"""return hitCount for the given path"""
class DdbUpdateAdapter(UpdateAdapter):
def __init__(self, table: UpdateTable):
self.table = table
@tracer.capture_method
def update(self, path: str) -> int:
resp = self.table.update_item(
Key={ 'PK': path },
UpdateExpression='ADD hitCount :v',
ExpressionAttributeValues={
':v': 1
},
ReturnValues='UPDATED_NEW',
)
return int(resp['Attributes']['hitCount']) | 25.1875 | 50 | 0.626551 | 664 | 0.823821 | 0 | 0 | 465 | 0.576923 | 0 | 0 | 100 | 0.124069 |
8c5ec30a972f5c57beabebc879476a07e04ac93c | 3,213 | py | Python | lib/textwin.py | tomjackbear/python-0.9.1 | 00adeddadaede51e92447523266c9d5616201c38 | [
"FSFAP"
] | 4 | 2020-07-21T09:47:52.000Z | 2022-01-05T21:43:36.000Z | lib/textwin.py | tomjackbear/python-0.9.1 | 00adeddadaede51e92447523266c9d5616201c38 | [
"FSFAP"
] | 1 | 2020-09-23T20:46:33.000Z | 2020-09-23T20:59:57.000Z | lib/textwin.py | tomjackbear/python-0.9.1 | 00adeddadaede51e92447523266c9d5616201c38 | [
"FSFAP"
] | 4 | 2020-07-13T00:45:24.000Z | 2021-09-04T14:50:46.000Z | # Module 'textwin'
# Text windows, a subclass of gwin
import stdwin
import stdwinsupport
import gwin
S = stdwinsupport # Shorthand
def fixsize(w):
docwidth, docheight = w.text.getrect()[1]
winheight = w.getwinsize()[1]
if winheight > docheight: docheight = winheight
w.setdocsize(0, docheight)
fixeditmenu(w)
def cut(w, m, id):
s = w.text.getfocustext()
if s:
stdwin.setcutbuffer(0, s)
w.text.replace('')
fixsize(w)
def copy(w, m, id):
s = w.text.getfocustext()
if s:
stdwin.setcutbuffer(0, s)
fixeditmenu(w)
def paste(w, m, id):
w.text.replace(stdwin.getcutbuffer(0))
fixsize(w)
def addeditmenu(w):
m = w.editmenu = w.menucreate('Edit')
m.action = []
m.additem('Cut', 'X')
m.action.append(cut)
m.additem('Copy', 'C')
m.action.append(copy)
m.additem('Paste', 'V')
m.action.append(paste)
def fixeditmenu(w):
m = w.editmenu
f = w.text.getfocus()
can_copy = (f[0] < f[1])
m.enable(1, can_copy)
if not w.readonly:
m.enable(0, can_copy)
m.enable(2, (stdwin.getcutbuffer(0) <> ''))
def draw(w, area): # Draw method
w.text.draw(area)
def size(w, newsize): # Size method
w.text.move((0, 0), newsize)
fixsize(w)
def close(w): # Close method
del w.text # Break circular ref
gwin.close(w)
def char(w, c): # Char method
w.text.replace(c)
fixsize(w)
def backspace(w): # Backspace method
void = w.text.event(S.we_command, w, S.wc_backspace)
fixsize(w)
def arrow(w, detail): # Arrow method
w.text.arrow(detail)
fixeditmenu(w)
def mdown(w, detail): # Mouse down method
void = w.text.event(S.we_mouse_down, w, detail)
fixeditmenu(w)
def mmove(w, detail): # Mouse move method
void = w.text.event(S.we_mouse_move, w, detail)
def mup(w, detail): # Mouse up method
void = w.text.event(S.we_mouse_up, w, detail)
fixeditmenu(w)
def activate(w): # Activate method
fixeditmenu(w)
def open(title, str): # Display a string in a window
w = gwin.open(title)
w.readonly = 0
w.text = w.textcreate((0, 0), w.getwinsize())
w.text.replace(str)
w.text.setfocus(0, 0)
addeditmenu(w)
fixsize(w)
w.draw = draw
w.size = size
w.close = close
w.mdown = mdown
w.mmove = mmove
w.mup = mup
w.char = char
w.backspace = backspace
w.arrow = arrow
w.activate = activate
return w
def open_readonly(title, str): # Same with char input disabled
w = open(title, str)
w.readonly = 1
w.char = w.backspace = gwin.nop
# Disable Cut and Paste menu item; leave Copy alone
w.editmenu.enable(0, 0)
w.editmenu.enable(2, 0)
return w
| 26.775 | 70 | 0.522876 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 389 | 0.121071 |
8c5f2904d8a7ccc123c734d3345c1136cc5dcbc1 | 6,792 | py | Python | src/main/py/nlp_insights/nlp/acd/acd_to_fhir/confidence.py | LinuxForHealth/nlp-insights | c4e76fa06c448ac1d1f90f0b132971f2e1758af8 | [
"Apache-2.0"
] | null | null | null | src/main/py/nlp_insights/nlp/acd/acd_to_fhir/confidence.py | LinuxForHealth/nlp-insights | c4e76fa06c448ac1d1f90f0b132971f2e1758af8 | [
"Apache-2.0"
] | null | null | null | src/main/py/nlp_insights/nlp/acd/acd_to_fhir/confidence.py | LinuxForHealth/nlp-insights | c4e76fa06c448ac1d1f90f0b132971f2e1758af8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for extracting confidences from ACD insights
An ACD confidence score has a direction associated with it.
For example a diagnosis explicit confidence score would be high for a
statement such as "The patient has cancer",
but low low for a statement such as "suspect cancer" or "could be cancer".
The later two examples would have a high suspected confidence score.
Although there are many confidence scores available,
nlp-insights only uses a subset of them that are believed to be
interesting for our examples and use cases.
The choice of confidence scores that are interesting is somewhat related
to the choice of ACD attributes that are used. For example a 'patient reported'
confidence score is likely to be high for a 'PatientReportedCondition' attribute.
If more attributes are added or removed, including additional confidence scores
may become valuable.
"""
from typing import Optional, List
from ibm_whcs_sdk.annotator_for_clinical_data.annotator_for_clinical_data_v1 import (
InsightModelData,
)
from nlp_insights.fhir.code_system import acd_scoring_method
from nlp_insights.fhir.insight_builder import InsightConfidenceBuilder, ConfidenceMethod
def get_diagnosis_usage_explicit(
insight_model_data: InsightModelData,
) -> Optional[InsightConfidenceBuilder]:
"""Returns a builder for the diagnosis usage explicit confidence extension, if there is one.
This score is likely to be high for statements such as:
* The patient was diagnosed with diabetes
But low for variations like:
* The patient reports that he has diabetes
* The patient's brother has diabetes
* We suspect that the patient has diabetes
"""
try:
explicit_score = insight_model_data.diagnosis.usage.explicit_score
except AttributeError:
return None
return InsightConfidenceBuilder(
ConfidenceMethod(
acd_scoring_method.SCORING_METHOD_ACD_CODE_SYSTEM,
acd_scoring_method.DIAGNOSIS_EXPLICIT_SCORE,
),
explicit_score,
"Explicit Score",
)
def get_diagnosis_usage_patient_reported(
insight_model_data: InsightModelData,
) -> Optional[InsightConfidenceBuilder]:
"""Returns a builder for the diagnosis usage patient reported confidence extension.
This score is likely to be high for statements such as:
* The patient reports that she has diabetes
But low for variations like:
* The patient was diagnosed with diabetes
* The patient's sister has diabetes
* The patient might have diabetes
"""
try:
patient_reported_score = (
insight_model_data.diagnosis.usage.patient_reported_score
)
except AttributeError:
return None
return InsightConfidenceBuilder(
ConfidenceMethod(
acd_scoring_method.SCORING_METHOD_ACD_CODE_SYSTEM,
acd_scoring_method.DIAGNOSIS_PATIENT_REPORTED_SCORE,
),
patient_reported_score,
"Patient Reported Score",
)
def get_derived_condition_confidences(
insight_model_data: InsightModelData,
) -> List[InsightConfidenceBuilder]:
"""Returns confidences for a derived condition
Args: insight_model_data - model data from the attribute's concept
Returns: a list of builders, or empty list if confidences could not be computed.
"""
if not insight_model_data:
return []
confidence_list = []
conf = get_diagnosis_usage_explicit(insight_model_data)
if conf:
confidence_list.append(conf)
conf = get_diagnosis_usage_patient_reported(insight_model_data)
if conf:
confidence_list.append(conf)
return confidence_list
def get_medication_taken_confidence(
insight_model_data: InsightModelData,
) -> Optional[InsightConfidenceBuilder]:
"""Returns a builder for the medication take confidence, if the confidence exists
This score is likely to be high for statements such as:
* The patient is taking aspirin
But low for variations like:
* The patient considered taking aspirin
"""
try:
taken_score = insight_model_data.medication.usage.taken_score
except AttributeError:
return None
return InsightConfidenceBuilder(
ConfidenceMethod(
acd_scoring_method.SCORING_METHOD_ACD_CODE_SYSTEM,
acd_scoring_method.MEDICATION_TAKEN_SCORE,
),
taken_score,
"Medication Taken Score",
)
def get_derived_medication_confidences(
insight_model_data: InsightModelData,
) -> List[InsightConfidenceBuilder]:
"""Returns confidences for a derived medication
Args: insight_model_data - model data from the attribute's concept
Returns: a list of confidence builders, or empty list if confidences could not be computed.
"""
if not insight_model_data:
return []
confidence_list = []
conf = get_medication_taken_confidence(insight_model_data)
if conf:
confidence_list.append(conf)
return confidence_list
def get_derived_ae_confidences(
insight_model_data: InsightModelData,
) -> List[InsightConfidenceBuilder]:
"""Returns confidences for a derived medication adverse event
Args: insight_model_data - model data from the attribute's concept
Returns: a list of confidence builders, or empty list if confidences could not be computed.
"""
if not insight_model_data:
return []
confidence_list = []
conf = get_ae_taken_confidence(insight_model_data)
if conf:
confidence_list.append(conf)
return confidence_list
def get_ae_taken_confidence(
insight_model_data: InsightModelData,
) -> Optional[InsightConfidenceBuilder]:
"""Returns a builder for the adverse event score confidence, if the confidence exists
"""
try:
ae_score = insight_model_data.medication.adverseEvent.get("score", 0.0)
except AttributeError:
return None
return InsightConfidenceBuilder(
ConfidenceMethod(
acd_scoring_method.SCORING_METHOD_ACD_CODE_SYSTEM,
acd_scoring_method.ADVERSE_EVENT_SCORE,
),
ae_score,
"Adverse Event Score",
)
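# A minimal usage sketch (hypothetical variable names; assumes `concept` is an ACD
# attribute concept that exposes an `insight_model_data` field):
#
#     builders = get_derived_condition_confidences(concept.insight_model_data)
#     for builder in builders:
#         ...  # attach the resulting confidence extension to the derived FHIR resource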
| 33.623762 | 96 | 0.732479 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,358 | 0.494405 |
8c5f2bede9cc06a4686640d4cb2fefeae8c02adc | 9,876 | py | Python | dassh/correlations/flowsplit_ctd.py | khurrumsaleem/dassh | 8823e4b5256975a375391787558e5b6aba816251 | [
"BSD-3-Clause"
] | 11 | 2021-08-12T17:08:37.000Z | 2021-12-09T22:35:48.000Z | dassh/correlations/flowsplit_ctd.py | khurrumsaleem/dassh | 8823e4b5256975a375391787558e5b6aba816251 | [
"BSD-3-Clause"
] | 3 | 2021-11-24T21:15:36.000Z | 2022-03-25T14:00:52.000Z | dassh/correlations/flowsplit_ctd.py | khurrumsaleem/dassh | 8823e4b5256975a375391787558e5b6aba816251 | [
"BSD-3-Clause"
] | 2 | 2021-08-23T08:00:55.000Z | 2021-09-16T02:26:59.000Z | ########################################################################
# Copyright 2021, UChicago Argonne, LLC
#
# Licensed under the BSD-3 License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a
# copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
########################################################################
"""
date: 2021-08-12
author: matz
Cheng-Todreas correlation for flow split (1986)
"""
########################################################################
import numpy as np
from . import friction_ctd as ctd
applicability = ctd.applicability
########################################################################
# MODULE-WIDE CONSTANTS
_GAMMA = 1 / 3.0
_M = ctd._m
_EXP1 = {}
_EXP2 = {}
for regime in ctd._m.keys():
_EXP1[regime] = (1 + ctd._m[regime]) / (2 - ctd._m[regime])
_EXP2[regime] = 1 / (2 - ctd._m[regime])
########################################################################
def calculate_flow_split(asm_obj, regime=None, beta=1.0):
"""Calculate the flow split into the different types of
subchannels based on the Cheng-Todreas model
Parameters
----------
asm_obj : DASSH Assembly object
Contains the geometric description of the assembly
regime : str or NoneType
Indicate flow regime for which to calculate flow split
{'turbulent', 'laminar', None}; default = None
beta : float
        Beta is a factor used to combine the laminar and turbulent
        flow-split terms in the transition region. It comes from
        Cheng's 1984 thesis, which recommends a value of
        0.05. There, Figure 4.19 shows the edge flow split assuming
        beta=0.05. However, in practice beta=0.05 gives unreasonable results
        and beta=1.0 matches what is shown in the figure. Therefore,
        it is set to 1.0 here by default.
Returns
-------
numpy.ndarray
Flow split between interior, edge, and corner coolant
subchannels
"""
try:
Re_bnds = asm_obj.corr_constants['fs']['Re_bnds']
except (KeyError, AttributeError):
Re_bnds = ctd.calculate_Re_bounds(asm_obj)
try:
Cf = asm_obj.corr_constants['fs']['Cf_sc']
except (KeyError, AttributeError):
Cf = ctd.calculate_subchannel_friction_factor_const(asm_obj)
if regime is not None:
return _calculate_flow_split(asm_obj, Cf, regime, Re_bnds, beta=beta)
elif asm_obj.coolant_int_params['Re'] <= Re_bnds[0]:
return _calculate_flow_split(asm_obj, Cf, 'laminar')
elif asm_obj.coolant_int_params['Re'] >= Re_bnds[1]:
return _calculate_flow_split(asm_obj, Cf, 'turbulent')
else:
return _calculate_flow_split(asm_obj, Cf, 'transition', Re_bnds, beta)
def _calculate_flow_split(asm_obj, Cf_dict, regime, Re_bnds=None, beta=1.0):
"""Worker function to calculate the flow split into the
different types of subchannels based on the Cheng-Todreas
model.
Parameters
----------
asm_obj : DASSH Assembly object
Contains the geometric description of the assembly
Cf_dict : dict
Dictionary containing subchannel friction factor constants;
keys: ['laminar', 'turbulent']
regime : str {'laminar', 'turbulent', 'transition'}
Flow regime with which to evaluate flow split ratios
Re_bnds : list (optional)
Reynolds number flow regime boundaries for calculating
intermittency factor in transition regime
beta : float
        Beta is a factor used to combine the laminar and turbulent
        flow-split terms in the transition region. It comes from
        Cheng's 1984 thesis, which recommends a value of
        0.05. There, Figure 4.19 shows the edge flow split assuming
        beta=0.05. However, in practice beta=0.05 gives unreasonable results
        and beta=1.0 matches what is shown in the figure. Therefore,
        it is set to 1.0 here by default.
Returns
-------
numpy.ndarray
Flow split between interior, edge, and corner coolant
subchannels
Notes
-----
This method is imported by the flow split model in the
Upgraded Cheng-Todreas correlation (flowsplit_uctd)
"""
if regime == 'transition':
try:
na = asm_obj.corr_constants['fs']['na']
except (KeyError, AttributeError):
na = [asm_obj.subchannel.n_sc['coolant']['interior']
* asm_obj.params['area'][0],
asm_obj.subchannel.n_sc['coolant']['edge']
* asm_obj.params['area'][1],
asm_obj.subchannel.n_sc['coolant']['corner']
* asm_obj.params['area'][2]]
flow_split = np.zeros(3)
intf_b = ctd.calc_intermittency_factor(
asm_obj, Re_bnds[0], Re_bnds[1])
xratio_t = asm_obj.corr_constants['fs']['xr']['transition'].copy()
xratio_t[0] = (xratio_t[0]
* (1 - intf_b)**_GAMMA
/ asm_obj.coolant_int_params['Re'])
xratio_t[1] = (xratio_t[1]
* intf_b**_GAMMA
/ asm_obj.coolant_int_params['Re']**_M['turbulent']
)**_EXP2['turbulent']
# xratio = xratio_t1 + beta * xratio_t2
xratio = xratio_t[0] + beta * xratio_t[1]
x1x2 = xratio[1] / xratio[0] # Equation 4.51 in Cheng 1984
x3x2 = xratio[1] / xratio[2] # Equation 4.51 in Cheng 1984
flow_split[1] = (asm_obj.bundle_params['area']
/ (na[1] + x1x2 * na[0] + x3x2 * na[2]))
flow_split[0] = x1x2 * flow_split[1]
flow_split[2] = x3x2 * flow_split[1]
else:
flow_split = asm_obj.corr_constants['fs']['fs'][regime]
# x1x2 = asm_obj.corr_constants['fs']['xr'][regime][0]
# x3x2 = asm_obj.corr_constants['fs']['xr'][regime][1]
#
# # Flow split to subchannel type 2
# flow_split[1] = (asm_obj.bundle_params['area']
# / (na[1] + x1x2 * na[0] + x3x2 * na[2]))
# flow_split[0] = x1x2 * flow_split[1]
# flow_split[2] = x3x2 * flow_split[1]
return flow_split
def calc_constants(asm_obj):
"""Calculate constants needed by the CTD flowsplit calculation"""
const = ctd.calc_constants(asm_obj)
del const['Cf_b']
# Total subchannel area for each subchannel type
const['na'] = [asm_obj.subchannel.n_sc['coolant']['interior']
* asm_obj.params['area'][0],
asm_obj.subchannel.n_sc['coolant']['edge']
* asm_obj.params['area'][1],
asm_obj.subchannel.n_sc['coolant']['corner']
* asm_obj.params['area'][2]]
# REGIME RATIO CONSTANTS
const['xr'] = _calc_regime_ratio_constants(asm_obj, const['Cf_sc'])
# # Transition regime
# const['xr'] = {}
# const['xr']['transition'] = np.array([
# (const['Cf_sc']['laminar']
# * asm_obj.bundle_params['de']
# / asm_obj.params['de']**2),
# (const['Cf_sc']['turbulent']
# * asm_obj.bundle_params['de']**_M['turbulent']
# / asm_obj.params['de']**(_M['turbulent'] + 1))
# ])
#
# # Laminar/turbulent regime
# for k in ['laminar', 'turbulent']:
# const['xr'][k] = np.array([
# ((asm_obj.params['de'][0] / asm_obj.params['de'][1])**_EXP1[k]
# * (const['Cf_sc'][k][1] / const['Cf_sc'][k][0])**_EXP2[k]),
# ((asm_obj.params['de'][2] / asm_obj.params['de'][1])**_EXP1[k]
# * (const['Cf_sc'][k][1] / const['Cf_sc'][k][2])**_EXP2[k])
# ])
# Laminar/turbulent: constant flow split!
const['fs'] = _calc_constant_flowsplits(asm_obj, const)
# const['fs'] = {}
# for k in ['laminar', 'turbulent']:
# const['fs'][k] = np.zeros(3)
# const['fs'][k][1] = (asm_obj.bundle_params['area']
# / (const['na'][1]
# + const['xr'][k][0] * const['na'][0]
# + const['xr'][k][1] * const['na'][2]))
# const['fs'][k][0] = const['xr'][k][0] * const['fs'][k][1]
# const['fs'][k][2] = const['xr'][k][1] * const['fs'][k][1]
return const
def _calc_regime_ratio_constants(asm_obj, Cf_sc):
"""Constant ratios for laminar, turbulent, and transition regimes"""
xr = {}
xr['transition'] = np.array([
(Cf_sc['laminar']
* asm_obj.bundle_params['de']
/ asm_obj.params['de']**2),
(Cf_sc['turbulent']
* asm_obj.bundle_params['de']**_M['turbulent']
/ asm_obj.params['de']**(_M['turbulent'] + 1))
])
# Laminar/turbulent regime
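    # The ratios below implement
    #   X_i / X_2 = (De_i / De_2)**((1 + m) / (2 - m)) * (Cf_2 / Cf_i)**(1 / (2 - m))
    # (see _EXP1 and _EXP2 at the top of this module), where subchannel 1 is
    # interior, 2 is edge and 3 is corner.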
for k in ['laminar', 'turbulent']:
xr[k] = np.array([
((asm_obj.params['de'][0] / asm_obj.params['de'][1])**_EXP1[k]
* (Cf_sc[k][1] / Cf_sc[k][0])**_EXP2[k]),
((asm_obj.params['de'][2] / asm_obj.params['de'][1])**_EXP1[k]
* (Cf_sc[k][1] / Cf_sc[k][2])**_EXP2[k])
])
return xr
def _calc_constant_flowsplits(asm_obj, const):
"""Laminar and turbulent flowsplits are constant"""
fs = {}
for k in ['laminar', 'turbulent']:
fs[k] = np.zeros(3)
fs[k][1] = (asm_obj.bundle_params['area']
/ (const['na'][1]
+ const['xr'][k][0] * const['na'][0]
+ const['xr'][k][1] * const['na'][2]))
fs[k][0] = const['xr'][k][0] * fs[k][1]
fs[k][2] = const['xr'][k][1] * fs[k][1]
return fs
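# A minimal usage sketch (assumed wiring; `asm_obj` is a DASSH Assembly object):
#
#     asm_obj.corr_constants['fs'] = calc_constants(asm_obj)
#     fs_interior, fs_edge, fs_corner = calculate_flow_split(asm_obj)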
| 38.578125 | 78 | 0.562576 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,743 | 0.581511 |
8c6005532028b79259ef2ccbde6cb3de137f6931 | 13,037 | py | Python | nexthop_summary.py | dalekirkman1/SecureCRT-tools | fd2e60cb0ec561da5c900a6396993c114d925c5a | [
"Apache-2.0"
] | 2 | 2021-03-18T05:14:24.000Z | 2022-03-30T08:54:49.000Z | nexthop_summary.py | dalekirkman1/SecureCRT-tools | fd2e60cb0ec561da5c900a6396993c114d925c5a | [
"Apache-2.0"
] | null | null | null | nexthop_summary.py | dalekirkman1/SecureCRT-tools | fd2e60cb0ec561da5c900a6396993c114d925c5a | [
"Apache-2.0"
] | 1 | 2021-02-18T23:46:22.000Z | 2021-02-18T23:46:22.000Z | # $language = "python"
# $interface = "1.0"
# ################################################ SCRIPT INFO ###################################################
# Author: Jamie Caesar
# Email: jcaesar@presidio.com
#
# This script will grab the route table information from a Cisco IOS or NXOS device and export details about each
# next-hop address (how many routes and from which protocol) into a CSV file. It will also list all connected networks
# and give a detailed breakdown of every route that goes to each next-hop.
#
#
# ################################################ SCRIPT SETTING ###################################################
#
# Global settings that affect all scripts (output directory, date format, etc) is stored in the "global_settings.json"
# file in the "settings" directory.
#
# If any local settings are used for this script, they will be stored in the same settings folder, with the same name
# as the script that uses them, except ending with ".json".
#
# All settings can be manually modified in JSON format (the same syntax as Python lists and dictionaries). Be aware of
# required commas between items, or else options are likely to get run together and break the script.
#
# **IMPORTANT** All paths saved in .json files must contain either forward slashes (/home/jcaesar) or
# DOUBLE back-slashes (C:\\Users\\Jamie). Single backslashes will be considered part of a control character and will
# cause an error on loading.
#
# ################################################ IMPORTS ###################################################
import os
import sys
import logging
# If the "crt" object exists, this is being run from SecureCRT. Get script directory so we can add it to the
# PYTHONPATH, which is needed to import our custom modules.
if 'crt' in globals():
script_dir, script_name = os.path.split(crt.ScriptFullName)
if script_dir not in sys.path:
sys.path.insert(0, script_dir)
else:
script_dir, script_name = os.path.split(os.path.realpath(__file__))
os.chdir(script_dir)
# Now we can import our custom modules
import securecrt_tools.sessions as sessions
import securecrt_tools.settings as settings
import securecrt_tools.utilities as utils
import securecrt_tools.ipaddress as ipaddress
# ################################################ LOAD SETTINGS ###################################################
session_set_filename = os.path.join(script_dir, "settings", settings.global_settings_filename)
session_settings = settings.SettingsImporter(session_set_filename, settings.global_defs)
# Set logger variable -- this won't be used unless debug setting is True
logger = logging.getLogger("securecrt")
# ################################################ SCRIPT ###################################################
def update_empty_interfaces(route_table):
"""
Takes the routes table as a list of dictionaries (with dict key names used in parse_routes function) and does
recursive lookups to find the outgoing interface for those entries in the route-table where the outgoing interface
isn't listed.
:param route_table: <list> A list of dictionaries - specifically with the keys 'network', 'protocol', 'nexthop'
        and 'interface'
:return: The updated route_table object with outbound interfaces filled in.
"""
def recursive_lookup(nexthop):
for network in connected:
if nexthop in network:
return connected[network]
for network in statics:
if nexthop in network:
return recursive_lookup(statics[network])
return None
logger.debug("STARTING update_empty_interfaces")
connected = {}
unknowns = {}
statics = {}
for route in route_table:
if route['protocol'] == 'connected':
connected[route['network']] = route['interface']
if route['protocol'] == 'static':
if route['nexthop']:
statics[route['network']] = route['nexthop']
if route['nexthop'] and not route['interface']:
unknowns[route['nexthop']] = None
for nexthop in unknowns:
unknowns[nexthop] = recursive_lookup(nexthop)
for route in route_table:
if not route['interface']:
if route['nexthop'] in unknowns:
route['interface'] = unknowns[route['nexthop']]
logger.debug("ENDING update_empty_interfaces")
def parse_routes(fsm_routes):
"""
This function will take the TextFSM parsed route-table from the `textfsm_parse_to_dict` function. Each dictionary
in the TextFSM output represents a route entry. Each of these dictionaries will be updated to convert IP addresses
into ip_address or ip_network objects (from the ipaddress.py module). Some key names will also be updated also.
:param fsm_routes: <list of dicts> TextFSM output from the `textfsm_parse_to_dict` function.
:return: <list of dicts> An updated list of dictionaries that replaces IP address strings with objects from the
ipaddress.py module from Google.
"""
logger.debug("STARTING parse_routes function.")
complete_table = []
for route in fsm_routes:
new_entry = {}
logger.debug("Processing route entry: {0}".format(str(route)))
new_entry['network'] = ipaddress.ip_network(u"{0}/{1}".format(route['NETWORK'], route['MASK']))
new_entry['protocol'] = utils.normalize_protocol(route['PROTOCOL'])
if route['NEXTHOP_IP'] == '':
new_entry['nexthop'] = None
else:
new_entry['nexthop'] = ipaddress.ip_address(unicode(route['NEXTHOP_IP']))
if route["NEXTHOP_IF"] == '':
new_entry['interface'] = None
else:
new_entry['interface'] = route['NEXTHOP_IF']
# Nexthop VRF will only occur in NX-OS route tables (%vrf-name after the nexthop)
if 'NEXTHOP_VRF' in route:
if route['NEXTHOP_VRF'] == '':
new_entry['vrf'] = None
else:
new_entry['vrf'] = route['NEXTHOP_VRF']
logger.debug("Adding updated route entry '{0}' based on the information: {1}".format(str(new_entry),
str(route)))
complete_table.append(new_entry)
update_empty_interfaces(complete_table)
logger.debug("ENDING parse_route function")
return complete_table
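# Each entry returned above is a dict with keys 'network' (an ip_network object),
# 'protocol' (a normalized string such as 'ospf' or 'static'), 'nexthop'
# (an ip_address object or None) and 'interface' (a string or None).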
def nexthop_summary(textfsm_dict):
"""
A function that builds a CSV output (list of lists) that displays the summary information after analyzing the
input route table.
    :param textfsm_dict: <list of dicts> The parsed and converted route table, as returned by `parse_routes`
    :return: <list of lists> Summary, connected-network and route-detail rows, ready to be written to a CSV file
"""
    # Identify connected or other local networks -- mostly found in NXOS -- to exclude from next-hops. These are excluded
# from the nexthop summary (except connected has its own section in the output).
logger.debug("STARTING nexthop_summary function")
local_protos = ['connected', 'local', 'hsrp', 'vrrp', 'glbp']
# Create a list of all dynamic protocols from the provided route table. Add total and statics to the front.
proto_list = []
for entry in textfsm_dict:
if entry['protocol'] not in proto_list and entry['protocol'] not in local_protos:
logger.debug("Found protocol '{0}' in the table".format(entry['protocol']))
proto_list.append(entry['protocol'])
proto_list.sort(key=utils.human_sort_key)
proto_list.insert(0, 'total')
proto_list.insert(0, 'interface')
# Create dictionaries to store summary information as we process the route table.
summary_table = {}
connected_table = {}
detailed_table = {}
# Process the route table to populate the above 3 dictionaries.
for entry in textfsm_dict:
logger.debug("Processing route: {0}".format(str(entry)))
# If the route is connected, local or an FHRP entry
if entry['protocol'] in local_protos:
if entry['protocol'] == 'connected':
if entry['interface'] not in connected_table:
connected_table[entry['interface']] = []
connected_table[entry['interface']].append(str(entry['network']))
else:
if entry['nexthop']:
if 'vrf' in entry and entry['vrf']:
nexthop = "{0}%{1}".format(entry['nexthop'], entry['vrf'])
else:
nexthop = str(entry['nexthop'])
elif entry['interface'].lower() == "null0":
nexthop = 'discard'
if nexthop not in summary_table:
# Create an entry for this next-hop, containing zero count for all protocols.
summary_table[nexthop] = {}
summary_table[nexthop].update(zip(proto_list, [0] * len(proto_list)))
summary_table[nexthop]['interface'] = entry['interface']
# Increment total and protocol specific count
summary_table[nexthop][entry['protocol']] += 1
summary_table[nexthop]['total'] += 1
if nexthop not in detailed_table:
detailed_table[nexthop] = []
detailed_table[nexthop].append((str(entry['network']), entry['protocol']))
# Convert summary_table into a format that can be printed to the CSV file.
output = []
header = ["Nexthop", "Interface", "Total"]
header.extend(proto_list[2:])
output.append(header)
summary_keys = sorted(summary_table.keys(), key=utils.human_sort_key)
for key in summary_keys:
line = [key]
for column in proto_list:
line.append(summary_table[key][column])
output.append(line)
output.append([])
# Convert the connected_table into a format that can be printed to the CSV file (and append to output)
output.append([])
output.append(["Connected:"])
output.append(["Interface", "Network(s)"])
connected_keys = sorted(connected_table.keys(), key=utils.human_sort_key)
for key in connected_keys:
line = [key]
for network in connected_table[key]:
line.append(network)
output.append(line)
output.append([])
# Convert the detailed_table into a format that can be printed to the CSV file (and append to output)
output.append([])
output.append(["Route Details"])
output.append(["Nexthop", "Network", "Protocol"])
detailed_keys = sorted(detailed_table.keys(), key=utils.human_sort_key)
for key in detailed_keys:
for network in detailed_table[key]:
line = [key]
line.extend(list(network))
output.append(line)
output.append([])
# Return the output, ready to be sent to directly to a CSV file
logger.debug("ENDING nexthop_summary function")
return output
def script_main(session):
supported_os = ["IOS", "NXOS"]
if session.os not in supported_os:
logger.debug("Unsupported OS: {0}. Exiting program.".format(session.os))
session.message_box("{0} is not a supported OS for this script.".format(session.os), "Unsupported OS",
options=sessions.ICON_STOP)
return
else:
send_cmd = "show ip route"
selected_vrf = session.prompt_window("Enter the VRF name. (Leave blank for default VRF)")
if selected_vrf != "":
send_cmd = send_cmd + " vrf {0}".format(selected_vrf)
session.hostname = session.hostname + "-VRF-{0}".format(selected_vrf)
logger.debug("Received VRF: {0}".format(selected_vrf))
raw_routes = session.get_command_output(send_cmd)
if session.os == "IOS":
template_file = "textfsm-templates/cisco_ios_show_ip_route.template"
else:
template_file = "textfsm-templates/cisco_nxos_show_ip_route.template"
fsm_results = utils.textfsm_parse_to_dict(raw_routes, template_file)
route_list = parse_routes(fsm_results)
output_filename = session.create_output_filename("nexthop-summary", ext=".csv")
output = nexthop_summary(route_list)
utils.list_of_lists_to_csv(output, output_filename)
# Clean up before closing session
session.end()
# ################################################ SCRIPT LAUNCH ###################################################
# If this script is run from SecureCRT directly, create our session object using the "crt" object provided by SecureCRT
if __name__ == "__builtin__":
# Create a session object for this execution of the script and pass it to our main() function
crt_session = sessions.CRTSession(crt, session_settings)
script_main(crt_session)
# Else, if this script is run directly then create a session object without the SecureCRT API (crt object) This would
# be done for debugging purposes (running the script outside of SecureCRT and feeding it the output it failed on)
elif __name__ == "__main__":
direct_session = sessions.DirectSession(os.path.realpath(__file__), session_settings)
script_main(direct_session) | 43.312292 | 119 | 0.635192 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,555 | 0.502723 |
8c61ef6649f0e4d09559b96c6697fc05d18d8b67 | 2,495 | py | Python | find_str_in_dump_bin.py | jasonivey/scripts | 09f9702e5ce62abbb7699aae16b45b33711fe856 | [
"MIT"
] | null | null | null | find_str_in_dump_bin.py | jasonivey/scripts | 09f9702e5ce62abbb7699aae16b45b33711fe856 | [
"MIT"
] | null | null | null | find_str_in_dump_bin.py | jasonivey/scripts | 09f9702e5ce62abbb7699aae16b45b33711fe856 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
import re
import fnmatch
import subprocess
import tempfile
import Utils
def GetFiles(dir, filePattern):
paths = []
for root, dirs, files in os.walk(dir):
for file in files:
if fnmatch.fnmatch(os.path.join(root, file), filePattern):
paths.append(os.path.join(root, file))
return paths
def ParseArgs(args):
filePattern = None
strPattern = None
ignoreCase = False
i = 0
count = len(args)
while i < count:
arg = args[i]
if Utils.IsSwitch(arg) and arg[1:].lower().startswith('f') and i + 1 < count:
filePattern = args[i + 1]
i += 1
elif Utils.IsSwitch(arg) and arg[1:].lower().startswith('s') and i + 1 < count:
strPattern = args[i + 1]
i += 1
elif Utils.IsSwitch(arg) and arg[1:].lower().startswith('i'):
ignoreCase = True
i += 1
if 'VS90COMNTOOLS' not in os.environ:
print('ERROR: VS90COMNTOOLS not defined in the environment!')
sys.exit(1)
if not filePattern or not strPattern:
print('ERROR: Specify a file pattern and a search string with -f and -s!')
sys.exit(1)
return filePattern, strPattern, ignoreCase
def DumpBin(filename):
filehandle, tmpfilename = tempfile.mkstemp()
os.close(filehandle)
vcvars = os.path.normpath( os.path.join( os.environ['VS90COMNTOOLS'], '..', '..', 'vc', 'bin', 'vcvars32.bat' ) )
command = '"%s" && dumpbin.exe /all /out:%s %s' % (vcvars, tmpfilename, filename)
print(command)
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p.wait()
return tmpfilename
if __name__ == '__main__':
filePattern, strPattern, ignoreCase = ParseArgs(sys.argv)
for filename in GetFiles(os.getcwd(), filePattern):
output_file_name = DumpBin(filename)
printHeader = False
with open(output_file_name) as file:
for line in file.readlines():
match = re.search(strPattern, line, re.I if ignoreCase else 0)
if not match:
continue
if not printHeader:
printHeader = True
print(('%s:' % filename))
print(('\t%s' % line.strip('\n')))
os.remove(output_file_name)
| 31.1875 | 118 | 0.565932 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 275 | 0.11022 |
8c62511969548c233faf8da9c034a17e2c71d334 | 2,427 | py | Python | bracex/__main__.py | moreati/bracex | c8fcfa695cbf8cea27ac561e72a232c784f18deb | [
"MIT"
] | null | null | null | bracex/__main__.py | moreati/bracex | c8fcfa695cbf8cea27ac561e72a232c784f18deb | [
"MIT"
] | null | null | null | bracex/__main__.py | moreati/bracex | c8fcfa695cbf8cea27ac561e72a232c784f18deb | [
"MIT"
] | null | null | null | """
Expands a bash-style brace expression, and outputs each expansion.
Licensed under MIT
Copyright (c) 2018 - 2020 Isaac Muse <isaacmuse@gmail.com>
Copyright (c) 2021 Alex Willmer <alex@moreati.org.uk>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import argparse
import bracex
def main(argv=None):
"""Accept command line arguments and output brace expansion to stdout."""
parser = argparse.ArgumentParser(
prog='python -m bracex',
description='Expands a bash-style brace expression, and outputs each expansion.',
allow_abbrev=False,
)
parser.add_argument(
'expression',
help="Brace expression to expand",
)
terminators = parser.add_mutually_exclusive_group()
terminators.add_argument(
'--terminator', '-t',
default='\n',
metavar='STR',
help="Terminate each expansion with string STR (default: \\n)",
)
terminators.add_argument(
'-0',
action='store_const',
const='\0',
dest='terminator',
help="Terminate each expansion with a NUL character",
)
parser.add_argument(
'--version',
action='version',
version=bracex.__version__,
)
args = parser.parse_args(argv)
for expansion in bracex.iexpand(args.expression, limit=0):
print(expansion, end=args.terminator)
raise SystemExit(0)
if __name__ == '__main__':
main() # pragma: no cover
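# Example invocation (illustrative):
#
#     $ python -m bracex "img_{a,b}.png"
#     img_a.png
#     img_b.png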
| 36.223881 | 113 | 0.711166 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,642 | 0.676555 |
8c62d14fe132aaad38555461feeef1e612b314fa | 2,974 | py | Python | libs/ALICE/parsing_ALICE.py | EGI-Foundation/impact-report | ed521405ebfa43968e94e9a0b60379ad9bc2b931 | [
"MIT"
] | null | null | null | libs/ALICE/parsing_ALICE.py | EGI-Foundation/impact-report | ed521405ebfa43968e94e9a0b60379ad9bc2b931 | [
"MIT"
] | 1 | 2020-11-16T09:43:43.000Z | 2020-11-16T11:03:28.000Z | libs/ALICE/parsing_ALICE.py | EGI-Foundation/impact-report | ed521405ebfa43968e94e9a0b60379ad9bc2b931 | [
"MIT"
] | 1 | 2021-02-26T11:31:27.000Z | 2021-02-26T11:31:27.000Z | #!/usr/bin/env python3
import csv
import os
import requests
from bs4 import BeautifulSoup
from dateutil.parser import parse
def print_details(url, csv_filename, years):
"""
    Parse the scientific publications from the web site and
    export the list to a CSV file
"""
item_year = item_journal = item_doi = item_title = ""
with open(csv_filename, "w", newline="") as csvfile:
# Header of the CSV file
fieldnames = ["Author(s)", "Year", "Title", "Journal", "DOI"]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
index = 0
rn = requests.get(url)
soup = BeautifulSoup(rn.text, "lxml")
gdp_table = soup.find("table", {"class": "views-table"})
if gdp_table.findAll("tr") is not None:
for row in gdp_table.findAll("tr"):
for col in row.findAll("td"):
# Getting the title of the publication
item_title = (
col.text[0 : col.text.find("Article reference")]
).strip()
for li in row.findAll("li"):
if li.b is not None:
# Getting the Journal of the publication
if "Article reference:" in li.text:
item = li.text.split()
item_journal = item[2] + " " + item[3] + " " + item[4]
# Getting the year of the publication
if "Publication date:" in li.text:
item_year = li.text.split()[-1]
item_year = parse(item_year).year
if item_year in years:
# print(item_title)
# print(item_year)
# print(item_journal)
# print(item_doi)
writer.writerow(
{
"Author(s)": "ALICE Collaboration",
"Year": item_year,
"Title": item_title,
"Journal": item_journal,
"DOI": item_doi,
}
)
index = index + 1
def main():
print("- Parsing publications in progress...", end="")
url = (
"https://alice-publications.web.cern.ch/publications"
"?title=&field_draft_pub_date_value%5Bmin%5D="
"&field_draft_pub_date_value%5Bmax%5D=&items_per_page=100"
)
csv_filename = "publications.csv"
years = [2016, 2017, 2018, 2019]
print_details(url, csv_filename, years)
if os.stat(csv_filename).st_size > 34:
print("[OK]")
else:
print("[WARNING]")
print("No publications found in the list!")
if __name__ == "__main__":
main()
| 33.41573 | 86 | 0.481843 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 839 | 0.282112 |
8c62f4bb9a7fb6c749e7188ab59f9b09cd57ecf8 | 7,699 | py | Python | ohlc.py | liam-e/wsb-tracker | c05858ff67180e1a3cbdb58a2f89ef52ff0c842f | [
"MIT"
] | 1 | 2021-01-22T02:02:30.000Z | 2021-01-22T02:02:30.000Z | ohlc.py | liam-e/wsb-tracker | c05858ff67180e1a3cbdb58a2f89ef52ff0c842f | [
"MIT"
] | null | null | null | ohlc.py | liam-e/wsb-tracker | c05858ff67180e1a3cbdb58a2f89ef52ff0c842f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import datetime as dt
import os
import sys
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import numpy as np
from matplotlib import style
from mplfinance.original_flavor import candlestick_ohlc
import data_loader
import sentiment_charts
os.chdir(sys.path[0])
style.use("dark_background")
mpl.rcParams.update({"grid.linestyle": "--", "grid.color": "darkgray"})
def indicator_chart(symbol, directory, start=dt.datetime(2020, 9, 1), smas=(10, 30, 50, 210), sentiment_value=None,
frequency_value=None, prefix=None):
print(f"Plotting ohlc chart for {symbol}...")
try:
start = start - dt.timedelta(days=max(smas))
now = dt.datetime.now()
df = data_loader.load_price_history(symbol, start, now)
if df is None or len(df) == 0:
print(f"Dataframe is empty for {symbol}.")
return
date_delta = df.index[-1] - df.index[0]
smas = [sma for sma in smas if sma < date_delta.days / 2]
fig, ax = plt.subplots()
fig.set_size_inches(18, 9)
for sma in smas:
df[f"SMA_{sma}"] = df["Adj Close"].rolling(window=sma).mean()
# Bollinger bands
bb_period = 15 # moving average
std_dev = 2
df[f"SMA_{bb_period}"] = df["Adj Close"].rolling(window=bb_period).mean()
df["std_dev"] = df["Adj Close"].rolling(window=bb_period).std()
df["lower_band"] = df[F"SMA_{bb_period}"] - (std_dev * df["std_dev"]) # upper Bollinger band
df["upper_band"] = df[F"SMA_{bb_period}"] + (std_dev * df["std_dev"]) # lower Bollinger band
df["Date"] = mdates.date2num(df.index)
# 10.4.4 stochastic
period = 10
K = 4
D = 4
df["rol_high"] = df["High"].rolling(window=period).max() # high of period
df["rol_low"] = df["High"].rolling(window=period).min() # low of period
df["stok"] = ((df["Adj Close"] - df["rol_low"]) / (df["rol_high"] - df["rol_low"])) * 100 # 10.1
df["K"] = df["stok"].rolling(window=K).mean() # 10.4
df["D"] = df["K"].rolling(window=D).mean() # 10.4.4
df["GD"] = df["K"].rolling(window=D).mean() # green dots
ohlc = []
df = df.iloc[max(smas):]
green_dot_date = []
green_dot = []
last_K = 0
last_D = 0
last_low = 0
last_close = 0
last_low_bb = 0
# Iterate through price history creating candlesticks and green/blue dots
for i in df.index:
candlestick = df["Date"][i], df["Open"][i], df["High"][i], df["Low"][i], df["Adj Close"][i]
ohlc.append(candlestick)
# Green dot
if df["K"][i] > df["D"][i] and last_K < last_D and last_K < 60:
if 30 in smas and df["High"][i] > df["SMA_30"][i]:
color = "chartreuse"
else:
color = "green"
plt.plot(df["Date"][i], df["High"][i], marker="o", ms=8, ls="", color=color)
plt.annotate(f"{df['High'][i]:.2f}", (df["Date"][i], df["High"][i]), fontsize=10)
green_dot_date.append(i)
green_dot.append(df["High"][i])
# Lower Bollinger Band Bounce
if ((last_low < last_low_bb) or (df["Low"][i] < df["lower_band"][i])) and (
df["Adj Close"][i] > last_close and df["Adj Close"][i] > df["lower_band"][i]) and last_K < 60:
plt.plot(df["Date"][i], df["Low"][i], marker="o", ms=8, ls="", color="deepskyblue") # plot blue dot
plt.annotate(f"{df['Low'][i]:.2f}", (df["Date"][i], df["Low"][i]), xytext=(-10, 7), fontsize=10)
# store values
last_K = df["K"][i]
last_D = df["D"][i]
last_low = df["Low"][i]
last_close = df["Adj Close"][i]
last_low_bb = df["lower_band"][i]
# Plot moving averages and BBands
sma_colors = ["cyan", "magenta", "yellow", "orange"]
        for i, sma in enumerate(smas):  # plot the SMAs computed above for the stated periods
            df[f"SMA_{sma}"].plot(label=f"{sma} SMA", color=sma_colors[i])
df["upper_band"].plot(label="Upper Band", color="dimgray", linestyle=":")
df["lower_band"].plot(label="Lower Band", color="dimgray", linestyle=":")
# plot candlesticks
candlestick_ohlc(ax, ohlc, width=0.75, colorup="w", colordown="r", alpha=0.75)
ax.xaxis.set_major_formatter(mdates.DateFormatter("%B %d")) # change x axis back to datestamps
ax.xaxis.set_major_locator(mticker.MaxNLocator(8)) # add more x axis labels
plt.tick_params(axis="x", rotation=45) # rotate dates for readability
# Pivot Points
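        # a high counts as a pivot when it stays the maximum of the rolling value
        # window for 5 consecutive bars; pivots are drawn below as horizontal lines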
pivots = [] # Stores pivot values
dates = [] # Stores Dates corresponding to those pivot values
counter = 0 # Will keep track of whether a certain value is a pivot
        last_pivot = 0  # Will store the last Pivot value
value_range = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
date_range = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for i in df.index:
current_max = max(value_range, default=0)
value = np.round(df["High"][i], 2)
value_range = value_range[1:9]
date_range = date_range[1:9]
value_range.append(value)
date_range.append(i)
if current_max == max(value_range, default=0):
counter += 1
else:
counter = 0
if counter == 5:
last_pivot = current_max
date_loc = value_range.index(last_pivot)
last_date = date_range[date_loc]
pivots.append(last_pivot)
dates.append(last_date)
timeD = dt.timedelta(days=30) # Sets length of dotted line on chart
for index in range(len(pivots)): # Iterates through pivot array
# print(str(pivots[index])+": "+str(dates[index])) #Prints Pivot, Date couple
plt.plot_date([dates[index] - (timeD * .075), dates[index] + timeD], # Plots horizontal line at pivot value
[pivots[index], pivots[index]], linestyle="--", linewidth=2, marker=",", color="chartreuse")
plt.annotate(str(pivots[index]), (mdates.date2num(dates[index]), pivots[index]), xytext=(-10, 7),
textcoords="offset points", fontsize=14, arrowprops=dict(arrowstyle="simple"))
plt.xlabel("Date") # set x axis label
plt.ylabel("Price") # set y axis label
if sentiment_value is not None and frequency_value is not None:
plt.title(f"{sentiment_charts.stock_label(symbol)} - daily indicator chart - "
f"sentiment = {sentiment_value:.2f}, frequency = {frequency_value*100:.2f}%")
else:
plt.title(f"{sentiment_charts.stock_label(symbol)} - daily indicator chart")
plt.ylim(df["Low"].min(), df["High"].max() * 1.05) # add margins
# plt.yscale("log")
plt.legend(loc="upper left")
plt.grid()
if prefix is not None:
prefix_str = f"{prefix}_"
else:
prefix_str = ""
file_path = f"public_html/finance/res/img/ohlc/{directory}"
if not os.path.exists(file_path):
os.makedirs(file_path)
plt.savefig(f"{file_path}/{prefix_str}{symbol}_ohlc.png", dpi=150)
plt.close(fig)
plt.clf()
except ValueError:
print("ValueError for " + symbol)
return
| 38.113861 | 120 | 0.561631 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,188 | 0.284193 |
8c6343dfd08cece5714c34b2c3f3052459ca0025 | 2,262 | py | Python | grapaold/layerfiles/gcomgraphand2.py | psorus/grapa | 6af343bb35c466c971ded1876e7a9d00e77cef00 | [
"MIT"
] | null | null | null | grapaold/layerfiles/gcomgraphand2.py | psorus/grapa | 6af343bb35c466c971ded1876e7a9d00e77cef00 | [
"MIT"
] | null | null | null | grapaold/layerfiles/gcomgraphand2.py | psorus/grapa | 6af343bb35c466c971ded1876e7a9d00e77cef00 | [
"MIT"
] | null | null | null | import numpy as np
import math
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer,Dense, Activation
import tensorflow.keras as keras# as k
import tensorflow as t
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam,SGD
from tensorflow.linalg import trace
def R(x,c_const=1000.0,cut=0.5):
    return K.relu(c_const*(x-cut)+1)-K.relu(c_const*(x-c_const))
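# R is roughly a hard threshold at `cut`, built from two steep ReLU ramps (slope c_const);
# doand reduces the last axis of x either smoothly ("prod"/"sum") or through that
# threshold ("and"/"or"), which lets two adjacency matrices be combined elementwise.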
def doand(x,mode="prod",c_const=1000.0,cut=0.5):
if mode=="and":
ret=R(K.prod(x,axis=-1),c_const=c_const,cut=cut)
if mode=="prod":
ret=K.prod(x,axis=-1)
if mode=="or":
ret=R(K.sum(x,axis=-1),c_const=c_const,cut=cut)
if mode=="sum":
ret=K.sum(x,axis=-1)
return ret
class gcomgraphand2(Layer):#is capable of running "and" operations on two (?,c*gs,c*gs) graphs, resulting in (?,c*gs,c*gs)
def __init__(self,gs=20,c=2,mode="prod",cut=0.5,c_const=1000.0,**kwargs):
self.gs=gs
self.c=c
self.mode=mode
self.cut=cut
self.c_const=c_const
super(gcomgraphand2,self).__init__(**kwargs)
def build(self, input_shape):
#self.trafo=self.add_weight(name="trafo",
# shape=(self.param,self.c*self.c),
# initializer=self.initializer,
# trainable=self.trainable)
self.built=True
def call(self,q):
y=q[1]
x=q[0]
x=K.reshape(x,(-1,self.gs*self.c,self.gs*self.c,1))
y=K.reshape(y,(-1,self.gs*self.c,self.gs*self.c,1))
q=K.concatenate((x,y),axis=-1)
ret=doand(q,mode=self.mode,c_const=self.c_const,cut=self.cut)
return ret
def compute_output_shape(self,input_shape):
g_shape=input_shape[0]
g_shape2=input_shape[1]
assert len(g_shape)==3
assert g_shape[1]==self.gs*self.c
assert g_shape[2]==self.gs*self.c
assert g_shape2[1]==self.gs*self.c
assert g_shape2[2]==self.gs*self.c
return tuple([g_shape[0],self.gs*self.c,self.gs*self.c])
def get_config(self):
mi={"gs":self.gs,"c":self.c,"mode":self.mode,"cut":self.cut,"c_const":self.c_const}
th=super(gcomgraphand2,self).get_config()
th.update(mi)
return th
def from_config(config):
return gcomgraphand2(**config)
| 22.39604 | 123 | 0.645447 | 1,520 | 0.671972 | 0 | 0 | 0 | 0 | 0 | 0 | 373 | 0.164898 |
8c6372eae92e511cee18a6a07a3af3cd69edaeb3 | 9,077 | py | Python | rl/meta_ppo_agent.py | clvrai/coordination | 2b1bc8a6817b477f49c0cf6bdacd9c2f2e56f692 | [
"MIT"
] | 33 | 2020-02-15T07:52:05.000Z | 2021-12-27T04:19:45.000Z | rl/meta_ppo_agent.py | clvrai/coordination | 2b1bc8a6817b477f49c0cf6bdacd9c2f2e56f692 | [
"MIT"
] | null | null | null | rl/meta_ppo_agent.py | clvrai/coordination | 2b1bc8a6817b477f49c0cf6bdacd9c2f2e56f692 | [
"MIT"
] | 6 | 2020-10-12T01:37:02.000Z | 2022-02-21T12:49:49.000Z | import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from rl.dataset import ReplayBuffer, RandomSampler
from rl.base_agent import BaseAgent
from rl.policies.mlp_actor_critic import MlpActor, MlpCritic
from util.logger import logger
from util.mpi import mpi_average
from util.pytorch import optimizer_cuda, count_parameters, \
compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, \
obs2tensor, to_tensor
from env.action_spec import ActionSpec
class MetaPPOAgent(BaseAgent):
""" Meta policy class. """
def __init__(self, config, ob_space):
super().__init__(config, ob_space)
if config.meta is None:
logger.warn('Creating a dummy meta policy.')
return
# parse body parts and skills
if config.subdiv:
# subdiv = ob1,ob2-ac1/ob3,ob4-ac2/...
clusters = config.subdiv.split('/')
clusters = [cluster.split('-')[1].split(',') for cluster in clusters]
else:
clusters = [ob_space.keys()]
if config.subdiv_skills:
subdiv_skills = config.subdiv_skills.split('/')
subdiv_skills = [skills.split(',') for skills in subdiv_skills]
else:
subdiv_skills = [['primitive']] * len(clusters)
self.subdiv_skills = subdiv_skills
assert len(subdiv_skills) == len(clusters), \
'subdiv_skills and clusters have different # subdivisions'
if config.meta == 'hard':
ac_space = ActionSpec(size=0)
for cluster, skills in zip(clusters, subdiv_skills):
ac_space.add(','.join(cluster), 'discrete', len(skills), 0, 1)
self.ac_space = ac_space
if config.diayn:
ob_clusters = config.subdiv.split('/')
ob_clusters = [cluster.split('-')[0].split(',') for cluster in ob_clusters]
for cluster, skills in zip(ob_clusters, subdiv_skills):
self.ac_space.add(','.join(cluster) + '_diayn', 'continuous', config.z_dim, 0, 1)
# build up networks
self._actor = MlpActor(config, ob_space, ac_space, tanh_policy=False)
self._old_actor = MlpActor(config, ob_space, ac_space, tanh_policy=False)
self._critic = MlpCritic(config, ob_space)
self._network_cuda(config.device)
self._actor_optim = optim.Adam(self._actor.parameters(), lr=config.lr_actor)
self._critic_optim = optim.Adam(self._critic.parameters(), lr=config.lr_critic)
sampler = RandomSampler()
self._buffer = ReplayBuffer(['ob', 'ac', 'done', 'rew', 'ret', 'adv',
'ac_before_activation', 'log_prob'],
config.buffer_size,
sampler.sample_func)
if config.is_chef:
logger.warn('Creating a meta PPO agent')
logger.info('The actor has %d parameters', count_parameters(self._actor))
logger.info('The critic has %d parameters', count_parameters(self._critic))
def store_episode(self, rollouts):
""" Stores @rollouts to replay buffer. """
self._compute_gae(rollouts)
self._buffer.store_episode(rollouts)
def _compute_gae(self, rollouts):
""" Computes GAE from @rollouts. """
T = len(rollouts['done'])
ob = rollouts['ob']
ob = self.normalize(ob)
ob = obs2tensor(ob, self._config.device)
vpred = self._critic(ob).detach().cpu().numpy()[:,0]
assert len(vpred) == T + 1
done = rollouts['done']
rew = rollouts['rew']
adv = np.empty((T, ) , 'float32')
lastgaelam = 0
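        # Generalized Advantage Estimation, computed backwards over the rollout:
        #   delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_t) - V(s_t)
        #   A_t     = delta_t + gamma * lambda * (1 - done_t) * A_{t+1}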
for t in reversed(range(T)):
nonterminal = 1 - done[t]
delta = rew[t] + self._config.discount_factor * vpred[t + 1] * nonterminal - vpred[t]
adv[t] = lastgaelam = delta + self._config.discount_factor * self._config.gae_lambda * nonterminal * lastgaelam
ret = adv + vpred[:-1]
assert np.isfinite(adv).all()
assert np.isfinite(ret).all()
# update rollouts
if adv.std() == 0:
rollouts['adv'] = (adv * 0).tolist()
else:
rollouts['adv'] = ((adv - adv.mean()) / adv.std()).tolist()
rollouts['ret'] = ret.tolist()
def state_dict(self):
if self._config.meta is None:
return {}
return {
'actor_state_dict': self._actor.state_dict(),
'critic_state_dict': self._critic.state_dict(),
'actor_optim_state_dict': self._actor_optim.state_dict(),
'critic_optim_state_dict': self._critic_optim.state_dict(),
'ob_norm_state_dict': self._ob_norm.state_dict(),
}
def load_state_dict(self, ckpt):
if self._config.meta is None:
return
self._actor.load_state_dict(ckpt['actor_state_dict'])
self._critic.load_state_dict(ckpt['critic_state_dict'])
self._ob_norm.load_state_dict(ckpt['ob_norm_state_dict'])
self._network_cuda(self._config.device)
self._actor_optim.load_state_dict(ckpt['actor_optim_state_dict'])
self._critic_optim.load_state_dict(ckpt['critic_optim_state_dict'])
optimizer_cuda(self._actor_optim, self._config.device)
optimizer_cuda(self._critic_optim, self._config.device)
def _network_cuda(self, device):
self._actor.to(device)
self._old_actor.to(device)
self._critic.to(device)
def sync_networks(self):
sync_networks(self._actor)
sync_networks(self._critic)
def train(self):
self._copy_target_network(self._old_actor, self._actor)
for _ in range(self._config.num_batches):
transitions = self._buffer.sample(self._config.batch_size)
train_info = self._update_network(transitions)
self._buffer.clear()
train_info.update({
'actor_grad_norm': compute_gradient_norm(self._actor),
'actor_weight_norm': compute_weight_norm(self._actor),
'critic_grad_norm': compute_gradient_norm(self._critic),
'critic_weight_norm': compute_weight_norm(self._critic),
})
return train_info
def _update_network(self, transitions):
info = {}
# pre-process observations
o = transitions['ob']
o = self.normalize(o)
bs = len(transitions['done'])
_to_tensor = lambda x: to_tensor(x, self._config.device)
o = _to_tensor(o)
ac = _to_tensor(transitions['ac'])
z = _to_tensor(transitions['ac_before_activation'])
ret = _to_tensor(transitions['ret']).reshape(bs, 1)
adv = _to_tensor(transitions['adv']).reshape(bs, 1)
old_log_pi = _to_tensor(transitions['log_prob']).reshape(bs, 1)
log_pi, ent = self._actor.act_log(o, z)
if (log_pi - old_log_pi).max() > 20:
print('(log_pi - old_log_pi) is too large', (log_pi - old_log_pi).max())
import ipdb; ipdb.set_trace()
# the actor loss
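        # PPO clipped surrogate: with probability ratio r = exp(log_pi - old_log_pi),
        # maximize E[min(r * A, clip(r, 1 - eps, 1 + eps) * A)]; negated below to form a loss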
entropy_loss = self._config.entropy_loss_coeff * ent.mean()
ratio = torch.exp(torch.clamp(log_pi - old_log_pi, -20, 20))
surr1 = ratio * adv
surr2 = torch.clamp(ratio, 1.0 - self._config.clip_param,
1.0 + self._config.clip_param) * adv
actor_loss = -torch.min(surr1, surr2).mean()
if not np.isfinite(ratio.cpu().detach()).all() or not np.isfinite(adv.cpu().detach()).all():
import ipdb; ipdb.set_trace()
info['entropy_loss'] = entropy_loss.cpu().item()
info['actor_loss'] = actor_loss.cpu().item()
actor_loss += entropy_loss
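        # optional skill-discriminator term (used when the actor carries a DIAYN-style
        # skill discriminator, cf. the diayn/z_dim options handled in __init__); None otherwise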
discriminator_loss = self._actor.discriminator_loss()
if discriminator_loss is not None:
actor_loss += discriminator_loss * self._config.discriminator_loss_weight
info['discriminator_loss'] = discriminator_loss.cpu().item()
# the q loss
value_pred = self._critic(o)
value_loss = self._config.value_loss_coeff * (ret - value_pred).pow(2).mean()
info['value_target'] = ret.mean().cpu().item()
info['value_predicted'] = value_pred.mean().cpu().item()
info['value_loss'] = value_loss.cpu().item()
# update the actor
self._actor_optim.zero_grad()
actor_loss.backward()
sync_grads(self._actor)
self._actor_optim.step()
# update the critic
self._critic_optim.zero_grad()
value_loss.backward()
sync_grads(self._critic)
self._critic_optim.step()
# include info from policy
info.update(self._actor.info)
return mpi_average(info)
def act(self, ob, is_train=True):
"""
Returns a set of actions and the actors' activations given an observation @ob.
"""
if self._config.meta:
ob = self.normalize(ob)
return self._actor.act(ob, is_train, return_log_prob=True)
else:
return [0], None, None
| 37.6639 | 123 | 0.61452 | 8,575 | 0.944695 | 0 | 0 | 0 | 0 | 0 | 0 | 1,251 | 0.137821 |
8c63a890e4c7c21d82ab4e450776446d2c1bea12 | 978 | py | Python | article/tests.py | AngleMAXIN/nomooc | 0c06b8af405b8254c242b34e85b7cba99c8f5737 | [
"MIT"
] | 1 | 2019-12-04T03:20:16.000Z | 2019-12-04T03:20:16.000Z | article/tests.py | AngleMAXIN/NoMooc | 0c06b8af405b8254c242b34e85b7cba99c8f5737 | [
"MIT"
] | null | null | null | article/tests.py | AngleMAXIN/NoMooc | 0c06b8af405b8254c242b34e85b7cba99c8f5737 | [
"MIT"
] | null | null | null |
# Create your tests here.
from article.db_manager.article_manager import create_article_db
from utils.api.tests import APIClient, APITestCase
from utils.constants import ArticleTypeChoice
from utils.shortcuts import rand_str
def mock_create_article(title=None, content=None, art_type=None, owner_id=None):
title = title or rand_str(type='str')
content = content or rand_str(type='str')
art_type = art_type or ArticleTypeChoice[0][1]
owner_id = owner_id or 1
return create_article_db(title, content, art_type, owner_id)
class ArticleViewTest(APITestCase):
def setUp(self):
self.client = APIClient()
def test_create_article_view(self):
self.create_user('maxin', 'password', login=True)
article = mock_create_article()
result = self.client.get('/api/article/', data={'article_id': 1}).json()
self.assertEqual(result['result'], 'successful')
self.assertEqual(result['data']['title'], article.title)
| 34.928571 | 80 | 0.723926 | 433 | 0.44274 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.114519 |
8c63e675baf1b7e7f373786c7af3c27f18480a82 | 756 | py | Python | util/geometry.py | c-ali/boomgan | 0fd1d13149d8d5719d12aa36f09f46461ca29dbb | [
"MIT"
] | 3 | 2022-03-14T12:41:16.000Z | 2022-03-19T01:11:43.000Z | util/geometry.py | c-ali/boomgan | 0fd1d13149d8d5719d12aa36f09f46461ca29dbb | [
"MIT"
] | null | null | null | util/geometry.py | c-ali/boomgan | 0fd1d13149d8d5719d12aa36f09f46461ca29dbb | [
"MIT"
] | 1 | 2022-03-14T12:41:18.000Z | 2022-03-14T12:41:18.000Z | import numpy as np
def orthogonalize(normal, non_ortho):
    h = np.dot(normal, non_ortho)  # component of non_ortho along the (unit) normal
    return non_ortho - normal * h
def make_orthonormal_vector(normal, dims=512):
# random unit vector
rand_dir = np.random.randn(dims)
# make orthonormal
result = orthogonalize(normal, rand_dir)
return result / np.linalg.norm(result)
def random_circle(radius, ndim):
'''Given a radius, parametrizes a random circle'''
n1 = np.random.randn(ndim)
n1 /= np.linalg.norm(n1)
n2 = make_orthonormal_vector(n1, ndim)
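    # circle(theta) = radius * (n1 * cos(theta) + n2 * sin(theta)): points on a circle
    # lying in the random 2-D plane spanned by the orthonormal pair (n1, n2)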
def circle(theta):
return np.repeat(n1[None, :], theta.shape[0], axis=0) * np.cos(theta)[:, None] * radius + np.repeat(n2[None, :], theta.shape[0], axis=0) * np.sin(theta)[:, None] * radius
return circle
| 29.076923 | 178 | 0.665344 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.116402 |
8c647caabdd38ccf3ebcf3bcf1fbd6e8fff030ed | 45,129 | py | Python | services/models.py | hanaahajj/Serviceinfo_hanaa | 829de07b39fbf17c102799edb9d48a88b11a7540 | [
"BSD-3-Clause"
] | null | null | null | services/models.py | hanaahajj/Serviceinfo_hanaa | 829de07b39fbf17c102799edb9d48a88b11a7540 | [
"BSD-3-Clause"
] | null | null | null | services/models.py | hanaahajj/Serviceinfo_hanaa | 829de07b39fbf17c102799edb9d48a88b11a7540 | [
"BSD-3-Clause"
] | null | null | null | from collections import defaultdict
from django.conf import settings
from django.contrib.gis.db import models
from django.contrib.gis.geos import Point
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.core.validators import MinValueValidator, MaxValueValidator, RegexValidator
from django.db.transaction import atomic
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _, get_language
from sorl.thumbnail import ImageField
from sorl.thumbnail.shortcuts import get_thumbnail
from . import jira_support
from .tasks import email_provider_about_service_approval_task
from .utils import absolute_url, get_path_to_service
class NameInCurrentLanguageMixin(object):
@property
def name(self):
# Try to return the name field of the currently selected language
# if we have such a field and it has something in it.
# Otherwise, punt and return the first of the English, Arabic, or
# French names that has anything in it.
language = get_language()
field_name = 'name_%s' % language[:2]
if hasattr(self, field_name) and getattr(self, field_name):
return getattr(self, field_name)
return self.name_en or self.name_ar or self.name_fr
def __str__(self):
return self.name
class ProviderType(NameInCurrentLanguageMixin, models.Model):
number = models.IntegerField(unique=True)
name_en = models.CharField(
_("name in English"),
max_length=256,
default='',
blank=True,
)
name_ar = models.CharField(
_("name in Arabic"),
max_length=256,
default='',
blank=True,
)
name_fr = models.CharField(
_("name in French"),
max_length=256,
default='',
blank=True,
)
def get_api_url(self):
"""Return the PATH part of the URL to access this object using the API"""
return reverse('providertype-detail', args=[self.id])
def at_least_one_letter(s):
return any([c.isalpha() for c in s])
def blank_or_at_least_one_letter(s):
return s == '' or at_least_one_letter(s)
class Provider(NameInCurrentLanguageMixin, models.Model):
name_en = models.CharField(
# Translators: Provider name
_("name in English"),
max_length=256, # Length is a guess
default='',
blank=True,
validators=[blank_or_at_least_one_letter]
)
name_ar = models.CharField(
# Translators: Provider name
_("name in Arabic"),
max_length=256, # Length is a guess
default='',
blank=True,
validators=[blank_or_at_least_one_letter]
)
name_fr = models.CharField(
# Translators: Provider name
_("name in French"),
max_length=256, # Length is a guess
default='',
blank=True,
validators=[blank_or_at_least_one_letter]
)
type = models.ForeignKey(
ProviderType,
verbose_name=_("type"),
)
phone_number = models.CharField(
_("phone number"),
max_length=20,
validators=[
RegexValidator(settings.PHONE_NUMBER_REGEX)
]
)
website = models.URLField(
_("website"),
blank=True,
default='',
)
description_en = models.TextField(
# Translators: Provider description
_("description in English"),
default='',
blank=True,
)
description_ar = models.TextField(
# Translators: Provider description
_("description in Arabic"),
default='',
blank=True,
)
description_fr = models.TextField(
# Translators: Provider description
_("description in French"),
default='',
blank=True,
)
user = models.OneToOneField(
to=settings.AUTH_USER_MODEL,
verbose_name=_('user'),
help_text=_('user account for this provider'),
)
number_of_monthly_beneficiaries = models.IntegerField(
_("number of targeted beneficiaries monthly"),
blank=True, null=True,
validators=[
MinValueValidator(0),
MaxValueValidator(1000000)
]
)
focal_point_name_en = models.CharField(
_("focal point name in English"),
max_length=256, # Length is a guess
default='',
blank=True,
validators=[blank_or_at_least_one_letter]
)
focal_point_name_ar = models.CharField(
_("focal point name in Arabic"),
max_length=256, # Length is a guess
default='',
blank=True,
validators=[blank_or_at_least_one_letter]
)
focal_point_name_fr = models.CharField(
_("focal point name in French"),
max_length=256, # Length is a guess
default='',
blank=True,
validators=[blank_or_at_least_one_letter]
)
focal_point_phone_number = models.CharField(
_("focal point phone number"),
max_length=20,
validators=[
RegexValidator(settings.PHONE_NUMBER_REGEX)
]
)
address_en = models.TextField(
_("provider address in English"),
default='',
blank=True,
)
address_ar = models.TextField(
_("provider address in Arabic"),
default='',
blank=True,
)
address_fr = models.TextField(
_("provider address in French"),
default='',
blank=True,
)
def get_api_url(self):
"""Return the PATH part of the URL to access this object using the API"""
return reverse('provider-detail', args=[self.id])
def get_fetch_url(self):
"""Return the PATH part of the URL to fetch this object using the API"""
return reverse('provider-fetch', args=[self.id])
def notify_jira_of_change(self):
JiraUpdateRecord.objects.create(
update_type=JiraUpdateRecord.PROVIDER_CHANGE,
provider=self
)
def get_admin_edit_url(self):
"""Return the PATH part of the URL to edit this object in the admin"""
return reverse('admin:services_provider_change', args=[self.id])
class ServiceAreaManager(models.GeoManager):
def top_level(self):
"""
Return the top-level areas, i.e. the ones with no parents
"""
return super().get_queryset().filter(parent=None)
class ServiceArea(NameInCurrentLanguageMixin, models.Model):
name_en = models.CharField(
_("name in English"),
max_length=256,
default='',
blank=True,
)
name_ar = models.CharField(
_("name in Arabic"),
max_length=256,
default='',
blank=True,
)
name_fr = models.CharField(
_("name in French"),
max_length=256,
default='',
blank=True,
)
parent = models.ForeignKey(
to='self',
verbose_name=_('parent area'),
help_text=_('the area that contains this area'),
null=True,
blank=True,
related_name='children',
)
lebanon_region = models.ForeignKey(
'LebanonRegion',
null=True,
default=None,
on_delete=models.SET_NULL,
)
objects = ServiceAreaManager()
@property
def centroid(self):
return self.lebanon_region.centroid
def get_api_url(self):
return reverse('servicearea-detail', args=[self.id])
class SelectionCriterion(models.Model):
"""
A selection criterion limits who can receive the service.
It's just a text string. E.g. "age under 18".
"""
text_en = models.CharField(max_length=100, blank=True, default='')
text_fr = models.CharField(max_length=100, blank=True, default='')
text_ar = models.CharField(max_length=100, blank=True, default='')
service = models.ForeignKey('services.Service', related_name='selection_criteria')
class Meta(object):
verbose_name_plural = _("selection criteria")
def clean(self):
if not any([self.text_en, self.text_fr, self.text_ar]):
raise ValidationError(_("Selection criterion must have text in at least "
"one language"))
def __str__(self):
return ', '.join([text for text in [self.text_en, self.text_ar, self.text_fr] if text])
def get_api_url(self):
return reverse('selectioncriterion-detail', args=[self.id])
class ServiceType(NameInCurrentLanguageMixin, models.Model):
number = models.IntegerField(unique=True)
icon = models.ImageField(
upload_to='service-type-icons',
verbose_name=_("icon"),
blank=True,
)
name_en = models.CharField(
_("name in English"),
max_length=256,
default='',
blank=True,
)
name_ar = models.CharField(
_("name in Arabic"),
max_length=256,
default='',
blank=True,
)
name_fr = models.CharField(
_("name in French"),
max_length=256,
default='',
blank=True,
)
comments_en = models.CharField(
_("comments in English"),
max_length=512,
default='',
blank=True,
)
comments_ar = models.CharField(
_("comments in Arabic"),
max_length=512,
default='',
blank=True,
)
comments_fr = models.CharField(
_("comments in French"),
max_length=512,
default='',
blank=True,
)
class Meta(object):
ordering = ['number', ]
def get_api_url(self):
return reverse('servicetype-detail', args=[self.id])
def get_icon_url(self):
"""Return URL PATH of the icon image for this record"""
# For convenience of serializers
if self.icon:
return self.icon.url
class Service(NameInCurrentLanguageMixin, models.Model):
provider = models.ForeignKey(
Provider,
verbose_name=_("provider"),
)
name_en = models.CharField(
# Translators: Service name
_("name in English"),
max_length=256,
default='',
blank=True,
)
name_ar = models.CharField(
# Translators: Service name
_("name in Arabic"),
max_length=256,
default='',
blank=True,
)
name_fr = models.CharField(
# Translators: Service name
_("name in French"),
max_length=256,
default='',
blank=True,
)
area_of_service = models.ForeignKey(
ServiceArea,
verbose_name=_("area of service"),
)
description_en = models.TextField(
# Translators: Service description
_("description in English"),
default='',
blank=True,
)
description_ar = models.TextField(
# Translators: Service description
_("description in Arabic"),
default='',
blank=True,
)
description_fr = models.TextField(
# Translators: Service description
_("description in French"),
default='',
blank=True,
)
additional_info_en = models.TextField(
_("additional information in English"),
blank=True,
default='',
)
additional_info_ar = models.TextField(
_("additional information in Arabic"),
blank=True,
default='',
)
additional_info_fr = models.TextField(
_("additional information in French"),
blank=True,
default='',
)
cost_of_service = models.TextField(
_("cost of service"),
blank=True,
default='',
)
is_mobile = models.BooleanField(
_("mobile service"),
blank=True,
default=False,
)
# Note: we don't let multiple non-archived versions of a service record pile up
# there should be no more than two, one in current status and/or one in some other
# status.
STATUS_DRAFT = 'draft'
STATUS_CURRENT = 'current'
STATUS_REJECTED = 'rejected'
STATUS_CANCELED = 'canceled'
STATUS_ARCHIVED = 'archived'
STATUS_CHOICES = (
# New service or edit of existing service is pending approval
(STATUS_DRAFT, _('draft')),
# This Service has been approved and not superseded. Only services with
# status 'current' appear in the public interface.
(STATUS_CURRENT, _('current')),
# The staff has rejected the service submission or edit
(STATUS_REJECTED, _('rejected')),
# The provider has canceled service. They can do this on draft or current services.
# It no longer appears in the public interface.
(STATUS_CANCELED, _('canceled')),
# The record is obsolete and we don't want to see it anymore
(STATUS_ARCHIVED, _('archived')),
)
status = models.CharField(
_('status'),
max_length=10,
choices=STATUS_CHOICES,
default=STATUS_DRAFT,
)
update_of = models.ForeignKey(
'self',
help_text=_('If a service record represents a modification of another service '
'record, this field links to that other record.'),
null=True,
blank=True,
related_name='updates',
)
location = models.PointField(
_('location'),
blank=True,
null=True,
)
# Open & close hours by day. If None, service is closed that day.
sunday_open = models.TimeField(null=True, blank=True)
sunday_close = models.TimeField(null=True, blank=True)
monday_open = models.TimeField(null=True, blank=True)
monday_close = models.TimeField(null=True, blank=True)
tuesday_open = models.TimeField(null=True, blank=True)
tuesday_close = models.TimeField(null=True, blank=True)
wednesday_open = models.TimeField(null=True, blank=True)
wednesday_close = models.TimeField(null=True, blank=True)
thursday_open = models.TimeField(null=True, blank=True)
thursday_close = models.TimeField(null=True, blank=True)
friday_open = models.TimeField(null=True, blank=True)
friday_close = models.TimeField(null=True, blank=True)
saturday_open = models.TimeField(null=True, blank=True)
saturday_close = models.TimeField(null=True, blank=True)
type = models.ForeignKey(
ServiceType,
verbose_name=_("type"),
)
objects = models.GeoManager()
image = ImageField(
upload_to="service-images/",
help_text=_(
"Upload an image file (GIF, JPEG, PNG, WebP) with a square aspect "
"ratio (Width equal to Height). The image size should be at least "
"1280 x 1280 for best results. SVG files are not supported."),
blank=True,
default='',
)
def get_api_url(self):
return reverse('service-detail', args=[self.id])
def get_absolute_url(self):
"""Called from CMS-related code to get app view from a search hit"""
return get_path_to_service(self.id)
def get_provider_fetch_url(self):
# For convenience of the serializer
return self.provider.get_fetch_url()
def get_admin_edit_url(self):
return reverse('admin:services_service_change', args=[self.id])
def email_provider_about_approval(self):
"""Schedule a task to send an email to the provider"""
email_provider_about_service_approval_task.delay(self.pk)
def may_approve(self):
return self.status == self.STATUS_DRAFT
def may_reject(self):
return self.status == self.STATUS_DRAFT
def cancel(self):
"""
Cancel a pending service update, or withdraw a current service
from the directory.
"""
# First cancel any pending changes to this service
for pending_change in self.updates.filter(status=Service.STATUS_DRAFT):
pending_change.cancel()
previous_status = self.status
self.status = Service.STATUS_CANCELED
self.save()
if previous_status == Service.STATUS_DRAFT:
JiraUpdateRecord.objects.create(
service=self,
update_type=JiraUpdateRecord.CANCEL_DRAFT_SERVICE)
elif previous_status == Service.STATUS_CURRENT:
JiraUpdateRecord.objects.create(
service=self,
update_type=JiraUpdateRecord.CANCEL_CURRENT_SERVICE)
def save(self, *args, **kwargs):
new_service = self.pk is None
superseded_draft = None
with atomic(): # All or none of this
if (new_service
and self.status == Service.STATUS_DRAFT
and self.update_of
and self.update_of.status == Service.STATUS_DRAFT):
# Any edit of a record that's still in review means we're
# superseding one draft with another.
superseded_draft = self.update_of
# Bump this one up a level - we're replacing a pending change.
self.update_of = superseded_draft.update_of
# If it's mobile, force the location to the center of the area
if self.is_mobile:
self.location = self.area_of_service.centroid
super().save(*args, **kwargs)
if new_service:
# Now we've safely saved this new record.
# Did we replace an existing draft? Archive the previous one.
if superseded_draft:
superseded_draft.status = Service.STATUS_ARCHIVED
superseded_draft.save()
JiraUpdateRecord.objects.create(
service=self,
superseded_draft=superseded_draft,
update_type=JiraUpdateRecord.SUPERSEDED_DRAFT)
elif self.update_of:
# Submitted a proposed change to an existing service
JiraUpdateRecord.objects.create(
service=self,
update_type=JiraUpdateRecord.CHANGE_SERVICE)
else:
# Submitted a new service
JiraUpdateRecord.objects.create(
service=self,
update_type=JiraUpdateRecord.NEW_SERVICE)
def validate_for_approval(self):
"""
Raise a ValidationError if this service's data doesn't look valid to
be a current, approved service.
Current checks:
* self.full_clean()
* .location must be set
* at least one language field for each of several translated fields must be set
* status must be DRAFT
"""
try:
self.full_clean()
except ValidationError as e:
errs = e.error_dict
else:
errs = {}
if not self.location:
errs['location'] = [_('This field is required.')]
for field in ['name', 'description']:
if not any([getattr(self, '%s_%s' % (field, lang)) for lang in ['en', 'ar', 'fr']]):
errs[field] = [_('This field is required.')]
if self.status != Service.STATUS_DRAFT:
errs['status'] = [_('Only services in draft status may be approved.')]
if errs:
raise ValidationError(errs)
def staff_approve(self, staff_user):
"""
Staff approving the service (new or changed).
:param staff_user: The user who approved
:raises: ValidationErrror
"""
# Make sure it's ready
self.validate_for_approval()
# if there's already a current record, archive it
if self.update_of and self.update_of.status == Service.STATUS_CURRENT:
self.update_of.status = Service.STATUS_ARCHIVED
self.update_of.save()
self.status = Service.STATUS_CURRENT
self.save()
self.email_provider_about_approval()
JiraUpdateRecord.objects.create(
service=self,
update_type=JiraUpdateRecord.APPROVE_SERVICE,
by=staff_user
)
def validate_for_rejecting(self):
"""
Raise a ValidationError if this service's data doesn't look valid to
be rejected.
Current checks:
* self.full_clean()
* status must be DRAFT
"""
try:
self.full_clean()
except ValidationError as e:
errs = e.error_dict
else:
errs = {}
if self.status != Service.STATUS_DRAFT:
errs['status'] = [_('Only services in draft status may be rejected.')]
if errs:
raise ValidationError(errs)
def staff_reject(self, staff_user):
"""
Staff rejecting the service (new or changed)
:param staff_user: The user who rejected
"""
# Make sure it's ready
self.validate_for_rejecting()
self.status = Service.STATUS_REJECTED
self.save()
JiraUpdateRecord.objects.create(
service=self,
update_type=JiraUpdateRecord.REJECT_SERVICE,
by=staff_user
)
@property
def longitude(self):
if self.location:
return self.location[0]
@longitude.setter
def longitude(self, value):
if self.location is None:
self.location = Point(0, 0)
self.location[0] = value
@property
def latitude(self):
if self.location:
return self.location[1]
@latitude.setter
def latitude(self, value):
if self.location is None:
self.location = Point(0, 0)
self.location[1] = value
def get_thumbnail_url(self, width=100, height=100):
"""Shortcut to get the URL for an image thumbnail."""
if self.image and hasattr(self.image, 'url'):
frmt = "PNG" if self.image.path.lower().endswith('.png') else "JPEG"
size = "{}x{}".format(width, height)
thumbnail = get_thumbnail(self.image, size, upscale=False, format=frmt, crop='center')
return thumbnail.url
return None
class JiraUpdateRecord(models.Model):
service = models.ForeignKey(Service, blank=True, null=True, related_name='jira_records')
superseded_draft = models.ForeignKey(Service, blank=True, null=True)
provider = models.ForeignKey(Provider, blank=True, null=True, related_name='jira_records')
feedback = models.ForeignKey(
'services.Feedback', blank=True, null=True, related_name='jira_records')
request_for_service = models.ForeignKey(
'services.RequestForService', blank=True, null=True, related_name='jira_records')
PROVIDER_CHANGE = 'provider-change'
NEW_SERVICE = 'new-service'
CHANGE_SERVICE = 'change-service'
CANCEL_DRAFT_SERVICE = 'cancel-draft-service'
CANCEL_CURRENT_SERVICE = 'cancel-current-service'
SUPERSEDED_DRAFT = 'superseded-draft'
APPROVE_SERVICE = 'approve-service'
REJECT_SERVICE = 'rejected-service'
FEEDBACK = 'feedback'
REQUEST_FOR_SERVICE = 'request-for-service'
UPDATE_CHOICES = (
(PROVIDER_CHANGE, _('Provider updated their information')),
(NEW_SERVICE, _('New service submitted by provider')),
(CHANGE_SERVICE, _('Change to existing service submitted by provider')),
(CANCEL_DRAFT_SERVICE, _('Provider canceled a draft service')),
(CANCEL_CURRENT_SERVICE, _('Provider canceled a current service')),
(SUPERSEDED_DRAFT, _('Provider superseded a previous draft')),
(APPROVE_SERVICE, _('Staff approved a new or changed service')),
(REJECT_SERVICE, _('Staff rejected a new or changed service')),
(FEEDBACK, _('User submitted feedback')),
(REQUEST_FOR_SERVICE, _('User submitted request for service.')),
)
# Update types that indicate a new Service record was created
NEW_SERVICE_RECORD_UPDATE_TYPES = [
NEW_SERVICE, CHANGE_SERVICE, SUPERSEDED_DRAFT,
]
# Update types that indicate a draft or service is being canceled/deleted
END_SERVICE_UPDATE_TYPES = [
CANCEL_DRAFT_SERVICE, CANCEL_CURRENT_SERVICE,
]
STAFF_ACTION_SERVICE_UPDATE_TYPES = [
APPROVE_SERVICE, REJECT_SERVICE
]
SERVICE_CHANGE_UPDATE_TYPES = (
NEW_SERVICE_RECORD_UPDATE_TYPES + END_SERVICE_UPDATE_TYPES
+ STAFF_ACTION_SERVICE_UPDATE_TYPES
)
PROVIDER_CHANGE_UPDATE_TYPES = [
PROVIDER_CHANGE,
]
NEW_JIRA_RECORD_UPDATE_TYPES = [
NEW_SERVICE, CHANGE_SERVICE, CANCEL_CURRENT_SERVICE, PROVIDER_CHANGE
]
update_type = models.CharField(
_('update type'),
max_length=max([len(x[0]) for x in UPDATE_CHOICES]),
choices=UPDATE_CHOICES,
)
jira_issue_key = models.CharField(
_("JIRA issue"),
max_length=256,
blank=True,
default='')
by = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True,
blank=True,
)
class Meta(object):
        # The service update types can each only happen once per service
unique_together = (('service', 'update_type'),)
def save(self, *args, **kwargs):
errors = []
is_new = self.pk is None
if self.update_type == '':
errors.append('must have a non-blank update_type')
elif self.update_type == self.FEEDBACK:
if not self.feedback:
errors.append('%s must specify feedback' % self.update_type)
elif self.update_type == self.REQUEST_FOR_SERVICE:
if not self.request_for_service:
errors.append('%s must specify request for service' % self.update_type)
elif self.update_type in self.PROVIDER_CHANGE_UPDATE_TYPES:
if not self.provider:
errors.append('%s must specify provider' % self.update_type)
if self.service:
errors.append('%s must not specify service' % self.update_type)
elif self.update_type in self.SERVICE_CHANGE_UPDATE_TYPES:
if self.service:
if self.update_type == self.NEW_SERVICE and self.service.update_of:
errors.append('%s must not specify a service that is an update of another'
% self.update_type)
# If we're not creating a new record, be more tolerant; the service might
# have been updated one way or another.
if (is_new and self.update_type == self.CHANGE_SERVICE
and not self.service.update_of):
errors.append('%s must specify a service that is an update of another'
% self.update_type)
else:
errors.append('%s must specify service' % self.update_type)
if self.provider:
errors.append('%s must not specify provider' % self.update_type)
if self.update_type == self.SUPERSEDED_DRAFT and not self.superseded_draft:
                errors.append('%s must specify superseded draft service' % self.update_type)
else:
errors.append('unrecognized update_type: %s' % self.update_type)
if self.update_type in self.STAFF_ACTION_SERVICE_UPDATE_TYPES:
if not self.by:
                errors.append('%s must specify user in "by" field' % self.update_type)
if errors:
raise Exception('%s cannot be saved: %s' % (str(self), ', '.join(e for e in errors)))
super().save(*args, **kwargs)
def do_jira_work(self, jira=None):
sentinel_value = 'PENDING'
# Bail out early if we don't yet have a pk, if we already have a JIRA
# issue key set, or if some other thread is already working on getting
# an issue created/updated.
if not self.pk or JiraUpdateRecord.objects.filter(pk=self.pk, jira_issue_key='').update(
jira_issue_key=sentinel_value) != 1:
return
try:
if not jira:
jira = jira_support.get_jira()
if self.update_type in JiraUpdateRecord.NEW_JIRA_RECORD_UPDATE_TYPES:
kwargs = jira_support.default_newissue_kwargs()
service = None
service_url = None
change_type = {
JiraUpdateRecord.NEW_SERVICE: 'New service',
JiraUpdateRecord.CHANGE_SERVICE: 'Changed service',
JiraUpdateRecord.CANCEL_CURRENT_SERVICE: 'Canceled service',
JiraUpdateRecord.PROVIDER_CHANGE: 'Changed provider',
}[self.update_type]
if self.update_type in JiraUpdateRecord.SERVICE_CHANGE_UPDATE_TYPES:
service = self.service
service_url = absolute_url(service.get_admin_edit_url())
provider = self.service.provider
elif self.update_type in self.PROVIDER_CHANGE_UPDATE_TYPES:
provider = self.provider
kwargs['summary'] = '%s from %s' % (change_type, provider)
template_name = {
JiraUpdateRecord.NEW_SERVICE: 'jira/new_service.txt',
JiraUpdateRecord.CHANGE_SERVICE: 'jira/changed_service.txt',
JiraUpdateRecord.CANCEL_CURRENT_SERVICE: 'jira/canceled_service.txt',
JiraUpdateRecord.PROVIDER_CHANGE: 'jira/changed_provider.txt',
}[self.update_type]
context = {
'site': Site.objects.get_current(),
'provider': provider,
'provider_url': absolute_url(provider.get_admin_edit_url()),
'service': service,
'service_url': service_url,
}
if service and service.update_of:
context['service_parent_url'] = \
absolute_url(service.update_of.get_admin_edit_url())
kwargs['description'] = render_to_string(template_name, context)
new_issue = jira.create_issue(**kwargs)
self.jira_issue_key = new_issue.key
self.save()
elif self.update_type == self.SUPERSEDED_DRAFT:
# Track down the issue that's already been created so we
# can comment on it.
previous_record = JiraUpdateRecord.objects.get(service=self.superseded_draft)
issue_key = previous_record.jira_issue_key
context = {
'service': self.service,
'service_url': absolute_url(self.service.get_admin_edit_url()),
}
comment = render_to_string('jira/superseded_draft.txt', context)
jira.add_comment(issue_key, comment)
self.jira_issue_key = issue_key
self.save()
elif self.update_type == self.CANCEL_DRAFT_SERVICE:
# Track down the issue that's already been created so we
# can comment on it.
previous_record = JiraUpdateRecord.objects.get(
update_type__in=JiraUpdateRecord.NEW_SERVICE_RECORD_UPDATE_TYPES,
service=self.service
)
issue_key = previous_record.jira_issue_key
comment = 'Pending draft change was canceled by the provider.'
jira.add_comment(issue_key, comment)
self.jira_issue_key = issue_key
self.save()
elif self.update_type in self.STAFF_ACTION_SERVICE_UPDATE_TYPES:
# Track down the issue that's already been created so we
# can comment on it.
previous_record = JiraUpdateRecord.objects.get(
update_type__in=JiraUpdateRecord.NEW_SERVICE_RECORD_UPDATE_TYPES,
service=self.service
)
issue_key = previous_record.jira_issue_key
messages = {
(self.NEW_SERVICE, self.APPROVE_SERVICE):
"The new service was approved by %s.",
(self.NEW_SERVICE, self.REJECT_SERVICE):
"The new service was rejected by %s.",
(self.CHANGE_SERVICE, self.APPROVE_SERVICE):
"The service change was approved by %s.",
(self.CHANGE_SERVICE, self.REJECT_SERVICE):
"The service change was rejected by %s.",
}
comment = messages.get((previous_record.update_type, self.update_type),
"The service's state was updated by %s.")
comment = comment % self.by.email
jira.add_comment(issue_key, comment)
self.jira_issue_key = issue_key
self.save()
elif self.update_type == self.FEEDBACK:
kwargs = jira_support.default_feedback_kwargs()
kwargs['summary'] = 'Feedback about %s' % (self.feedback.service,)
context = {
'site': Site.objects.get_current(),
'feedback': self.feedback,
'service': self.feedback.service,
'service_url': absolute_url(self.feedback.service.get_admin_edit_url()),
'provider': self.feedback.service.provider,
}
template_name = 'jira/feedback.txt'
kwargs['description'] = render_to_string(template_name, context)
new_issue = jira.create_issue(**kwargs)
self.jira_issue_key = new_issue.key
self.save()
elif self.update_type == self.REQUEST_FOR_SERVICE:
kwargs = jira_support.default_request_for_service_kwargs()
kwargs['summary'] = 'Request service to be added: %s' % (
self.request_for_service.service_name,)
context = {
'rfs': self.request_for_service,
'rfs_url': absolute_url(self.request_for_service.get_admin_edit_url()),
}
template_name = 'jira/request_for_service.txt'
kwargs['description'] = render_to_string(template_name, context)
new_issue = jira.create_issue(**kwargs)
self.jira_issue_key = new_issue.key
self.save()
finally:
# If we've not managed to save a valid JIRA issue key, reset value to
# empty string so it'll be tried again later.
JiraUpdateRecord.objects.filter(pk=self.pk, jira_issue_key=sentinel_value).update(
jira_issue_key='')
#
# FEEDBACK
#
class Nationality(NameInCurrentLanguageMixin, models.Model):
number = models.IntegerField(unique=True)
name_en = models.CharField(
_("name in English"),
max_length=256,
default='',
blank=True,
)
name_ar = models.CharField(
_("name in Arabic"),
max_length=256,
default='',
blank=True,
)
name_fr = models.CharField(
_("name in French"),
max_length=256,
default='',
blank=True,
)
class Meta:
verbose_name_plural = _("nationalities")
def get_api_url(self):
return reverse('nationality-detail', args=[self.id])
class Feedback(models.Model):
# About the user
name = models.CharField(
_("Name"),
max_length=256
)
phone_number = models.CharField(
_("Phone Number (NN-NNNNNN)"),
max_length=20,
validators=[
RegexValidator(settings.PHONE_NUMBER_REGEX)
]
)
nationality = models.ForeignKey(
verbose_name=_("Nationality"),
to=Nationality,
)
area_of_residence = models.ForeignKey(
ServiceArea,
verbose_name=_("Area of residence"),
)
# The service getting feedback
service = models.ForeignKey(
verbose_name=_("Service"),
to=Service,
)
# Questions about delivery of service
delivered = models.BooleanField(
help_text=_("Was service delivered?"),
default=False, # Don't really want a default here, but Django screams at you
)
quality = models.SmallIntegerField(
help_text=_("How would you rate the quality of the service you received (from 1 to 5, "
"where 5 is the highest rating possible)?"),
validators=[
MinValueValidator(1),
MaxValueValidator(5)
],
default=None,
blank=True,
null=True,
)
non_delivery_explained = models.CharField(
# This is required only if 'delivered' is false; so needs to be optional here
# and we'll validate that elsewhere
help_text=_("Did you receive a clear explanation for why the service you "
"sought was not delivered to you?"),
blank=True,
default=None,
null=True,
max_length=8,
choices=[
('no', _("No explanation")),
('unclear', _("Explanation was not clear")),
('unfair', _("Explanation was not fair")),
('yes', _("Clear and appropriate explanation")),
]
)
wait_time = models.CharField(
# Presumably, only required if 'delivered' is true
help_text=_("How long did you wait for the service to be delivered, after "
"contacting the service provider?"),
blank=True,
null=True,
default=None,
max_length=12,
choices=[
('lesshour', _("Less than 1 hour")),
('uptotwodays', _("Up to 2 days")),
('3-7days', _("3-7 days")),
('1-2weeks', _("1-2 weeks")),
('more', _("More than 2 weeks")),
]
)
wait_time_satisfaction = models.SmallIntegerField(
help_text=_("How do you rate your satisfaction with the time that you waited for "
"the service to be delivered (from 1 to 5, where 5 is the highest "
"rating possible)?"),
default=None,
null=True,
blank=True,
validators=[
MinValueValidator(1),
MaxValueValidator(5)
]
)
difficulty_contacting = models.CharField(
help_text=_("Did you experience difficulties contacting the provider of "
"the service you needed?"),
max_length=20,
choices=[
('no', _("No")),
('didntknow', _("Did not know how to contact them")),
('nophoneresponse', _("Tried to contact them by phone but received no response")),
('noresponse', _("Tried to contact them in person but received no response or "
"did not find their office")),
('unhelpful', _("Contacted them but response was unhelpful")),
('other', _("Other")),
]
)
other_difficulties = models.TextField(
# Only if 'other' selected above
help_text=_("Other difficulties contacting the service provider"),
blank=True,
default='',
)
staff_satisfaction = models.SmallIntegerField(
help_text=_("How would you rate your satisfaction with the staff of the organization "
"that provided services to you, (from 1 to 5, where 5 is the highest "
"rating possible)?"),
blank=True, # Only required if service was delivered
null=True,
default=None,
validators=[
MinValueValidator(1),
MaxValueValidator(5)
]
)
extra_comments = models.TextField(
help_text=_("Other comments"),
default='',
blank=True,
)
anonymous = models.BooleanField(
help_text=_("I want my feedback to be anonymous to the service provider"),
default=False,
)
def clean(self):
errs = defaultdict(list)
if self.delivered:
if self.quality is None:
errs['quality'].append(
_("Quality field is required if you answered 'Yes' to "
"'Was the service you sought delivered to you?'."))
if self.wait_time is None:
errs['wait_time'].append(
_("An answer is required to 'How long did you wait for the service to "
"be delivered, after contacting the service provider?' "
"if you answered 'Yes' to "
"'Was the service you sought delivered to you?'."))
if self.wait_time_satisfaction is None:
errs['wait_time_satisfaction'].append(
_("An answer is required to 'How do you rate your satisfaction with the "
"time that you waited for the service to be delivered?' "
"if you answered 'Yes' to "
"'Was the service you sought delivered to you?'.")
)
else:
if self.non_delivery_explained is None:
errs['non_delivery_explained'].append(
_("An answer is required to 'Did you receive a clear explanation for "
"why the service you sought was not delivered to you?' "
"if you answered 'No' to "
"'Was the service you sought delivered to you?'."))
if self.difficulty_contacting == 'other':
if not self.other_difficulties:
errs['other_difficulties'].append(
_("An answer is required to 'Other difficulties contacting the service "
"provider' "
"if you answered 'Other' to 'Did you experience difficulties contacting "
"the provider of the service you needed?'")
)
if errs:
raise ValidationError(errs)
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
if self.pk:
JiraUpdateRecord.objects.create(
feedback=self,
update_type=JiraUpdateRecord.FEEDBACK
)
class RequestForService(models.Model):
provider_name = models.CharField(
max_length=256,
validators=[at_least_one_letter]
)
service_name = models.CharField(
max_length=256,
validators=[at_least_one_letter]
)
area_of_service = models.ForeignKey(
ServiceArea,
verbose_name=_("area of service"),
)
service_type = models.ForeignKey(
ServiceType,
verbose_name=_("type"),
)
address = models.TextField()
contact = models.TextField()
description = models.TextField()
rating = models.SmallIntegerField(
help_text=_("How would you rate the quality of the service you received (from 1 to 5, "
"where 5 is the highest rating possible)?"),
validators=[
MinValueValidator(1),
MaxValueValidator(5)
],
default=None,
blank=True,
null=True,
)
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
if self.pk:
JiraUpdateRecord.objects.create(
request_for_service=self,
update_type=JiraUpdateRecord.REQUEST_FOR_SERVICE
)
def get_admin_edit_url(self):
"""Return the PATH part of the URL to edit this object in the admin"""
return reverse('admin:services_requestforservice_change', args=[self.id])
class LebanonRegion(models.Model):
"""Common model to represent levels 1, 2, 3"""
level = models.IntegerField(
choices=[
            (1, _('Governorate')),
(2, _('District or CAZA')),
# (3, _('Cadastral')),
]
)
area = models.FloatField()
perimeter = models.FloatField()
    moh_na = models.CharField(max_length=25, help_text="Seems to be the governorate")
    moh_cod = models.CharField(max_length=5, help_text="Seems to be the governorate")
kada_name = models.CharField(max_length=28, blank=True, default='',
help_text="Seems to be the CAZA or district")
kadaa_code = models.CharField(max_length=10, blank=True, default='',
help_text="Seems to be the CAZA or district")
cad_name = models.CharField(max_length=60, blank=True, default='')
cad_code = models.CharField(max_length=16, blank=True, default='')
shape_leng = models.FloatField()
shape_area = models.FloatField()
geom = models.MultiPolygonField(srid=4326)
parent = models.ForeignKey('self', related_name='children', null=True, blank=True)
name = models.CharField(max_length=60)
code = models.CharField(max_length=16)
objects = models.GeoManager()
class Meta:
ordering = ['level', 'name']
def __str__(self):
return "%s %s" % (self.get_level_display(), self.name)
@property
def centroid(self):
return self.geom.centroid
| 36.335749 | 98 | 0.596379 | 44,143 | 0.978152 | 0 | 0 | 1,185 | 0.026258 | 0 | 0 | 12,039 | 0.266769 |
4fb19360602138faa0d22fba8d442fb8c1895535 | 1,792 | py | Python | accounts/models.py | barissaslan/eventhub | 37aa005b3f2eab9a2c6c48d30b2f7f4483fa6749 | [
"MIT"
] | 4 | 2017-11-13T19:51:25.000Z | 2020-12-08T17:19:31.000Z | accounts/models.py | barissaslan/eventhub | 37aa005b3f2eab9a2c6c48d30b2f7f4483fa6749 | [
"MIT"
] | null | null | null | accounts/models.py | barissaslan/eventhub | 37aa005b3f2eab9a2c6c48d30b2f7f4483fa6749 | [
"MIT"
] | 3 | 2018-05-19T08:37:42.000Z | 2020-12-08T17:19:34.000Z | from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser
from event.models import Event
class UserManager(BaseUserManager):
def create_user(self, email, password=None):
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
user = self.create_user(
email,
password=password,
)
user.is_admin = True
user.save(using=self._db)
return user
class User(AbstractBaseUser):
email = models.EmailField(
verbose_name='email address',
max_length=255,
unique=True,
)
first_name = models.CharField(max_length=255, blank=True, null=True)
last_name = models.CharField(max_length=255, blank=True, null=True)
cell_phone = models.CharField(max_length=30, blank=True, null=True)
date_of_birth = models.DateField(blank=True, null=True)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
def get_full_name(self):
if self.first_name and self.last_name:
return "{} {}".format(self.first_name, self.last_name)
else:
return self.email
def get_short_name(self):
return self.first_name
def __str__(self):
return self.email
def has_perm(self, perm, obj=None):
return True
def has_module_perms(self, app_label):
return True
@property
def is_staff(self):
return self.is_admin
| 25.971014 | 72 | 0.648996 | 1,652 | 0.921875 | 0 | 0 | 62 | 0.034598 | 0 | 0 | 63 | 0.035156 |
4fb1eb801d7587deecd0cef69ce7045da334f1cc | 161 | py | Python | tracer/test.py | leopiney/tscf | d98fbfe06abbf1d29458ddd147b7f1d99118e4ed | [
"MIT"
] | null | null | null | tracer/test.py | leopiney/tscf | d98fbfe06abbf1d29458ddd147b7f1d99118e4ed | [
"MIT"
] | null | null | null | tracer/test.py | leopiney/tscf | d98fbfe06abbf1d29458ddd147b7f1d99118e4ed | [
"MIT"
] | null | null | null | import numpy as np
from scipy.optimize import linear_sum_assignment
np.random.seed(0)
c = np.random.rand(128, 128)
row_ind, col_ind = linear_sum_assignment(c)
| 20.125 | 48 | 0.78882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4fb2d5f6091ab479c98c350959da092722caf376 | 287 | py | Python | Exercicios/matriz.py | beatrizflorenccio/Projects-Python | fc584167a2816dc89f22baef0fa0f780af796c98 | [
"MIT"
] | 1 | 2021-10-10T08:18:45.000Z | 2021-10-10T08:18:45.000Z | Exercicios/matriz.py | beatrizflorenccio/Projects-Python | fc584167a2816dc89f22baef0fa0f780af796c98 | [
"MIT"
] | null | null | null | Exercicios/matriz.py | beatrizflorenccio/Projects-Python | fc584167a2816dc89f22baef0fa0f780af796c98 | [
"MIT"
] | null | null | null | #MaBe
matriz = [[0, 0, 0], [0, 0, 0,], [0, 0, 0]]
for l in range(0, 3):
for c in range(0,3):
matriz[l][c] = int(input(f'Digite o valor da posição {(c, l)}: '))
for obj in range(0, 3):
for i in range(0, 3):
print(f'[{matriz[obj][i]}]', end=' ')
print()
| 22.076923 | 74 | 0.477352 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.242215 |
4fb2debb7f9b472451fb47af6368ed89d51ef079 | 968 | py | Python | iminuit/__init__.py | danielbrener/iminuit | e6d1cbc3d4d51e3556dbf95b569b62d6d9c38ad1 | [
"MIT"
] | 1 | 2018-10-02T14:52:37.000Z | 2018-10-02T14:52:37.000Z | iminuit/__init__.py | danielbrener/iminuit | e6d1cbc3d4d51e3556dbf95b569b62d6d9c38ad1 | [
"MIT"
] | null | null | null | iminuit/__init__.py | danielbrener/iminuit | e6d1cbc3d4d51e3556dbf95b569b62d6d9c38ad1 | [
"MIT"
] | null | null | null | """MINUIT from Python - Fitting like a boss
Basic usage example::
from iminuit import Minuit
def f(x, y, z):
return (x - 2) ** 2 + (y - 3) ** 2 + (z - 4) ** 2
m = Minuit(f)
m.migrad()
print(m.values) # {'x': 2,'y': 3,'z': 4}
print(m.errors) # {'x': 1,'y': 1,'z': 1}
Further information:
* Code: https://github.com/iminuit/iminuit
* Docs: https://iminuit.readthedocs.io
"""
__all__ = [
'Minuit',
'minimize',
'describe',
'Struct',
'__version__',
'test',
]
from ._libiminuit import Minuit
from ._minimize import minimize
from .util import describe, Struct
from .info import __version__
def test(args=None):
"""Execute the iminuit tests.
Requires pytest.
From the command line:
    python -c 'import iminuit; iminuit.test()'
"""
# http://pytest.org/latest/usage.html#calling-pytest-from-python-code
import pytest
    if args is None:
        args = ['-v', '--pyargs', 'iminuit']
pytest.main(args)
| 20.595745 | 73 | 0.599174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 695 | 0.717975 |
4fb3676c0934334a2485cd800af36848f5e51ef3 | 7,241 | py | Python | opendata_module/opmon_opendata/api/postgresql_manager.py | nordic-institute/X-Road-Metrics | 249d859466bf6065257cf8b3c27d0e9db4ab2378 | [
"MIT"
] | 2 | 2021-06-30T11:12:31.000Z | 2021-09-24T08:50:03.000Z | opendata_module/opmon_opendata/api/postgresql_manager.py | nordic-institute/X-Road-Metrics | 249d859466bf6065257cf8b3c27d0e9db4ab2378 | [
"MIT"
] | null | null | null | opendata_module/opmon_opendata/api/postgresql_manager.py | nordic-institute/X-Road-Metrics | 249d859466bf6065257cf8b3c27d0e9db4ab2378 | [
"MIT"
] | 2 | 2021-07-02T12:31:37.000Z | 2021-11-09T08:44:09.000Z | # The MIT License
# Copyright (c) 2021- Nordic Institute for Interoperability Solutions (NIIS)
# Copyright (c) 2017-2020 Estonian Information System Authority (RIA)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import psycopg2 as pg
from dateutil import relativedelta
class PostgreSQL_Manager(object):
def __init__(self, settings):
self._settings = settings['postgres']
self._table_name = settings['postgres']['table-name']
self._connection_string = self._get_connection_string()
self._field_name_map = self._get_field_name_map(settings['opendata']['field-descriptions'].keys())
self._logs_time_buffer = relativedelta.relativedelta(days=settings['opendata']['delay-days'])
def get_column_names_and_types(self):
with pg.connect(self._connection_string) as connection:
cursor = connection.cursor()
cursor.execute("SELECT column_name,data_type FROM information_schema.columns WHERE table_name = %s;",
(self._table_name,))
data = cursor.fetchall()
return [(self._field_name_map[name], type_) for name, type_ in data]
def get_data(self, constraints=None, order_by=None, columns=None, limit=None):
with pg.connect(self._connection_string) as connection:
cursor = connection.cursor()
subquery_name = 'T'
selected_columns_str = self._get_selected_columns_string(columns, subquery_name)
request_in_date_constraint_str, other_constraints_str = self._get_constraints_string(cursor, constraints,
subquery_name)
order_by_str = self._get_order_by_string(order_by, subquery_name)
limit_str = self._get_limit_string(cursor, limit)
cursor.execute(
("SELECT {selected_columns} FROM (SELECT * "
"FROM {table_name} {request_in_date_constraint}) as {subquery_name} {other_constraints}"
"{order_by} {limit};").format(
**{
'selected_columns': selected_columns_str,
'table_name': self._table_name,
'request_in_date_constraint': request_in_date_constraint_str,
'other_constraints': other_constraints_str,
'order_by': order_by_str,
'limit': limit_str,
'subquery_name': subquery_name}
)
)
data = cursor.fetchall()
return data
def get_min_and_max_dates(self):
with pg.connect(self._connection_string) as connection:
cursor = connection.cursor()
cursor.execute('SELECT min(requestindate), max(requestindate) FROM ' + self._table_name)
min_and_max = [date - self._logs_time_buffer for date in cursor.fetchone()]
return min_and_max
def _get_connection_string(self):
args = [
f"host={self._settings['host']}",
f"dbname={self._settings['database-name']}"
]
optional_settings = {key: self._settings.get(key) for key in ['port', 'user', 'password']}
optional_args = [f"{key}={value}" if value else "" for key, value in optional_settings.items()]
return ' '.join(args + optional_args)
def _get_database_settings(self, config):
settings = {'host_address': config['writer']['host_address'],
'port': config['writer']['port'],
'database_name': config['writer']['database_name'],
'user': config['writer']['user'],
'password': config['writer']['password']}
return settings
def _get_field_name_map(self, field_names):
return {field_name.lower(): field_name for field_name in field_names}
def _get_constraints_string(self, cursor, constraints, subquery_name):
if not constraints:
            # Return an empty pair so that the two-value unpacking in get_data() works
            # even when no constraints are given.
            return '', ''
        # Default to an empty string so that a missing requestInDate constraint does not
        # leak the literal "None" into the generated SQL.
        request_in_date_constraint = ''
other_constraint_parts = []
for constraint in constraints:
if constraint['column'] != 'requestInDate':
if constraint['value'] == 'None':
null_constraint = 'IS NULL' if constraint['operator'] == '=' else 'IS NOT NULL'
other_constraint_parts.append("{subquery_name}.{column} {null_constraint}".format(**{
'column': constraint['column'],
'null_constraint': null_constraint,
'subquery_name': subquery_name
}))
else:
other_constraint_parts.append(cursor.mogrify("{subquery_name}.{column} {operator} %s".format(**{
'column': constraint['column'].lower(),
'operator': constraint['operator'],
'subquery_name': subquery_name
}), (constraint['value'],)).decode('utf8'))
else:
request_in_date_constraint = 'WHERE ' + cursor.mogrify("{column} {operator} %s".format(**{
'column': constraint['column'].lower(),
'operator': constraint['operator']
}), (constraint['value'],)).decode('utf8')
other_constraints = ('WHERE ' + ' AND '.join(other_constraint_parts)) if other_constraint_parts else ''
return request_in_date_constraint, other_constraints
def _get_selected_columns_string(self, columns, subquery_name):
if not columns:
return '*'
else:
return ', '.join('{0}.{1}'.format(subquery_name, column.lower()) for column in columns)
def _get_order_by_string(self, order_by, subquery_name):
if not order_by:
return ''
return 'ORDER BY ' + ', '.join('{subquery_name}.{column} {order}'.format(**{
'subquery_name': subquery_name,
'column': clause['column'],
'order': clause['order']
}) for clause in order_by)
def _get_limit_string(self, cursor, limit):
return cursor.mogrify("LIMIT %s", (limit,)).decode('utf8')
| 46.121019 | 117 | 0.615523 | 5,940 | 0.820329 | 0 | 0 | 0 | 0 | 0 | 0 | 2,460 | 0.339732 |
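# Editor's usage sketch for PostgreSQL_Manager above (assumed settings layout and values
# inferred from the constructor and the private helpers; host, credentials, table and
# column names are placeholders, not taken from the original project).
from opmon_opendata.api.postgresql_manager import PostgreSQL_Manager

settings = {
    'postgres': {'host': 'localhost', 'port': 5432, 'user': 'reader', 'password': 'secret',
                 'database-name': 'opendata', 'table-name': 'logs'},
    'opendata': {'field-descriptions': {'requestInDate': {}, 'clientMemberCode': {}},
                 'delay-days': 10},
}
manager = PostgreSQL_Manager(settings)
rows = manager.get_data(
    constraints=[{'column': 'requestInDate', 'operator': '>=', 'value': '2021-01-01'},
                 {'column': 'clientMemberCode', 'operator': '=', 'value': '12345'}],
    order_by=[{'column': 'requestindate', 'order': 'DESC'}],
    columns=['requestInDate', 'clientMemberCode'],
    limit=100,
)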
4fb3d07bcc344ebefd9f84df21d8880d53f7089f | 3,456 | py | Python | koapy/backtrader/KrxHistoricalDailyPriceDataFromSQLite.py | resoliwan/koapy | b0616f252bb3588695dfb37c7d9b8580a65649a3 | [
"MIT"
] | 1 | 2021-09-25T22:33:01.000Z | 2021-09-25T22:33:01.000Z | koapy/backtrader/KrxHistoricalDailyPriceDataFromSQLite.py | resoliwan/koapy | b0616f252bb3588695dfb37c7d9b8580a65649a3 | [
"MIT"
] | null | null | null | koapy/backtrader/KrxHistoricalDailyPriceDataFromSQLite.py | resoliwan/koapy | b0616f252bb3588695dfb37c7d9b8580a65649a3 | [
"MIT"
] | 1 | 2021-11-12T15:33:29.000Z | 2021-11-12T15:33:29.000Z | import pandas as pd
from backtrader import TimeFrame, date2num
from sqlalchemy import create_engine, inspect
from tqdm import tqdm
from koapy.backtrader.SQLiteData import SQLiteData
from koapy.utils.data.KrxHistoricalDailyPriceDataForBacktestLoader import (
KrxHistoricalDailyPriceDataForBacktestLoader,
)
class KrxHistoricalDailyPriceDataFromSQLite(SQLiteData):
# pylint: disable=no-member
params = (
("engine", None),
("symbol", None),
("name", None),
("fromdate", None),
("todate", None),
("compression", 1),
("timeframe", TimeFrame.Days),
("calendar", None),
("timestampcolumn", 0),
("timestampcolumntimezone", None),
("lazy", False),
)
lines = (
"amount",
"marketcap",
"shares",
)
def __init__(self):
assert self.p.timeframe == TimeFrame.Days
assert self.p.compression == 1
self.p.tablename = self.p.tablename or self.p.symbol or None
self.p.name = self.p.name or self.p.symbol or self.p.tablename or ""
super().__init__()
def _load(self):
if self._cursor is None:
return False
try:
date, open_, high, low, close, volume, amount, marcap, shares = next(
self._cursor
)
except StopIteration:
return False
else:
dt = pd.Timestamp(date)
self.lines.datetime[0] = date2num(dt)
self.lines.open[0] = open_
self.lines.high[0] = high
self.lines.low[0] = low
self.lines.close[0] = close
self.lines.volume[0] = volume
self.lines.openinterest[0] = 0.0
self.lines.amount[0] = amount
self.lines.marketcap[0] = marcap
self.lines.shares[0] = shares
return True
@classmethod
def dump_from_store(
cls,
source_filename,
dest_filename,
symbols=None,
fromdate=None,
todate=None,
progress_bar=True,
):
loader = KrxHistoricalDailyPriceDataForBacktestLoader(source_filename)
if symbols is None:
symbols = loader.get_symbols()
engine = create_engine("sqlite:///" + dest_filename)
progress = tqdm(symbols, disable=not progress_bar)
for symbol in progress:
progress.set_description("Dumping Symbol [%s]" % symbol)
data = loader.load(symbol, start_time=fromdate, end_time=todate)
data.to_sql(symbol, engine, if_exists="replace")
@classmethod
def adddata_fromfile(
cls,
cerebro,
filename,
symbols=None,
fromdate=None,
todate=None,
progress_bar=True,
):
engine = create_engine("sqlite:///" + filename)
inspector = inspect(engine)
if symbols is None:
symbols = inspector.get_table_names()
progress = tqdm(symbols, disable=not progress_bar)
for symbol in progress:
progress.set_description("Adding Symbol [%s]" % symbol)
# pylint: disable=unexpected-keyword-arg
data = cls(
engine=engine,
tablename=symbol,
fromdate=fromdate,
todate=todate,
symbol=symbol,
name=symbol,
)
cerebro.adddata(data, name=data.p.name)
| 28.8 | 81 | 0.57147 | 3,141 | 0.908854 | 0 | 0 | 1,556 | 0.450231 | 0 | 0 | 292 | 0.084491 |
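# Editor's usage sketch for the data feed above (assumed flow based on the two
# classmethods; the SQLite file names and the symbol code are placeholders).
import backtrader as bt
from koapy.backtrader.KrxHistoricalDailyPriceDataFromSQLite import KrxHistoricalDailyPriceDataFromSQLite

# One-off conversion of the loader store into a per-symbol SQLite dump.
KrxHistoricalDailyPriceDataFromSQLite.dump_from_store('krx_store.sqlite', 'krx_daily.sqlite', symbols=['005930'])

# Register every dumped symbol table as a backtrader data feed and run.
cerebro = bt.Cerebro()
cerebro.addstrategy(bt.Strategy)
KrxHistoricalDailyPriceDataFromSQLite.adddata_fromfile(cerebro, 'krx_daily.sqlite', symbols=['005930'])
cerebro.run()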
4fb59e361ec35f6e244383c444a57733db66dc29 | 13,207 | py | Python | cid/parser/pre_processing.py | zeljko-bal/CID | 52ecc445c441ec63386c9f092b226090588a3789 | [
"MIT"
] | 1 | 2017-09-15T06:14:54.000Z | 2017-09-15T06:14:54.000Z | cid/parser/pre_processing.py | zeljko-bal/CID | 52ecc445c441ec63386c9f092b226090588a3789 | [
"MIT"
] | null | null | null | cid/parser/pre_processing.py | zeljko-bal/CID | 52ecc445c441ec63386c9f092b226090588a3789 | [
"MIT"
] | null | null | null | from textx.exceptions import TextXSemanticError
from cid.parser.model import ParameterCliValue, BoolWithPositivePattern
from cid.common.utils import get_cli_pattern_count, is_iterable, element_type
# ------------------------------- HELPER FUNCTIONS -------------------------------
def contains_duplicate_names(lst):
defined = [e.name for e in lst if not hasattr(e, 'imported') and not hasattr(e, 'local')]
local = [e.local for e in lst if hasattr(e, 'local') and e.local]
imported = [e.imported for e in lst if hasattr(e, 'imported') and e.imported]
return len(defined) != len(set(defined)) or len(local) != len(set(local)) or len(imported) != len(set(imported))
def split_import_path(import_path):
return './' + ('/'.join(import_path.elements[:-1])) + '.cid', import_path.elements[-1]
def import_reference_path(ref):
return '/' + '/'.join(ref.elements)
# ------------------------------- PRE PROCESSING -------------------------------
def process_script(script):
# check for duplicate free parameter names
script.free_parameters = [parameter for parameter in script.elements if element_type(parameter) == 'Parameter']
if contains_duplicate_names(script.free_parameters):
raise TextXSemanticError("Found duplicate free parameter names.")
# check for duplicate free command names
script.free_commands = [command for command in script.elements if element_type(command) == 'Command']
if contains_duplicate_names(script.free_commands):
raise TextXSemanticError("Found duplicate free command names.")
# check for duplicate import paths
if len(script.imports) != len(set([imp.path for imp in script.imports])):
raise TextXSemanticError("Found duplicate import paths.")
# check for duplicate import aliases
if len(script.imports) != len(set([imp.alias for imp in script.imports])):
raise TextXSemanticError("Found duplicate import aliases.")
# -------------------------------
def process_import_statement(import_statement):
if not import_statement.alias:
import_statement.alias = import_statement.path
import_statement.alias = import_reference_path(import_statement.alias)
import_statement.file_path, import_statement.element_name = split_import_path(import_statement.path)
# -------------------------------
def process_import_reference(import_reference):
if import_reference.imported:
import_reference.imported = import_reference_path(import_reference.imported)
# -------------------------------
def process_command(command):
"""
Model structure changes:
del command.usage
"""
# command.usages = all usages
if command.usages:
command.usages = [usage.body for usage in command.usages]
elif command.usage:
command.usages = [command.usage]
del command.usage
command.description = ' '.join(command.description.split()) # reduce excess white space
command.help = ' '.join(command.help.split()) # reduce excess white space
# defaults --------------
if not command.title:
command.title = command.name.replace('_', ' ').replace('-', ' ').strip().title()
if not command.cli_command:
command.cli_command = command.name
# additional checks --------------
if contains_duplicate_names(command.parameters):
raise TextXSemanticError("Found parameters with duplicate names in command: '{}'".format(command.name))
if contains_duplicate_names(command.sub_commands):
raise TextXSemanticError("Found sub commands with duplicate names in command: '{}'".format(command.name))
# -------------------------------
def process_parameter(parameter):
"""
Model structure changes:
add parameter.nonpositional
fix parameter.default
add parameter.all_patterns
add parameter.cli_pattern_vars
add parameter.cli_pattern_count
del parameter.empty_str_disallowed
add parameter.none_allowed
del parameter.default_is_none
Checks performed: TODO
Model changes: TODO
"""
# set default bool cli pattern
if parameter.type == 'Bool' and not parameter.cli:
parameter.cli = ParameterCliValue(BoolWithPositivePattern('--{name}'.format(name=parameter.name)))
# set parameter.nonpositional
parameter.nonpositional = parameter.cli and parameter.cli.cli_pattern
# fix parameter.default model structure
if len(parameter.default) == 0:
parameter.default = None
elif len(parameter.default) == 1:
parameter.default = parameter.default[0]
if parameter.nonpositional:
# set parameter.all_patterns
parameter.cli.cli_pattern.parent = parameter
parameter.all_patterns = [parameter.cli.cli_pattern] + parameter.cli_aliases
# set parameter.cli_pattern_count
parameter.cli_pattern_count = get_cli_pattern_count(parameter.all_patterns[0])
# all_patterns
for pattern in parameter.all_patterns:
if hasattr(pattern, 'vars') and pattern.vars:
# transform vars into a list of strings
pattern.vars = [v.value for v in pattern.vars]
# set pattern.count
pattern.count = len(pattern.vars)
# set parameter.cli_pattern_vars
if not hasattr(parameter, 'cli_pattern_vars'):
parameter.cli_pattern_vars = pattern.vars
else:
if not (len(parameter.cli_pattern_vars) == len(pattern.vars) and
all([parameter.cli_pattern_vars[i] == pattern.vars[i] for i in range(0, len(pattern.vars))])):
raise TextXSemanticError("Different argument names found for patterns in parameter: '{}'".format(parameter.name))
# StringParamPattern checks
if element_type(pattern) == "StringParamPattern":
if parameter.type == "Bool":
raise TextXSemanticError("Non boolean cli pattern in Bool type parameter: '{}'.".format(parameter.name))
if pattern.count_char and not parameter.type == "Num":
raise TextXSemanticError("Counter pattern in non Num type parameter: '{}'.".format(parameter.name))
if parameter.cli_pattern_count != get_cli_pattern_count(pattern):
raise TextXSemanticError("Different parameter count values encountered in cli patterns for parameter: '{}'".format(parameter.name))
elif element_type(pattern) in ['BoolWithPositivePattern', 'BoolNegativeOnlyPattern'] and not parameter.type == "Bool":
raise TextXSemanticError("Boolean cli pattern in non Bool type parameter: '{}'.".format(parameter.name))
else:
parameter.cli_pattern_count = 1
# empty_str_allowed
if (parameter.empty_str_allowed or parameter.empty_str_disallowed) and parameter.type != 'Str':
raise TextXSemanticError("Found empty_str_allowed or empty_str_disallowed in non Str parameter: '{}'".format(parameter.name))
if parameter.default == '' and parameter.empty_str_disallowed:
raise TextXSemanticError("Found empty_str_disallowed and default value is an empty string for parameter: '{}'.".format(parameter.name))
del parameter.empty_str_disallowed
# title
if not parameter.title:
parameter.title = parameter.name.replace('_', ' ').replace('-', ' ').strip().title()
# multiplicity
if not parameter.multiplicity:
parameter.multiplicity = 1
if parameter.multiplicity != '*' and parameter.multiplicity <= 0:
raise TextXSemanticError("Multiplicity must be greater than zero for: '{}'.".format(parameter.name))
if not parameter.nonpositional and parameter.multiplicity not in [1, '*']:
raise TextXSemanticError("Multiplicity for positional parameters must be either 1 or '*': '{}'.".format(parameter.name))
if not parameter.multiplicity == 1 and parameter.type == "Bool":
raise TextXSemanticError("Multiplicity for Bool type parameters must be 1: '{}'.".format(parameter.name))
# help
parameter.help = ' '.join(parameter.help.split()) # reduce excess white space
# description
parameter.description = ' '.join(parameter.description.split()) # reduce excess white space
if not parameter.description:
parameter.description = '{default_desc}'
# default
if parameter.default_is_none:
if parameter.type == 'Bool':
raise TextXSemanticError("Found default_is_none and parameter type is 'Bool': '{}'".format(parameter.name))
if parameter.default:
raise TextXSemanticError("Found default_is_none and parameter has a default defined: '{}'.".format(parameter.name))
if not parameter.default:
if parameter.default_is_none:
parameter.default = None
else:
if parameter.type == 'Bool':
# if parameter doesnt contain both positive and negative patterns
if not ([p for p in parameter.all_patterns if p.positive] and [p for p in parameter.all_patterns if p.negative]):
# set to False by default
parameter.default = 'False'
# else: leave None (for a case where neither positive nor negative arg is provided)
del parameter.default_is_none
if parameter.default:
if parameter.cli_pattern_count not in [1, '*']:
if not is_iterable(parameter.default) or len(parameter.default) != parameter.cli_pattern_count:
raise TextXSemanticError("Parameter '{}' with {} values must have that many default values defined.".format(parameter.name, parameter.cli_pattern_count))
else:
if is_iterable(parameter.default):
raise TextXSemanticError("Parameter '{}' should only have a single default value.".format(parameter.name))
if parameter.default == '':
parameter.empty_str_allowed = True
if parameter.nonpositional and parameter.default is not None:
if parameter.cli_pattern_count not in [1, '*']:
if not isinstance(parameter.default, list):
parameter.default = [parameter.default] * parameter.cli_pattern_count
elif len(parameter.default) != parameter.cli_pattern_count:
raise TextXSemanticError("Parameter pattern count and default values count do not match: '{}'.".format(parameter.name))
if parameter.type == 'Bool':
if parameter.default and parameter.default.lower() not in ['true', 'false']:
raise TextXSemanticError("Default value is not true or false and parameter type is 'Bool': '{}'".format(parameter.name))
# add parameter.none_allowed
parameter.none_allowed = parameter.default is None or [p for p in parameter.all_patterns if p.positive] and [p for p in parameter.all_patterns if p.negative]
# date_format
if not parameter.date_format and parameter.type == 'Date':
parameter.date_format = "dd.MM.yyyy"
# choices
if parameter.choices and not parameter.type == 'Choice':
raise TextXSemanticError("Choices found in non 'Choice' parameter: '{}'.".format(parameter.name))
if parameter.type == 'Choice' and not parameter.choices:
raise TextXSemanticError("Choices are required in 'Choice' parameter: '{}'".format(parameter.name))
# constraints
for constraint in parameter.constraints:
supported_constraints = {
'Str': ['LengthConstraint', 'StringFlagConstraint', 'RegexConstraint', 'CodeConstraint'],
'Choice': ['CodeConstraint'],
'Num': ['NumericValueConstraint', 'NumberFlagConstraint', 'CodeConstraint'],
'Bool': ['CodeConstraint'],
'Date': ['DateConstraint', 'CodeConstraint'],
'File': ['FileFlagConstraint', 'CodeConstraint', 'RegexConstraint'],
}[parameter.type]
if element_type(constraint) not in supported_constraints:
raise TextXSemanticError("Constraint type '{}' is unsupported for parameter type '{}': '{}'.".format(element_type(constraint), parameter.type, parameter.name))
# -------------------------------
def process_cli_or_group(or_group):
# transform the tree structure into a list
or_group.elements = [or_group.lhs]
if element_type(or_group.rhs) == 'CliOrGroup':
or_group.elements += or_group.rhs.elements
for el in or_group.rhs.elements:
el.parent = or_group
else:
or_group.elements.append(or_group.rhs)
del or_group.lhs
del or_group.rhs
# check for CliOptionalGroup in CliOrGroup
for element in or_group.elements:
if element_type(element) == 'CliOptionalGroup':
print('warning: CliOptionalGroup in CliOrGroup')
# -------------------------------
object_processors = {
'Script': process_script,
'ImportStatement': process_import_statement,
'ParameterReference': process_import_reference,
'CommandReference': process_import_reference,
'Command': process_command,
'Parameter': process_parameter,
'CliOrGroup': process_cli_or_group,
}
| 43.160131 | 171 | 0.662755 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,107 | 0.310971 |
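# Editor's usage sketch for the object_processors mapping above (registration call taken
# from the textX API; the grammar and model file names are placeholders).
from textx import metamodel_from_file

cid_metamodel = metamodel_from_file('cid.tx')
cid_metamodel.register_obj_processors(object_processors)  # run after each matching rule is parsed
model = cid_metamodel.model_from_file('example.cid')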
4fb887e16c8e670c35cf19aaf804300bc4ca22e4 | 40,372 | py | Python | jsfuzz/fuzzer/grammarinator_deps/ECMAScriptUnparser.py | gustavopinto/entente | 19b65d8cafd77c198c9c441f4f5e01503360309b | [
"BSD-2-Clause"
] | 5 | 2018-03-20T21:53:38.000Z | 2018-12-28T21:08:47.000Z | jsfuzz/fuzzer/grammarinator_deps/ECMAScriptUnparser.py | gustavopinto/entente | 19b65d8cafd77c198c9c441f4f5e01503360309b | [
"BSD-2-Clause"
] | 14 | 2018-04-09T20:16:00.000Z | 2019-06-11T12:31:10.000Z | jsfuzz/fuzzer/grammarinator_deps/ECMAScriptUnparser.py | gustavopinto/entente | 19b65d8cafd77c198c9c441f4f5e01503360309b | [
"BSD-2-Clause"
] | 12 | 2018-04-06T00:52:24.000Z | 2018-07-10T19:44:16.000Z | # Generated by Grammarinator 17.7
from itertools import chain
from grammarinator.runtime import *
import ECMAScriptUnlexer
class ECMAScriptUnparser(Grammarinator):
def __init__(self, unlexer):
super(ECMAScriptUnparser, self).__init__()
self.unlexer = unlexer
self.set_options()
@depthcontrol
def program(self):
current = self.create_node(UnparserRule(name='program'))
if self.unlexer.max_depth >= 4:
for _ in self.zero_or_one():
current += self.sourceElements()
current += self.unlexer.EOF()
return current
program.min_depth = 1
@depthcontrol
def sourceElements(self):
current = self.create_node(UnparserRule(name='sourceElements'))
if self.unlexer.max_depth >= 0:
for _ in self.one_or_more():
current += self.sourceElement()
return current
sourceElements.min_depth = 3
@depthcontrol
def sourceElement(self):
current = self.create_node(UnparserRule(name='sourceElement'))
choice = self.choice([0 if [2, 3][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1])])
if choice == 0:
current += self.statement()
elif choice == 1:
current += self.functionDeclaration()
return current
sourceElement.min_depth = 2
@depthcontrol
def statement(self):
current = self.create_node(UnparserRule(name='statement'))
choice = self.choice([0 if [1, 5, 2, 4, 4, 3, 2, 2, 2, 4, 3, 4, 4, 3, 2][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])])
if choice == 0:
current += self.block()
elif choice == 1:
current += self.variableStatement()
elif choice == 2:
current += self.emptyStatement()
elif choice == 3:
current += self.expressionStatement()
elif choice == 4:
current += self.ifStatement()
elif choice == 5:
current += self.iterationStatement()
elif choice == 6:
current += self.continueStatement()
elif choice == 7:
current += self.breakStatement()
elif choice == 8:
current += self.returnStatement()
elif choice == 9:
current += self.withStatement()
elif choice == 10:
current += self.labelledStatement()
elif choice == 11:
current += self.switchStatement()
elif choice == 12:
current += self.throwStatement()
elif choice == 13:
current += self.tryStatement()
elif choice == 14:
current += self.debuggerStatement()
return current
statement.min_depth = 1
@depthcontrol
def block(self):
current = self.create_node(UnparserRule(name='block'))
current += self.create_node(UnlexerRule(src='{'))
if self.unlexer.max_depth >= 3:
for _ in self.zero_or_one():
current += self.statementList()
current += self.create_node(UnlexerRule(src='}'))
return current
block.min_depth = 0
@depthcontrol
def statementList(self):
current = self.create_node(UnparserRule(name='statementList'))
if self.unlexer.max_depth >= 0:
for _ in self.one_or_more():
current += self.statement()
return current
statementList.min_depth = 2
@depthcontrol
def variableStatement(self):
current = self.create_node(UnparserRule(name='variableStatement'))
current += self.unlexer.Var()
current += self.variableDeclarationList()
current += self.eos()
return current
variableStatement.min_depth = 4
@depthcontrol
def variableDeclarationList(self):
current = self.create_node(UnparserRule(name='variableDeclarationList'))
current += self.variableDeclaration()
if self.unlexer.max_depth >= 3:
for _ in self.zero_or_more():
current += self.create_node(UnlexerRule(src=','))
current += self.variableDeclaration()
return current
variableDeclarationList.min_depth = 3
@depthcontrol
def variableDeclaration(self):
current = self.create_node(UnparserRule(name='variableDeclaration'))
current += self.unlexer.Identifier()
if self.unlexer.max_depth >= 3:
for _ in self.zero_or_one():
current += self.initialiser()
return current
variableDeclaration.min_depth = 2
@depthcontrol
def initialiser(self):
current = self.create_node(UnparserRule(name='initialiser'))
current += self.create_node(UnlexerRule(src='='))
current += self.singleExpression()
return current
initialiser.min_depth = 2
@depthcontrol
def emptyStatement(self):
current = self.create_node(UnparserRule(name='emptyStatement'))
current += self.unlexer.SemiColon()
return current
emptyStatement.min_depth = 1
@depthcontrol
def expressionStatement(self):
current = self.create_node(UnparserRule(name='expressionStatement'))
current += self.expressionSequence()
current += self.eos()
return current
expressionStatement.min_depth = 3
@depthcontrol
def ifStatement(self):
current = self.create_node(UnparserRule(name='ifStatement'))
current += self.unlexer.If()
current += self.create_node(UnlexerRule(src='('))
current += self.expressionSequence()
current += self.create_node(UnlexerRule(src=')'))
current += self.statement()
if self.unlexer.max_depth >= 2:
for _ in self.zero_or_one():
current += self.unlexer.Else()
current += self.statement()
return current
ifStatement.min_depth = 3
@depthcontrol
def iterationStatement(self):
current = self.create_node(UnparserRule(name='iterationStatement'))
choice = self.choice([0 if [3, 3, 2, 4, 3, 3][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1, 1, 1, 1, 1])])
if choice == 0:
current += self.unlexer.Do()
current += self.statement()
current += self.unlexer.While()
current += self.create_node(UnlexerRule(src='('))
current += self.expressionSequence()
current += self.create_node(UnlexerRule(src=')'))
current += self.eos()
elif choice == 1:
current += self.unlexer.While()
current += self.create_node(UnlexerRule(src='('))
current += self.expressionSequence()
current += self.create_node(UnlexerRule(src=')'))
current += self.statement()
elif choice == 2:
current += self.unlexer.For()
current += self.create_node(UnlexerRule(src='('))
if self.unlexer.max_depth >= 3:
for _ in self.zero_or_one():
current += self.expressionSequence()
current += self.create_node(UnlexerRule(src=';'))
if self.unlexer.max_depth >= 3:
for _ in self.zero_or_one():
current += self.expressionSequence()
current += self.create_node(UnlexerRule(src=';'))
if self.unlexer.max_depth >= 3:
for _ in self.zero_or_one():
current += self.expressionSequence()
current += self.create_node(UnlexerRule(src=')'))
current += self.statement()
elif choice == 3:
current += self.unlexer.For()
current += self.create_node(UnlexerRule(src='('))
current += self.unlexer.Var()
current += self.variableDeclarationList()
current += self.create_node(UnlexerRule(src=';'))
if self.unlexer.max_depth >= 3:
for _ in self.zero_or_one():
current += self.expressionSequence()
current += self.create_node(UnlexerRule(src=';'))
if self.unlexer.max_depth >= 3:
for _ in self.zero_or_one():
current += self.expressionSequence()
current += self.create_node(UnlexerRule(src=')'))
current += self.statement()
elif choice == 4:
current += self.unlexer.For()
current += self.create_node(UnlexerRule(src='('))
current += self.singleExpression()
current += self.unlexer.In()
current += self.expressionSequence()
current += self.create_node(UnlexerRule(src=')'))
current += self.statement()
elif choice == 5:
current += self.unlexer.For()
current += self.create_node(UnlexerRule(src='('))
current += self.unlexer.Var()
current += self.variableDeclaration()
current += self.unlexer.In()
current += self.expressionSequence()
current += self.create_node(UnlexerRule(src=')'))
current += self.statement()
return current
iterationStatement.min_depth = 2
@depthcontrol
def continueStatement(self):
current = self.create_node(UnparserRule(name='continueStatement'))
current += self.unlexer.Continue()
if self.unlexer.max_depth >= 2:
for _ in self.zero_or_one():
current += self.unlexer.Identifier()
current += self.eos()
return current
continueStatement.min_depth = 1
@depthcontrol
def breakStatement(self):
current = self.create_node(UnparserRule(name='breakStatement'))
current += self.unlexer.Break()
if self.unlexer.max_depth >= 2:
for _ in self.zero_or_one():
current += self.unlexer.Identifier()
current += self.eos()
return current
breakStatement.min_depth = 1
@depthcontrol
def returnStatement(self):
current = self.create_node(UnparserRule(name='returnStatement'))
current += self.unlexer.Return()
if self.unlexer.max_depth >= 3:
for _ in self.zero_or_one():
current += self.expressionSequence()
current += self.eos()
return current
returnStatement.min_depth = 1
@depthcontrol
def withStatement(self):
current = self.create_node(UnparserRule(name='withStatement'))
current += self.unlexer.With()
current += self.create_node(UnlexerRule(src='('))
current += self.expressionSequence()
current += self.create_node(UnlexerRule(src=')'))
current += self.statement()
return current
withStatement.min_depth = 3
@depthcontrol
def switchStatement(self):
current = self.create_node(UnparserRule(name='switchStatement'))
current += self.unlexer.Switch()
current += self.create_node(UnlexerRule(src='('))
current += self.expressionSequence()
current += self.create_node(UnlexerRule(src=')'))
current += self.caseBlock()
return current
switchStatement.min_depth = 3
@depthcontrol
def caseBlock(self):
current = self.create_node(UnparserRule(name='caseBlock'))
current += self.create_node(UnlexerRule(src='{'))
if self.unlexer.max_depth >= 5:
for _ in self.zero_or_one():
current += self.caseClauses()
if self.unlexer.max_depth >= 2:
for _ in self.zero_or_one():
current += self.defaultClause()
if self.unlexer.max_depth >= 5:
for _ in self.zero_or_one():
current += self.caseClauses()
current += self.create_node(UnlexerRule(src='}'))
return current
caseBlock.min_depth = 0
@depthcontrol
def caseClauses(self):
current = self.create_node(UnparserRule(name='caseClauses'))
if self.unlexer.max_depth >= 0:
for _ in self.one_or_more():
current += self.caseClause()
return current
caseClauses.min_depth = 4
@depthcontrol
def caseClause(self):
current = self.create_node(UnparserRule(name='caseClause'))
current += self.unlexer.Case()
current += self.expressionSequence()
current += self.create_node(UnlexerRule(src=':'))
if self.unlexer.max_depth >= 3:
for _ in self.zero_or_one():
current += self.statementList()
return current
caseClause.min_depth = 3
@depthcontrol
def defaultClause(self):
current = self.create_node(UnparserRule(name='defaultClause'))
current += self.unlexer.Default()
current += self.create_node(UnlexerRule(src=':'))
if self.unlexer.max_depth >= 3:
for _ in self.zero_or_one():
current += self.statementList()
return current
defaultClause.min_depth = 1
@depthcontrol
def labelledStatement(self):
current = self.create_node(UnparserRule(name='labelledStatement'))
current += self.unlexer.Identifier()
current += self.create_node(UnlexerRule(src=':'))
current += self.statement()
return current
labelledStatement.min_depth = 2
@depthcontrol
def throwStatement(self):
current = self.create_node(UnparserRule(name='throwStatement'))
current += self.unlexer.Throw()
current += self.expressionSequence()
current += self.eos()
return current
throwStatement.min_depth = 3
@depthcontrol
def tryStatement(self):
current = self.create_node(UnparserRule(name='tryStatement'))
choice = self.choice([0 if [3, 2, 3][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1, 1])])
if choice == 0:
current += self.unlexer.Try()
current += self.block()
current += self.catchProduction()
elif choice == 1:
current += self.unlexer.Try()
current += self.block()
current += self.finallyProduction()
elif choice == 2:
current += self.unlexer.Try()
current += self.block()
current += self.catchProduction()
current += self.finallyProduction()
return current
tryStatement.min_depth = 2
@depthcontrol
def catchProduction(self):
current = self.create_node(UnparserRule(name='catchProduction'))
current += self.unlexer.Catch()
current += self.create_node(UnlexerRule(src='('))
current += self.unlexer.Identifier()
current += self.create_node(UnlexerRule(src=')'))
current += self.block()
return current
catchProduction.min_depth = 2
@depthcontrol
def finallyProduction(self):
current = self.create_node(UnparserRule(name='finallyProduction'))
current += self.unlexer.Finally()
current += self.block()
return current
finallyProduction.min_depth = 1
@depthcontrol
def debuggerStatement(self):
current = self.create_node(UnparserRule(name='debuggerStatement'))
current += self.unlexer.Debugger()
current += self.eos()
return current
debuggerStatement.min_depth = 1
@depthcontrol
def functionDeclaration(self):
current = self.create_node(UnparserRule(name='functionDeclaration'))
current += self.unlexer.Function()
current += self.unlexer.Identifier()
current += self.create_node(UnlexerRule(src='('))
if self.unlexer.max_depth >= 3:
for _ in self.zero_or_one():
current += self.formalParameterList()
current += self.create_node(UnlexerRule(src=')'))
current += self.create_node(UnlexerRule(src='{'))
current += self.functionBody()
current += self.create_node(UnlexerRule(src='}'))
return current
functionDeclaration.min_depth = 2
@depthcontrol
def formalParameterList(self):
current = self.create_node(UnparserRule(name='formalParameterList'))
current += self.unlexer.Identifier()
if self.unlexer.max_depth >= 2:
for _ in self.zero_or_more():
current += self.create_node(UnlexerRule(src=','))
current += self.unlexer.Identifier()
return current
formalParameterList.min_depth = 2
@depthcontrol
def functionBody(self):
current = self.create_node(UnparserRule(name='functionBody'))
if self.unlexer.max_depth >= 4:
for _ in self.zero_or_one():
current += self.sourceElements()
return current
functionBody.min_depth = 0
@depthcontrol
def arrayLiteral(self):
current = self.create_node(UnparserRule(name='arrayLiteral'))
current += self.create_node(UnlexerRule(src='['))
if self.unlexer.max_depth >= 3:
for _ in self.zero_or_one():
current += self.elementList()
if self.unlexer.max_depth >= 0:
for _ in self.zero_or_one():
current += self.create_node(UnlexerRule(src=','))
if self.unlexer.max_depth >= 1:
for _ in self.zero_or_one():
current += self.elision()
current += self.create_node(UnlexerRule(src=']'))
return current
arrayLiteral.min_depth = 0
@depthcontrol
def elementList(self):
current = self.create_node(UnparserRule(name='elementList'))
if self.unlexer.max_depth >= 1:
for _ in self.zero_or_one():
current += self.elision()
current += self.singleExpression()
if self.unlexer.max_depth >= 2:
for _ in self.zero_or_more():
current += self.create_node(UnlexerRule(src=','))
if self.unlexer.max_depth >= 1:
for _ in self.zero_or_one():
current += self.elision()
current += self.singleExpression()
return current
elementList.min_depth = 2
@depthcontrol
def elision(self):
current = self.create_node(UnparserRule(name='elision'))
if self.unlexer.max_depth >= 0:
for _ in self.one_or_more():
current += self.create_node(UnlexerRule(src=','))
return current
elision.min_depth = 0
@depthcontrol
def objectLiteral(self):
current = self.create_node(UnparserRule(name='objectLiteral'))
choice = self.choice([0 if [0, 4][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1])])
if choice == 0:
current += self.create_node(UnlexerRule(src='{'))
current += self.create_node(UnlexerRule(src='}'))
elif choice == 1:
current += self.create_node(UnlexerRule(src='{'))
current += self.propertyNameAndValueList()
if self.unlexer.max_depth >= 0:
for _ in self.zero_or_one():
current += self.create_node(UnlexerRule(src=','))
current += self.create_node(UnlexerRule(src='}'))
return current
objectLiteral.min_depth = 0
@depthcontrol
def propertyNameAndValueList(self):
current = self.create_node(UnparserRule(name='propertyNameAndValueList'))
current += self.propertyAssignment()
if self.unlexer.max_depth >= 3:
for _ in self.zero_or_more():
current += self.create_node(UnlexerRule(src=','))
current += self.propertyAssignment()
return current
propertyNameAndValueList.min_depth = 3
@depthcontrol
def propertyAssignment(self):
current = self.create_node(UnparserRule(name='propertyAssignment'))
choice = self.choice([0 if [2, 3, 3][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1, 1])])
if choice == 0:
current += self.propertyName()
current += self.create_node(UnlexerRule(src=':'))
current += self.singleExpression()
elif choice == 1:
current += self.getter()
current += self.create_node(UnlexerRule(src='('))
current += self.create_node(UnlexerRule(src=')'))
current += self.create_node(UnlexerRule(src='{'))
current += self.functionBody()
current += self.create_node(UnlexerRule(src='}'))
elif choice == 2:
current += self.setter()
current += self.create_node(UnlexerRule(src='('))
current += self.propertySetParameterList()
current += self.create_node(UnlexerRule(src=')'))
current += self.create_node(UnlexerRule(src='{'))
current += self.functionBody()
current += self.create_node(UnlexerRule(src='}'))
return current
propertyAssignment.min_depth = 2
@depthcontrol
def propertyName(self):
current = self.create_node(UnparserRule(name='propertyName'))
choice = self.choice([0 if [3, 1, 3][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1, 1])])
if choice == 0:
current += self.identifierName()
elif choice == 1:
current += self.unlexer.StringLiteral()
elif choice == 2:
current += self.numericLiteral()
return current
propertyName.min_depth = 1
@depthcontrol
def propertySetParameterList(self):
current = self.create_node(UnparserRule(name='propertySetParameterList'))
current += self.unlexer.Identifier()
return current
propertySetParameterList.min_depth = 2
@depthcontrol
def arguments(self):
current = self.create_node(UnparserRule(name='arguments'))
current += self.create_node(UnlexerRule(src='('))
if self.unlexer.max_depth >= 3:
for _ in self.zero_or_one():
current += self.argumentList()
current += self.create_node(UnlexerRule(src=')'))
return current
arguments.min_depth = 0
@depthcontrol
def argumentList(self):
current = self.create_node(UnparserRule(name='argumentList'))
current += self.singleExpression()
if self.unlexer.max_depth >= 2:
for _ in self.zero_or_more():
current += self.create_node(UnlexerRule(src=','))
current += self.singleExpression()
return current
argumentList.min_depth = 2
@depthcontrol
def expressionSequence(self):
current = self.create_node(UnparserRule(name='expressionSequence'))
current += self.singleExpression()
if self.unlexer.max_depth >= 2:
for _ in self.zero_or_more():
current += self.create_node(UnlexerRule(src=','))
current += self.singleExpression()
return current
expressionSequence.min_depth = 2
@depthcontrol
def singleExpression(self):
current = self.create_node(UnparserRule(name='singleExpression'))
choice = self.choice([0 if [1, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 1, 1, 3][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])])
if choice == 0:
current += self.unlexer.Function()
if self.unlexer.max_depth >= 2:
for _ in self.zero_or_one():
current += self.unlexer.Identifier()
current += self.create_node(UnlexerRule(src='('))
if self.unlexer.max_depth >= 3:
for _ in self.zero_or_one():
current += self.formalParameterList()
current += self.create_node(UnlexerRule(src=')'))
current += self.create_node(UnlexerRule(src='{'))
current += self.functionBody()
current += self.create_node(UnlexerRule(src='}'))
elif choice == 1:
current += self.singleExpression()
current += self.create_node(UnlexerRule(src='['))
current += self.expressionSequence()
current += self.create_node(UnlexerRule(src=']'))
elif choice == 2:
current += self.singleExpression()
current += self.create_node(UnlexerRule(src='.'))
current += self.identifierName()
elif choice == 3:
current += self.singleExpression()
current += self.arguments()
elif choice == 4:
current += self.unlexer.New()
current += self.singleExpression()
if self.unlexer.max_depth >= 1:
for _ in self.zero_or_one():
current += self.arguments()
elif choice == 5:
current += self.singleExpression()
current += self.create_node(UnlexerRule(src='++'))
elif choice == 6:
current += self.singleExpression()
current += self.create_node(UnlexerRule(src='--'))
elif choice == 7:
current += self.unlexer.Delete()
current += self.singleExpression()
elif choice == 8:
current += self.unlexer.Void()
current += self.singleExpression()
elif choice == 9:
current += self.unlexer.Typeof()
current += self.singleExpression()
elif choice == 10:
current += self.create_node(UnlexerRule(src='++'))
current += self.singleExpression()
elif choice == 11:
current += self.create_node(UnlexerRule(src='--'))
current += self.singleExpression()
elif choice == 12:
current += self.create_node(UnlexerRule(src='+'))
current += self.singleExpression()
elif choice == 13:
current += self.create_node(UnlexerRule(src='-'))
current += self.singleExpression()
elif choice == 14:
current += self.create_node(UnlexerRule(src='~'))
current += self.singleExpression()
elif choice == 15:
current += self.create_node(UnlexerRule(src='!'))
current += self.singleExpression()
elif choice == 16:
current += self.singleExpression()
choice = self.choice([0 if [0, 0, 0][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1, 1])])
if choice == 0:
current += self.create_node(UnlexerRule(src='*'))
elif choice == 1:
current += self.create_node(UnlexerRule(src='/'))
elif choice == 2:
current += self.create_node(UnlexerRule(src='%'))
current += self.singleExpression()
elif choice == 17:
current += self.singleExpression()
choice = self.choice([0 if [0, 0][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1])])
if choice == 0:
current += self.create_node(UnlexerRule(src='+'))
elif choice == 1:
current += self.create_node(UnlexerRule(src='-'))
current += self.singleExpression()
elif choice == 18:
current += self.singleExpression()
choice = self.choice([0 if [0, 0, 0][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1, 1])])
if choice == 0:
current += self.create_node(UnlexerRule(src='<<'))
elif choice == 1:
current += self.create_node(UnlexerRule(src='>>'))
elif choice == 2:
current += self.create_node(UnlexerRule(src='>>>'))
current += self.singleExpression()
elif choice == 19:
current += self.singleExpression()
choice = self.choice([0 if [0, 0, 0, 0][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1, 1, 1])])
if choice == 0:
current += self.create_node(UnlexerRule(src='<'))
elif choice == 1:
current += self.create_node(UnlexerRule(src='>'))
elif choice == 2:
current += self.create_node(UnlexerRule(src='<='))
elif choice == 3:
current += self.create_node(UnlexerRule(src='>='))
current += self.singleExpression()
elif choice == 20:
current += self.singleExpression()
current += self.unlexer.Instanceof()
current += self.singleExpression()
elif choice == 21:
current += self.singleExpression()
current += self.unlexer.In()
current += self.singleExpression()
elif choice == 22:
current += self.singleExpression()
choice = self.choice([0 if [0, 0, 0, 0][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1, 1, 1])])
if choice == 0:
current += self.create_node(UnlexerRule(src='=='))
elif choice == 1:
current += self.create_node(UnlexerRule(src='!='))
elif choice == 2:
current += self.create_node(UnlexerRule(src='==='))
elif choice == 3:
current += self.create_node(UnlexerRule(src='!=='))
current += self.singleExpression()
elif choice == 23:
current += self.singleExpression()
current += self.create_node(UnlexerRule(src='&'))
current += self.singleExpression()
elif choice == 24:
current += self.singleExpression()
current += self.create_node(UnlexerRule(src='^'))
current += self.singleExpression()
elif choice == 25:
current += self.singleExpression()
current += self.create_node(UnlexerRule(src='|'))
current += self.singleExpression()
elif choice == 26:
current += self.singleExpression()
current += self.create_node(UnlexerRule(src='&&'))
current += self.singleExpression()
elif choice == 27:
current += self.singleExpression()
current += self.create_node(UnlexerRule(src='||'))
current += self.singleExpression()
elif choice == 28:
current += self.singleExpression()
current += self.create_node(UnlexerRule(src='?'))
current += self.singleExpression()
current += self.create_node(UnlexerRule(src=':'))
current += self.singleExpression()
elif choice == 29:
current += self.singleExpression()
current += self.create_node(UnlexerRule(src='='))
current += self.singleExpression()
elif choice == 30:
current += self.singleExpression()
current += self.assignmentOperator()
current += self.singleExpression()
elif choice == 31:
current += self.unlexer.This()
elif choice == 32:
current += self.unlexer.Identifier()
elif choice == 33:
current += self.literal()
elif choice == 34:
current += self.arrayLiteral()
elif choice == 35:
current += self.objectLiteral()
elif choice == 36:
current += self.create_node(UnlexerRule(src='('))
current += self.expressionSequence()
current += self.create_node(UnlexerRule(src=')'))
return current
singleExpression.min_depth = 1
@depthcontrol
def assignmentOperator(self):
current = self.create_node(UnparserRule(name='assignmentOperator'))
choice = self.choice([0 if [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])])
if choice == 0:
current += self.create_node(UnlexerRule(src='*='))
elif choice == 1:
current += self.create_node(UnlexerRule(src='/='))
elif choice == 2:
current += self.create_node(UnlexerRule(src='%='))
elif choice == 3:
current += self.create_node(UnlexerRule(src='+='))
elif choice == 4:
current += self.create_node(UnlexerRule(src='-='))
elif choice == 5:
current += self.create_node(UnlexerRule(src='<<='))
elif choice == 6:
current += self.create_node(UnlexerRule(src='>>='))
elif choice == 7:
current += self.create_node(UnlexerRule(src='>>>='))
elif choice == 8:
current += self.create_node(UnlexerRule(src='&='))
elif choice == 9:
current += self.create_node(UnlexerRule(src='^='))
elif choice == 10:
current += self.create_node(UnlexerRule(src='|='))
return current
assignmentOperator.min_depth = 0
@depthcontrol
def literal(self):
current = self.create_node(UnparserRule(name='literal'))
choice = self.choice([0 if [1, 3][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1])])
if choice == 0:
choice = self.choice([0 if [1, 1, 1, 3][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1, 1, 1])])
if choice == 0:
current += self.unlexer.NullLiteral()
elif choice == 1:
current += self.unlexer.BooleanLiteral()
elif choice == 2:
current += self.unlexer.StringLiteral()
elif choice == 3:
current += self.unlexer.RegularExpressionLiteral()
elif choice == 1:
current += self.numericLiteral()
return current
literal.min_depth = 1
@depthcontrol
def numericLiteral(self):
current = self.create_node(UnparserRule(name='numericLiteral'))
choice = self.choice([0 if [2, 2, 2][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1, 1])])
if choice == 0:
current += self.unlexer.DecimalLiteral()
elif choice == 1:
current += self.unlexer.HexIntegerLiteral()
elif choice == 2:
current += self.unlexer.OctalIntegerLiteral()
return current
numericLiteral.min_depth = 2
@depthcontrol
def identifierName(self):
current = self.create_node(UnparserRule(name='identifierName'))
choice = self.choice([0 if [2, 2][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1])])
if choice == 0:
current += self.unlexer.Identifier()
elif choice == 1:
current += self.reservedWord()
return current
identifierName.min_depth = 2
@depthcontrol
def reservedWord(self):
current = self.create_node(UnparserRule(name='reservedWord'))
choice = self.choice([0 if [2, 2, 1][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1, 1])])
if choice == 0:
current += self.keyword()
elif choice == 1:
current += self.futureReservedWord()
elif choice == 2:
choice = self.choice([0 if [1, 1][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1])])
if choice == 0:
current += self.unlexer.NullLiteral()
elif choice == 1:
current += self.unlexer.BooleanLiteral()
return current
reservedWord.min_depth = 1
@depthcontrol
def keyword(self):
current = self.create_node(UnparserRule(name='keyword'))
choice = self.choice([0 if [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])])
if choice == 0:
current += self.unlexer.Break()
elif choice == 1:
current += self.unlexer.Do()
elif choice == 2:
current += self.unlexer.Instanceof()
elif choice == 3:
current += self.unlexer.Typeof()
elif choice == 4:
current += self.unlexer.Case()
elif choice == 5:
current += self.unlexer.Else()
elif choice == 6:
current += self.unlexer.New()
elif choice == 7:
current += self.unlexer.Var()
elif choice == 8:
current += self.unlexer.Catch()
elif choice == 9:
current += self.unlexer.Finally()
elif choice == 10:
current += self.unlexer.Return()
elif choice == 11:
current += self.unlexer.Void()
elif choice == 12:
current += self.unlexer.Continue()
elif choice == 13:
current += self.unlexer.For()
elif choice == 14:
current += self.unlexer.Switch()
elif choice == 15:
current += self.unlexer.While()
elif choice == 16:
current += self.unlexer.Debugger()
elif choice == 17:
current += self.unlexer.Function()
elif choice == 18:
current += self.unlexer.This()
elif choice == 19:
current += self.unlexer.With()
elif choice == 20:
current += self.unlexer.Default()
elif choice == 21:
current += self.unlexer.If()
elif choice == 22:
current += self.unlexer.Throw()
elif choice == 23:
current += self.unlexer.Delete()
elif choice == 24:
current += self.unlexer.In()
elif choice == 25:
current += self.unlexer.Try()
return current
keyword.min_depth = 1
@depthcontrol
def futureReservedWord(self):
current = self.create_node(UnparserRule(name='futureReservedWord'))
choice = self.choice([0 if [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])])
if choice == 0:
current += self.unlexer.Class()
elif choice == 1:
current += self.unlexer.Enum()
elif choice == 2:
current += self.unlexer.Extends()
elif choice == 3:
current += self.unlexer.Super()
elif choice == 4:
current += self.unlexer.Const()
elif choice == 5:
current += self.unlexer.Export()
elif choice == 6:
current += self.unlexer.Import()
elif choice == 7:
current += self.unlexer.Implements()
elif choice == 8:
current += self.unlexer.Let()
elif choice == 9:
current += self.unlexer.Private()
elif choice == 10:
current += self.unlexer.Public()
elif choice == 11:
current += self.unlexer.Interface()
elif choice == 12:
current += self.unlexer.Package()
elif choice == 13:
current += self.unlexer.Protected()
elif choice == 14:
current += self.unlexer.Static()
elif choice == 15:
current += self.unlexer.Yield()
return current
futureReservedWord.min_depth = 1
@depthcontrol
def getter(self):
current = self.create_node(UnparserRule(name='getter'))
current += self.unlexer.Identifier()
current += self.propertyName()
return current
getter.min_depth = 2
@depthcontrol
def setter(self):
current = self.create_node(UnparserRule(name='setter'))
current += self.unlexer.Identifier()
current += self.propertyName()
return current
setter.min_depth = 2
@depthcontrol
def eos(self):
current = self.create_node(UnparserRule(name='eos'))
choice = self.choice([0 if [1, 1, 0, 0][i] > self.unlexer.max_depth else w for i, w in enumerate([1, 1, 1, 1])])
if choice == 0:
current += self.unlexer.SemiColon()
elif choice == 1:
current += self.unlexer.EOF()
elif choice == 2:
pass
elif choice == 3:
pass
return current
eos.min_depth = 0
@depthcontrol
def eof(self):
current = self.create_node(UnparserRule(name='eof'))
current += self.unlexer.EOF()
return current
eof.min_depth = 1
default_rule = program
| 38.819231 | 318 | 0.570271 | 40,243 | 0.996805 | 0 | 0 | 37,921 | 0.93929 | 0 | 0 | 1,254 | 0.031061 |
4fb9815011b0fc67585f72302dacbae27bef0e9a | 1,550 | py | Python | ml-scripts/transform-to-numpy.py | thejoeejoee/SUI-MIT-VUT-2020-2021 | aee307aa772c5a0e97578da5ebedd3e2cd39ab91 | [
"MIT"
] | null | null | null | ml-scripts/transform-to-numpy.py | thejoeejoee/SUI-MIT-VUT-2020-2021 | aee307aa772c5a0e97578da5ebedd3e2cd39ab91 | [
"MIT"
] | null | null | null | ml-scripts/transform-to-numpy.py | thejoeejoee/SUI-MIT-VUT-2020-2021 | aee307aa772c5a0e97578da5ebedd3e2cd39ab91 | [
"MIT"
] | 1 | 2021-01-15T19:01:45.000Z | 2021-01-15T19:01:45.000Z | #!/usr/bin/env python3
# Project: VUT FIT SUI Project - Dice Wars
# Authors:
# - Josef Kolář <xkolar71@stud.fit.vutbr.cz>
# - Dominik Harmim <xharmi00@stud.fit.vutbr.cz>
# - Petr Kapoun <xkapou04@stud.fit.vutbr.cz>
# - Jindřich Šesták <xsesta05@stud.fit.vutbr.cz>
# Year: 2020
# Description: Transforms game configurations into a numpy array.
import os
import pickle
import sys
from os import listdir
import numpy as np
DATA_DIR = os.path.join(os.path.dirname(__file__), '../../sui-learning-data-mixed')
winners = listdir(DATA_DIR)
loaded_confs = 0
loaded_files = 0
data = np.empty((0, 499 + 1), dtype=int)
for winner in winners:
if winner not in '1234':
continue
winner_id = int(winner)
for conf in listdir(os.path.join(DATA_DIR, winner)):
pth = os.path.join(DATA_DIR, winner, conf)
with open(pth, 'br') as f:
one_game_confs = pickle.load(file=f)
loaded_files += 1
items_count = len(one_game_confs)
one_game_confs_with_targets = np.empty((items_count, data.shape[1]))
one_game_confs_with_targets[:, 1:] = np.array(list(one_game_confs))
one_game_confs_with_targets[:, 0] = winner_id - 1
data = np.concatenate(
(one_game_confs_with_targets, data),
)
loaded_confs += items_count
print(
f'Loaded {loaded_confs: 6} (+{items_count:>3}) configurations from {loaded_files} files.',
file=sys.stderr
)
np.save(os.path.join(DATA_DIR, 'learning-data'), data)
| 26.724138 | 102 | 0.649032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 506 | 0.325402 |
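# Editor's illustrative follow-up to the dump script above (assumed consumer code, not
# part of the original file): column 0 of each saved row holds the winner id shifted to
# 0..3 and the remaining 499 columns hold the game-configuration features, so the
# training data splits back out with plain slicing.
import numpy as np

data = np.load('learning-data.npy')       # path: wherever the dump script wrote it
targets = data[:, 0].astype(int)          # winner id per configuration (0..3)
features = data[:, 1:]                    # 499 feature columns per configuration
print(features.shape, targets.shape)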
4fba29d360396207764ed0558aa4154a354cbfed | 764 | py | Python | facegram/profiles/serializers/v1/serializers.py | mabdullahadeel/facegram | f0eaa42008e876ae892b50f9f621a25b17cc70d5 | [
"MIT"
] | 1 | 2021-09-26T13:37:22.000Z | 2021-09-26T13:37:22.000Z | facegram/profiles/serializers/v1/serializers.py | mabdullahadeel/facegram | f0eaa42008e876ae892b50f9f621a25b17cc70d5 | [
"MIT"
] | 1 | 2021-08-08T22:04:39.000Z | 2021-08-08T22:04:39.000Z | facegram/profiles/serializers/v1/serializers.py | mabdullahadeel/facegram | f0eaa42008e876ae892b50f9f621a25b17cc70d5 | [
"MIT"
] | null | null | null | from django.db.models import fields
from rest_framework import serializers
from facegram.profiles.models import Profile
from facegram.users.api.serializers import UserSerializer
class RetrieveUserProfileSerializerV1(serializers.ModelSerializer):
user = UserSerializer(read_only=True)
class Meta:
model = Profile
exclude = ("followers","following", "up_votes", "down_votes")
read_only_fields = ('id', 'follower_count', 'following_count')
class UpdateProfileSerializerV1(serializers.ModelSerializer):
class Meta:
model = Profile
fields = ('profile_pic', 'bio', 'location', 'interests', 'skills')
read_only_fields = ('id', 'follower_count', 'following_count', "up_votes", "down_votes")
depth = 1 | 38.2 | 96 | 0.719895 | 581 | 0.760471 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.244764 |
4fba4d67b134d7addd79973a9be2fba674cdf649 | 108 | py | Python | exampleMain.py | marcoprenassi/medical_informatics_examples | d58c6074e18063578b64e874f3a92eda31546cdb | [
"MIT"
] | null | null | null | exampleMain.py | marcoprenassi/medical_informatics_examples | d58c6074e18063578b64e874f3a92eda31546cdb | [
"MIT"
] | null | null | null | exampleMain.py | marcoprenassi/medical_informatics_examples | d58c6074e18063578b64e874f3a92eda31546cdb | [
"MIT"
] | null | null | null | import UMLS_Api_search_example as UAex
if __name__ == '__main__':
UAex.runExample("[INSERT API HERE]")
| 21.6 | 40 | 0.740741 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.268519 |
4fbab7405719ae7d782fceeb047eb0c83c99705f | 3,604 | py | Python | oms_cms/backend/api/v2/socialaccount/views.py | RomanYarovoi/oms_cms | 49c6789242d7a35e81f4f208c04b18fb79249be7 | [
"BSD-3-Clause"
] | 18 | 2019-07-11T18:34:10.000Z | 2021-11-20T06:34:39.000Z | oms_cms/backend/api/v2/socialaccount/views.py | RomanYarovoi/oms_cms | 49c6789242d7a35e81f4f208c04b18fb79249be7 | [
"BSD-3-Clause"
] | 13 | 2019-07-24T11:27:58.000Z | 2022-03-28T01:07:31.000Z | oms_cms/backend/api/v2/socialaccount/views.py | RomanYarovoi/oms_cms | 49c6789242d7a35e81f4f208c04b18fb79249be7 | [
"BSD-3-Clause"
] | 18 | 2019-07-08T18:07:21.000Z | 2021-11-03T10:33:07.000Z | from rest_framework import generics, permissions
from rest_framework import filters as filters_rf
from django_filters import rest_framework as filters
from allauth.socialaccount.models import SocialAccount, SocialApp, SocialToken
from .serializers import SocialAppSerializer, SocialAppExtendedSerializer, SocialAccountSerializer, \
SocialAccountExtendedSerializer, SocialTokenSerializer, SocialTokenExtendedSerializer
class SocialAppListApi(generics.ListAPIView):
"""Список всех SocialApp"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialApp.objects.all()
serializer_class = SocialAppExtendedSerializer
filter_backends = [filters.DjangoFilterBackend,
filters_rf.SearchFilter,
filters_rf.OrderingFilter]
filter_fields = ('id', 'provider', 'sites')
search_fields = ['name', 'client_id', 'id']
ordering = ['id']
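
# Illustrative usage (the URL prefix depends on the project's router configuration):
# the list view above supports filtering, search, and ordering via query parameters,
# e.g. GET /<social-app-list-url>/?provider=google&search=myapp&ordering=id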
class SocialAppRetrieveDeleteUpdateApi(generics.RetrieveUpdateDestroyAPIView):
"""Просмотр, изменение и удаления приложения соц. сети"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialApp.objects.all()
lookup_field = 'id'
serializer_class = SocialAppSerializer
class SocialAppCreateApi(generics.CreateAPIView):
"""Добавление приложения соц. сети"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialApp.objects.none()
serializer_class = SocialAppSerializer
class SocialAccountListApi(generics.ListAPIView):
"""Список всех аккаунтов соц. сетей"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialAccount.objects.all()
serializer_class = SocialAccountExtendedSerializer
filter_backends = [filters.DjangoFilterBackend,
filters_rf.SearchFilter,
filters_rf.OrderingFilter]
filter_fields = ('id', 'user', 'provider')
search_fields = ['user__username']
ordering = ['id']
class SocialAccountRetrieveDeleteUpdateApi(generics.RetrieveUpdateDestroyAPIView):
"""Просмотр, изменение и удаления аккаунта в соц. сети"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialAccount.objects.all()
serializer_class = SocialAccountSerializer
lookup_field = 'id'
class SocialAccountCreateApi(generics.CreateAPIView):
"""Добавление аккаунта соц. сети"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialAccount.objects.none()
serializer_class = SocialAccountSerializer
class SocialTokenListApi(generics.ListAPIView):
"""Список токенов"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialToken.objects.all()
serializer_class = SocialTokenExtendedSerializer
filter_backends = [filters.DjangoFilterBackend,
filters_rf.SearchFilter,
filters_rf.OrderingFilter]
filter_fields = ('id', 'app', 'account')
search_fields = ['account__user__username', 'token', 'id']
ordering = ['id']
class SocialTokenRetrieveDeleteUpdateApi(generics.RetrieveUpdateDestroyAPIView):
"""Просмотр, изменение и удаления токенов"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialToken.objects.all()
serializer_class = SocialTokenSerializer
lookup_field = 'id'
class SocialTokenCreateApi(generics.CreateAPIView):
"""Добавление токена"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialToken.objects.none()
serializer_class = SocialTokenSerializer
| 39.173913 | 101 | 0.748613 | 3,390 | 0.882813 | 0 | 0 | 0 | 0 | 0 | 0 | 730 | 0.190104 |
4fbbc39f5b64d4fcd299e8d9717c38c66a7f8e51 | 8,448 | py | Python | biosimulators_test_suite/test_case/cli.py | Ryannjordan/Biosimulators_test_suite | 5f79f157ee8927df277b1967e9409ccfc6baf45f | [
"CC0-1.0",
"MIT"
] | null | null | null | biosimulators_test_suite/test_case/cli.py | Ryannjordan/Biosimulators_test_suite | 5f79f157ee8927df277b1967e9409ccfc6baf45f | [
"CC0-1.0",
"MIT"
] | null | null | null | biosimulators_test_suite/test_case/cli.py | Ryannjordan/Biosimulators_test_suite | 5f79f157ee8927df277b1967e9409ccfc6baf45f | [
"CC0-1.0",
"MIT"
] | null | null | null | """ Methods for test cases involving checking command-line interfaces
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2020-12-21
:Copyright: 2020, Center for Reproducible Biomedical Modeling
:License: MIT
"""
from ..data_model import TestCase
from ..warnings import TestCaseWarning
from biosimulators_utils.simulator.environ import ENVIRONMENT_VARIABLES
import re
import subprocess
import warnings
__all__ = [
'CliDisplaysHelpInline',
'CliDescribesSupportedEnvironmentVariablesInline',
'CliDisplaysVersionInformationInline',
]
class CliDisplaysHelpInline(TestCase):
""" Test that a command-line interface provides inline help. """
def eval(self, specifications, synthetic_archives_dir=None, dry_run=False, cli=None):
""" Evaluate a simulator's performance on a test case
Args:
specifications (:obj:`dict`): specifications of the simulator to validate
synthetic_archives_dir (:obj:`str`, optional): Directory to save the synthetic COMBINE/OMEX archives
generated by the test cases
dry_run (:obj:`bool`): if :obj:`True`, do not use the simulator to execute COMBINE/OMEX archives.
cli (:obj:`str`, optional): command-line interface to use to execute the tests involving the simulation of COMBINE/OMEX
archives rather than a Docker image
Raises:
:obj:`Exception`: if the simulator did not pass the test case
"""
self.get_simulator_docker_image(specifications)
image_url = specifications['image']['url']
cli = [cli] if cli else ['docker', 'run', '--tty', '--rm', image_url]
result = subprocess.run(cli, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
log = result.stdout.decode() if result.stdout else ''
supported = (
'-i' in log
and '-o' in log
)
if not supported:
warnings.warn(('Command-line interfaces should display basic help when no arguments are provided.\n\n'
'The command-line interface displayed the following when no argument was provided:\n\n {}'
).format(log.replace('\n', '\n ')),
TestCaseWarning)
result = subprocess.run(cli + ['-h'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
log = result.stdout.decode() if result.stdout else ''
supported = (
'arguments' in log
and '-i' in log
and '--archive' in log
and '-o' in log
and '--out-dir' in log
)
if not supported:
warnings.warn(('Command-line interface should support the `-h` option for displaying help inline.\n\n'
'The command-line interface displayed the following when executed with `-h`:\n\n {}'
).format(log.replace('\n', '\n ')),
TestCaseWarning)
result = subprocess.run(cli + ['--help'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
log = result.stdout.decode() if result.stdout else ''
supported = (
'arguments' in log
and '-i' in log
and '--archive' in log
and '-o' in log
and '--out-dir' in log
)
if not supported:
warnings.warn(('Command-line interface should support the `--help` option for displaying help inline.\n\n'
'The command-line interface displayed the following when executed with `--help`:\n\n {}'
).format(log.replace('\n', '\n ')),
TestCaseWarning)
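
# For reference (illustrative only, not part of the test logic): when no `cli` argument is
# given, the command assembled above is equivalent to running, e.g.
#
#     docker run --tty --rm <image-url> --help
#
# and checking that the captured output mentions the -i/--archive and -o/--out-dir arguments.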
class CliDescribesSupportedEnvironmentVariablesInline(TestCase):
""" Test that the inline help for a command-line interface describes the environment variables that the
simulator supports.
"""
def eval(self, specifications, synthetic_archives_dir=None, dry_run=False, cli=None):
""" Evaluate a simulator's performance on a test case
Args:
specifications (:obj:`dict`): specifications of the simulator to validate
synthetic_archives_dir (:obj:`str`, optional): Directory to save the synthetic COMBINE/OMEX archives
generated by the test cases
dry_run (:obj:`bool`): if :obj:`True`, do not use the simulator to execute COMBINE/OMEX archives.
cli (:obj:`str`, optional): command-line interface to use to execute the tests involving the simulation of COMBINE/OMEX
archives rather than a Docker image
Raises:
:obj:`Exception`: if the simulator did not pass the test case
"""
self.get_simulator_docker_image(specifications)
image_url = specifications['image']['url']
cli = [cli] if cli else ['docker', 'run', '--tty', '--rm', image_url]
        result = subprocess.run(cli + ['-h'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
log = result.stdout.decode() if result.stdout else ''
potentially_missing_env_vars = []
for var in ENVIRONMENT_VARIABLES.values():
if var.name not in log:
potentially_missing_env_vars.append(var.name)
if potentially_missing_env_vars:
msg = ('The inline help for a command-line interface for a simulation tool should describe the '
'environment variables that the simulation tool supports.\n\n'
'The command-line interface does not describe the following standard environment '
'variables recognized by BioSimulators:\n - {}\n\n'
'If the simulation tool implements these variables, they should be described in the inline help for '
'its command-line interface.\n\n'
'Note, support for these environment variables is optional. Simulation tools are not required to support '
'these variables.'
).format('\n - '.join("'" + var + "'" for var in sorted(potentially_missing_env_vars)))
warnings.warn(msg, TestCaseWarning)
class CliDisplaysVersionInformationInline(TestCase):
""" Test that a command-line interface provides version information inline. """
def eval(self, specifications, synthetic_archives_dir=None, dry_run=False, cli=None):
""" Evaluate a simulator's performance on a test case
Args:
specifications (:obj:`dict`): specifications of the simulator to validate
synthetic_archives_dir (:obj:`str`, optional): Directory to save the synthetic COMBINE/OMEX archives
generated by the test cases
dry_run (:obj:`bool`): if :obj:`True`, do not use the simulator to execute COMBINE/OMEX archives.
cli (:obj:`str`, optional): command-line interface to use to execute the tests involving the simulation of COMBINE/OMEX
archives rather than a Docker image
Raises:
:obj:`Exception`: if the simulator did not pass the test case
"""
self.get_simulator_docker_image(specifications)
image_url = specifications['image']['url']
cli = [cli] if cli else ['docker', 'run', '--tty', '--rm', image_url]
result = subprocess.run(cli + ['-v'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
log = result.stdout.decode() if result.stdout else ''
supported = re.search(r'\d+\.\d+', log)
if not supported:
warnings.warn(('Command-line interface should support the `-v` option for displaying version information inline.\n\n'
'The command-line interface displayed the following when executed with `-v`:\n\n {}'
).format(log.replace('\n', '\n ')),
TestCaseWarning)
result = subprocess.run(cli + ['--version'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
log = result.stdout.decode() if result.stdout else ''
supported = re.search(r'\d+\.\d+', log)
if not supported:
warnings.warn(('Command-line interface should support the `--version` option for displaying version information inline.\n\n'
'The command-line interface displayed the following when executed with `--version`:\n\n {}'
).format(log.replace('\n', '\n ')),
TestCaseWarning)
| 49.403509 | 136 | 0.619318 | 7,899 | 0.935014 | 0 | 0 | 0 | 0 | 0 | 0 | 4,513 | 0.534209 |
4fbc0acd853210dff0cd68f025ebd7fef3871469 | 18,871 | py | Python | akimous/editor.py | akimous/akimous | 1828c09407bc32b233500647290d698ba5e5549f | [
"BSD-3-Clause"
] | 12 | 2019-11-14T14:20:33.000Z | 2022-03-27T15:24:45.000Z | akimous/editor.py | akimous/akimous | 1828c09407bc32b233500647290d698ba5e5549f | [
"BSD-3-Clause"
] | 7 | 2020-04-05T05:37:52.000Z | 2020-09-27T14:21:41.000Z | akimous/editor.py | akimous/akimous | 1828c09407bc32b233500647290d698ba5e5549f | [
"BSD-3-Clause"
] | 3 | 2020-03-23T17:31:39.000Z | 2022-03-27T15:24:53.000Z | import json
import shlex
import sys
from asyncio import (CancelledError, create_subprocess_shell, create_task,
subprocess)
from collections import namedtuple
from functools import partial
from importlib import resources
from pathlib import Path
import jedi
import pyflakes.api
import wordsegment
from logzero import logger
from .completion_utilities import is_parameter_of_def
from .config import config
from .doc_generator import DocGenerator # 165ms, 13M memory
from .jedi_preloader import preload_modules
from .modeling.feature.feature_definition import tokenize
from .online_feature_extractor import \
OnlineFeatureExtractor # 90ms, 10M memory
from .project import persistent_state, save_state
from .pyflakes_reporter import PyflakesReporter
from .utils import Timer, detect_doc_type, log_exception, nop
from .websocket import register_handler
from .word_completer import search_prefix
# prevent pandas being imported by xgboost (save ~500ms)
_pandas = sys.modules.get('pandas', None)
if _pandas:
from xgboost.core import Booster, DMatrix
else:
sys.modules['pandas'] = None
from xgboost.core import Booster, DMatrix
del sys.modules['pandas']
DEBUG = False
MODEL_NAME = 'v12.xgb'
PredictionRow = namedtuple('PredictionRow', ('c', 't', 's', 'p'))
handles = partial(register_handler, 'editor')
doc_generator = DocGenerator()
with resources.path('akimous.resources', MODEL_NAME) as _path:
model = Booster(model_file=str(_path)) # 3 ms
model.set_param('nthread', 1)
logger.info('Model %s loaded.', MODEL_NAME)
def get_relative_path(context):
try:
return tuple(
context.path.relative_to(context.shared.project_root).parts)
except ValueError:
# the file does not belong to the project folder
return tuple(context.path.parts)
async def run_pylint(context, send):
if not config['linter']['pylint']:
return
if context.path.suffix != '.py':
return
try:
with Timer('Linting'):
absolute_path = context.path.absolute()
context.linter_process = await create_subprocess_shell(
f'cd {shlex.quote(str(absolute_path.parent))} && '
f'pylint {shlex.quote(str(absolute_path))} --output-format=json',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = await context.linter_process.communicate()
if stderr:
logger.error(stderr)
context.linter_output = json.loads(stdout)
await send('OfflineLints', {
'result': context.linter_output,
})
except (CancelledError, AttributeError):
# may raise AttributeError after the editor is closed
return
except Exception as e:
logger.exception(e)
async def run_yapf(context):
if not config['formatter']['yapf']:
return
if context.path.suffix != '.py':
return
with log_exception():
with Timer('YAPF'):
absolute_path = context.path.absolute()
context.yapf_process = await create_subprocess_shell(
f'cd {shlex.quote(str(absolute_path.parent))} && '
f'yapf {shlex.quote(str(absolute_path))} --in-place',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = await context.yapf_process.communicate()
if stdout:
logger.info(stdout)
if stderr:
logger.error(stderr)
async def run_isort(context):
if not config['formatter']['isort']:
return
if context.path.suffix != '.py':
return
with log_exception():
with Timer('Sorting'):
absolute_path = context.path.absolute()
context.isort_process = await create_subprocess_shell(
f'cd {shlex.quote(str(absolute_path.parent))} && '
f'isort {shlex.quote(str(absolute_path))} --atomic',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = await context.isort_process.communicate()
if stdout:
logger.info(stdout)
if stderr:
logger.error(stderr)
async def run_spell_checker(context, send):
if not config['linter']['spellChecker']:
return
with Timer('Spelling check'):
tokens = tokenize(context.content)
await send('SpellingErrors', {
'result':
await context.shared.spell_checker.check_spelling(tokens)
})
async def run_pyflakes(context, send):
if not config['linter']['pyflakes']:
return
if context.path.suffix != '.py':
return
reporter = context.pyflakes_reporter
reporter.clear()
pyflakes.api.check(context.content, '', reporter)
await send('RealTimeLints', dict(result=reporter.errors))
async def warm_up_jedi(context):
# Avoid jedi error when the file is empty.
if not context.doc:
logger.debug('File is empty')
return
jedi.Script('\n'.join(context.doc), path=str(context.path)).complete()
await jedi_preload_modules(context, 0, len(context.doc))
async def jedi_preload_modules(context, start_line, end_line):
if end_line > 32:
end_line = 32
await preload_modules(context.doc[start_line:end_line])
async def post_content_change(context, send):
with Timer('Post content change'):
context.doc = context.content.splitlines()
context.shared.doc = context.doc
# initialize feature extractor
context.feature_extractor = OnlineFeatureExtractor()
for line, line_content in enumerate(context.doc):
context.feature_extractor.fill_preprocessor_context(
line_content, line, context.doc)
create_task(warm_up_jedi(context))
create_task(run_spell_checker(context, send))
create_task(run_pyflakes(context, send))
context.linter_task = create_task(run_pylint(context, send))
@handles('_connected')
async def connected(msg, send, context):
context.warmed_up = False
context.doc = []
context.linter_task = create_task(nop())
# open file
context.path = Path(context.shared.project_root, *msg['filePath'])
file_state = persistent_state.get_file_state(context.path)
context.pos = file_state.get('pos', (0, 0))
context.is_python = context.path.suffix in ('.py', '.pyx')
context.pyflakes_reporter = PyflakesReporter()
with open(context.path) as f:
try:
content = f.read()
except UnicodeDecodeError:
await send(
'FailedToOpen',
f'Failed to open file {context.path}. (only text files are supported)'
)
return
context.content = content
# somehow risky, but it should not wait until the extractor ready
await send('FileOpened', {
'mtime': context.path.stat().st_mtime,
'content': content,
**file_state,
})
# update opened files
opened_files = context.shared.project_config['openedFiles']
path_tuple = get_relative_path(context)
if path_tuple not in opened_files:
opened_files.append(path_tuple)
await activate_editor(msg, send, context)
# skip all completion, linting etc. if it is not a Python file
if not context.is_python:
return
async def warm_up(context, send):
if context.warmed_up:
return
context.warmed_up = True
await post_content_change(context, send)
@handles('_disconnected')
async def disconnected(context):
context.linter_task.cancel()
persistent_state.set_file_state(context.path, {'pos': context.pos})
@handles('Close')
async def close(msg, send, context):
"""
Called when the editor is explicitly closed, not when it is disconnected
"""
opened_files = context.shared.project_config['openedFiles']
opened_files.remove(get_relative_path(context))
context.pos = msg['pos']
save_state(context)
@handles('Blur')
async def blur(msg, send, context):
context.pos = msg['pos']
@handles('Reload')
async def reload(msg, send, context):
with open(context.path) as f:
content = f.read()
context.content = content
await send('Reloaded', {'content': content})
await post_content_change(context, send)
@handles('ActivateEditor')
async def activate_editor(msg, send, context):
context.shared.doc = context.doc
# When the editor is activated by user (not when initializing)
if not msg:
await warm_up(context, send)
context.shared.project_config['activePanels'][
'middle'] = get_relative_path(context)
save_state(context)
@handles('Mtime')
async def modification_time(msg, send, context):
new_path = msg.get('newPath', None)
if new_path is not None:
logger.info('path modified from %s to %s', context.path, new_path)
context.path = Path(*new_path)
try:
await send('Mtime', {'mtime': context.path.stat().st_mtime})
except FileNotFoundError:
await send('FileDeleted', {})
@handles('SaveFile')
async def save_file(msg, send, context):
content = msg['content']
context.content = content
with open(context.path, 'w') as f:
f.write(content)
mtime_before_formatting = context.path.stat().st_mtime
result = {'mtime': mtime_before_formatting}
if not context.is_python:
await send('FileSaved', result)
return
await run_isort(context)
await run_yapf(context)
mtime_after_formatting = context.path.stat().st_mtime
if mtime_after_formatting != mtime_before_formatting:
with open(context.path) as f:
content = f.read()
context.content = content
result['mtime'] = mtime_after_formatting
result['content'] = content
await send('FileSaved', result)
await post_content_change(context, send)
@handles('SyncRange')
async def sync_range(msg, send, context):
from_line, to_line, lint, *lines = msg # to_line is exclusive
doc = context.doc
doc[from_line:to_line] = lines
context.content = '\n'.join(doc)
# for whatever reason the document in the browser is not in sync with the one here
if to_line > len(doc):
logger.warning('Request doc synchronization')
await send('RequestFullSync', None)
return
# If total number of lines changed, update from_line and below; otherwise, update changed range.
for i in range(from_line,
to_line if to_line - from_line == len(lines) else len(doc)):
context.feature_extractor.fill_preprocessor_context(doc[i], i, doc)
if to_line < 32:
await jedi_preload_modules(context, from_line, to_line)
if lint:
await run_spell_checker(context, send)
await run_pyflakes(context, send)
@handles('Predict')
async def predict(msg, send, context):
line_number, ch, line_content = msg
while len(context.doc) <= line_number:
context.doc.append('')
context.doc[line_number] = line_content
doc = '\n'.join(context.doc)
context.content = doc
if is_parameter_of_def(context.doc, line_number, ch):
# don't make prediction if it is defining function parameters
await send(
'Prediction', {
'line': line_number,
'ch': ch,
'result': [],
'parameterDefinition': True
})
return
try:
with Timer(f'Prediction ({line_number}, {ch})'):
j = jedi.Script(doc, path=str(context.path))
completions = j.complete(line_number + 1, ch)
offset = 0
with Timer(f'Rest ({line_number}, {ch})'):
if completions:
context.currentCompletions = {
completion.name: completion
for completion in completions
}
completion = completions[0]
offset = len(completion.complete) - len(completion.name)
feature_extractor = context.feature_extractor
feature_extractor.extract_online(completions, line_content,
line_number, ch, context.doc,
j.call_signatures())
# scores = model.predict_proba(feature_extractor.X)[:, 1] * 1000
d_test = DMatrix(feature_extractor.X)
scores = model.predict(
d_test, output_margin=True, validate_features=False) * 1000
# c.name_with_symbol is not reliable
# e.g. def something(path): len(p|)
# will return "path="
result = [
PredictionRow(c=c.name,
t=c.type,
s=int(s),
p=c.name_with_symbols[len(c.name):])
for c, s in zip(completions, scores)
]
else:
result = []
await send('Prediction', {
'line': line_number,
'ch': ch,
'offset': offset,
'result': result,
})
context.pos = (line_number, ch)
except Exception as e:
logger.exception(e)
await send('RequestFullSync', None)
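
# Illustrative note: the XGBoost booster above emits one raw margin score per jedi
# completion; multiplying by 1000 and truncating to int simply gives the front end an
# integer ranking key (e.g. margins 1.23 and 0.45 become s=1230 and s=450), which it can
# use to order the suggestion list.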
@handles('PredictExtra')
async def predict_extra(msg, send, context):
"""
Prediction from tokens, words and snake-cases from word segments
"""
line_number, ch, text = msg
results = {} # used as an ordered set
# 1. words from dictionary
if len(results) < 6:
words = search_prefix(text)
for i, word in enumerate(words):
if word not in results:
results[word] = PredictionRow(c=word,
t='word',
s=990 - i,
p='')
# 2. existing tokens
tokens = context.feature_extractor.context.t0map.query_prefix(
text, line_number)
for i, token in enumerate(tokens):
if token not in results:
results[token] = PredictionRow(c=token, t='token', s=980 - i, p='')
# 3. segmented words
if len(results) < 6:
parts = text.split('_') # handle private variables starting with _
words = []
for part in parts:
if not part:
words.append(part)
else:
words.extend(wordsegment.segment(part))
snake = '_'.join(words)
if snake and snake not in results:
results[snake] = PredictionRow(c=snake,
t='word-segment',
s=1,
p='')
await send('ExtraPrediction', {
'line': line_number,
'ch': ch,
'result': list(results.values())
})
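
# Illustrative example of the word-segmentation fallback above: for a partially typed
# identifier such as "userinput", wordsegment.segment() typically yields
# ["user", "input"], which is re-joined and offered as the snake_case suggestion
# "user_input".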
@handles('GetCompletionDocstring')
async def get_completion_docstring(msg, send, context):
# get docstring
completion = context.currentCompletions.get(msg['text'], None)
if not completion:
return
docstring = completion.docstring(fast=False)
definition = None
# try to follow definition if it fails to get docstring
if not docstring:
try:
definition = completion.infer()
except (NotImplementedError, AttributeError):
return
if not definition:
return
docstring = definition[0].docstring()
if not docstring:
return
if definition and hasattr(definition, 'params'):
parameters = definition.params
elif hasattr(completion, 'params'):
parameters = completion.params
else:
parameters = []
# render doc
doc_type = detect_doc_type(docstring)
html = None
if doc_type != 'text':
with log_exception():
html = doc_generator.make_html(docstring)
await send(
'CompletionDocstring', {
'doc': html if html else docstring,
'type': 'html' if html else 'text',
'parameters': bool(parameters),
})
@handles('GetFunctionDocumentation')
async def get_function_documentation(msg, send, context):
line_number = msg['line']
ch = msg['ch']
content = context.content
j = jedi.Script(content, path=str(context.path))
call_signatures = j.get_signatures(line_number + 1, ch)
if not call_signatures:
logger.debug('call signature is empty while obtaining docstring')
return
signature = call_signatures[0]
docstring = signature.docstring()
if not docstring:
return
doc_type = detect_doc_type(docstring)
html = None
if doc_type != 'text':
with log_exception():
html = doc_generator.make_html(docstring)
await send(
'FunctionDocumentation', {
'doc': html if html else docstring,
'fullName': signature.full_name,
'type': 'html' if html else 'text'
})
def definition_to_dict(d, project_root):
# use relative path if possible
# otherwise, the GUI will open two editors, one with relative path and one with absolute path
path = Path(d.module_path)
if project_root in path.parents:
path = path.relative_to(project_root)
return {
'path': path.parts,
'module': d.module_name,
'builtin': d.in_builtin_module(),
'definition': d.is_definition(),
'line': d.line - 1,
'from': d.column,
'to': d.column + len(d.name),
'code': d.get_line_code()
}
@handles('FindReferences')
async def find_references(msg, send, context):
definitions = []
assignments = []
usages = []
mode = msg['type']
line = msg['line'] + 1
ch = msg['ch']
j = jedi.Script(context.content, path=str(context.path))
if 'assignments' in mode:
references = j.goto(line, ch, follow_imports=True)
if 'usages' not in mode:
definitions.extend(r for r in references if r.is_definition())
assignments.extend(r for r in references if not r.is_definition())
if 'usages' in mode:
references = j.get_references(line, ch)
definitions.extend(r for r in references if r.is_definition())
usages.extend(r for r in references if not r.is_definition())
project_root = context.shared.project_root
await send(
'ReferencesFound', {
'definitions':
[definition_to_dict(x, project_root) for x in definitions],
'assignments':
[definition_to_dict(x, project_root) for x in assignments],
'usages': [definition_to_dict(x, project_root) for x in usages]
})
| 33.4 | 100 | 0.615442 | 0 | 0 | 0 | 0 | 12,059 | 0.639023 | 16,060 | 0.851041 | 3,284 | 0.174024 |
4fbc247cd588c810e04f1404311be33ad7cdbb7b | 684 | py | Python | user_interface/run_tests/test3/files_for_dakota/mycode.py | ukaea/ALC_UQ | a2747c94036b04f1279abb5683c6a225a878aea3 | [
"Apache-2.0"
] | 2 | 2021-11-24T10:43:50.000Z | 2021-12-07T20:02:38.000Z | user_interface/run_tests/test3/files_for_dakota/mycode.py | ukaea/ALC_UQ | a2747c94036b04f1279abb5683c6a225a878aea3 | [
"Apache-2.0"
] | null | null | null | user_interface/run_tests/test3/files_for_dakota/mycode.py | ukaea/ALC_UQ | a2747c94036b04f1279abb5683c6a225a878aea3 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import xarray as xr
import exceptions
from dakota_file import DakotaFile
my_netcdf = DakotaFile()
filename = 'DAKOTA.nc'
my_netcdf.read(filename)
variable_dict1 = my_netcdf.get_variable_as_dict('test_scan1')
variable_dict2 = my_netcdf.get_variable_as_dict('test_scan2')
variable_dict3 = my_netcdf.get_variable_as_dict('test_scan3')
file_out = open('DAKOTA_OUTPUT.dat','w')
file_out.write('test_scan1:\n')
values = variable_dict1['values']
file_out.write( str(values[0])+' '+str(values[1])+'\n' )
values = variable_dict2['values']
file_out.write( str(values)+'\n' )
values = variable_dict3['values']
file_out.write( str(values)+'\n' )
file_out.close()
| 22.8 | 61 | 0.755848 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.179825 |
4fbd3bb906bc12f75a2bab54c176db3603866e8d | 1,973 | py | Python | jvd/capa/data.py | ccDev-Labs/JARV1S-Disassembler | ee41eb493c15a66b4695b6f24039c38471b7eb47 | [
"Apache-2.0"
] | null | null | null | jvd/capa/data.py | ccDev-Labs/JARV1S-Disassembler | ee41eb493c15a66b4695b6f24039c38471b7eb47 | [
"Apache-2.0"
] | null | null | null | jvd/capa/data.py | ccDev-Labs/JARV1S-Disassembler | ee41eb493c15a66b4695b6f24039c38471b7eb47 | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
from jvd.normalizer.syntax import get_definition
import sys
from jvd.utils import AttrDict
class DataUnit:
def __init__(self, json_obj, file_path):
super().__init__()
with open(file_path, "rb") as f:
self.fbytes = f.read()
self.obj = AttrDict.from_nested_dict(json_obj)
if not 'data' in self.obj.bin:
self.obj.bin.data = {}
if not 'strings' in self.obj.bin:
self.obj.bin.strings = {}
self.map_b = defaultdict(list)
for b in self.obj.blocks:
self.map_b[b.addr_f].append(b)
# flattened to nested
self.map_f = {}
self.map_f_xcall = defaultdict(list)
for f in self.obj.functions:
f.unit = self
f.blocks = self.map_b.get(f.addr_start, [])
self.map_f[f.addr_start] = f
if not hasattr(f, 'calls'):
f.calls = []
for c in f.calls:
self.map_f_xcall[c].append(f)
self.map_b = {}
for b in self.obj.blocks:
self.map_b[b.addr_start] = b
self.ins_dat_ref = {}
for b in self.obj.blocks:
if not hasattr(b, 'calls'):
b.calls = []
for i in b.ins:
if not hasattr(i, 'dr'):
i.dr = []
if not hasattr(i, 'cr'):
i.cr = []
if not hasattr(i, 'oprs'):
i.oprs = []
if len(i.dr) > 0:
self.ins_dat_ref[i.ea] = i.dr
# print('##', self.obj.bin.architecture)
self.syntax = get_definition(self.obj.bin.architecture)
self.import_names = None # self.obj.bin.import_functions
self.seg_addr = sorted(
[int(k) for k in self.obj.bin.seg.keys()]) + [sys.maxsize]
self.find_seg = lambda v: next(
x[0] for x in enumerate(self.seg_addr) if x[1] > v)
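
# Illustrative usage (file names are placeholders; assumes the json module is imported):
#
#     with open('sample.bin.json') as f:
#         unit = DataUnit(json.load(f), 'sample.bin')
#     func = unit.map_f[addr]            # function record with its blocks attached
#     seg_index = unit.find_seg(addr)    # index of the first segment start above addr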
| 34.017241 | 70 | 0.516472 | 1,843 | 0.93411 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.070451 |
4fbe12f19888e0666719f9bebc6d8719b923b7bc | 38,570 | py | Python | anchore/anchore_policy.py | berez23/anchore | 594cce23f1d87d666397653054c22c2613247734 | [
"Apache-2.0"
] | 401 | 2016-06-16T15:29:48.000Z | 2022-03-24T10:05:16.000Z | anchore/anchore_policy.py | berez23/anchore | 594cce23f1d87d666397653054c22c2613247734 | [
"Apache-2.0"
] | 63 | 2016-06-16T21:10:27.000Z | 2020-07-01T06:57:27.000Z | anchore/anchore_policy.py | berez23/anchore | 594cce23f1d87d666397653054c22c2613247734 | [
"Apache-2.0"
] | 64 | 2016-06-16T13:05:57.000Z | 2021-07-16T10:03:45.000Z | import os
import json
import re
import sys
import logging
import hashlib
import uuid
import jsonschema
import tempfile
import controller
import anchore_utils
import anchore_auth
from anchore.util import contexts
_logger = logging.getLogger(__name__)
default_policy_version = '1_0'
default_whitelist_version = '1_0'
default_bundle_version = '1_0'
supported_whitelist_versions = [default_whitelist_version]
supported_bundle_versions = [default_bundle_version]
supported_policy_versions = [default_bundle_version]
# interface operations
def check():
if not load_policymeta():
return (False, "policys are not initialized: please run 'anchore policys sync' and try again")
return (True, "success")
def sync_policymeta(bundlefile=None, outfile=None):
ret = {'success': False, 'text': "", 'status_code': 1}
policyurl = contexts['anchore_config']['policy_url']
policy_timeout = contexts['anchore_config']['policy_conn_timeout']
policy_maxretries = contexts['anchore_config']['policy_max_retries']
policymeta = {}
if bundlefile:
if not os.path.exists(bundlefile):
ret['text'] = "no such file ("+str(bundlefile)+")"
return(False, ret)
try:
with open(bundlefile, 'r') as FH:
policymeta = json.loads(FH.read())
except Exception as err:
ret['text'] = "synced policy bundle cannot be read/is not valid JSON: exception - " +str(err)
return(False, ret)
else:
record = anchore_auth.anchore_auth_get(contexts['anchore_auth'], policyurl, timeout=policy_timeout, retries=policy_maxretries)
if record['success']:
try:
bundleraw = json.loads(record['text'])
policymeta = bundleraw['bundle']
except Exception as err:
ret['text'] = 'failed to parse bundle response from service - exception: ' + str(err)
return(False, ret)
else:
_logger.debug("failed to download policybundle: message from server - " + str(record))
themsg = "unspecificied failure while attempting to download bundle from anchore.io"
try:
if record['status_code'] == 404:
themsg = "no policy bundle found on anchore.io - please create and save a policy using the policy editor in anchore.io and try again"
elif record['status_code'] == 401:
themsg = "cannot download a policy bundle from anchore.io - current user does not have access rights to download custom policies"
except Exception as err:
themsg = "exception while inspecting response from server - exception: " + str(err)
ret['text'] = "failed to download policybundle: " + str(themsg)
return(False, ret)
if not verify_policy_bundle(bundle=policymeta):
_logger.debug("downloaded policy bundle failed to verify: " +str(policymeta))
ret['text'] = "input policy bundle does not conform to policy bundle schema"
return(False, ret)
if outfile:
if outfile != '-':
try:
with open(outfile, 'w') as OFH:
OFH.write(json.dumps(policymeta))
except Exception as err:
ret['text'] = "could not write downloaded policy bundle to specified file ("+str(outfile)+") - exception: " + str(err)
return(False, ret)
else:
if not contexts['anchore_db'].save_policymeta(policymeta):
ret['text'] = "cannot get list of policies from service\nMessage from server: " + record['text']
return (False, ret)
if policymeta:
ret['text'] = json.dumps(policymeta, indent=4)
return(True, ret)
def load_policymeta(policymetafile=None):
ret = {}
if policymetafile:
with open(policymetafile, 'r') as FH:
ret = json.loads(FH.read())
else:
ret = contexts['anchore_db'].load_policymeta()
if not ret:
# use the system default
default_policy_bundle_file = os.path.join(contexts['anchore_config'].config_dir, 'anchore_default_bundle.json')
try:
if os.path.exists(default_policy_bundle_file):
with open(default_policy_bundle_file, 'r') as FH:
ret = json.loads(FH.read())
else:
raise Exception("no such file: " + str(default_policy_bundle_file))
except Exception as err:
_logger.warn("could not load default bundle (" + str(default_policy_bundle_file) + ") - exception: " + str(err))
raise err
return(ret)
def save_policymeta(policymeta):
return(contexts['anchore_db'].save_policymeta(policymeta))
# bundle
# Convert
def convert_to_policy_bundle(name="default", version=default_bundle_version, policy_file=None, policy_version=default_policy_version, whitelist_files=[], whitelist_version=default_whitelist_version):
policies = {}
p = read_policy(name=str(uuid.uuid4()), file=policy_file)
policies.update(p)
whitelists = {}
for wf in whitelist_files:
w = read_whitelist(name=str(uuid.uuid4()), file=wf)
whitelists.update(w)
m = create_mapping(map_name="default", policy_name=policies.keys()[0], whitelists=whitelists.keys(), repotagstring='*/*:*')
mappings.append(m)
bundle = create_policy_bundle(name='default', policies=policies, policy_version=policy_version, whitelists=whitelists, whitelist_version=whitelist_version, mappings=mappings)
if not verify_policy_bundle(bundle=bundle):
return({})
return(bundle)
# C
def create_policy_bundle(name=None, version=default_bundle_version, policies={}, policy_version=default_policy_version, whitelists={}, whitelist_version=default_whitelist_version, mappings=[]):
ret = {
'id': str(uuid.uuid4()),
'name':name,
'version':version,
'policies':[],
'whitelists':[],
'mappings':[]
}
for f in policies:
el = {
'version':policy_version,
'id':f,
'name':f,
'rules':[]
}
el['rules'] = unformat_policy_data(policies[f])
ret['policies'].append(el)
for f in whitelists:
el = {
'version':whitelist_version,
'id':f,
'name':f,
'items':[]
}
el['items'] = unformat_whitelist_data(whitelists[f])
ret['whitelists'].append(el)
for m in mappings:
ret['mappings'].append(m)
_logger.debug("created bundle: ("+str(name)+") : " + json.dumps(ret.keys(), indent=4))
return(ret)
# R
def read_policy_bundle(bundle_file=None):
ret = {}
with open(bundle_file, 'r') as FH:
ret = json.loads(FH.read())
cleanstr = json.dumps(ret).encode('utf8')
ret = json.loads(cleanstr)
if not verify_policy_bundle(bundle=ret):
raise Exception("input bundle does not conform to bundle schema")
return(ret)
# V
def verify_policy_bundle(bundle={}):
bundle_schema = {}
try:
bundle_schema_file = os.path.join(contexts['anchore_config']['pkg_dir'], 'schemas', 'anchore-bundle.schema')
except:
from pkg_resources import Requirement, resource_filename
bundle_schema_file = os.path.join(resource_filename("anchore", ""), 'schemas', 'anchore-bundle.schema')
try:
if os.path.exists(bundle_schema_file):
with open (bundle_schema_file, "r") as FH:
bundle_schema = json.loads(FH.read())
except Exception as err:
_logger.error("could not load bundle schema: " + str(bundle_schema_file))
return(False)
if not bundle_schema:
_logger.error("could not load bundle schema: " + str(bundle_schema_file))
return(False)
else:
try:
jsonschema.validate(bundle, schema=bundle_schema)
except Exception as err:
_logger.error("could not validate bundle against schema: " + str(err))
return(False)
return(True)
# U
def update_policy_bundle(bundle={}, name=None, policies={}, whitelists={}, mappings={}):
if not verify_policy_bundle(bundle=bundle):
raise Exception("input bundle is incomplete - cannot update bad bundle: " + json.dumps(bundle, indent=4))
ret = {}
ret.update(bundle)
new_bundle = create_policy_bundle(name=name, policies=policies, whitelists=whitelists, mappings=mappings)
for key in ['name', 'policies', 'whitelists', 'mappings']:
if new_bundle[key]:
ret[key] = new_bundle.pop(key, ret[key])
return(ret)
# SAVE
def write_policy_bundle(bundle_file=None, bundle={}):
if not verify_policy_bundle(bundle=bundle):
raise Exception("cannot verify input policy bundle, skipping write: " + str(bundle_file))
with open(bundle_file, 'w') as OFH:
OFH.write(json.dumps(bundle))
return(True)
# mapping
# C
def create_mapping(map_name=None, policy_name=None, whitelists=[], repotagstring=None):
ret = {}
ret['name'] = map_name
ret['policy_id'] = policy_name
ret['whitelist_ids'] = whitelists
image_info = anchore_utils.get_all_image_info(repotagstring)
registry = image_info.pop('registry', "N/A")
repo = image_info.pop('repo', "N/A")
tag = image_info.pop('tag', "N/A")
imageId = image_info.pop('imageId', "N/A")
digest = image_info.pop('digest', "N/A")
ret['registry'] = registry
ret['repository'] = repo
ret['image'] = {
'type':'tag',
'value':tag
}
ret['id'] = str(uuid.uuid4())
return(ret)
# policy/wl
# V
def verify_whitelist(whitelistdata=[], version=default_whitelist_version):
ret = True
if not isinstance(whitelistdata, list):
ret = False
if version in supported_whitelist_versions:
# do 1_0 format/checks
pass
return(ret)
# R
def read_whitelist(name=None, file=None, version=default_whitelist_version):
if not name:
raise Exception("bad input: " + str(name) + " : " + str(file))
if file:
if not os.path.exists(file):
raise Exception("input file does not exist: " + str(file))
wdata = anchore_utils.read_plainfile_tolist(file)
if not verify_whitelist(whitelistdata=wdata, version=version):
raise Exception("cannot verify whitelist data read from file as valid")
else:
wdata = []
ret = {}
ret[name] = wdata
return(ret)
def structure_whitelist(whitelistdata):
ret = []
for item in whitelistdata:
try:
(k,v) = re.match("([^\s]*)\s*([^\s]*)", item).group(1,2)
if not re.match("^\s*#.*", k):
ret.append([k, v])
except Exception as err:
pass
return(ret)
def unformat_whitelist_data(wldata):
ret = []
whitelists = structure_whitelist(wldata)
for wlitem in whitelists:
gate, triggerId = wlitem
el = {
'gate':gate,
'trigger_id':triggerId,
'id':str(uuid.uuid4())
}
ret.append(el)
return(ret)
def format_whitelist_data(wldata):
ret = []
version = wldata['version']
if wldata['version'] == default_whitelist_version:
for item in wldata['items']:
ret.append(' '.join([item['gate'], item['trigger_id']]))
else:
raise Exception ("detected whitelist version format in bundle not supported: " + str(version))
return(ret)
def extract_whitelist_data(bundle, wlid):
for wl in bundle['whitelists']:
if wlid == wl['id']:
return(format_whitelist_data(wl))
# R
def read_policy(name=None, file=None, version=default_bundle_version):
if not name or not file:
raise Exception("input error")
if not os.path.exists(file):
raise Exception("input file does not exist: " + str(file))
pdata = anchore_utils.read_plainfile_tolist(file)
if not verify_policy(policydata=pdata, version=version):
raise Exception("cannot verify policy data read from file as valid")
ret = {}
ret[name] = pdata
return(ret)
def structure_policy(policydata):
policies = {}
for l in policydata:
l = l.strip()
patt = re.compile('^\s*#')
if (l and not patt.match(l)):
polinput = l.split(':')
module = polinput[0]
check = polinput[1]
action = polinput[2]
modparams = ""
if (len(polinput) > 3):
modparams = ':'.join(polinput[3:])
if module not in policies:
policies[module] = {}
if check not in policies[module]:
policies[module][check] = {}
if 'aptups' not in policies[module][check]:
policies[module][check]['aptups'] = []
aptup = [action, modparams]
if aptup not in policies[module][check]['aptups']:
policies[module][check]['aptups'].append(aptup)
policies[module][check]['action'] = action
policies[module][check]['params'] = modparams
return(policies)
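
# Illustrative example (hypothetical policy line, not taken from a real policy): a raw line such as
#
#     SOMEGATE:SOMETRIGGER:WARN:param1=value1
#
# is structured by the function above into
#
#     {'SOMEGATE': {'SOMETRIGGER': {'action': 'WARN', 'params': 'param1=value1',
#                                   'aptups': [['WARN', 'param1=value1']]}}}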
# return a given policyId from a bundle in raw poldata format
def extract_policy_data(bundle, polid):
for pol in bundle['policies']:
if polid == pol['id']:
return(format_policy_data(pol))
# convert from policy bundle policy format to raw poldata format
def format_policy_data(poldata):
ret = []
version = poldata['version']
if poldata['version'] == default_policy_version:
for item in poldata['rules']:
polline = ':'.join([item['gate'], item['trigger'], item['action'], ""])
if 'params' in item:
for param in item['params']:
polline = polline + param['name'] + '=' + param['value'] + " "
ret.append(polline)
else:
raise Exception ("detected policy version format in bundle not supported: " + str(version))
return(ret)
# convert from raw poldata format to bundle format
def unformat_policy_data(poldata):
ret = []
policies = structure_policy(poldata)
for gate in policies.keys():
try:
for trigger in policies[gate].keys():
action = policies[gate][trigger]['action']
params = policies[gate][trigger]['params']
el = {
'gate':gate,
'trigger':trigger,
'action':action,
'params':[]
}
for p in params.split():
(k,v) = p.split("=")
el['params'].append({'name':k, 'value':v})
ret.append(el)
except Exception as err:
print str(err)
pass
return(ret)
# V
def verify_policy(policydata=[], version=default_policy_version):
ret = True
if not isinstance(policydata, list):
ret = False
if version in supported_policy_versions:
# do 1_0 format/checks
pass
return(ret)
def run_bundle(anchore_config=None, bundle={}, image=None, matchtags=[], stateless=False, show_whitelisted=True, show_triggerIds=True):
retecode = 0
if not anchore_config or not bundle or not image:
raise Exception("input error")
if not verify_policy_bundle(bundle=bundle):
raise Exception("input bundle does not conform to bundle schema")
imageId = anchore_utils.discover_imageId(image)
digests = []
if not matchtags:
matchtags = [image]
evalmap = {}
evalresults = {}
for matchtag in matchtags:
_logger.info("evaluating tag: " + str(matchtag))
mapping_results = get_mapping_actions(image=matchtag, imageId=imageId, in_digests=digests, bundle=bundle)
for pol,wl,polname,wlnames,mapmatch,match_json,evalhash in mapping_results:
evalmap[matchtag] = evalhash
_logger.debug("attempting eval: " + evalhash + " : " + matchtag)
if evalhash not in evalresults:
fnames = {}
try:
if stateless:
policies = structure_policy(pol)
whitelists = structure_whitelist(wl)
rc = execute_gates(imageId, policies)
result, fullresult = evaluate_gates_results(imageId, policies, {}, whitelists)
eval_result = structure_eval_results(imageId, fullresult, show_whitelisted=show_whitelisted, show_triggerIds=show_triggerIds, imageName=matchtag)
gate_result = {}
gate_result[imageId] = eval_result
else:
con = controller.Controller(anchore_config=anchore_config, imagelist=[imageId], allimages=contexts['anchore_allimages'], force=True)
for (fname, data) in [('tmppol', pol), ('tmpwl', wl)]:
fh, thefile = tempfile.mkstemp(dir=anchore_config['tmpdir'])
fnames[fname] = thefile
try:
with open(thefile, 'w') as OFH:
for l in data:
OFH.write(l + "\n")
except Exception as err:
raise err
finally:
os.close(fh)
gate_result = con.run_gates(policy=fnames['tmppol'], global_whitelist=fnames['tmpwl'], show_triggerIds=show_triggerIds, show_whitelisted=show_whitelisted)
evalel = {
'results': list(),
'policy_name':"N/A",
'whitelist_names':"N/A",
'policy_data':list(),
'whitelist_data':list(),
'mapmatch':"N/A",
'matched_mapping_rule': {}
}
evalel['results'] = gate_result
evalel['policy_name'] = polname
evalel['whitelist_names'] = wlnames
evalel['policy_data'] = pol
evalel['whitelist_data'] = wl
evalel['mapmatch'] = mapmatch
evalel['matched_mapping_rule'] = match_json
_logger.debug("caching eval result: " + evalhash + " : " + matchtag)
evalresults[evalhash] = evalel
ecode = result_get_highest_action(gate_result)
if ecode == 1:
retecode = 1
elif retecode == 0 and ecode > retecode:
retecode = ecode
except Exception as err:
_logger.error("policy evaluation error: " + str(err))
finally:
for f in fnames.keys():
if os.path.exists(fnames[f]):
os.remove(fnames[f])
else:
_logger.debug("skipping eval, result already cached: " + evalhash + " : " + matchtag)
ret = {}
for matchtag in matchtags:
ret[matchtag] = {}
ret[matchtag]['bundle_name'] = bundle['name']
try:
evalresult = evalresults[evalmap[matchtag]]
ret[matchtag]['evaluations'] = [evalresult]
except Exception as err:
raise err
return(ret, retecode)
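
# Illustrative shape of the run_bundle() return value (values are placeholders):
#
#     ({'<matchtag>': {'bundle_name': 'default',
#                      'evaluations': [{'policy_name': ..., 'whitelist_names': ...,
#                                       'policy_data': [...], 'whitelist_data': [...],
#                                       'results': {...}, 'mapmatch': ...,
#                                       'matched_mapping_rule': {...}}]}},
#      0)
#
# The second element is the highest action seen across all evaluations: 0=GO, 1=STOP, 2=WARN.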
def result_get_highest_action(results):
highest_action = 0
for k in results.keys():
action = results[k]['result']['final_action']
if action == 'STOP':
highest_action = 1
elif highest_action == 0 and action == 'WARN':
highest_action = 2
return(highest_action)
def get_mapping_actions(image=None, imageId=None, in_digests=[], bundle={}):
"""
Given an image, image_id, digests, and a bundle, determine which policies and whitelists to evaluate.
:param image: Image obj
:param imageId: image id string
:param in_digests: candidate digests
:param bundle: bundle dict to evaluate
:return: tuple of (policy_data, whitelist_data, policy_name, whitelist_names, matchstring, mapping_rule_json obj, evalhash)
"""
if not image or not bundle:
raise Exception("input error")
if not verify_policy_bundle(bundle=bundle):
raise Exception("input bundle does not conform to bundle schema")
ret = []
image_infos = []
image_info = anchore_utils.get_all_image_info(image)
if image_info and image_info not in image_infos:
image_infos.append(image_info)
for m in bundle['mappings']:
polname = m['policy_id']
wlnames = m['whitelist_ids']
for image_info in image_infos:
#_logger.info("IMAGE INFO: " + str(image_info))
ii = {}
ii.update(image_info)
registry = ii.pop('registry', "N/A")
repo = ii.pop('repo', "N/A")
tags = []
fulltag = ii.pop('fulltag', "N/A")
if fulltag != 'N/A':
tinfo = anchore_utils.parse_dockerimage_string(fulltag)
if 'tag' in tinfo and tinfo['tag']:
tag = tinfo['tag']
for t in [image, fulltag]:
tinfo = anchore_utils.parse_dockerimage_string(t)
if 'tag' in tinfo and tinfo['tag'] and tinfo['tag'] not in tags:
tags.append(tinfo['tag'])
digest = ii.pop('digest', "N/A")
digests = [digest]
for d in image_info['digests']:
dinfo = anchore_utils.parse_dockerimage_string(d)
if 'digest' in dinfo and dinfo['digest']:
digests.append(dinfo['digest'])
p_ids = []
p_names = []
for p in bundle['policies']:
p_ids.append(p['id'])
p_names.append(p['name'])
wl_ids = []
wl_names = []
for wl in bundle['whitelists']:
wl_ids.append(wl['id'])
wl_names.append(wl['name'])
if polname not in p_ids:
_logger.info("policy not in bundle: " + str(polname))
continue
skip=False
for wlname in wlnames:
if wlname not in wl_ids:
_logger.info("whitelist not in bundle" + str(wlname))
skip=True
if skip:
continue
mname = m['name']
mregistry = m['registry']
mrepo = m['repository']
if m['image']['type'] == 'tag':
mtag = m['image']['value']
mdigest = None
mimageId = None
elif m['image']['type'] == 'digest':
mdigest = m['image']['value']
mtag = None
mimageId = None
elif m['image']['type'] == 'id':
mimageId = m['image']['value']
mtag = None
mdigest = None
else:
mtag = mdigest = mimageId = None
mregistry_rematch = mregistry
mrepo_rematch = mrepo
mtag_rematch = mtag
try:
matchtoks = []
for tok in mregistry.split("*"):
matchtoks.append(re.escape(tok))
mregistry_rematch = "^" + '(.*)'.join(matchtoks) + "$"
matchtoks = []
for tok in mrepo.split("*"):
matchtoks.append(re.escape(tok))
mrepo_rematch = "^" + '(.*)'.join(matchtoks) + "$"
matchtoks = []
for tok in mtag.split("*"):
matchtoks.append(re.escape(tok))
mtag_rematch = "^" + '(.*)'.join(matchtoks) + "$"
except Exception as err:
_logger.error("could not set up regular expression matches for mapping check - exception: " + str(err))
_logger.debug("matchset: " + str([mregistry_rematch, mrepo_rematch, mtag_rematch]) + " : " + str([mregistry, mrepo, mtag]) + " : " + str([registry, repo, tag, tags]))
if registry == mregistry or mregistry == '*' or re.match(mregistry_rematch, registry):
_logger.debug("checking mapping for image ("+str(image_info)+") match.")
if repo == mrepo or mrepo == '*' or re.match(mrepo_rematch, repo):
doit = False
matchstring = mname + ": N/A"
if tag:
if False and (mtag == tag or mtag == '*' or mtag in tags or re.match(mtag_rematch, tag)):
matchstring = mname + ":" + ','.join([mregistry, mrepo, mtag])
doit = True
else:
for t in tags:
if re.match(mtag_rematch, t):
matchstring = mname + ":" + ','.join([mregistry, mrepo, mtag])
doit = True
break
if not doit and (digest and (mdigest == digest or mdigest in in_digests or mdigest in digests)):
matchstring = mname + ":" + ','.join([mregistry, mrepo, mdigest])
doit = True
if not doit and (imageId and (mimageId == imageId)):
matchstring = mname + ":" + ','.join([mregistry, mrepo, mimageId])
doit = True
matchstring = matchstring.encode('utf8')
if doit:
_logger.debug("match found for image ("+str(image_info)+") matchstring ("+str(matchstring)+")")
wldata = []
wldataset = set()
for wlname in wlnames:
wldataset = set(list(wldataset) + extract_whitelist_data(bundle, wlname))
wldata = list(wldataset)
poldata = extract_policy_data(bundle, polname)
wlnames.sort()
evalstr = ','.join([polname] + wlnames)
evalhash = hashlib.md5(evalstr).hexdigest()
ret.append( ( poldata, wldata, polname,wlnames, matchstring, m, evalhash) )
return(ret)
else:
_logger.debug("no match found for image ("+str(image_info)+") match.")
else:
_logger.debug("no match found for image ("+str(image_info)+") match.")
return(ret)
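
# Illustrative example of a mapping rule evaluated by the function above (all values are
# placeholders, not real bundle content):
#
#     {
#         "name": "default", "registry": "*", "repository": "*",
#         "image": {"type": "tag", "value": "*"},
#         "policy_id": "<policy-uuid>", "whitelist_ids": ["<whitelist-uuid>"]
#     }
#
# When the image's registry, repository, and tag match the rule's patterns (wildcards are
# translated into regular expressions above), the rule's policy and whitelist data are
# extracted from the bundle and returned along with a matchstring such as "default:*,*,*".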
def execute_gates(imageId, policies, refresh=True):
import random
success = True
anchore_config = contexts['anchore_config']
imagename = imageId
gatesdir = '/'.join([anchore_config["scripts_dir"], "gates"])
workingdir = '/'.join([anchore_config['anchore_data_dir'], 'querytmp'])
outputdir = workingdir
_logger.info(imageId + ": evaluating policies...")
for d in [outputdir, workingdir]:
if not os.path.exists(d):
os.makedirs(d)
imgfile = '/'.join([workingdir, "queryimages." + str(random.randint(0, 99999999))])
anchore_utils.write_plainfile_fromstr(imgfile, imageId)
try:
gmanifest, failedgates = anchore_utils.generate_gates_manifest()
if failedgates:
_logger.error("some gates failed to run - check the gate(s) modules for errors: " + str(','.join(failedgates)))
success = False
else:
success = True
for gatecheck in policies.keys():
# get all commands that match the gatecheck
gcommands = []
for gkey in gmanifest.keys():
if gmanifest[gkey]['gatename'] == gatecheck:
gcommands.append(gkey)
# assemble the params from the input policy for this gatecheck
params = []
for trigger in policies[gatecheck].keys():
if 'params' in policies[gatecheck][trigger] and policies[gatecheck][trigger]['params']:
params.append(policies[gatecheck][trigger]['params'])
if not params:
params = ['all']
if gcommands:
for command in gcommands:
cmd = [command] + [imgfile, anchore_config['image_data_store'], outputdir] + params
_logger.debug("running gate command: " + str(' '.join(cmd)))
(rc, sout, cmdstring) = anchore_utils.run_command(cmd)
if rc:
_logger.error("FAILED")
_logger.error("\tCMD: " + str(cmdstring))
_logger.error("\tEXITCODE: " + str(rc))
_logger.error("\tOUTPUT: " + str(sout))
success = False
else:
_logger.debug("")
_logger.debug("\tCMD: " + str(cmdstring))
_logger.debug("\tEXITCODE: " + str(rc))
_logger.debug("\tOUTPUT: " + str(sout))
_logger.debug("")
else:
_logger.warn("WARNING: gatecheck ("+str(gatecheck)+") line in policy, but no gates were found that match this gatecheck")
except Exception as err:
_logger.error("gate evaluation failed - exception: " + str(err))
finally:
if imgfile and os.path.exists(imgfile):
try:
os.remove(imgfile)
except:
_logger.error("could not remove tempfile: " + str(imgfile))
if success:
report = generate_gates_report(imageId)
contexts['anchore_db'].save_gates_report(imageId, report)
_logger.info(imageId + ": evaluated.")
return(success)
def generate_gates_report(imageId):
# this routine reads the results of image gates and generates a formatted report
report = {}
outputs = contexts['anchore_db'].list_gate_outputs(imageId)
for d in outputs:
report[d] = contexts['anchore_db'].load_gate_output(imageId, d)
return(report)
def evaluate_gates_results(imageId, policies, image_whitelist, global_whitelist):
ret = list()
fullret = list()
final_gate_action = 'GO'
for m in policies.keys():
gdata = contexts['anchore_db'].load_gate_output(imageId, m)
for l in gdata:
(k, v) = re.match('(\S*)\s*(.*)', l).group(1, 2)
imageId = imageId
check = m
trigger = k
output = v
triggerId = hashlib.md5(''.join([check,trigger,output])).hexdigest()
# if the output is structured (i.e. decoded as an
# anchore compatible json string) then extract the
# elements for display
try:
json_output = json.loads(output)
if 'id' in json_output:
triggerId = str(json_output['id'])
if 'desc' in json_output:
output = str(json_output['desc'])
except:
pass
if k in policies[m]:
trigger = k
action = policies[check][trigger]['action']
r = {'imageId':imageId, 'check':check, 'triggerId':triggerId, 'trigger':trigger, 'output':output, 'action':action}
# this is where whitelist check should go
whitelisted = False
whitelist_type = "none"
if global_whitelist and ([m, triggerId] in global_whitelist):
whitelisted = True
whitelist_type = "global"
elif image_whitelist and 'ignore' in image_whitelist and (r in image_whitelist['ignore']):
whitelisted = True
whitelist_type = "image"
else:
# look for prefix wildcards
try:
for [gmod, gtriggerId] in global_whitelist:
if gmod == m:
# special case for backward compat
try:
if gmod == 'ANCHORESEC' and not re.match(".*\*.*", gtriggerId) and re.match("^CVE.*|^RHSA.*", gtriggerId):
gtriggerId = gtriggerId + "*"
except Exception as err:
_logger.warn("problem with backward compat modification of whitelist trigger - exception: " + str(err))
matchtoks = []
for tok in gtriggerId.split("*"):
matchtoks.append(re.escape(tok))
rematch = "^" + '(.*)'.join(matchtoks) + "$"
_logger.debug("checking regexp wl<->triggerId for match: " + str(rematch) + " : " + str(triggerId))
if re.match(rematch, triggerId):
_logger.debug("found wildcard whitelist match")
whitelisted = True
whitelist_type = "global"
break
except Exception as err:
_logger.warn("problem with prefix wildcard match routine - exception: " + str(err))
fullr = {}
fullr.update(r)
fullr['whitelisted'] = whitelisted
fullr['whitelist_type'] = whitelist_type
fullret.append(fullr)
if not whitelisted:
if policies[m][k]['action'] == 'STOP':
final_gate_action = 'STOP'
elif final_gate_action != 'STOP' and policies[m][k]['action'] == 'WARN':
final_gate_action = 'WARN'
ret.append(r)
else:
# whitelisted, skip evaluation
pass
ret.append({'imageId':imageId, 'check':'FINAL', 'trigger':'FINAL', 'output':"", 'action':final_gate_action})
fullret.append({'imageId':imageId, 'check':'FINAL', 'trigger':'FINAL', 'output':"", 'action':final_gate_action, 'whitelisted':False, 'whitelist_type':"none", 'triggerId':"N/A"})
return(ret, fullret)
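# Illustrative sketch (added for clarity; not part of the original module): the
# prefix-wildcard handling above escapes the literal pieces of a whitelist entry
# and joins them with "(.*)" to build an anchored regex. A minimal standalone
# version of that conversion, using the same `re` module:
def _whitelist_wildcard_matches(wildcard_entry, triggerId):
    matchtoks = [re.escape(tok) for tok in wildcard_entry.split("*")]
    rematch = "^" + '(.*)'.join(matchtoks) + "$"
    return re.match(rematch, triggerId) is not None
# Example: _whitelist_wildcard_matches("CVE-2016-*", "CVE-2016-1234") is True,
# while _whitelist_wildcard_matches("CVE-2016-*", "RHSA-2016:1234") is False.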
def structure_eval_results(imageId, evalresults, show_triggerIds=False, show_whitelisted=False, imageName=None):
if not imageName:
imageName = imageId
record = {}
record['result'] = {}
record['result']['header'] = ['Image_Id', 'Repo_Tag']
if show_triggerIds:
record['result']['header'].append('Trigger_Id')
record['result']['header'] += ['Gate', 'Trigger', 'Check_Output', 'Gate_Action']
if show_whitelisted:
record['result']['header'].append('Whitelisted')
record['result']['rows'] = list()
for m in evalresults:
id = imageId
name = imageName
gate = m['check']
trigger = m['trigger']
output = m['output']
triggerId = m['triggerId']
action = m['action']
row = [id[0:12], name]
if show_triggerIds:
row.append(triggerId)
row += [gate, trigger, output, action]
if show_whitelisted:
row.append(m['whitelist_type'])
if not m['whitelisted'] or show_whitelisted:
record['result']['rows'].append(row)
if gate == 'FINAL':
record['result']['final_action'] = action
return(record)
# small test
if __name__ == '__main__':
from anchore.configuration import AnchoreConfiguration
config = AnchoreConfiguration(cliargs={})
anchore_utils.anchore_common_context_setup(config)
policies = {}
whitelists = {}
mappings = []
pol0 = read_policy(name=str(uuid.uuid4()), file='/root/.anchore/conf/anchore_gate.policy')
pol1 = read_policy(name=str(uuid.uuid4()), file='/root/.anchore/conf/anchore_gate.policy')
policies.update(pol0)
policies.update(pol1)
gl0 = read_whitelist(name=str(uuid.uuid4()))
wl0 = read_whitelist(name=str(uuid.uuid4()), file='/root/wl0')
whitelists.update(gl0)
whitelists.update(wl0)
map0 = create_mapping(map_name="default", policy_name=policies.keys()[0], whitelists=whitelists.keys(), repotagstring='*/*:*')
mappings.append(map0)
bundle = create_policy_bundle(name='default', policies=policies, policy_version=default_policy_version, whitelists=whitelists, whitelist_version=default_whitelist_version, mappings=mappings)
print "CREATED BUNDLE: " + json.dumps(bundle, indent=4)
rc = write_policy_bundle(bundle_file="/tmp/bun.json", bundle=bundle)
newbun = read_policy_bundle(bundle_file="/tmp/bun.json")
if newbun != bundle:
print "BUNDLE RESULT DIFFERENT AFTER SAVE/LOAD"
thebun = convert_to_policy_bundle(name='default', policy_file='/root/.anchore/conf/anchore_gate.policy', policy_version=default_policy_version, whitelist_files=['/root/wl0'], whitelist_version=default_whitelist_version)
rc = write_policy_bundle(bundle_file="/tmp/bun1.json", bundle=thebun)
pol0 = read_policy(name="meh", file='/root/.anchore/conf/anchore_gate.policy')
policies = structure_policy(pol0['meh'])
#rc = execute_gates("4a415e3663882fbc554ee830889c68a33b3585503892cc718a4698e91ef2a526", policies)
result, image_ecode = run_bundle(anchore_config=config, image='alpine', matchtags=[], bundle=thebun)
with open("/tmp/a", 'w') as OFH:
OFH.write(json.dumps(result, indent=4))
try:
result, image_ecode = run_bundle_stateless(anchore_config=config, image='alpine', matchtags=[], bundle=thebun)
with open("/tmp/b", 'w') as OFH:
OFH.write(json.dumps(result, indent=4))
except Exception as err:
import traceback
traceback.print_exc()
print str(err)
| 37.482993 | 223 | 0.554524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,386 | 0.191496 |
4fbe89f545d68a8d00c6f17228cacbd06163d537 | 16,407 | py | Python | config.py | tdaff/automation | 89d32af3aafe0e027c13d42cd0c43ecb12820b0c | [
"BSD-3-Clause"
] | 1 | 2021-12-13T13:33:44.000Z | 2021-12-13T13:33:44.000Z | config.py | tdaff/automation | 89d32af3aafe0e027c13d42cd0c43ecb12820b0c | [
"BSD-3-Clause"
] | null | null | null | config.py | tdaff/automation | 89d32af3aafe0e027c13d42cd0c43ecb12820b0c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
configuration for faps
Provides the Options class that will transparently handle the different option
sources through the .get() method. Pulls in defaults, site and job options plus
command line customisation. Instantiating Options will set up the logging for
the particular job.
"""
__all__ = ['Options']
# Python 3 fix
try:
import configparser
except ImportError:
import ConfigParser as configparser
import copy
import logging
import os
import re
import sys
import textwrap
# Python 3 fix
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from optparse import OptionParser
from logging import debug, error, info
import __main__
class Options(object):
"""
Transparent options handling.
A single unified way of dealing with input files and command line options
delivering sensible defaults for unspecified values. Access options with
the .get() method, or the method that specifies the expected type. It is
recommended to replace with a new instance each time the script is run,
otherwise commandline options or changed input files will not be picked up.
"""
def __init__(self, job_name=None):
"""Initialize options from all .ini files and the commandline."""
# use .get{type}() to read attributes, only access args directly
self.job_dir = ''
self.script_dir = ''
self.job_name = job_name
self.args = []
self.options = {}
self.cmdopts = {}
self._used_options = set()
self.defaults = configparser.SafeConfigParser()
self.site_ini = configparser.SafeConfigParser()
self.job_ini = configparser.SafeConfigParser()
# populate options
self._init_paths()
self.commandline()
self._init_logging()
self.load_defaults()
self.load_site_defaults()
self.load_job_defaults()
if self.options.job_type:
self.job_type_ini = configparser.SafeConfigParser()
self.load_job_type(self.options.job_type)
else:
self.job_type_ini = NullConfigParser()
def get(self, item):
"""Map values from different sources based on priorities."""
# report default options differently
option_source = 'D'
if item in self.__dict__:
# Instance attributes, such as job_name and job_dir
debug("an attribute: %s" % item)
option_source = 'A'
value = object.__getattribute__(self, item)
elif self.options.__dict__.get(item) is not None:
# Commandline options from optparse where option is set
debug("an option: %s" % item)
option_source = 'C'
value = self.options.__dict__[item]
elif item in self.cmdopts:
# Commandline -o custom key=value options
debug("a custom -o option: %s" % item)
option_source = 'O'
value = self.cmdopts[item]
elif self.job_ini.has_option('job_config', item):
            # jobname.fap per-job settings
debug("a job option: %s" % item)
option_source = 'F'
value = self.job_ini.get('job_config', item)
elif self.job_type_ini.has_option('job_type', item):
debug("a job_type option: %s" % item)
option_source = 'J'
value = self.job_type_ini.get('job_type', item)
elif self.site_ini.has_option('site_config', item):
debug("a site option: %s" % item)
value = self.site_ini.get('site_config', item)
elif self.defaults.has_option('defaults', item):
debug("a default: %s" % item)
value = self.defaults.get('defaults', item)
else:
# Most things have a default, but not always. Error properly.
debug("unspecified option: %s" % item)
raise AttributeError(item)
# Show what options are used the first time they are accessed
# for the traceability
if item not in self._used_options:
if option_source == 'D':
debug("Default: %s = %s" % (item, value))
else:
info("Option (%s): %s = %s" % (option_source, item, value))
self._used_options.add(item)
        # return the raw value here; the typed accessors convert it for the caller
return value
def getbool(self, item):
"""
Parse option and if the value of item is not already a bool return
True for "1", "yes", "true" and "on" and False for "0", "no", "false"
and "off". Case-insensitive.
"""
value = self.get(item)
if isinstance(value, bool):
return value
# Can't use isinstance with basestring to be 2.x and 3.x compatible
# fudge it by assuming strings can be lowered
elif hasattr(value, 'lower'):
if value.lower() in ["1", "yes", "true", "on"]:
return True
elif value.lower() in ["0", "no", "false", "off"]:
return False
else:
# Not a valid bool
raise ValueError(value)
else:
return bool(item)
def getint(self, item):
"""Return item's value as an integer."""
value = self.get(item)
return int(value)
def getfloat(self, item):
"""Return item's value as a float."""
value = self.get(item)
return float(value)
def gettuple(self, item, dtype=None):
"""Return item's value interpreted as a tuple of 'dtype' [strings]."""
value = self.get(item)
# Regex strips bracketing so can't nest, but safer than eval
        value = [x for x in re.split('[\s,\(\)\[\]]+', value) if x]  # '+' avoids zero-width splits on Python 3.7+
if dtype is not None:
return tuple([dtype(x) for x in value])
else:
return tuple(value)
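    # Illustration (not in the original source): if a .fap file sets a made-up
    # option "grid = (10, 20, 30)", then gettuple('grid', int) strips the
    # brackets and commas with the regex split above and returns (10, 20, 30);
    # without a dtype the elements stay as the strings ('10', '20', '30').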
def _init_paths(self):
"""Find the script directory and set up working directory"""
# Where the script is has the config defaults.
if __name__ != '__main__':
self.script_dir = os.path.dirname(__file__)
else:
self.script_dir = os.path.abspath(sys.path[0])
# Where we run the job.
self.job_dir = os.getcwd()
def _init_logging(self):
"""
Setup the logging to terminal and .flog file, with levels as required.
Must run before any logging calls so we need to access attributes
rather than using self.get()!
"""
# Quiet always overrides verbose; always at least INFO in .flog
if self.options.silent:
stdout_level = logging.CRITICAL
file_level = logging.INFO
elif self.options.quiet:
stdout_level = logging.ERROR
file_level = logging.INFO
elif self.options.verbose:
stdout_level = logging.DEBUG
file_level = logging.DEBUG
else:
stdout_level = logging.INFO
file_level = logging.INFO
# Easier to do simple file configuration then add the stdout
logging.basicConfig(level=file_level,
format='[%(asctime)s] %(levelname)s %(message)s',
datefmt='%Y%m%d %H:%M:%S',
filename=self.job_name + '.flog',
filemode='a')
# Make these uniform widths
logging.addLevelName(10, '--')
logging.addLevelName(20, '>>')
logging.addLevelName(30, '**')
logging.addLevelName(40, '!!')
logging.addLevelName(50, 'XX')
if self.options.plain:
console = logging.StreamHandler(sys.stdout)
else:
# Use nice coloured console output
console = ColouredConsoleHandler(sys.stdout)
console.setLevel(stdout_level)
formatter = logging.Formatter('%(levelname)s %(message)s')
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
def commandline(self):
"""Specified options, highest priority."""
usage = "usage: %prog [options] [COMMAND] JOB_NAME"
# use description for the script, not for this module
parser = OptionParser(usage=usage, version="%prog 0.1",
description=__main__.__doc__)
parser.add_option("-v", "--verbose", action="store_true",
dest="verbose",
help="output extra debugging information")
parser.add_option("-q", "--quiet", action="store_true",
dest="quiet", help="only output warnings and errors")
parser.add_option("-s", "--silent", action="store_true",
dest="silent", help="no terminal output")
parser.add_option("-p", "--plain", action="store_true",
dest="plain", help="do not colourise or wrap output")
parser.add_option("-o", "--option", action="append", dest="cmdopts",
help="set custom options as key=value pairs")
parser.add_option("-i", "--interactive", action="store_true",
dest="interactive", help="enter interactive mode")
parser.add_option("-m", "--import", action="store_true",
dest="import", help="try and import old data")
parser.add_option("-n", "--no-submit", action="store_true",
dest="no_submit",
help="create input files only, do not run any jobs")
parser.add_option("-j", "--job-type", dest="job_type",
help="user preconfigured job settings")
parser.add_option("-d", "--daemon", action="store_true", dest="daemon",
help="run [lube] as a server and await input")
(local_options, local_args) = parser.parse_args()
# job_name may or may not be passed or set initially
if self.job_name:
if self.job_name in local_args:
local_args.remove(self.job_name)
elif len(local_args) == 0:
parser.error("No arguments given (try %prog --help)")
else:
# Take the last argument as the job name
self.job_name = local_args.pop()
# key value options from the command line
if local_options.cmdopts is not None:
for pair in local_options.cmdopts:
if '=' in pair:
pair = pair.split('=', 1) # maximum of one split
self.cmdopts[pair[0]] = pair[1]
else:
self.cmdopts[pair] = True
self.options = local_options
# Args are only the COMMANDS for the run
self.args = [arg.lower() for arg in local_args]
def load_defaults(self):
"""Load program defaults."""
# ConfigParser requires header sections so we add them to a StringIO
# of the file if they are missing. 2to3 should also deal with the
# renamed modules.
default_ini_path = os.path.join(self.script_dir, 'defaults.ini')
try:
filetemp = open(default_ini_path, 'r')
default_ini = filetemp.read()
filetemp.close()
if not '[defaults]' in default_ini.lower():
default_ini = '[defaults]\n' + default_ini
default_ini = StringIO(default_ini)
except IOError:
# file does not exist so we just use a blank string
debug('Default options not found! Something is very wrong.')
default_ini = StringIO('[defaults]\n')
self.defaults.readfp(default_ini)
def load_site_defaults(self):
"""Find where the script is and load defaults"""
site_ini_path = os.path.join(self.script_dir, 'site.ini')
try:
filetemp = open(site_ini_path, 'r')
site_ini = filetemp.read()
filetemp.close()
if not '[site_config]' in site_ini.lower():
site_ini = '[site_config]\n' + site_ini
site_ini = StringIO(site_ini)
except IOError:
# file does not exist so we just use a blank string
debug("No site options found; using defaults")
site_ini = StringIO('[site_config]\n')
self.site_ini.readfp(site_ini)
def load_job_defaults(self):
"""Find where the job is running and load defaults"""
job_ini_path = os.path.join(self.job_dir, self.job_name + '.fap')
try:
filetemp = open(job_ini_path, 'r')
job_ini = filetemp.read()
filetemp.close()
if not '[job_config]' in job_ini.lower():
job_ini = '[job_config]\n' + job_ini
job_ini = StringIO(job_ini)
debug("Job options read from %s" % job_ini_path)
except IOError:
# file does not exist so we just use a blank string
debug("No job options found; using defaults")
job_ini = StringIO('[job_config]\n')
self.job_ini.readfp(job_ini)
def load_job_type(self, job_type):
"""Find where the job is running and load defaults"""
home_dir = os.path.expanduser('~')
job_type_ini_path = os.path.join(home_dir, '.faps', job_type + '.fap')
try:
filetemp = open(job_type_ini_path, 'r')
job_type_ini = filetemp.read()
filetemp.close()
if not '[job_type]' in job_type_ini.lower():
job_type_ini = '[job_type]\n' + job_type_ini
job_type_ini = StringIO(job_type_ini)
debug("Job type options read from %s" % job_type_ini_path)
except IOError:
# file does not exist so we just use a blank string
error("Job type '%s' specified but options file '%s' not found" %
(job_type, job_type_ini_path))
            job_type_ini = StringIO('[job_type]\n')
self.job_type_ini.readfp(job_type_ini)
def options_test():
"""Try and read a few options from different sources."""
testopts = Options()
print(testopts.get('job_name'))
print(testopts.get('cmdopts'))
print(testopts.get('args'))
print(testopts.get('verbose'))
print(testopts.get('script_dir'))
print(testopts.getbool('interactive'))
for arg in testopts.get('args'):
print('%s: %s' % (arg, testopts.get(arg)))
try:
print(testopts.getbool(arg))
except ValueError:
print('%s is not a bool' % arg)
try:
print(testopts.getint(arg))
except ValueError:
print('%s is not an int' % arg)
try:
print(testopts.getfloat(arg))
except ValueError:
print('%s is not a float' % arg)
try:
print(testopts.gettuple(arg))
except ValueError:
print('%s is not a tuple' % arg)
print(testopts.get('not an option'))
class ColouredConsoleHandler(logging.StreamHandler):
"""Makes colourised and wrapped output for the console."""
def emit(self, record):
"""Colourise and emit a record."""
# Need to make a actual copy of the record
# to prevent altering the message for other loggers
myrecord = copy.copy(record)
levelno = myrecord.levelno
if levelno >= 50: # CRITICAL / FATAL
front = '\033[30;41m' # black/red
elif levelno >= 40: # ERROR
front = '\033[30;41m' # black/red
elif levelno >= 30: # WARNING
front = '\033[30;43m' # black/yellow
elif levelno >= 20: # INFO
front = '\033[30;42m' # black/green
elif levelno >= 10: # DEBUG
front = '\033[30;46m' # black/cyan
else: # NOTSET and anything else
front = '\033[0m' # normal
myrecord.levelname = '%s%s\033[0m' % (front, myrecord.levelname)
logging.StreamHandler.emit(self, myrecord)
class NullConfigParser(object):
"""Use in place of a blank ConfigParser that has no options."""
def __init__(self, *args, **kwargs):
"""This is empty, so do nothing."""
pass
def has_option(*args, **kwargs):
"""Always return Fasle as there are no options."""
return False
if __name__ == '__main__':
options_test()
| 39.345324 | 79 | 0.581886 | 14,685 | 0.895045 | 0 | 0 | 0 | 0 | 0 | 0 | 5,862 | 0.357287 |
4fbebc964c40ea6dd1d24cd31662fdfe6e48593b | 5,652 | py | Python | winchester/config.py | SandyWalsh/stacktach-winchester | ac49955386b695868945a28b6597fe72b3b657e6 | [
"Apache-2.0"
] | null | null | null | winchester/config.py | SandyWalsh/stacktach-winchester | ac49955386b695868945a28b6597fe72b3b657e6 | [
"Apache-2.0"
] | null | null | null | winchester/config.py | SandyWalsh/stacktach-winchester | ac49955386b695868945a28b6597fe72b3b657e6 | [
"Apache-2.0"
] | null | null | null | import collections
import logging
import os
import yaml
logger = logging.getLogger(__name__)
class ConfigurationError(Exception):
pass
class ConfigItem(object):
def __init__(self, required=False, default=None, help='', multiple=False):
self.help = help
self.required = required
self.multiple = multiple
self.default = self.convert(default)
def convert(self, item, manager=None):
if not self.multiple:
return item
elif (isinstance(item, collections.Sequence)
and not isinstance(item, basestring)):
return item
else:
return [item]
class ConfigSection(collections.Mapping):
def __init__(self, required=True, help='', config_description=None):
self.config_description = config_description
self.help = help
self.required = required
self.default = None
def convert(self, item, manager):
return manager.wrap(item, self.config_description)
def __len__(self):
return len(self.config_description)
def __iter__(self):
return iter(self.config_description)
def __getitem__(self, key):
return self.config_description[key]
class ConfigManager(collections.Mapping):
@classmethod
def wrap(cls, conf, config_description):
if hasattr(conf, 'check_config'):
wrapped_conf = conf
else:
wrapped_conf = cls(conf, config_description)
return wrapped_conf
def __init__(self, config_dict, config_description):
self.config_paths = []
self._configs = dict()
self._description = config_description
self._required = set()
self._defaults = dict()
for k, item in self._description.items():
if item.required:
self._required.add(k)
if item.default is not None:
self._defaults[k] = item.default
for k, item in config_dict.items():
if k in self._description:
self._configs[k] = self._description[k].convert(item, self)
else:
self._configs[k] = item
self._keys = set(self._defaults.keys() + self._configs.keys())
def __len__(self):
return len(self._keys)
def __iter__(self):
return iter(self._keys)
def __getitem__(self, key):
if key in self._configs:
return self._configs[key]
if key in self._defaults:
return self._defaults[key]
raise KeyError(key)
def add_config_path(self, *args):
for path in args:
if path not in self.config_paths:
self.config_paths.append(path)
def check_config(self, prefix=''):
if prefix:
prefix = prefix + '/'
for r in self._required:
if r not in self:
msg = "Required Configuration setting %s%s is missing!" % (prefix,r)
logger.error(msg)
raise ConfigurationError(msg)
for k, item in self.items():
if hasattr(item, 'check_config'):
item.check_config(prefix="%s%s" % (prefix,k))
@classmethod
def _load_yaml_config(cls, config_data, filename="(unknown)"):
"""Load a yaml config file."""
try:
config = yaml.safe_load(config_data)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
errmsg = ("Invalid YAML syntax in Configuration file "
"%(file)s at line: %(line)s, column: %(column)s."
% dict(file=filename,
line=mark.line + 1,
column=mark.column + 1))
else:
errmsg = ("YAML error reading Configuration file "
"%(file)s"
% dict(file=filename))
logger.error(errmsg)
raise
logger.info("Configuration: %s", config)
return config
@classmethod
def _load_file(cls, filename, paths):
for path in paths:
fullpath = os.path.join(path, filename)
if os.path.isfile(fullpath):
with open(fullpath, 'r') as cf:
logger.debug("Loading configuration file: %s", fullpath)
return cf.read()
msg = "Unable to find file %s in %s" % (filename, str(paths))
logger.info(msg)
return None
@classmethod
def load_config_file(cls, filename, filetype=None, paths=None):
if not paths:
paths = ['.']
if filetype is None:
if (filename.lower().endswith('.yaml') or
filename.lower().endswith('.yml')):
filetype = 'yaml'
elif filename.lower().endswith('.json'):
filetype = 'json'
elif (filename.lower().endswith('.conf') or
filename.lower().endswith('.ini')):
filetype = 'ini'
else:
filetype = 'yaml'
data = cls._load_file(filename, paths)
if data is None:
raise ConfigurationError("Cannot find or read config file: %s" % filename)
try:
loader = getattr(cls, "_load_%s_config" % filetype)
except AttributeError:
raise ConfigurationError("Unknown config file type: %s" % filetype)
return loader(data, filename=filename)
def load_file(self, filename, filetype=None):
return self.load_config_file(filename, filetype, paths=self.config_paths)
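# Usage sketch (illustrative only; not part of the original module). A config
# description maps option names to ConfigItem/ConfigSection objects, and
# ConfigManager layers values loaded from a file over the declared defaults.
# The file name and option names below are invented for the example.
if __name__ == '__main__':
    description = {
        'log_level': ConfigItem(default='info', help='logging verbosity'),
        'queue_name': ConfigItem(required=True, help='queue to consume from'),
        'database': ConfigSection(config_description={
            'url': ConfigItem(required=True, help='connection url')}),
    }
    raw = ConfigManager.load_config_file('winchester.yaml',
                                         paths=['.', '/etc/winchester'])
    conf = ConfigManager(raw, description)
    conf.check_config()       # raises ConfigurationError on missing required options
    print(conf['log_level'])  # 'info' unless the file overrides it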
| 32.860465 | 86 | 0.567587 | 5,544 | 0.980892 | 0 | 0 | 2,560 | 0.452937 | 0 | 0 | 517 | 0.091472 |
4fbf0c0afa244b2e383a987015bb42d2b03c6628 | 1,956 | py | Python | apps/users/views.py | chenyifaerfans/fafaer-apis | 896db11116fc78c597ebc1a90f547dc15004438d | [
"MIT"
] | null | null | null | apps/users/views.py | chenyifaerfans/fafaer-apis | 896db11116fc78c597ebc1a90f547dc15004438d | [
"MIT"
] | null | null | null | apps/users/views.py | chenyifaerfans/fafaer-apis | 896db11116fc78c597ebc1a90f547dc15004438d | [
"MIT"
] | 1 | 2019-03-17T12:46:20.000Z | 2019-03-17T12:46:20.000Z | from django.contrib.auth.backends import ModelBackend
from django.contrib.auth import get_user_model
from django.db.models import Q
from rest_framework import mixins
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import SessionAuthentication
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from common.permissions import IsOwnerOrReadOnly
from .serializers import UserSerializer
User = get_user_model()
class CustomBackend(ModelBackend):
def authenticate(self, request, username=None, password=None, **kwargs):
try:
user = User.objects.get(Q(username=username)|Q(mobile=username))
if user.check_password(password):
return user
except Exception as e:
return None
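# Note added for clarity (not in the original file): Django only consults this
# backend if it is registered in settings, e.g.
#   AUTHENTICATION_BACKENDS = ['apps.users.views.CustomBackend']
# where the dotted path is a guess based on this project's layout.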
class UserViewset(mixins.RetrieveModelMixin, viewsets.GenericViewSet):
"""
retrieve:
    Retrieve a single user's information
"""
queryset = User.objects.filter(is_del=0)
serializer_class = UserSerializer
permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)
authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
def page_forbidden(request):
"""
    Global 403 (forbidden) handler
:param request:
:return:
"""
from django.shortcuts import render_to_response
response = render_to_response('403.html', {})
response.status_code = 403
return response
def page_not_found(request):
"""
    Global 404 (not found) handler
:param request:
:return:
"""
from django.shortcuts import render_to_response
response = render_to_response('404.html', {})
response.status_code = 404
return response
def server_error(request):
"""
    Global 500 (server error) handler
:param request:
:return:
"""
from django.shortcuts import render_to_response
response = render_to_response('500.html', {})
response.status_code = 500
return response | 27.549296 | 80 | 0.716258 | 689 | 0.342786 | 0 | 0 | 0 | 0 | 0 | 0 | 329 | 0.163682 |
4fbff0bc9c697c0951f61a60296a6035d822cd65 | 941 | py | Python | backend.py | Fennec2000GH/KeywordFS | 0e1d7ab78d084e0c21cf4e7246ed169353cfca9b | [
"MIT"
] | null | null | null | backend.py | Fennec2000GH/KeywordFS | 0e1d7ab78d084e0c21cf4e7246ed169353cfca9b | [
"MIT"
] | null | null | null | backend.py | Fennec2000GH/KeywordFS | 0e1d7ab78d084e0c21cf4e7246ed169353cfca9b | [
"MIT"
] | null | null | null |
from genericpath import exists, isfile
import json, os
from pprint import pprint
from keyword_extraction import *
from topic_modeling import *
# from xml_parser import *
def file_to_json(path: str, storage_path: str = 'storage'):
"""
Converts file containing text to stored JSON object to track topics and keywords.
Parameters:
path (str): Path to file to be converted to JSON object.
storage_path (str): Path to directory to store JSON object.
Returns:
None
"""
# edge case
if not (os.path.exists(path=path) and os.path.isfile(path=path)):
raise ValueError(f'{path} is not a valid path to a file.')
os.makedirs(path=storage_path, exist_ok=True)
    with open(file=path, mode='r') as file:
        text = file.read()
    # NOTE: the original find_topics() call was left incomplete; this assumes it
    # accepts the document text via its `documents` parameter. Lists are used
    # because sets are not JSON-serializable.
    json_dict = {
        'keywords': list(),
        'topics': list(set(find_topics(documents=text)))
    }
    out_path = os.path.join(storage_path, os.path.basename(path) + '.json')
    with open(out_path, 'w') as out_file:
        json.dump(obj=json_dict, fp=out_file)
| 30.354839 | 86 | 0.629118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 392 | 0.416578 |
4fc02fdb9ddc7d5747414ecaa26615c65041ad79 | 978 | py | Python | tar.py | pakit/recipes | e52f5f45648da27a9e096b0b3f5157666007b59c | [
"BSD-3-Clause"
] | 1 | 2015-11-20T18:43:42.000Z | 2015-11-20T18:43:42.000Z | tar.py | pakit/recipes | e52f5f45648da27a9e096b0b3f5157666007b59c | [
"BSD-3-Clause"
] | 2 | 2015-11-20T18:44:45.000Z | 2015-12-14T19:06:01.000Z | tar.py | pakit/recipes | e52f5f45648da27a9e096b0b3f5157666007b59c | [
"BSD-3-Clause"
] | null | null | null | """ Formula for building tar """
import os
from pakit import Archive, Git, Recipe
class Tar(Recipe):
"""
The GNU tar utility.
"""
def __init__(self):
super(Tar, self).__init__()
self.homepage = 'https://www.gnu.org/software/tar'
self.repos = {
'stable': Archive('http://ftp.gnu.org/gnu/tar/tar-1.28.tar.bz2',
hash='60e4bfe0602fef34cd908d91cf638e17eeb093'
'94d7b98c2487217dc4d3147562'),
'unstable': Git('git://git.savannah.gnu.org/tar.git'),
}
self.requires = ['gettext']
def build(self):
if os.path.exists('bootstrap'):
self.cmd('./bootstrap')
self.cmd('autoconf')
self.cmd('./configure --prefix={prefix}')
self.cmd('make')
self.cmd('make install')
def verify(self):
lines = self.cmd('tar --version').output()
assert lines[0].find('tar (GNU tar)') == 0
| 29.636364 | 76 | 0.5409 | 892 | 0.912065 | 0 | 0 | 0 | 0 | 0 | 0 | 393 | 0.40184 |
4fc1039c804ad07fec0a4b3a651fae130bfd51e3 | 6,277 | py | Python | httpclient.py | forgeno/CMPUT404-assignment-web-client | 630946020460d6c7acf850753ac27bbfe9afd273 | [
"Apache-2.0"
] | null | null | null | httpclient.py | forgeno/CMPUT404-assignment-web-client | 630946020460d6c7acf850753ac27bbfe9afd273 | [
"Apache-2.0"
] | null | null | null | httpclient.py | forgeno/CMPUT404-assignment-web-client | 630946020460d6c7acf850753ac27bbfe9afd273 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
# Copyright 2016 Abram Hindle, https://github.com/tywtyw2002, and https://github.com/treedust
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Do not use urllib's HTTP GET and POST mechanisms.
# Write your own HTTP GET and POST
# The point is to understand what you have to send and get experience with it
import sys
import socket
import time
import re
# you may use urllib to encode data appropriately
import urllib.parse
def help():
print("httpclient.py [GET/POST] [URL]\n")
class HTTPResponse(object):
def __init__(self, code=200, body=""):
self.code = code
self.body = body
self.socket = None
class HTTPClient(object):
#def get_host_port(self,url):
def connect(self, host, port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((host, port))
return None
def general_parser(self, data):
parseData = data.replace("/r","")
parseData = data.split("\n")
return parseData
def get_code(self, data):
statusCode = int(data[0].split(" ")[1]) #returns status code of response.
return statusCode
def get_headers(self,data, urlPath):
htmlTagIndex = data.find("\r\n\r\n")
if(htmlTagIndex == -1):
htmlTagIndex = 0
header = data[:htmlTagIndex]
header += "\nLocation: "+urlPath
return header
def get_body(self, data):
body = ""
htmlTagIndex = data.find("\r\n\r\n")
body = data[htmlTagIndex:]
return body
def sendall(self, data):
self.socket.sendall(data.encode('utf-8'))
def close(self):
self.socket.close()
# read everything from the socket
def recvall(self):
buffer = bytearray()
done = False
while not done:
part = self.socket.recv(1024)
if (part):
buffer.extend(part)
else:
done = not part
decodedBody = buffer.decode('utf-8')
return decodedBody
def GET(self, url, args=None):
domainName, urlPath, urlQuery, port = self.parseURL(url)
self.connect(domainName, port)
fakeUserAgent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
header = "GET "+urlPath+urlQuery+" HTTP/1.1\r\nHost: "+domainName+"\r\nAccept: */*\nUser-Agent: "+fakeUserAgent+"\r\n\r\n"
self.sendall(header)
#print("###GET DATA SENT###\nDomain: {}\nPath: {}\nQuery: {}\nPort: {}\nHeader: {}\n".format(domainName, urlPath, urlQuery, port, header))
print("###GET DATA SENT###\n"+header)
returnData = self.recvall()
parseData = self.general_parser(returnData)
statusCode = self.get_code(parseData)
htmlBody = self.get_body(returnData)
htmlHeader = self.get_headers(returnData, urlPath)
print("###GET DATA RECIEVED###\n"+htmlBody)
self.close()
return HTTPResponse(statusCode, htmlBody)
def parseURL(self, url):
domain = url
path = ""
query = ""
slashIndex = url.find("//")
if(slashIndex != -1):
domain = url[slashIndex+2:]
pathStartIndex = domain.find("/")
if(pathStartIndex != -1):
path = domain[pathStartIndex:]
if(path == ""):
path = "/"
queryIndex = path.find("?")
if(queryIndex != -1):
query = path[queryIndex:]
path = path[:queryIndex]
if(pathStartIndex != -1):
domain = domain[:pathStartIndex]
try:
port = int(domain.split(":")[1])
domain = domain.split(":")[0]
except:
port = 80
return domain, path, query, port
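    # Worked example (added for clarity, not in the original):
    #   >>> HTTPClient().parseURL("http://localhost:8080/free?q=1")
    #   ('localhost', '/free', '?q=1', 8080)
    # Without an explicit port the default 80 is returned, and a bare domain
    # yields path "/" with an empty query string.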
def parsePostArgs(self, args):
postBody = ""
if(args == None):
postBody = ""
else:
for key in args.keys():
postBody += "{}={}&".format(key, args[key])
return postBody, len(postBody)
def POST(self, url, args=None):
#start_time = time.time()
postBody, postBodyLen = self.parsePostArgs(args)
domainName, urlPath, urlQuery, port = self.parseURL(url)
self.connect(domainName, port)
fakeUserAgent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
header = "POST {} HTTP/1.1\nHost: {}\nConnection: keep-alive\nAccept: */*\nOrigin: {}\nUser-Agent: {}\nAccept-Encoding: gzip, deflate\nAccept-Language: en-US;q=0.9\nContent-Type: application/x-www-form-urlencoded; charset=UTF-8\nContent-Length: {}\r\n\r\n{}".format(urlPath+urlQuery,domainName,url,fakeUserAgent,postBodyLen,postBody)
self.sendall(header)
returnData = self.recvall()
print("###POST DATA SENT###\n"+header)
#print("#####SENT DATA#####: \n"+header)
parseData = self.general_parser(returnData)
statusCode = self.get_code(parseData)
htmlBody = self.get_body(returnData)
htmlHeader = self.get_headers(returnData, urlPath)
print("###POST DATA RECIEVED###: \n"+htmlBody)
self.close()
return HTTPResponse(statusCode, htmlBody)
def command(self, url, command="GET", args=None):
if (command == "POST"):
return self.POST( url, args )
else:
return self.GET( url, args )
if __name__ == "__main__":
client = HTTPClient()
command = "GET"
if (len(sys.argv) <= 1):
help()
sys.exit(1)
elif (len(sys.argv) == 3):
print(client.command( sys.argv[2], sys.argv[1] ))
else:
print(client.command( sys.argv[1] ))
| 36.494186 | 341 | 0.602517 | 4,963 | 0.790664 | 0 | 0 | 0 | 0 | 0 | 0 | 1,991 | 0.31719 |
4fc2f03a744a03fe3211e5d85d1ad3585e62ba1b | 1,482 | py | Python | larcv/app/arxiv/arxiv/LArOpenCVHandle/mac/convert_test.py | mmajewsk/larcv2 | 9ee74e42b293d547d3a8510fa2139b2d4ccf6b89 | [
"MIT"
] | 14 | 2017-10-19T15:08:29.000Z | 2021-03-31T21:21:07.000Z | larcv/app/arxiv/arxiv/LArOpenCVHandle/mac/convert_test.py | mmajewsk/larcv2 | 9ee74e42b293d547d3a8510fa2139b2d4ccf6b89 | [
"MIT"
] | 32 | 2017-10-25T22:54:06.000Z | 2019-10-01T13:57:15.000Z | larcv/app/arxiv/arxiv/LArOpenCVHandle/mac/convert_test.py | mmajewsk/larcv2 | 9ee74e42b293d547d3a8510fa2139b2d4ccf6b89 | [
"MIT"
] | 16 | 2017-12-07T12:04:40.000Z | 2021-11-15T00:53:31.000Z | import ROOT,sys
from larlite import larlite as fmwk1
from larcv import larcv as fmwk2
from ROOT import handshake
io1=fmwk1.storage_manager(fmwk1.storage_manager.kBOTH)
io1.add_in_filename(sys.argv[1])
io1.set_out_filename('boke.root')
io1.open()
io2=fmwk2.IOManager(fmwk2.IOManager.kREAD)
io2.add_in_file(sys.argv[2])
io2.initialize()
hs=handshake.HandShaker()
ctr=0
while io1.next_event() and io2.read_entry(ctr):
ev_pfpart = io1.get_data(fmwk1.data.kPFParticle, "dl")
ev_vertex = io1.get_data(fmwk1.data.kVertex, "dl")
ev_shower = io1.get_data(fmwk1.data.kShower, "dl")
ev_track = io1.get_data(fmwk1.data.kTrack, "dl")
ev_cluster = io1.get_data(fmwk1.data.kCluster, "dl")
ev_hit = io1.get_data(fmwk1.data.kHit, "dl")
ev_ass = io1.get_data(fmwk1.data.kAssociation,"dl")
ev_hit_in = io1.get_data(fmwk1.data.kHit, "gaushit")
ev_pgraph = io2.get_data(fmwk2.kProductPGraph,'test')
ev_pixel2d = io2.get_data(fmwk2.kProductPixel2D,'test_ctor')
hs.pixel_distance_threshold(1.)
hs.set_larlite_pointers(ev_pfpart, ev_vertex,
ev_shower, ev_track,
ev_cluster, ev_hit,
ev_ass)
hs.construct(ev_pgraph, ev_pixel2d, ev_hit_in)
io1.set_id(io1.run_id(), io1.subrun_id(), io1.event_id())
#io1.next_event()
#io1.go_to()
#io2.read_entry()
#io1.save_entry()
ctr+=1
io1.close()
io2.finalize()
| 29.058824 | 64 | 0.668016 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.08637 |
4fc4b797c995974062c04d59da99516bb81cce25 | 2,713 | py | Python | code/vectorized/vectorized_neural_network.py | le0x99/low-level-deep-learning | 9c68cce8aae7f541a5556901659378ffd859977a | [
"MIT"
] | null | null | null | code/vectorized/vectorized_neural_network.py | le0x99/low-level-deep-learning | 9c68cce8aae7f541a5556901659378ffd859977a | [
"MIT"
] | null | null | null | code/vectorized/vectorized_neural_network.py | le0x99/low-level-deep-learning | 9c68cce8aae7f541a5556901659378ffd859977a | [
"MIT"
] | null | null | null | import numpy as np
def sigmoid(Z): return 1./(1.+np.exp(-Z))
def softmax(Z): return np.exp(Z)/np.exp(Z).sum()
def softmax_batched(Z): return np.exp(Z) / np.sum(np.exp(Z), axis=1, keepdims=True)
def initialize_parameters():
W1 = np.random.randn(300,784) * 0.01
b1 = np.zeros((300,1))
W2 = np.random.randn(10,300) * 0.01
b2 = np.zeros((10,1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
def forward_propagation(X, parameters, batched):
W1, b1 = parameters["W1"],parameters["b1"]
W2, b2 = parameters["W2"],parameters["b2"]
Z1 = W1@X + b1
A1 = sigmoid(Z1)
Z2 = W2@A1 + b2
if batched:
A2 = softmax_batched(Z2)
else:
A2 = softmax(Z2)
cache = {"Z1": Z1,
"A1": A1,
"Z2": Z2,
"A2": A2}
return A2, cache
def compute_cost(A2, Y):
return (- np.log(A2)@Y.T).flatten()[0]
def compute_cost_stable(A2, Y, batched):
A2 = np.clip(A2, 1e-12, 1. - 1e-12)
m = A2.shape[0] if batched == True else 1.
ce = -np.sum(Y*np.log(A2+1e-9))/m
return ce
def backward_propagation(parameters, cache, X, Y, batched):
m = X.shape[0] if batched == True else 1.
W1, W2 = parameters["W1"], parameters["W2"]
A1, A2 = cache["A1"], cache["A2"]
db2 = (A2-Y).mean(keepdims=True)
if batched:
dW2 = (1/m)*(A2-Y)@A1.reshape(m,1,300)
else:
dW2 = (1/m)*(A2-Y)@A1.T
dZ2 = (A2-Y)
dgZ1 = A1*(1-A1)
dZ1 = W2.T@dZ2*dgZ1
db1 = dZ1.mean(axis=1, keepdims=True)#-1
if batched:
dW1 = ((1/m)*dZ1)@X.reshape(m,1,784)
else:
dW1 = ((1/m)*dZ1)@X.T
grads = {"dW1": dW1,
"db1": db1,
"dW2": dW2,
"db2": db2}
return grads
def update_parameters(parameters, grads, batched, learning_rate = 0.01):
W1,W2,b1,b2 = parameters["W1"], parameters["W2"], parameters["b1"], parameters["b2"]
dW1, db1, dW2, db2 = grads["dW1"], grads["db1"], grads["dW2"], grads["db2"]
if batched:
W1 -= learning_rate * grads["dW1"].mean(axis=0)
W2 -= learning_rate * grads["dW2"].mean(axis=0)
b1 = b1 - learning_rate * grads["db1"].mean(axis=0)
b2 -= learning_rate * grads["db2"].mean(axis=0)
else:
W1 -= learning_rate * grads["dW1"]
W2 -= learning_rate * grads["dW2"]
b1 = b1 - learning_rate * grads["db1"]
b2 -= learning_rate * grads["db2"]
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
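# Illustrative single training step (added sketch; not part of the original file).
# Shapes follow the functions above: a flattened 784-dimensional input, a one-hot
# 10-dimensional label, and the 300-unit hidden layer hard-coded in the helpers.
# The random data below exists only to show how the pieces chain together.
if __name__ == '__main__':
    np.random.seed(0)
    X = np.random.rand(784, 1)            # one example as a column vector
    Y = np.zeros((10, 1)); Y[3] = 1.0     # one-hot label for class 3
    params = initialize_parameters()
    A2, cache = forward_propagation(X, params, batched=False)
    cost = compute_cost_stable(A2, Y, batched=False)
    grads = backward_propagation(params, cache, X, Y, batched=False)
    params = update_parameters(params, grads, batched=False, learning_rate=0.01)
    print("cross-entropy after the forward pass:", cost)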
| 25.35514 | 88 | 0.507925 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 179 | 0.065979 |
4fc6d2223dbb154a6a63ae158a494705ee4d06f0 | 8,310 | py | Python | test/shed_functional/functional/test_1000_install_basic_repository.py | innovate-invent/galaxy | 10aa953a40e171246bdd1804c74e8019da8e8200 | [
"CC-BY-3.0"
] | 4 | 2018-10-29T18:34:38.000Z | 2021-09-29T23:30:42.000Z | test/shed_functional/functional/test_1000_install_basic_repository.py | innovate-invent/galaxy | 10aa953a40e171246bdd1804c74e8019da8e8200 | [
"CC-BY-3.0"
] | 30 | 2016-10-20T15:35:12.000Z | 2018-10-02T15:59:54.000Z | test/shed_functional/functional/test_1000_install_basic_repository.py | innovate-invent/galaxy | 10aa953a40e171246bdd1804c74e8019da8e8200 | [
"CC-BY-3.0"
] | 7 | 2016-11-03T19:11:01.000Z | 2020-05-11T14:23:52.000Z | from shed_functional.base.twilltestcase import common, ShedTwillTestCase
class BasicToolShedFeatures(ShedTwillTestCase):
'''Test installing a basic repository.'''
def test_0000_initiate_users(self):
"""Create necessary user accounts."""
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
test_user_1 = self.test_db_util.get_user(common.test_user_1_email)
assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
self.test_db_util.get_private_role(test_user_1)
self.login(email=common.admin_email, username=common.admin_username)
admin_user = self.test_db_util.get_user(common.admin_email)
assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
self.test_db_util.get_private_role(admin_user)
self.galaxy_login(email=common.admin_email, username=common.admin_username)
galaxy_admin_user = self.test_db_util.get_galaxy_user(common.admin_email)
assert galaxy_admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
self.test_db_util.get_galaxy_private_role(galaxy_admin_user)
def test_0005_ensure_repositories_and_categories_exist(self):
'''Create the 0000 category and upload the filtering repository to it, if necessary.'''
self.login(email=common.admin_email, username=common.admin_username)
category = self.create_category(name='Test 0000 Basic Repository Features 2', description='Test Description 0000 Basic Repository Features 2')
category = self.create_category(name='Test 0000 Basic Repository Features 1', description='Test Description 0000 Basic Repository Features 1')
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.get_or_create_repository(name='filtering_0000',
description="Galaxy's filtering tool",
long_description="Long description of Galaxy's filtering tool",
owner=common.test_user_1_name,
category_id=self.security.encode_id(category.id))
if self.repository_is_new(repository):
self.upload_file(repository,
filename='filtering/filtering_1.1.0.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded filtering 1.1.0 tarball.',
strings_displayed=[],
strings_not_displayed=[])
self.upload_file(repository,
filename='filtering/filtering_0000.txt',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded readme for 1.1.0',
strings_displayed=[],
strings_not_displayed=[])
self.upload_file(repository,
filename='filtering/filtering_2.2.0.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded filtering 2.2.0 tarball.',
strings_displayed=[],
strings_not_displayed=[])
self.upload_file(repository,
filename='readme.txt',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded readme for 2.2.0',
strings_displayed=[],
strings_not_displayed=[])
def test_0010_browse_tool_sheds(self):
"""Browse the available tool sheds in this Galaxy instance."""
self.galaxy_login(email=common.admin_email, username=common.admin_username)
self.visit_galaxy_url('/admin_toolshed/browse_tool_sheds')
self.check_page_for_string('Embedded tool shed for functional tests')
self.browse_tool_shed(url=self.url, strings_displayed=['Test 0000 Basic Repository Features 1', 'Test 0000 Basic Repository Features 2'])
def test_0015_browse_test_0000_category(self):
'''Browse the category created in test 0000. It should contain the filtering_0000 repository also created in that test.'''
category = self.test_db_util.get_category_by_name('Test 0000 Basic Repository Features 1')
self.browse_category(category, strings_displayed=['filtering_0000'])
def test_0020_preview_filtering_repository(self):
'''Load the preview page for the filtering_0000 repository in the tool shed.'''
self.preview_repository_in_tool_shed('filtering_0000', common.test_user_1_name, strings_displayed=['filtering_0000', 'Valid tools'])
def test_0025_install_filtering_repository(self):
self.install_repository('filtering_0000',
common.test_user_1_name,
'Test 0000 Basic Repository Features 1',
new_tool_panel_section_label='test_1000')
installed_repository = self.test_db_util.get_installed_repository_by_name_owner('filtering_0000', common.test_user_1_name)
strings_displayed = ['filtering_0000',
"Galaxy's filtering tool",
'user1',
self.url.replace('http://', ''),
str(installed_repository.installed_changeset_revision)]
self.display_galaxy_browse_repositories_page(strings_displayed=strings_displayed)
strings_displayed.extend(['Installed tool shed repository', 'Valid tools', 'Filter1'])
self.display_installed_repository_manage_page(installed_repository, strings_displayed=strings_displayed)
self.verify_tool_metadata_for_installed_repository(installed_repository)
def test_0030_install_filtering_repository_again(self):
'''Attempt to install the already installed filtering repository.'''
installed_repository = self.test_db_util.get_installed_repository_by_name_owner('filtering_0000', common.test_user_1_name)
        # The page displayed after installation is the ajaxian "Monitor installing tool shed repositories" page. Since the filter
# repository was already installed, nothing will be in the process of being installed, so the grid will not display 'filtering_0000'.
post_submit_strings_not_displayed = ['filtering_0000']
self.install_repository('filtering_0000',
common.test_user_1_name,
'Test 0000 Basic Repository Features 1',
post_submit_strings_not_displayed=post_submit_strings_not_displayed)
strings_displayed = ['filtering_0000',
"Galaxy's filtering tool",
'user1',
self.url.replace('http://', ''),
str(installed_repository.installed_changeset_revision)]
self.display_installed_repository_manage_page(installed_repository, strings_displayed=strings_displayed)
self.display_galaxy_browse_repositories_page(strings_displayed=strings_displayed)
def test_0035_verify_installed_repository_metadata(self):
'''Verify that resetting the metadata on an installed repository does not change the metadata.'''
self.verify_installed_repository_metadata_unchanged('filtering_0000', common.test_user_1_name)
| 67.016129 | 150 | 0.63562 | 8,234 | 0.990854 | 0 | 0 | 0 | 0 | 0 | 0 | 2,124 | 0.255596 |
4fc6de35795e4f13631af5dec2a31964ff60ed92 | 572 | py | Python | exceptional.py | kentoj/python-fundamentals | f7b93228c18d1553aad11580b7d2f42c999da376 | [
"MIT"
] | 6 | 2017-01-31T18:55:14.000Z | 2021-01-02T09:21:40.000Z | exceptional.py | kentoj/python-fundamentals | f7b93228c18d1553aad11580b7d2f42c999da376 | [
"MIT"
] | null | null | null | exceptional.py | kentoj/python-fundamentals | f7b93228c18d1553aad11580b7d2f42c999da376 | [
"MIT"
] | 1 | 2020-12-28T15:50:39.000Z | 2020-12-28T15:50:39.000Z | """A module to demonstrate exceptions."""
import sys
from math import log
def convert(item):
"""
Convert to an integer.
Args:
item: some object
Returns:
an integer representation of the object
    Raises:
        ValueError or TypeError if the item cannot be converted
"""
try:
return int(item)
except (ValueError, TypeError) as e:
print("Conversion Error: {}"
.format(str(e)),
file=sys.stderr)
raise
def string_log(s):
return log(convert(s))
if __name__ == '__main__':
print(convert(sys.argv[1]))
| 16.823529 | 47 | 0.575175 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 247 | 0.431818 |
4fc9baceeb6c53d83cab3241e6032040a6ea6f24 | 33,921 | py | Python | tests/examples/minlplib/syn05m04h.py | ouyang-w-19/decogo | 52546480e49776251d4d27856e18a46f40c824a1 | [
"MIT"
] | 2 | 2021-07-03T13:19:10.000Z | 2022-02-06T10:48:13.000Z | tests/examples/minlplib/syn05m04h.py | ouyang-w-19/decogo | 52546480e49776251d4d27856e18a46f40c824a1 | [
"MIT"
] | 1 | 2021-07-04T14:52:14.000Z | 2021-07-15T10:17:11.000Z | tests/examples/minlplib/syn05m04h.py | ouyang-w-19/decogo | 52546480e49776251d4d27856e18a46f40c824a1 | [
"MIT"
] | null | null | null | # MINLP written by GAMS Convert at 04/21/18 13:54:28
#
# Equation counts
# Total E G L N X C B
# 363 141 12 210 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 209 169 40 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 843 807 36 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x2 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x3 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x4 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x5 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x6 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x7 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x8 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x9 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x10 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x11 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x12 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x13 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x14 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x15 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x16 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x17 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x18 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x19 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x20 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x21 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x22 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x23 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x24 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x25 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x26 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x27 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x28 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x29 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x30 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x31 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x32 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x33 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x34 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x35 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x36 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x37 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x38 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x39 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x40 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x41 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x42 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x43 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x44 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x45 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x46 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x47 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x48 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x49 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x50 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x51 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x52 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x53 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x54 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x55 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x56 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x57 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x58 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x59 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x60 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x61 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x62 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x63 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x64 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x65 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x66 = Var(within=Reals,bounds=(0,30),initialize=0)
m.x67 = Var(within=Reals,bounds=(0,30),initialize=0)
m.x68 = Var(within=Reals,bounds=(0,30),initialize=0)
m.x69 = Var(within=Reals,bounds=(0,30),initialize=0)
m.x70 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x71 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x72 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x73 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x74 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x75 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x76 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x77 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x78 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x79 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x80 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x81 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x82 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x83 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x84 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x85 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x86 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x87 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x88 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x89 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x90 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x91 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x92 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x93 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x94 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x95 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x96 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x97 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x98 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x99 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x100 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x101 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x102 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x103 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x104 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x105 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x106 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x107 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x108 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x109 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x110 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x111 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x112 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x113 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x114 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x115 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x116 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x117 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x118 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x119 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x120 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x121 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x122 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x123 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x124 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x125 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x126 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x127 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x128 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x129 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x130 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x131 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x132 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x133 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x134 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x135 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x136 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x137 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x138 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x139 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x140 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x141 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x142 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x143 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x144 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x145 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x146 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x147 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x148 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x149 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x150 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x151 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x152 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x153 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x154 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x155 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x156 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x157 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x158 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x159 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x160 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x161 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x162 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x163 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x164 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x165 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x166 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x167 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x168 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x169 = Var(within=Reals,bounds=(0,None),initialize=0)
m.b170 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b171 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b172 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b173 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b174 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b175 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b176 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b177 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b178 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b179 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b180 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b181 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b182 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b183 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b184 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b185 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b186 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b187 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b188 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b189 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b190 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b191 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b192 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b193 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b194 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b195 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b196 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b197 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b198 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b199 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b200 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b201 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b202 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b203 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b204 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b205 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b206 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b207 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b208 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b209 = Var(within=Binary,bounds=(0,1),initialize=0)
m.obj = Objective(expr= - m.x22 - m.x23 - m.x24 - m.x25 + 5*m.x46 + 10*m.x47 + 5*m.x48 + 10*m.x49 - 2*m.x66 - m.x67
- 2*m.x68 - m.x69 + 80*m.x70 + 90*m.x71 + 120*m.x72 + 100*m.x73 + 285*m.x74 + 390*m.x75
+ 350*m.x76 + 300*m.x77 + 290*m.x78 + 405*m.x79 + 190*m.x80 + 340*m.x81 - 5*m.b190 - 4*m.b191
- 6*m.b192 - 3*m.b193 - 8*m.b194 - 7*m.b195 - 6*m.b196 - 5*m.b197 - 6*m.b198 - 9*m.b199
- 4*m.b200 - 3*m.b201 - 10*m.b202 - 9*m.b203 - 5*m.b204 - 6*m.b205 - 6*m.b206 - 10*m.b207
- 6*m.b208 - 9*m.b209, sense=maximize)
m.c2 = Constraint(expr= m.x22 - m.x26 - m.x30 == 0)
m.c3 = Constraint(expr= m.x23 - m.x27 - m.x31 == 0)
m.c4 = Constraint(expr= m.x24 - m.x28 - m.x32 == 0)
m.c5 = Constraint(expr= m.x25 - m.x29 - m.x33 == 0)
m.c6 = Constraint(expr= - m.x34 - m.x38 + m.x42 == 0)
m.c7 = Constraint(expr= - m.x35 - m.x39 + m.x43 == 0)
m.c8 = Constraint(expr= - m.x36 - m.x40 + m.x44 == 0)
m.c9 = Constraint(expr= - m.x37 - m.x41 + m.x45 == 0)
m.c10 = Constraint(expr= m.x42 - m.x46 - m.x50 == 0)
m.c11 = Constraint(expr= m.x43 - m.x47 - m.x51 == 0)
m.c12 = Constraint(expr= m.x44 - m.x48 - m.x52 == 0)
m.c13 = Constraint(expr= m.x45 - m.x49 - m.x53 == 0)
m.c14 = Constraint(expr= m.x50 - m.x54 - m.x58 - m.x62 == 0)
m.c15 = Constraint(expr= m.x51 - m.x55 - m.x59 - m.x63 == 0)
m.c16 = Constraint(expr= m.x52 - m.x56 - m.x60 - m.x64 == 0)
m.c17 = Constraint(expr= m.x53 - m.x57 - m.x61 - m.x65 == 0)
m.c18 = Constraint(expr=(m.x98/(1e-6 + m.b170) - log(1 + m.x82/(1e-6 + m.b170)))*(1e-6 + m.b170) <= 0)
m.c19 = Constraint(expr=(m.x99/(1e-6 + m.b171) - log(1 + m.x83/(1e-6 + m.b171)))*(1e-6 + m.b171) <= 0)
m.c20 = Constraint(expr=(m.x100/(1e-6 + m.b172) - log(1 + m.x84/(1e-6 + m.b172)))*(1e-6 + m.b172) <= 0)
m.c21 = Constraint(expr=(m.x101/(1e-6 + m.b173) - log(1 + m.x85/(1e-6 + m.b173)))*(1e-6 + m.b173) <= 0)
m.c22 = Constraint(expr= m.x86 == 0)
m.c23 = Constraint(expr= m.x87 == 0)
m.c24 = Constraint(expr= m.x88 == 0)
m.c25 = Constraint(expr= m.x89 == 0)
m.c26 = Constraint(expr= m.x102 == 0)
m.c27 = Constraint(expr= m.x103 == 0)
m.c28 = Constraint(expr= m.x104 == 0)
m.c29 = Constraint(expr= m.x105 == 0)
m.c30 = Constraint(expr= m.x26 - m.x82 - m.x86 == 0)
m.c31 = Constraint(expr= m.x27 - m.x83 - m.x87 == 0)
m.c32 = Constraint(expr= m.x28 - m.x84 - m.x88 == 0)
m.c33 = Constraint(expr= m.x29 - m.x85 - m.x89 == 0)
m.c34 = Constraint(expr= m.x34 - m.x98 - m.x102 == 0)
m.c35 = Constraint(expr= m.x35 - m.x99 - m.x103 == 0)
m.c36 = Constraint(expr= m.x36 - m.x100 - m.x104 == 0)
m.c37 = Constraint(expr= m.x37 - m.x101 - m.x105 == 0)
m.c38 = Constraint(expr= m.x82 - 40*m.b170 <= 0)
m.c39 = Constraint(expr= m.x83 - 40*m.b171 <= 0)
m.c40 = Constraint(expr= m.x84 - 40*m.b172 <= 0)
m.c41 = Constraint(expr= m.x85 - 40*m.b173 <= 0)
m.c42 = Constraint(expr= m.x86 + 40*m.b170 <= 40)
m.c43 = Constraint(expr= m.x87 + 40*m.b171 <= 40)
m.c44 = Constraint(expr= m.x88 + 40*m.b172 <= 40)
m.c45 = Constraint(expr= m.x89 + 40*m.b173 <= 40)
m.c46 = Constraint(expr= m.x98 - 3.71357206670431*m.b170 <= 0)
m.c47 = Constraint(expr= m.x99 - 3.71357206670431*m.b171 <= 0)
m.c48 = Constraint(expr= m.x100 - 3.71357206670431*m.b172 <= 0)
m.c49 = Constraint(expr= m.x101 - 3.71357206670431*m.b173 <= 0)
m.c50 = Constraint(expr= m.x102 + 3.71357206670431*m.b170 <= 3.71357206670431)
m.c51 = Constraint(expr= m.x103 + 3.71357206670431*m.b171 <= 3.71357206670431)
m.c52 = Constraint(expr= m.x104 + 3.71357206670431*m.b172 <= 3.71357206670431)
m.c53 = Constraint(expr= m.x105 + 3.71357206670431*m.b173 <= 3.71357206670431)
m.c54 = Constraint(expr=(m.x106/(1e-6 + m.b174) - 1.2*log(1 + m.x90/(1e-6 + m.b174)))*(1e-6 + m.b174) <= 0)
m.c55 = Constraint(expr=(m.x107/(1e-6 + m.b175) - 1.2*log(1 + m.x91/(1e-6 + m.b175)))*(1e-6 + m.b175) <= 0)
m.c56 = Constraint(expr=(m.x108/(1e-6 + m.b176) - 1.2*log(1 + m.x92/(1e-6 + m.b176)))*(1e-6 + m.b176) <= 0)
m.c57 = Constraint(expr=(m.x109/(1e-6 + m.b177) - 1.2*log(1 + m.x93/(1e-6 + m.b177)))*(1e-6 + m.b177) <= 0)
m.c58 = Constraint(expr= m.x94 == 0)
m.c59 = Constraint(expr= m.x95 == 0)
m.c60 = Constraint(expr= m.x96 == 0)
m.c61 = Constraint(expr= m.x97 == 0)
m.c62 = Constraint(expr= m.x110 == 0)
m.c63 = Constraint(expr= m.x111 == 0)
m.c64 = Constraint(expr= m.x112 == 0)
m.c65 = Constraint(expr= m.x113 == 0)
m.c66 = Constraint(expr= m.x30 - m.x90 - m.x94 == 0)
m.c67 = Constraint(expr= m.x31 - m.x91 - m.x95 == 0)
m.c68 = Constraint(expr= m.x32 - m.x92 - m.x96 == 0)
m.c69 = Constraint(expr= m.x33 - m.x93 - m.x97 == 0)
m.c70 = Constraint(expr= m.x38 - m.x106 - m.x110 == 0)
m.c71 = Constraint(expr= m.x39 - m.x107 - m.x111 == 0)
m.c72 = Constraint(expr= m.x40 - m.x108 - m.x112 == 0)
m.c73 = Constraint(expr= m.x41 - m.x109 - m.x113 == 0)
m.c74 = Constraint(expr= m.x90 - 40*m.b174 <= 0)
m.c75 = Constraint(expr= m.x91 - 40*m.b175 <= 0)
m.c76 = Constraint(expr= m.x92 - 40*m.b176 <= 0)
m.c77 = Constraint(expr= m.x93 - 40*m.b177 <= 0)
m.c78 = Constraint(expr= m.x94 + 40*m.b174 <= 40)
m.c79 = Constraint(expr= m.x95 + 40*m.b175 <= 40)
m.c80 = Constraint(expr= m.x96 + 40*m.b176 <= 40)
m.c81 = Constraint(expr= m.x97 + 40*m.b177 <= 40)
m.c82 = Constraint(expr= m.x106 - 4.45628648004517*m.b174 <= 0)
m.c83 = Constraint(expr= m.x107 - 4.45628648004517*m.b175 <= 0)
m.c84 = Constraint(expr= m.x108 - 4.45628648004517*m.b176 <= 0)
m.c85 = Constraint(expr= m.x109 - 4.45628648004517*m.b177 <= 0)
m.c86 = Constraint(expr= m.x110 + 4.45628648004517*m.b174 <= 4.45628648004517)
m.c87 = Constraint(expr= m.x111 + 4.45628648004517*m.b175 <= 4.45628648004517)
m.c88 = Constraint(expr= m.x112 + 4.45628648004517*m.b176 <= 4.45628648004517)
m.c89 = Constraint(expr= m.x113 + 4.45628648004517*m.b177 <= 4.45628648004517)
m.c90 = Constraint(expr= - 0.75*m.x114 + m.x146 == 0)
m.c91 = Constraint(expr= - 0.75*m.x115 + m.x147 == 0)
m.c92 = Constraint(expr= - 0.75*m.x116 + m.x148 == 0)
m.c93 = Constraint(expr= - 0.75*m.x117 + m.x149 == 0)
m.c94 = Constraint(expr= m.x118 == 0)
m.c95 = Constraint(expr= m.x119 == 0)
m.c96 = Constraint(expr= m.x120 == 0)
m.c97 = Constraint(expr= m.x121 == 0)
m.c98 = Constraint(expr= m.x150 == 0)
m.c99 = Constraint(expr= m.x151 == 0)
m.c100 = Constraint(expr= m.x152 == 0)
m.c101 = Constraint(expr= m.x153 == 0)
m.c102 = Constraint(expr= m.x54 - m.x114 - m.x118 == 0)
m.c103 = Constraint(expr= m.x55 - m.x115 - m.x119 == 0)
m.c104 = Constraint(expr= m.x56 - m.x116 - m.x120 == 0)
m.c105 = Constraint(expr= m.x57 - m.x117 - m.x121 == 0)
m.c106 = Constraint(expr= m.x70 - m.x146 - m.x150 == 0)
m.c107 = Constraint(expr= m.x71 - m.x147 - m.x151 == 0)
m.c108 = Constraint(expr= m.x72 - m.x148 - m.x152 == 0)
m.c109 = Constraint(expr= m.x73 - m.x149 - m.x153 == 0)
m.c110 = Constraint(expr= m.x114 - 4.45628648004517*m.b178 <= 0)
m.c111 = Constraint(expr= m.x115 - 4.45628648004517*m.b179 <= 0)
m.c112 = Constraint(expr= m.x116 - 4.45628648004517*m.b180 <= 0)
m.c113 = Constraint(expr= m.x117 - 4.45628648004517*m.b181 <= 0)
m.c114 = Constraint(expr= m.x118 + 4.45628648004517*m.b178 <= 4.45628648004517)
m.c115 = Constraint(expr= m.x119 + 4.45628648004517*m.b179 <= 4.45628648004517)
m.c116 = Constraint(expr= m.x120 + 4.45628648004517*m.b180 <= 4.45628648004517)
m.c117 = Constraint(expr= m.x121 + 4.45628648004517*m.b181 <= 4.45628648004517)
m.c118 = Constraint(expr= m.x146 - 3.34221486003388*m.b178 <= 0)
m.c119 = Constraint(expr= m.x147 - 3.34221486003388*m.b179 <= 0)
m.c120 = Constraint(expr= m.x148 - 3.34221486003388*m.b180 <= 0)
m.c121 = Constraint(expr= m.x149 - 3.34221486003388*m.b181 <= 0)
m.c122 = Constraint(expr= m.x150 + 3.34221486003388*m.b178 <= 3.34221486003388)
m.c123 = Constraint(expr= m.x151 + 3.34221486003388*m.b179 <= 3.34221486003388)
m.c124 = Constraint(expr= m.x152 + 3.34221486003388*m.b180 <= 3.34221486003388)
m.c125 = Constraint(expr= m.x153 + 3.34221486003388*m.b181 <= 3.34221486003388)
m.c126 = Constraint(expr=(m.x154/(1e-6 + m.b182) - 1.5*log(1 + m.x122/(1e-6 + m.b182)))*(1e-6 + m.b182) <= 0)
m.c127 = Constraint(expr=(m.x155/(1e-6 + m.b183) - 1.5*log(1 + m.x123/(1e-6 + m.b183)))*(1e-6 + m.b183) <= 0)
m.c128 = Constraint(expr=(m.x156/(1e-6 + m.b184) - 1.5*log(1 + m.x124/(1e-6 + m.b184)))*(1e-6 + m.b184) <= 0)
m.c129 = Constraint(expr=(m.x157/(1e-6 + m.b185) - 1.5*log(1 + m.x125/(1e-6 + m.b185)))*(1e-6 + m.b185) <= 0)
m.c130 = Constraint(expr= m.x126 == 0)
m.c131 = Constraint(expr= m.x127 == 0)
m.c132 = Constraint(expr= m.x128 == 0)
m.c133 = Constraint(expr= m.x129 == 0)
m.c134 = Constraint(expr= m.x158 == 0)
m.c135 = Constraint(expr= m.x159 == 0)
m.c136 = Constraint(expr= m.x160 == 0)
m.c137 = Constraint(expr= m.x161 == 0)
m.c138 = Constraint(expr= m.x58 - m.x122 - m.x126 == 0)
m.c139 = Constraint(expr= m.x59 - m.x123 - m.x127 == 0)
m.c140 = Constraint(expr= m.x60 - m.x124 - m.x128 == 0)
m.c141 = Constraint(expr= m.x61 - m.x125 - m.x129 == 0)
m.c142 = Constraint(expr= m.x74 - m.x154 - m.x158 == 0)
m.c143 = Constraint(expr= m.x75 - m.x155 - m.x159 == 0)
m.c144 = Constraint(expr= m.x76 - m.x156 - m.x160 == 0)
m.c145 = Constraint(expr= m.x77 - m.x157 - m.x161 == 0)
m.c146 = Constraint(expr= m.x122 - 4.45628648004517*m.b182 <= 0)
m.c147 = Constraint(expr= m.x123 - 4.45628648004517*m.b183 <= 0)
m.c148 = Constraint(expr= m.x124 - 4.45628648004517*m.b184 <= 0)
m.c149 = Constraint(expr= m.x125 - 4.45628648004517*m.b185 <= 0)
m.c150 = Constraint(expr= m.x126 + 4.45628648004517*m.b182 <= 4.45628648004517)
m.c151 = Constraint(expr= m.x127 + 4.45628648004517*m.b183 <= 4.45628648004517)
m.c152 = Constraint(expr= m.x128 + 4.45628648004517*m.b184 <= 4.45628648004517)
m.c153 = Constraint(expr= m.x129 + 4.45628648004517*m.b185 <= 4.45628648004517)
m.c154 = Constraint(expr= m.x154 - 2.54515263975353*m.b182 <= 0)
m.c155 = Constraint(expr= m.x155 - 2.54515263975353*m.b183 <= 0)
m.c156 = Constraint(expr= m.x156 - 2.54515263975353*m.b184 <= 0)
m.c157 = Constraint(expr= m.x157 - 2.54515263975353*m.b185 <= 0)
m.c158 = Constraint(expr= m.x158 + 2.54515263975353*m.b182 <= 2.54515263975353)
m.c159 = Constraint(expr= m.x159 + 2.54515263975353*m.b183 <= 2.54515263975353)
m.c160 = Constraint(expr= m.x160 + 2.54515263975353*m.b184 <= 2.54515263975353)
m.c161 = Constraint(expr= m.x161 + 2.54515263975353*m.b185 <= 2.54515263975353)
m.c162 = Constraint(expr= - m.x130 + m.x162 == 0)
m.c163 = Constraint(expr= - m.x131 + m.x163 == 0)
m.c164 = Constraint(expr= - m.x132 + m.x164 == 0)
m.c165 = Constraint(expr= - m.x133 + m.x165 == 0)
m.c166 = Constraint(expr= - 0.5*m.x138 + m.x162 == 0)
m.c167 = Constraint(expr= - 0.5*m.x139 + m.x163 == 0)
m.c168 = Constraint(expr= - 0.5*m.x140 + m.x164 == 0)
m.c169 = Constraint(expr= - 0.5*m.x141 + m.x165 == 0)
m.c170 = Constraint(expr= m.x134 == 0)
m.c171 = Constraint(expr= m.x135 == 0)
m.c172 = Constraint(expr= m.x136 == 0)
m.c173 = Constraint(expr= m.x137 == 0)
m.c174 = Constraint(expr= m.x142 == 0)
m.c175 = Constraint(expr= m.x143 == 0)
m.c176 = Constraint(expr= m.x144 == 0)
m.c177 = Constraint(expr= m.x145 == 0)
m.c178 = Constraint(expr= m.x166 == 0)
m.c179 = Constraint(expr= m.x167 == 0)
m.c180 = Constraint(expr= m.x168 == 0)
m.c181 = Constraint(expr= m.x169 == 0)
m.c182 = Constraint(expr= m.x62 - m.x130 - m.x134 == 0)
m.c183 = Constraint(expr= m.x63 - m.x131 - m.x135 == 0)
m.c184 = Constraint(expr= m.x64 - m.x132 - m.x136 == 0)
m.c185 = Constraint(expr= m.x65 - m.x133 - m.x137 == 0)
m.c186 = Constraint(expr= m.x66 - m.x138 - m.x142 == 0)
m.c187 = Constraint(expr= m.x67 - m.x139 - m.x143 == 0)
m.c188 = Constraint(expr= m.x68 - m.x140 - m.x144 == 0)
m.c189 = Constraint(expr= m.x69 - m.x141 - m.x145 == 0)
m.c190 = Constraint(expr= m.x78 - m.x162 - m.x166 == 0)
m.c191 = Constraint(expr= m.x79 - m.x163 - m.x167 == 0)
m.c192 = Constraint(expr= m.x80 - m.x164 - m.x168 == 0)
m.c193 = Constraint(expr= m.x81 - m.x165 - m.x169 == 0)
m.c194 = Constraint(expr= m.x130 - 4.45628648004517*m.b186 <= 0)
m.c195 = Constraint(expr= m.x131 - 4.45628648004517*m.b187 <= 0)
m.c196 = Constraint(expr= m.x132 - 4.45628648004517*m.b188 <= 0)
m.c197 = Constraint(expr= m.x133 - 4.45628648004517*m.b189 <= 0)
m.c198 = Constraint(expr= m.x134 + 4.45628648004517*m.b186 <= 4.45628648004517)
m.c199 = Constraint(expr= m.x135 + 4.45628648004517*m.b187 <= 4.45628648004517)
m.c200 = Constraint(expr= m.x136 + 4.45628648004517*m.b188 <= 4.45628648004517)
m.c201 = Constraint(expr= m.x137 + 4.45628648004517*m.b189 <= 4.45628648004517)
m.c202 = Constraint(expr= m.x138 - 30*m.b186 <= 0)
m.c203 = Constraint(expr= m.x139 - 30*m.b187 <= 0)
m.c204 = Constraint(expr= m.x140 - 30*m.b188 <= 0)
m.c205 = Constraint(expr= m.x141 - 30*m.b189 <= 0)
m.c206 = Constraint(expr= m.x142 + 30*m.b186 <= 30)
m.c207 = Constraint(expr= m.x143 + 30*m.b187 <= 30)
m.c208 = Constraint(expr= m.x144 + 30*m.b188 <= 30)
m.c209 = Constraint(expr= m.x145 + 30*m.b189 <= 30)
m.c210 = Constraint(expr= m.x162 - 15*m.b186 <= 0)
m.c211 = Constraint(expr= m.x163 - 15*m.b187 <= 0)
m.c212 = Constraint(expr= m.x164 - 15*m.b188 <= 0)
m.c213 = Constraint(expr= m.x165 - 15*m.b189 <= 0)
m.c214 = Constraint(expr= m.x166 + 15*m.b186 <= 15)
m.c215 = Constraint(expr= m.x167 + 15*m.b187 <= 15)
m.c216 = Constraint(expr= m.x168 + 15*m.b188 <= 15)
m.c217 = Constraint(expr= m.x169 + 15*m.b189 <= 15)
m.c218 = Constraint(expr= m.x2 + 5*m.b190 == 0)
m.c219 = Constraint(expr= m.x3 + 4*m.b191 == 0)
m.c220 = Constraint(expr= m.x4 + 6*m.b192 == 0)
m.c221 = Constraint(expr= m.x5 + 3*m.b193 == 0)
m.c222 = Constraint(expr= m.x6 + 8*m.b194 == 0)
m.c223 = Constraint(expr= m.x7 + 7*m.b195 == 0)
m.c224 = Constraint(expr= m.x8 + 6*m.b196 == 0)
m.c225 = Constraint(expr= m.x9 + 5*m.b197 == 0)
m.c226 = Constraint(expr= m.x10 + 6*m.b198 == 0)
m.c227 = Constraint(expr= m.x11 + 9*m.b199 == 0)
m.c228 = Constraint(expr= m.x12 + 4*m.b200 == 0)
m.c229 = Constraint(expr= m.x13 + 3*m.b201 == 0)
m.c230 = Constraint(expr= m.x14 + 10*m.b202 == 0)
m.c231 = Constraint(expr= m.x15 + 9*m.b203 == 0)
m.c232 = Constraint(expr= m.x16 + 5*m.b204 == 0)
m.c233 = Constraint(expr= m.x17 + 6*m.b205 == 0)
m.c234 = Constraint(expr= m.x18 + 6*m.b206 == 0)
m.c235 = Constraint(expr= m.x19 + 10*m.b207 == 0)
m.c236 = Constraint(expr= m.x20 + 6*m.b208 == 0)
m.c237 = Constraint(expr= m.x21 + 9*m.b209 == 0)
m.c238 = Constraint(expr= m.b170 - m.b171 <= 0)
m.c239 = Constraint(expr= m.b170 - m.b172 <= 0)
m.c240 = Constraint(expr= m.b170 - m.b173 <= 0)
m.c241 = Constraint(expr= m.b171 - m.b172 <= 0)
m.c242 = Constraint(expr= m.b171 - m.b173 <= 0)
m.c243 = Constraint(expr= m.b172 - m.b173 <= 0)
m.c244 = Constraint(expr= m.b174 - m.b175 <= 0)
m.c245 = Constraint(expr= m.b174 - m.b176 <= 0)
m.c246 = Constraint(expr= m.b174 - m.b177 <= 0)
m.c247 = Constraint(expr= m.b175 - m.b176 <= 0)
m.c248 = Constraint(expr= m.b175 - m.b177 <= 0)
m.c249 = Constraint(expr= m.b176 - m.b177 <= 0)
m.c250 = Constraint(expr= m.b178 - m.b179 <= 0)
m.c251 = Constraint(expr= m.b178 - m.b180 <= 0)
m.c252 = Constraint(expr= m.b178 - m.b181 <= 0)
m.c253 = Constraint(expr= m.b179 - m.b180 <= 0)
m.c254 = Constraint(expr= m.b179 - m.b181 <= 0)
m.c255 = Constraint(expr= m.b180 - m.b181 <= 0)
m.c256 = Constraint(expr= m.b182 - m.b183 <= 0)
m.c257 = Constraint(expr= m.b182 - m.b184 <= 0)
m.c258 = Constraint(expr= m.b182 - m.b185 <= 0)
m.c259 = Constraint(expr= m.b183 - m.b184 <= 0)
m.c260 = Constraint(expr= m.b183 - m.b185 <= 0)
m.c261 = Constraint(expr= m.b184 - m.b185 <= 0)
m.c262 = Constraint(expr= m.b186 - m.b187 <= 0)
m.c263 = Constraint(expr= m.b186 - m.b188 <= 0)
m.c264 = Constraint(expr= m.b186 - m.b189 <= 0)
m.c265 = Constraint(expr= m.b187 - m.b188 <= 0)
m.c266 = Constraint(expr= m.b187 - m.b189 <= 0)
m.c267 = Constraint(expr= m.b188 - m.b189 <= 0)
m.c268 = Constraint(expr= m.b190 + m.b191 <= 1)
m.c269 = Constraint(expr= m.b190 + m.b192 <= 1)
m.c270 = Constraint(expr= m.b190 + m.b193 <= 1)
m.c271 = Constraint(expr= m.b190 + m.b191 <= 1)
m.c272 = Constraint(expr= m.b191 + m.b192 <= 1)
m.c273 = Constraint(expr= m.b191 + m.b193 <= 1)
m.c274 = Constraint(expr= m.b190 + m.b192 <= 1)
m.c275 = Constraint(expr= m.b191 + m.b192 <= 1)
m.c276 = Constraint(expr= m.b192 + m.b193 <= 1)
m.c277 = Constraint(expr= m.b190 + m.b193 <= 1)
m.c278 = Constraint(expr= m.b191 + m.b193 <= 1)
m.c279 = Constraint(expr= m.b192 + m.b193 <= 1)
m.c280 = Constraint(expr= m.b194 + m.b195 <= 1)
m.c281 = Constraint(expr= m.b194 + m.b196 <= 1)
m.c282 = Constraint(expr= m.b194 + m.b197 <= 1)
m.c283 = Constraint(expr= m.b194 + m.b195 <= 1)
m.c284 = Constraint(expr= m.b195 + m.b196 <= 1)
m.c285 = Constraint(expr= m.b195 + m.b197 <= 1)
m.c286 = Constraint(expr= m.b194 + m.b196 <= 1)
m.c287 = Constraint(expr= m.b195 + m.b196 <= 1)
m.c288 = Constraint(expr= m.b196 + m.b197 <= 1)
m.c289 = Constraint(expr= m.b194 + m.b197 <= 1)
m.c290 = Constraint(expr= m.b195 + m.b197 <= 1)
m.c291 = Constraint(expr= m.b196 + m.b197 <= 1)
m.c292 = Constraint(expr= m.b198 + m.b199 <= 1)
m.c293 = Constraint(expr= m.b198 + m.b200 <= 1)
m.c294 = Constraint(expr= m.b198 + m.b201 <= 1)
m.c295 = Constraint(expr= m.b198 + m.b199 <= 1)
m.c296 = Constraint(expr= m.b199 + m.b200 <= 1)
m.c297 = Constraint(expr= m.b199 + m.b201 <= 1)
m.c298 = Constraint(expr= m.b198 + m.b200 <= 1)
m.c299 = Constraint(expr= m.b199 + m.b200 <= 1)
m.c300 = Constraint(expr= m.b200 + m.b201 <= 1)
m.c301 = Constraint(expr= m.b198 + m.b201 <= 1)
m.c302 = Constraint(expr= m.b199 + m.b201 <= 1)
m.c303 = Constraint(expr= m.b200 + m.b201 <= 1)
m.c304 = Constraint(expr= m.b202 + m.b203 <= 1)
m.c305 = Constraint(expr= m.b202 + m.b204 <= 1)
m.c306 = Constraint(expr= m.b202 + m.b205 <= 1)
m.c307 = Constraint(expr= m.b202 + m.b203 <= 1)
m.c308 = Constraint(expr= m.b203 + m.b204 <= 1)
m.c309 = Constraint(expr= m.b203 + m.b205 <= 1)
m.c310 = Constraint(expr= m.b202 + m.b204 <= 1)
m.c311 = Constraint(expr= m.b203 + m.b204 <= 1)
m.c312 = Constraint(expr= m.b204 + m.b205 <= 1)
m.c313 = Constraint(expr= m.b202 + m.b205 <= 1)
m.c314 = Constraint(expr= m.b203 + m.b205 <= 1)
m.c315 = Constraint(expr= m.b204 + m.b205 <= 1)
m.c316 = Constraint(expr= m.b206 + m.b207 <= 1)
m.c317 = Constraint(expr= m.b206 + m.b208 <= 1)
m.c318 = Constraint(expr= m.b206 + m.b209 <= 1)
m.c319 = Constraint(expr= m.b206 + m.b207 <= 1)
m.c320 = Constraint(expr= m.b207 + m.b208 <= 1)
m.c321 = Constraint(expr= m.b207 + m.b209 <= 1)
m.c322 = Constraint(expr= m.b206 + m.b208 <= 1)
m.c323 = Constraint(expr= m.b207 + m.b208 <= 1)
m.c324 = Constraint(expr= m.b208 + m.b209 <= 1)
m.c325 = Constraint(expr= m.b206 + m.b209 <= 1)
m.c326 = Constraint(expr= m.b207 + m.b209 <= 1)
m.c327 = Constraint(expr= m.b208 + m.b209 <= 1)
m.c328 = Constraint(expr= m.b170 - m.b190 <= 0)
m.c329 = Constraint(expr= - m.b170 + m.b171 - m.b191 <= 0)
m.c330 = Constraint(expr= - m.b170 - m.b171 + m.b172 - m.b192 <= 0)
m.c331 = Constraint(expr= - m.b170 - m.b171 - m.b172 + m.b173 - m.b193 <= 0)
m.c332 = Constraint(expr= m.b174 - m.b194 <= 0)
m.c333 = Constraint(expr= - m.b174 + m.b175 - m.b195 <= 0)
m.c334 = Constraint(expr= - m.b174 - m.b175 + m.b176 - m.b196 <= 0)
m.c335 = Constraint(expr= - m.b174 - m.b175 - m.b176 + m.b177 - m.b197 <= 0)
m.c336 = Constraint(expr= m.b178 - m.b198 <= 0)
m.c337 = Constraint(expr= - m.b178 + m.b179 - m.b199 <= 0)
m.c338 = Constraint(expr= - m.b178 - m.b179 + m.b180 - m.b200 <= 0)
m.c339 = Constraint(expr= - m.b178 - m.b179 - m.b180 + m.b181 - m.b201 <= 0)
m.c340 = Constraint(expr= m.b182 - m.b202 <= 0)
m.c341 = Constraint(expr= - m.b182 + m.b183 - m.b203 <= 0)
m.c342 = Constraint(expr= - m.b182 - m.b183 + m.b184 - m.b204 <= 0)
m.c343 = Constraint(expr= - m.b182 - m.b183 - m.b184 + m.b185 - m.b205 <= 0)
m.c344 = Constraint(expr= m.b186 - m.b206 <= 0)
m.c345 = Constraint(expr= - m.b186 + m.b187 - m.b207 <= 0)
m.c346 = Constraint(expr= - m.b186 - m.b187 + m.b188 - m.b208 <= 0)
m.c347 = Constraint(expr= - m.b186 - m.b187 - m.b188 + m.b189 - m.b209 <= 0)
m.c348 = Constraint(expr= m.b170 + m.b174 == 1)
m.c349 = Constraint(expr= m.b171 + m.b175 == 1)
m.c350 = Constraint(expr= m.b172 + m.b176 == 1)
m.c351 = Constraint(expr= m.b173 + m.b177 == 1)
m.c352 = Constraint(expr= m.b170 + m.b174 - m.b178 >= 0)
m.c353 = Constraint(expr= m.b171 + m.b175 - m.b179 >= 0)
m.c354 = Constraint(expr= m.b172 + m.b176 - m.b180 >= 0)
m.c355 = Constraint(expr= m.b173 + m.b177 - m.b181 >= 0)
m.c356 = Constraint(expr= m.b170 + m.b174 - m.b182 >= 0)
m.c357 = Constraint(expr= m.b171 + m.b175 - m.b183 >= 0)
m.c358 = Constraint(expr= m.b172 + m.b176 - m.b184 >= 0)
m.c359 = Constraint(expr= m.b173 + m.b177 - m.b185 >= 0)
m.c360 = Constraint(expr= m.b170 + m.b174 - m.b186 >= 0)
m.c361 = Constraint(expr= m.b171 + m.b175 - m.b187 >= 0)
m.c362 = Constraint(expr= m.b172 + m.b176 - m.b188 >= 0)
m.c363 = Constraint(expr= m.b173 + m.b177 - m.b189 >= 0)
| 35.187759 | 117 | 0.635565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 699 | 0.020607 |
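# The block above is a complete Pyomo model (a nonconvex MINLP: binary selection
# variables plus log terms inside big-M style constraints). A minimal solve
# sketch, kept as comments because it assumes an external MINLP-capable solver
# such as 'couenne' or 'bonmin' is installed -- the solver name is an assumption:
#   from pyomo.environ import SolverFactory
#   results = SolverFactory('couenne').solve(m, tee=True)
#   m.obj.display()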
4fcbcca983a903be3e13de0bc766ff057f2460ae | 2,991 | py | Python | kitsune/gallery/models.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | ["BSD-3-Clause"] | null | null | null | kitsune/gallery/models.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | ["BSD-3-Clause"] | null | null | null | kitsune/gallery/models.py | jgmize/kitsune | 8f23727a9c7fcdd05afc86886f0134fb08d9a2f0 | ["BSD-3-Clause"] | null | null | null | from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from kitsune.sumo.models import ModelBase, LocaleField
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.utils import auto_delete_files
class Media(ModelBase):
"""Generic model for media"""
title = models.CharField(max_length=255, db_index=True)
created = models.DateTimeField(default=datetime.now, db_index=True)
updated = models.DateTimeField(default=datetime.now, db_index=True)
updated_by = models.ForeignKey(User, null=True)
description = models.TextField(max_length=10000)
locale = LocaleField(default=settings.GALLERY_DEFAULT_LANGUAGE,
db_index=True)
is_draft = models.NullBooleanField(default=None, null=True, editable=False)
class Meta(object):
abstract = True
ordering = ['-created']
unique_together = (('locale', 'title'), ('is_draft', 'creator'))
def __unicode__(self):
return '[%s] %s' % (self.locale, self.title)
@auto_delete_files
class Image(Media):
creator = models.ForeignKey(User, related_name='gallery_images')
file = models.ImageField(upload_to=settings.GALLERY_IMAGE_PATH,
max_length=settings.MAX_FILEPATH_LENGTH)
thumbnail = models.ImageField(
upload_to=settings.GALLERY_IMAGE_THUMBNAIL_PATH, null=True,
max_length=settings.MAX_FILEPATH_LENGTH)
def get_absolute_url(self):
return reverse('gallery.media', args=['image', self.id])
def thumbnail_url_if_set(self):
"""Returns self.thumbnail, if set, else self.file"""
return self.thumbnail.url if self.thumbnail else self.file.url
@auto_delete_files
class Video(Media):
creator = models.ForeignKey(User, related_name='gallery_videos')
webm = models.FileField(upload_to=settings.GALLERY_VIDEO_PATH, null=True,
max_length=settings.MAX_FILEPATH_LENGTH)
ogv = models.FileField(upload_to=settings.GALLERY_VIDEO_PATH, null=True,
max_length=settings.MAX_FILEPATH_LENGTH)
flv = models.FileField(upload_to=settings.GALLERY_VIDEO_PATH, null=True,
max_length=settings.MAX_FILEPATH_LENGTH)
poster = models.ImageField(upload_to=settings.GALLERY_VIDEO_THUMBNAIL_PATH,
max_length=settings.MAX_FILEPATH_LENGTH,
null=True)
thumbnail = models.ImageField(
upload_to=settings.GALLERY_VIDEO_THUMBNAIL_PATH, null=True,
max_length=settings.MAX_FILEPATH_LENGTH)
def get_absolute_url(self):
return reverse('gallery.media', args=['video', self.id])
def thumbnail_url_if_set(self):
"""Returns self.thumbnail.url, if set, else default thumbnail URL"""
progress_url = settings.GALLERY_VIDEO_THUMBNAIL_PROGRESS_URL
return self.thumbnail.url if self.thumbnail else progress_url
| 41.541667 | 79 | 0.704112 | 2,656 | 0.887997 | 0 | 0 | 1,905 | 0.636911 | 0 | 0 | 278 | 0.092946 |
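# A minimal usage sketch for the gallery models above. It assumes a configured
# Django project and an existing `User` row; the field values are illustrative,
# and thumbnail_url_if_set() simply falls back to the full-size file URL until
# a thumbnail has been generated.
def example_image_url(user):
    image = Image.objects.create(
        title='Example screenshot',
        description='An illustrative gallery image',
        creator=user,
        file='uploads/gallery/images/example.png',
    )
    return image.thumbnail_url_if_set()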
4fccde0990616a0b7cd8bff73f531e7de8b4cd4b | 104 | py | Python | senseTk/__main__.py | Helicopt/senseToolkit | 1630ec3f03368980a13f448b3be554efe44ec7cb | ["MIT"] | 2 | 2018-07-30T03:54:58.000Z | 2018-12-17T16:09:06.000Z | senseTk/__main__.py | Helicopt/senseToolkit | 1630ec3f03368980a13f448b3be554efe44ec7cb | ["MIT"] | null | null | null | senseTk/__main__.py | Helicopt/senseToolkit | 1630ec3f03368980a13f448b3be554efe44ec7cb | ["MIT"] | null | null | null | import senseTk
if __name__ == '__main__':
print('senseToolkit version %s' % (senseTk.__version__))
| 20.8 | 60 | 0.711538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.336538 |
4fcfbd8024734cb1efbb3a2c975669d6daead2b5 | 666 | py | Python | RaspberryPi/Hardware/UltrasonicSensorSet.py | amaankhan02/SelfDrivingCar | 7831a9db13d2e8c9ca683e48588eabdf065f80fa | ["MIT"] | null | null | null | RaspberryPi/Hardware/UltrasonicSensorSet.py | amaankhan02/SelfDrivingCar | 7831a9db13d2e8c9ca683e48588eabdf065f80fa | ["MIT"] | null | null | null | RaspberryPi/Hardware/UltrasonicSensorSet.py | amaankhan02/SelfDrivingCar | 7831a9db13d2e8c9ca683e48588eabdf065f80fa | ["MIT"] | null | null | null | import RPi.GPIO as gpio
from enum import Enum
import time
from GpioMode import GpioMode
from UltrasonicSensor import UltrasonicSensor
class UltrasonicSensorSet:
def __init__(self, *args:UltrasonicSensor):
"""
:param args: UltrasonicSensor objects
"""
self.ussSet = args
def getDistances(self):
"""
        :return: list of distances from all UltrasonicSensors, in the order they were passed to the constructor
"""
distances = []
for uss in self.ussSet:
distances.append(uss.getDistance())
return distances
def cleanup(self):
gpio.cleanup()
print("GPIO cleaned up") | 25.615385 | 97 | 0.641141 | 531 | 0.797297 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.286787 |
4fd0f164b54137d3499bbb7d41a57e54fcaffbef | 827 | py | Python | m3u_to_channels.py | Axel-Erfurt/hypnotixLite | 11d2381999724a247b8b42b345da5ba6c9e21178 | ["MIT"] | 3 | 2021-03-26T03:53:30.000Z | 2021-07-20T23:50:14.000Z | m3u_to_channels.py | Axel-Erfurt/hypnotixLite | 11d2381999724a247b8b42b345da5ba6c9e21178 | ["MIT"] | 2 | 2021-01-22T11:14:38.000Z | 2021-04-15T18:40:44.000Z | m3u_to_channels.py | Axel-Erfurt/hypnotixLite | 11d2381999724a247b8b42b345da5ba6c9e21178 | ["MIT"] | 1 | 2021-02-15T06:53:45.000Z | 2021-02-15T06:53:45.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
if len(sys.argv) < 3:
print("usage: python3 m3u_to_channels.py infile.m3u outfile.txt")
sys.exit()
else:
text = open(sys.argv[1], "r").read()
chList = []
urlList = []
mlist = text.splitlines()
for line in mlist:
if line.startswith("#EXTINF"):
ch = line.partition('tvg-name="')[2].partition('" ')[0]
if ch == "":
ch = line.partition(',')[2]
chList.append(ch)
if line.startswith("http"):
urlList.append(line)
with open(sys.argv[2], "w") as f:
for x in range(len(chList)):
if not "***" in chList[x]:
f.write(f"{chList[x].replace('Pluto ', '').replace(' Made In Germany', '')},{urlList[x]}\n")
f.close()
| 28.517241 | 108 | 0.504232 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.276904 |
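# Illustrative input/output for the converter above (channel name and URL are
# made up). An #EXTINF line supplies the name -- from tvg-name="..." when
# present, otherwise from the text after the comma -- and the following http
# line supplies the stream URL; each pair becomes one "name,url" row:
#   #EXTINF:-1 tvg-name="Example Channel",Example Channel
#   http://example.com/stream.m3u8
# becomes:
#   Example Channel,http://example.com/stream.m3u8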
4fd0f444ff8c7ce8c0356f846fd27ec234c49cff | 823 | py | Python | Experiments/RunTrainBasicClassification.py | christymarc/raycasting-simulation | ed9b92143d3eb1c5a25900419ead517f93f8c315 | ["MIT"] | null | null | null | Experiments/RunTrainBasicClassification.py | christymarc/raycasting-simulation | ed9b92143d3eb1c5a25900419ead517f93f8c315 | ["MIT"] | null | null | null | Experiments/RunTrainBasicClassification.py | christymarc/raycasting-simulation | ed9b92143d3eb1c5a25900419ead517f93f8c315 | ["MIT"] | null | null | null | from subprocess import run
compared_models = [
"resnet18",
"xresnet18",
"xresnet18_deep",
"xresnet18_deeper",
"xse_resnet18",
"xresnext18",
"xse_resnext18",
"xse_resnext18_deep",
"xse_resnext18_deeper",
"resnet50",
"xresnet50",
"xresnet50_deep",
"xresnet50_deeper",
"xse_resnet50",
"xresnext50",
"xse_resnext50",
"xse_resnext50_deep",
"xse_resnext50_deeper",
"squeezenet1_1",
"densenet121",
"densenet201",
"vgg11_bn",
"vgg19_bn",
"alexnet",
]
for dataset in ["corrected-wander-full"]:
for model in compared_models:
run(
[
"python",
"TrainBasicClassification.py",
model,
dataset,
"--pretrained",
]
)
| 19.139535 | 46 | 0.545565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 420 | 0.510328 |
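# A variant of the launcher above that stops the sweep on the first failing
# run; `run` is subprocess.run, so check=True raises CalledProcessError on a
# non-zero exit code. Model and dataset names are taken from the lists above.
def run_sweep(models=compared_models, datasets=("corrected-wander-full",)):
    for dataset in datasets:
        for model in models:
            run(["python", "TrainBasicClassification.py", model, dataset, "--pretrained"], check=True)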
4fd11410ca1410fdacfd8424d35b517273245311 | 6,822 | py | Python | ferry/config/cassandra/cassandraclientconfig.py | jhorey/ferry | bbaa047df08386e17130a939e20fde5e840d1ffa | ["Apache-2.0"] | 44 | 2015-06-04T09:27:37.000Z | 2020-10-20T06:23:38.000Z | ferry/config/cassandra/cassandraclientconfig.py | jhorey/ferry | bbaa047df08386e17130a939e20fde5e840d1ffa | ["Apache-2.0"] | 2 | 2016-02-26T11:53:36.000Z | 2020-11-13T12:38:03.000Z | ferry/config/cassandra/cassandraclientconfig.py | jhorey/ferry | bbaa047df08386e17130a939e20fde5e840d1ffa | ["Apache-2.0"] | 13 | 2015-06-25T03:46:00.000Z | 2020-03-25T11:20:31.000Z | # Copyright 2014 OpenCore LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import sh
from string import Template
class CassandraClientInitializer(object):
"""
Create a new initializer
Param user The user login for the git repo
"""
def __init__(self, system):
self.template_dir = None
self.template_repo = None
self.container_data_dir = CassandraClientConfig.data_directory
self.container_log_dir = CassandraClientConfig.log_directory
"""
Generate a new hostname
"""
def new_host_name(self, instance_id):
return 'cassandra_client' + str(instance_id)
"""
Start the service on the containers.
"""
def _execute_service(self, containers, entry_point, fabric, cmd):
return fabric.cmd(containers,
'/service/sbin/startnode %s %s' % (cmd, entry_point['cassandra_url']))
def start_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "start")
def restart_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "restart")
def stop_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "stop")
def _generate_config_dir(self, uuid):
return 'cassandra_client' + str(uuid)
def get_public_ports(self, num_instances):
"""
Ports to expose to the outside world.
"""
return []
def get_internal_ports(self, num_instances):
"""
Ports needed for communication within the network.
This is usually used for internal IPC.
"""
return []
def get_working_ports(self, num_instances):
"""
Ports necessary to get things working.
"""
return []
def get_total_instances(self, num_instances, layers):
"""
Get total number of instances.
"""
instances = []
for i in range(num_instances):
instances.append('cassandra-client')
return instances
"""
Generate a new configuration
"""
def generate(self, num):
return CassandraClientConfig(num)
def _apply_cassandra(self, host_dir, entry_point, config, container):
yaml_in_file = open(self.template_dir + '/cassandra.yaml.template', 'r')
yaml_out_file = open(host_dir + '/cassandra.yaml', 'w+')
# Now make the changes to the template file.
changes = { "LOCAL_ADDRESS":container['data_ip'],
"DATA_DIR":config.data_directory,
"CACHE_DIR":config.cache_directory,
"COMMIT_DIR":config.commit_directory,
"SEEDS":entry_point['cassandra_url']}
for line in yaml_in_file:
s = Template(line).substitute(changes)
yaml_out_file.write(s)
yaml_out_file.close()
yaml_in_file.close()
def _apply_titan(self, host_dir, storage_entry, container):
in_file = open(self.template_dir + '/titan.properties', 'r')
out_file = open(host_dir + '/titan.properties', 'w+')
changes = { "BACKEND":"cassandrathrift",
"DB":container['args']['db'],
"IP":storage_entry['seed']}
for line in in_file:
s = Template(line).substitute(changes)
out_file.write(s)
out_file.close()
in_file.close()
def _find_cassandra_storage(self, containers):
"""
Find a Cassandra compatible storage entry.
"""
for c in containers:
for s in c['storage']:
if s['type'] == 'cassandra':
return s
"""
Apply the configuration to the instances
"""
def apply(self, config, containers):
entry_point = { 'type' : 'cassandra-client' }
entry_point['ip'] = containers[0]['manage_ip']
# Get the storage information.
storage_entry = self._find_cassandra_storage(containers)
if not storage_entry:
# The Cassandra client is currently only compatible with a
# Cassandra backend. So just return an error.
return None, None
# Otherwise record the storage type and get the seed node.
entry_point['cassandra_url'] = storage_entry['seed']
# Create a new configuration directory, and place
# into the template directory.
config_dirs = []
try:
host_dir = "/tmp/" + self._generate_config_dir(config.uuid)
try:
sh.mkdir('-p', host_dir)
except:
sys.stderr.write('could not create config dir ' + host_dir)
self._apply_cassandra(host_dir, entry_point, config, containers[0])
# See if we need to apply
if 'titan' in storage_entry:
self._apply_titan(host_dir, storage_entry, containers[0])
out_file = open(host_dir + '/servers', 'w+')
out_file.write("%s %s" % (storage_entry['titan']['ip'], 'rexserver'))
out_file.close
# The config dirs specifies what to transfer over. We want to
# transfer over specific files into a directory.
for c in containers:
config_dirs.append([c['container'],
host_dir + '/*',
config.config_directory])
except IOError as err:
sys.stderr.write('' + str(err))
return config_dirs, entry_point
class CassandraClientConfig(object):
data_directory = '/service/data/main/'
log_directory = '/service/data/logs/'
commit_directory = '/service/data/commits/'
cache_directory = '/service/data/cache/'
config_directory = '/service/conf/cassandra/'
def __init__(self, num):
self.num = num
self.data_directory = CassandraClientConfig.data_directory
self.commit_directory = CassandraClientConfig.commit_directory
self.cache_directory = CassandraClientConfig.cache_directory
self.log_directory = CassandraClientConfig.log_directory
self.config_directory = CassandraClientConfig.config_directory
| 35.717277 | 96 | 0.622398 | 6,190 | 0.907359 | 0 | 0 | 0 | 0 | 0 | 0 | 2,320 | 0.340076 |
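# A rough driver sketch for the initializer above. The container dictionary
# layout is inferred from the fields accessed in apply(); every value below
# (IPs, uuid, template path, db name) is a made-up assumption.
def example_apply():
    init = CassandraClientInitializer(system=None)
    init.template_dir = '/service/templates'
    config = init.generate(1)
    config.uuid = 'abc123'
    containers = [{
        'manage_ip': '10.0.0.5',
        'data_ip': '10.0.0.6',
        'container': 'cassandra-client-0',
        'args': {'db': 'example_graph'},
        'storage': [{'type': 'cassandra', 'seed': '10.0.0.7'}],
    }]
    return init.apply(config, containers)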
4fd1ce32a10f30a9038b645d55203323aae7bf99 | 2,297 | py | Python | fn_mcafee_esm/setup.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | ["MIT"] | 65 | 2017-12-04T13:58:32.000Z | 2022-03-24T18:33:17.000Z | fn_mcafee_esm/setup.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | ["MIT"] | 48 | 2018-03-02T19:17:14.000Z | 2022-03-09T22:00:38.000Z | fn_mcafee_esm/setup.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | ["MIT"] | 95 | 2018-01-11T16:23:39.000Z | 2022-03-21T11:34:29.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2018. All Rights Reserved.
from setuptools import setup, find_packages
setup(
name='fn_mcafee_esm',
version='1.0.2',
license='MIT',
author='IBM Resilient',
author_email='support@resilientsystems.com',
description="Resilient Circuits Components for 'fn_mcafee_esm'",
long_description="""The McAfee ESM integration with the Resilient platform allows for the escalation and enrichment
of cases between McAfee and the Resilient platform. The integration includes a poller and 6 functions. The
returned results can be used to make customized updates to the Resilient platform, such as updating incidents,
data tables and so on. The integration can also be used to make updates to McAfee ESM cases.""",
install_requires=[
'resilient_circuits>=30.0.0',
'resilient-lib'
],
packages=find_packages(),
include_package_data=True,
platforms='any',
classifiers=[
'Programming Language :: Python',
],
entry_points={
"resilient.circuits.components": [
"McafeeEsmGetCaseDetailFunctionComponent = fn_mcafee_esm.components.mcafee_esm_get_case_detail:FunctionComponent",
"McafeeEsmGetListOfCasesFunctionComponent = fn_mcafee_esm.components.mcafee_esm_get_list_of_cases:FunctionComponent",
"McafeeEsmGetCaseEvenstsDetailFunctionComponent = fn_mcafee_esm.components.mcafee_esm_get_case_events_detail:FunctionComponent",
"McafeeEsmEditCaseFunctionComponent = fn_mcafee_esm.components.mcafee_esm_edit_case:FunctionComponent",
"McafeeEsmGetTriggeredAlarms = fn_mcafee_esm.components.mcafee_esm_get_triggered_alarms:FunctionComponent",
"McafeeEsmQueryLogs = fn_mcafee_esm.components.mcafee_esm_query:FunctionComponent",
"McafeeEsmCasePolling = fn_mcafee_esm.components.mcafee_esm_case_polling:ESM_CasePolling"
],
"resilient.circuits.configsection": ["gen_config = fn_mcafee_esm.util.config:config_section_data"],
"resilient.circuits.customize": ["customize = fn_mcafee_esm.util.customize:customization_data"],
"resilient.circuits.selftest": ["selftest = fn_mcafee_esm.util.selftest:selftest_function"]
}
) | 54.690476 | 140 | 0.742273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,781 | 0.775359 |
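# A small sketch of how the entry points declared above can be enumerated at
# runtime once the package is installed; only the standard library is used,
# and the group name matches the "resilient.circuits.components" key above.
from importlib.metadata import entry_points
def list_resilient_components():
    eps = entry_points()
    if hasattr(eps, "select"):          # Python 3.10+ API
        group = eps.select(group="resilient.circuits.components")
    else:                               # older importlib.metadata API
        group = eps.get("resilient.circuits.components", [])
    return sorted(ep.name for ep in group)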
4fd25d52784965f16619e3e176946ad59586a29f | 95 | py | Python | addons14/storage_image/models/__init__.py | odoochain/addons_oca | 55d456d798aebe16e49b4a6070765f206a8885ca | ["MIT"] | 1 | 2021-06-10T14:59:13.000Z | 2021-06-10T14:59:13.000Z | addons14/storage_image/models/__init__.py | odoochain/addons_oca | 55d456d798aebe16e49b4a6070765f206a8885ca | ["MIT"] | null | null | null | addons14/storage_image/models/__init__.py | odoochain/addons_oca | 55d456d798aebe16e49b4a6070765f206a8885ca | ["MIT"] | 1 | 2021-04-09T09:44:44.000Z | 2021-04-09T09:44:44.000Z | from . import storage_image
from . import storage_file
from . import storage_relation_abstract
| 23.75 | 39 | 0.842105 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4fd37eada01051d2655006a5f367e3dce290c716 | 10,332 | py | Python | src/etl/etl.py | shy166/hinreddit | e19abfa584b8b0cf801dd6968ac7b42d4b68ee96 | ["Apache-2.0"] | null | null | null | src/etl/etl.py | shy166/hinreddit | e19abfa584b8b0cf801dd6968ac7b42d4b68ee96 | ["Apache-2.0"] | 3 | 2020-05-16T04:29:28.000Z | 2020-05-16T08:05:16.000Z | src/etl/etl.py | syeehyn/hinreddit | e19abfa584b8b0cf801dd6968ac7b42d4b68ee96 | ["Apache-2.0"] | null | null | null | # import praw as pr
import pandas as pd
from src import *
import json
import requests
import pandas as pd
import os
from os.path import join
from tqdm import tqdm
import time
from joblib import Parallel, delayed
from p_tqdm import p_umap
from glob import glob
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
API = 'https://api.pushshift.io/'
COMMENT = join(API, 'reddit/search/comment/')
SUBMISSION = join(API, 'reddit/search/submission/')
SUBMISSION_DETAIL = join(API, 'reddit/submission/comment_ids/')
POST_DIR = 'raw/posts'
POST_DETAIL_DIR = 'raw/posts_detail'
COMMENT_DIR = 'raw/comments'
def fetch_post(subreddit, sort_type, sort, size, before, meta):
"""[fetch posts]
Args:
subreddit ([string]): [the subreddit of the post to be fetched; i.e. 'politics']
        sort_type ([string]): [the category to be sorted; i.e. 'time']
sort ([string]): ['asc': ascending; 'dsc': descending]
size ([int]): [number of post to be fetched]
before ([int]): [unix utc 10 digits timestamp]
meta ([list]): [meta to be fetched]
"""
params = '?' + 'subreddit=' + subreddit + \
'&' + 'sort_type=' + sort_type + \
'&' + 'sort=' + sort + \
'&' + 'size=' + size + \
'&' + 'before=' + before
r = requests.get(join(SUBMISSION, params), verify=False)
attemps = 0
if r.status_code == 200:
try:
data = pd.DataFrame(r.json()['data'])[meta]
return data, str(data.created_utc.min())
except KeyError:
data = pd.DataFrame(r.json()['data'])
return data, False
elif r.status_code == 403:
        while r.status_code == 403 and attemps < 5:
attemps += 1
time.sleep(3 * attemps)
r = requests.get(join(SUBMISSION, params), verify=False)
try:
data = pd.DataFrame(r.json()['data'])[meta]
return data, str(data.created_utc.min())
except KeyError:
try:
data = pd.DataFrame(r.json()['data'])
return data, False
except:
return None
else:
time.sleep(5)
r = requests.get(join(SUBMISSION, params), verify=False)
if r.status_code == 200:
try:
data = pd.DataFrame(r.json()['data'])[meta]
return data, str(data.created_utc.min())
except KeyError:
data = pd.DataFrame(r.json()['data'])
return data, False
else:
return None
def fetch_posts(subreddit, total, meta, filepath, sort_type, sort, size, start):
"""[fetch subreddits posts]
Args:
subreddit ([list]): [the list of subreddits of the post to be fetched; i.e. ['politics']]
total ([int]): [the number of total subreddits to be fetched]
filepath ([string]): [the filepath to store the data]
        sort_type ([string]): [the category to be sorted; i.e. 'time']
sort ([string]): ['asc': ascending; 'dsc': descending]
size ([int]): [number of post to be fetched]
before ([int]): [unix utc 10 digits timestamp]
meta ([list]): [meta to be fetched]
Returns:
[dict]: [the log of posts fetching process]
"""
num_epoch = -(-int(total) // int(size))
start_time = start
for i in range(num_epoch):
last_time = start_time
try:
process, start_time = fetch_post(subreddit, sort_type, sort, size, start_time, meta)
except TypeError:
return {'subreddit': subreddit, 'result': 'unsuccess', 'status': i, 'last_time': last_time}
if start_time != False:
if not os.path.exists(join(filepath, POST_DIR, subreddit+'.csv')):
process.to_csv(join(filepath, POST_DIR, subreddit+'.csv'), index = False)
else:
process.to_csv(join(filepath, POST_DIR, subreddit+'.csv'), index = False, mode='a', header = False)
else:
process.to_csv(join(filepath, POST_DIR, subreddit+'_failed.csv'), index = False)
return {'subreddit': subreddit, 'result': 'unsuccess', 'status': i, 'last_time': last_time}
time.sleep(.5)
return {'subreddit': subreddit,'result': 'success', 'status': num_epoch, 'last_time': last_time}
def fetch_submissions(**kwargs):
"""[function to fetch submissions]
Returns:
[dict]: [the log of submission fetching process]
"""
post_args, meta_args = kwargs['POST_ARGS'], kwargs['META_ARGS']
filepath, total, meta, subreddits = meta_args['filepath'], meta_args['total'], \
meta_args['meta'], meta_args['subreddits']
sort_type, sort, size, start = post_args['sort_type'], post_args['sort'], post_args['size'], post_args['start']
if os.path.exists(os.path.join(filepath, 'raw', 'posts', 'log.json')):
return json.load(open(os.path.join(filepath, 'raw', 'posts', 'log.json')))
else:
tolist = lambda x: [x for _ in range(len(subreddits))]
res = p_umap(fetch_posts, subreddits, tolist(total), tolist(meta), tolist(filepath), tolist(sort_type), tolist(sort), tolist(size), tolist(start), num_cpus = NUM_WORKER)
with open(os.path.join(filepath, 'raw', 'posts', 'log.json'), 'w') as fp:
json.dump(res, fp)
return res
def submission_detail(i):
"""[function to fetch submission's comments detail]
Args:
i ([string]): [subreddit name]
Returns:
[dict]: [log of the detail fetching process]
"""
r = requests.get(join(SUBMISSION_DETAIL, i), verify=False)
attemps = 0
if r.status_code == 200:
return {'submission_id': i, 'comment_ids': r.json()['data']}
elif r.status_code == 403:
        while r.status_code == 403 and attemps < 5:
attemps += 1
time.sleep(3 * attemps)
r = requests.get(join(SUBMISSION_DETAIL, i), verify=False)
try:
return {'submission_id': i, 'comment_ids': r.json()['data']}
except:
return {'submission_id': i, 'comment_ids': []}
else:
time.sleep(5)
r = requests.get(join(SUBMISSION_DETAIL, i), verify=False)
if r.status_code == 200:
return {'submission_id': i, 'comment_ids': r.json()['data']}
else:
return {'submission_id': i, 'comment_ids': []}
def submissions_detail(filepath):
"""[function to fetch submissions' comments detail]
Args:
filepath ([string]): [filepath to store the data]
"""
subreddits_fp = glob(join(filepath, POST_DIR, '*.csv'))
subreddits = [i.split('/')[-1][:-4] for i in subreddits_fp]
n, N = 1, len(subreddits)
for subreddit, fp in zip(subreddits,subreddits_fp):
print('fetching {0} subreddit details, Progress: {1}/{2}'.format(subreddit, str(n), str(N)))
if os.path.exists(join(filepath, POST_DETAIL_DIR, subreddit+'.json')):
n += 1
continue
else:
ids = pd.read_csv(fp).id.tolist()
rest = p_umap(submission_detail, ids, num_cpus = NUM_WORKER)
with open(join(filepath, POST_DETAIL_DIR, subreddit+'.json'), 'w') as f:
json.dump(rest, f)
n += 1
def comment_detail(i, filepath, subreddit):
"""[function to fetch the detail of comments]
Args:
i ([string]): [comment id]
filepath ([string]): [file path of submissions detail]
subreddit ([string]): [subreddit]
Returns:
[dict]: [log of the comment detail fetching process]
"""
if os.path.exists(join(filepath, COMMENT_DIR, subreddit + '.csv')):
return {'subreddit': subreddit, 'result': 'success'}
df = pd.DataFrame(json.load(open(i)))
lst = df.comment_ids.explode().dropna().unique().tolist()
lst = [lst[i: i+100] for i in range(0, len(lst), 100)]
res = []
for i in lst:
attemps = 0
phrase = ','.join(i)
r = requests.get(join(COMMENT, '?ids='+phrase), verify=False)
if r.status_code == 200:
try:
res.append(pd.DataFrame(r.json()['data'])[['id', 'author', 'created_utc', \
'is_submitter', 'subreddit', 'link_id', 'body', 'parent_id', 'send_replies']])
except KeyError:
continue
elif r.status_code == 403:
while r.status_code == 403 & attemps < 5:
attemps += 1
time.sleep(3 * attemps)
r = requests.get(join(COMMENT, '?ids='+phrase), verify=False)
if r.status_code == 200:
try:
res.append(pd.DataFrame(r.json()['data'])[['id', 'author', 'created_utc', \
'is_submitter', 'subreddit', 'link_id', 'body', 'parent_id', 'send_replies']])
except KeyError:
continue
else:
continue
else:
time.sleep(5)
r = requests.get(join(COMMENT, '?ids='+phrase), verify=False)
if r.status_code == 200:
try:
res.append(pd.DataFrame(r.json()['data'])[['id', 'author', 'created_utc', \
'is_submitter', 'subreddit', 'link_id', 'body', 'parent_id', 'send_replies']])
except KeyError:
continue
else:
continue
if len(res) == 0:
return {'subreddit': subreddit, 'result': 'unsuccess'}
else:
pd.concat(res, ignore_index = True).to_csv(join(filepath, COMMENT_DIR, subreddit + '.csv'), index = False)
return {'subreddit': subreddit, 'result': 'success'}
def comments_detail(filepath):
"""[function to fetch comments detail]
Args:
filepath ([string]): [filepath to store the data]
"""
subreddit_fp = glob(join(filepath, POST_DETAIL_DIR, '*.json'))
subreddits = [i.split('/')[-1][:-5] for i in subreddit_fp]
tolist = lambda x: [x for _ in range(len(subreddits))]
rest = p_umap(comment_detail, subreddit_fp, tolist(filepath), subreddits, num_cpus = NUM_WORKER)
with open(join(filepath, COMMENT_DIR, 'log.json'), 'w') as fp:
json.dump(rest, fp) | 42.518519 | 177 | 0.576268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,124 | 0.302362 |
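# A minimal driver sketch for the helpers above. Argument values are
# assumptions (subreddit, counts, timestamp), and fetch_posts expects the
# raw/posts directory to already exist under the chosen filepath.
def example_fetch(out_dir='./data'):
    return fetch_posts(
        subreddit='politics',
        total='500',
        meta=['id', 'author', 'created_utc', 'title', 'selftext'],
        filepath=out_dir,
        sort_type='created_utc',
        sort='desc',
        size='100',
        start='1577836800',  # 2020-01-01 00:00:00 UTC
    )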
4fd5295b29e4b31c48489377d517627d7c834a90 | 581 | py | Python | tanslate.py | Blues-star/bilibili-BV-conv | e53015fb7272e70945fbb6c35a59edef8ba0cb3f | ["MIT"] | null | null | null | tanslate.py | Blues-star/bilibili-BV-conv | e53015fb7272e70945fbb6c35a59edef8ba0cb3f | ["MIT"] | null | null | null | tanslate.py | Blues-star/bilibili-BV-conv | e53015fb7272e70945fbb6c35a59edef8ba0cb3f | ["MIT"] | null | null | null | table = 'fZodR9XQDSUm21yCkr6zBqiveYah8bt4xsWpHnJE7jL5VG3guMTKNPAwcF'
tr = {}
for i in range(58):
tr[table[i]] = i
s = [11, 10, 3, 8, 4, 6]
xor = 177451812
add = 8728348608
def dec(x):
r = 0
for i in range(6):
r += tr[x[s[i]]] * 58**i
return (r - add) ^ xor
def enc(x):
x = (x ^ xor) + add
r = list('BV1 4 1 7 ')
for i in range(6):
r[s[i]] = table[x // 58**i % 58]
return ''.join(r)
print(dec('BV17x411w7KC'))
print(dec('BV1Q541167Qg'))
print(dec('BV1mK4y1C7Bz'))
print(enc(170001))
print(enc(455017605))
print(enc(882584971)) | 19.366667 | 68 | 0.576592 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.203098 |
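# Quick self-consistency sketch for the converters above: enc() and dec() are
# inverses on ordinary AV ids (values small enough to fit the six base-58
# digits the table covers), e.g.:
def round_trip_ok(av_id):
    return dec(enc(av_id)) == av_id
# round_trip_ok(170001) -> True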
4fd725c7e36bdf921e72caea11e281c8604d9e0c | 162 | py | Python | ddtrace/settings/exceptions.py | zhammer/dd-trace-py | 4c30f6e36bfa34a63cd9b6884677c977f76d2a01 | ["Apache-2.0", "BSD-3-Clause"] | 5 | 2020-03-07T01:12:29.000Z | 2021-04-21T00:53:19.000Z | ddtrace/settings/exceptions.py | zhammer/dd-trace-py | 4c30f6e36bfa34a63cd9b6884677c977f76d2a01 | ["Apache-2.0", "BSD-3-Clause"] | 4 | 2019-11-22T20:58:01.000Z | 2020-08-17T21:16:13.000Z | ddtrace/settings/exceptions.py | zhammer/dd-trace-py | 4c30f6e36bfa34a63cd9b6884677c977f76d2a01 | ["Apache-2.0", "BSD-3-Clause"] | 3 | 2020-03-18T16:29:20.000Z | 2020-07-20T16:05:10.000Z | class ConfigException(Exception):
"""Configuration exception when an integration that is not available
is called in the `Config` object.
"""
pass
| 27 | 72 | 0.709877 | 161 | 0.993827 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.703704 |
4fd773510f35b86e510f60c92159c965a39255c9 | 22,001 | py | Python | nex/router.py | eddiejessup/nex | d61005aacb3b87f8cf1a1e2080ca760d757d5751 | ["MIT"] | null | null | null | nex/router.py | eddiejessup/nex | d61005aacb3b87f8cf1a1e2080ca760d757d5751 | ["MIT"] | null | null | null | nex/router.py | eddiejessup/nex | d61005aacb3b87f8cf1a1e2080ca760d757d5751 | ["MIT"] | null | null | null | from collections import deque
from enum import Enum
import logging
from .constants.codes import CatCode
from .constants.parameters import param_to_instr
from .constants.specials import special_to_instr
from .constants.instructions import (Instructions, if_instructions,
unexpanded_cs_instructions)
from .constants import control_sequences
from .tokens import InstructionToken, BaseToken
from .utils import get_unique_id, LogicError
from .lexer import (Lexer,
control_sequence_lex_type, char_cat_lex_type)
from .macro import parse_replacement_text, parse_parameter_text
logger = logging.getLogger(__name__)
short_hand_def_type_to_token_instr = {
Instructions.char_def.value: Instructions.char_def_token,
Instructions.math_char_def.value: Instructions.math_char_def_token,
Instructions.count_def.value: Instructions.count_def_token,
Instructions.dimen_def.value: Instructions.dimen_def_token,
Instructions.skip_def.value: Instructions.skip_def_token,
Instructions.mu_skip_def.value: Instructions.mu_skip_def_token,
Instructions.toks_def.value: Instructions.toks_def_token,
Instructions.font.value: Instructions.font_def_token,
}
literals_map = {
('<', CatCode.other): Instructions.less_than,
('>', CatCode.other): Instructions.greater_than,
('=', CatCode.other): Instructions.equals,
('+', CatCode.other): Instructions.plus_sign,
('-', CatCode.other): Instructions.minus_sign,
('0', CatCode.other): Instructions.zero,
('1', CatCode.other): Instructions.one,
('2', CatCode.other): Instructions.two,
('3', CatCode.other): Instructions.three,
('4', CatCode.other): Instructions.four,
('5', CatCode.other): Instructions.five,
('6', CatCode.other): Instructions.six,
('7', CatCode.other): Instructions.seven,
('8', CatCode.other): Instructions.eight,
('9', CatCode.other): Instructions.nine,
('\'', CatCode.other): Instructions.single_quote,
('"', CatCode.other): Instructions.double_quote,
('`', CatCode.other): Instructions.backtick,
('.', CatCode.other): Instructions.point,
(',', CatCode.other): Instructions.comma,
('A', CatCode.other): Instructions.a,
('B', CatCode.other): Instructions.b,
('C', CatCode.other): Instructions.c,
('D', CatCode.other): Instructions.d,
('E', CatCode.other): Instructions.e,
('F', CatCode.other): Instructions.f,
('A', CatCode.letter): Instructions.a,
('B', CatCode.letter): Instructions.b,
('C', CatCode.letter): Instructions.c,
('D', CatCode.letter): Instructions.d,
('E', CatCode.letter): Instructions.e,
('F', CatCode.letter): Instructions.f,
}
non_active_letters_map = {
'a': Instructions.non_active_uncased_a,
'b': Instructions.non_active_uncased_b,
'c': Instructions.non_active_uncased_c,
'd': Instructions.non_active_uncased_d,
'e': Instructions.non_active_uncased_e,
'f': Instructions.non_active_uncased_f,
'g': Instructions.non_active_uncased_g,
'h': Instructions.non_active_uncased_h,
'i': Instructions.non_active_uncased_i,
'j': Instructions.non_active_uncased_j,
'k': Instructions.non_active_uncased_k,
'l': Instructions.non_active_uncased_l,
'm': Instructions.non_active_uncased_m,
'n': Instructions.non_active_uncased_n,
'o': Instructions.non_active_uncased_o,
'p': Instructions.non_active_uncased_p,
'q': Instructions.non_active_uncased_q,
'r': Instructions.non_active_uncased_r,
's': Instructions.non_active_uncased_s,
't': Instructions.non_active_uncased_t,
'u': Instructions.non_active_uncased_u,
'v': Instructions.non_active_uncased_v,
'w': Instructions.non_active_uncased_w,
'x': Instructions.non_active_uncased_x,
'y': Instructions.non_active_uncased_y,
'z': Instructions.non_active_uncased_z,
'A': Instructions.non_active_uncased_a,
'B': Instructions.non_active_uncased_b,
'C': Instructions.non_active_uncased_c,
'D': Instructions.non_active_uncased_d,
'E': Instructions.non_active_uncased_e,
'F': Instructions.non_active_uncased_f,
'G': Instructions.non_active_uncased_g,
'H': Instructions.non_active_uncased_h,
'I': Instructions.non_active_uncased_i,
'J': Instructions.non_active_uncased_j,
'K': Instructions.non_active_uncased_k,
'L': Instructions.non_active_uncased_l,
'M': Instructions.non_active_uncased_m,
'N': Instructions.non_active_uncased_n,
'O': Instructions.non_active_uncased_o,
'P': Instructions.non_active_uncased_p,
'Q': Instructions.non_active_uncased_q,
'R': Instructions.non_active_uncased_r,
'S': Instructions.non_active_uncased_s,
'T': Instructions.non_active_uncased_t,
'U': Instructions.non_active_uncased_u,
'V': Instructions.non_active_uncased_v,
'W': Instructions.non_active_uncased_w,
'X': Instructions.non_active_uncased_x,
'Y': Instructions.non_active_uncased_y,
'Z': Instructions.non_active_uncased_z,
}
category_map = {
CatCode.space: Instructions.space,
CatCode.begin_group: Instructions.left_brace,
CatCode.end_group: Instructions.right_brace,
CatCode.active: Instructions.active_character,
CatCode.parameter: Instructions.parameter,
CatCode.math_shift: Instructions.math_shift,
CatCode.align_tab: Instructions.align_tab,
CatCode.superscript: Instructions.superscript,
CatCode.subscript: Instructions.subscript,
}
def get_char_cat_pair_instruction(char, cat):
if cat in (CatCode.letter, CatCode.other) and (char, cat) in literals_map:
return literals_map[(char, cat)]
elif cat != CatCode.active and char in non_active_letters_map:
return non_active_letters_map[char]
elif cat in (CatCode.letter, CatCode.other):
return Instructions.misc_char_cat_pair
elif cat in category_map:
return category_map[cat]
else:
raise ValueError(f'Confused by char-cat pair: ({char}, {cat})')
def make_char_cat_pair_instruction_token_direct(char, cat, *args, **kwargs):
"""Make a char-cat instruction token straight from a pair.
"""
instruction = get_char_cat_pair_instruction(char, cat)
value = {'char': char, 'cat': cat, 'lex_type': char_cat_lex_type}
token = InstructionToken(
instruction,
value=value,
*args, **kwargs,
)
return token
def make_char_cat_pair_instruction_token(char_cat_lex_token):
v = char_cat_lex_token.value
return make_char_cat_pair_instruction_token_direct(
v['char'], v['cat'],
parents=[char_cat_lex_token]
)
def make_parameter_control_sequence_instruction(name, parameter, instruction):
instr_tok = make_primitive_control_sequence_instruction(name, instruction)
# This is what is used to look up the parameter value. The 'name' just
# records the name of the control sequence used to refer to this parameter.
instr_tok.value['parameter'] = parameter
return instr_tok
def make_special_control_sequence_instruction(name, special, instruction):
instr_tok = make_primitive_control_sequence_instruction(name, instruction)
# This is what is used to look up the special value. The 'name' just
# records the name of the control sequence used to refer to this special.
instr_tok.value['special'] = special
return instr_tok
def make_primitive_control_sequence_instruction(name, instruction):
return InstructionToken(
instruction,
value={'name': name, 'lex_type': control_sequence_lex_type},
parents=[],
)
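# Illustrative sketch (editor's addition): the three helpers above differ only in
# the extra key stored on the token's value dict. The control-sequence names and
# the Instructions member used here are assumptions, not taken from the module:
#   make_primitive_control_sequence_instruction('relax', Instructions.relax).value
#   # -> {'name': 'relax', 'lex_type': control_sequence_lex_type}
#   make_parameter_control_sequence_instruction('tolerance', param, instr).value
#   # -> {'name': 'tolerance', 'lex_type': control_sequence_lex_type, 'parameter': param}
#   make_special_control_sequence_instruction('spacefactor', special, instr).value
#   # -> {'name': 'spacefactor', 'lex_type': control_sequence_lex_type, 'special': special}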
def make_unexpanded_control_sequence_instruction(name, parents):
if len(name) == 1:
instruction = Instructions.unexpanded_control_symbol
else:
instruction = Instructions.unexpanded_control_word
return InstructionToken(
instruction,
value={'name': name, 'lex_type': control_sequence_lex_type},
parents=parents,
)
def lex_token_to_instruction_token(lex_token):
    # If we have a char-cat pair, we must convert it to its terminal (instruction) version.
if lex_token.type == char_cat_lex_type:
return make_char_cat_pair_instruction_token(lex_token)
elif lex_token.type == control_sequence_lex_type:
return make_unexpanded_control_sequence_instruction(
lex_token.value, parents=[lex_token])
    # There are no other types of lexed tokens.
else:
raise LogicError(f"Unknown lex token type: '{lex_token}'")
def make_macro_token(name, replacement_text, parameter_text,
parents,
def_type=None, prefixes=None):
if prefixes is None:
prefixes = set()
return InstructionToken(
Instructions.macro,
value={'name': name,
'prefixes': prefixes,
'replacement_text': parse_replacement_text(replacement_text),
'parameter_text': parse_parameter_text(parameter_text),
'def_type': def_type,
'lex_type': control_sequence_lex_type},
parents=parents,
)
class NoSuchControlSequence(Exception):
def __init__(self, name):
self.name = name
class ControlSequenceType(Enum):
macro = 1
let_character = 2
parameter = 3
primitive = 4
font = 5
special = 6
class RouteToken(BaseToken):
def __init__(self, type_, value):
if type_ not in ControlSequenceType:
            raise ValueError(f'Route token {type_} is not a ControlSequenceType')
super().__init__(type_, value)
class CSRouter:
def __init__(self,
param_control_sequences,
special_control_sequences,
primitive_control_sequences,
enclosing_scope=None):
self.control_sequences = {}
self.macros = {}
self.let_chars = {}
self.parameters = {}
self.specials = {}
self.primitives = {}
self.font_ids = {}
self.enclosing_scope = enclosing_scope
for name, tpl in param_control_sequences.items():
parameter, instr = tpl
self._set_parameter(name, parameter, instr)
for name, tpl in special_control_sequences.items():
special, instr = tpl
self._set_special(name, special, instr)
for name, instruction in primitive_control_sequences.items():
self._set_primitive(name, instruction)
@classmethod
def default_initial(cls):
# Router needs a map from a control sequence name, to the parameter and
# the instruction type of the parameter (integer, dimen and so on).
params = {
n: (p, param_to_instr[p])
for n, p in control_sequences.param_control_sequences.items()
}
specials = {
n: (p, special_to_instr[p])
for n, p in control_sequences.special_control_sequences.items()
}
primitives = control_sequences.primitive_control_sequences
return cls(
param_control_sequences=params,
special_control_sequences=specials,
primitive_control_sequences=primitives,
enclosing_scope=None)
@classmethod
def default_local(cls, enclosing_scope):
return cls(param_control_sequences={},
special_control_sequences={},
primitive_control_sequences={},
enclosing_scope=enclosing_scope)
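    # Illustrative sketch (editor's addition): routers are chained, with the
    # outermost scope built from the known control sequences and each nested
    # scope starting empty but deferring lookups to its enclosing scope. The
    # control-sequence name used here is only for illustration:
    #   outer = CSRouter.default_initial()
    #   inner = CSRouter.default_local(enclosing_scope=outer)
    #   inner.lookup_canonical_control_sequence('tolerance')  # falls through to `outer`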
def _name_means_instruction(self, name, instructions):
try:
tok = self.lookup_control_sequence(name, parents=None)
except NoSuchControlSequence:
return False
if isinstance(tok, InstructionToken):
return tok.instruction in instructions
else:
return False
    def name_means_delimit_condition(self, name):
        r"""Test if a control sequence corresponds to an instruction that
        delimits blocks of conditional text. Concretely, this means the
        control sequence is '\else' or '\or'."""
        return self._name_means_instruction(name, (Instructions.else_,
                                                   Instructions.or_))
    def name_means_end_condition(self, name):
        r"""Test if a control sequence corresponds to an instruction that ends
        a block of conditional text. Concretely, this means the control
        sequence is '\fi'."""
        return self._name_means_instruction(name, (Instructions.end_if,))
    def name_means_start_condition(self, name):
        r"""Test if a control sequence corresponds to an instruction that
        starts a block of conditional text. Concretely, this means the control
        sequence is one of '\ifnum', '\ifcase' and so on."""
        return self._name_means_instruction(name, if_instructions)
def lookup_canonical_control_sequence(self, name):
route_token = self._lookup_route_token(name)
return self._resolve_route_token_to_raw_value(route_token)
def lookup_control_sequence(self, name, parents):
canon_token = self.lookup_canonical_control_sequence(name)
token = canon_token.copy(parents=parents)
# Amend token to give it the proper control sequence name.
if isinstance(token.value, dict) and 'name' in token.value:
token.value['name'] = name
return token
def set_macro(self, name, replacement_text, parameter_text,
def_type, prefixes,
parents):
if prefixes is None:
prefixes = set()
route_id = self._set_route_token(name, ControlSequenceType.macro)
macro_token = make_macro_token(name,
replacement_text=replacement_text,
parameter_text=parameter_text,
def_type=def_type, prefixes=prefixes,
parents=parents)
self.macros[route_id] = macro_token
def do_short_hand_definition(self, name, def_type, code,
target_parents, cmd_parents):
def_token_instr = short_hand_def_type_to_token_instr[def_type]
instr_token = InstructionToken(
def_token_instr,
value=code,
parents=target_parents,
)
self.set_macro(name, replacement_text=[instr_token],
parameter_text=[], def_type='sdef', prefixes=None,
parents=cmd_parents)
def define_new_font_control_sequence(self, name, font_id,
cmd_parents, target_parents):
# Note, this token just records the font id; the information
# is stored in the global font state, because it has internal
# state that might be modified later; we need to know where to get
# at it.
self.do_short_hand_definition(
name=name,
def_type=Instructions.font.value,
code=font_id,
cmd_parents=cmd_parents,
target_parents=target_parents,
)
def do_let_assignment(self, new_name, target_token):
if target_token.value['lex_type'] == control_sequence_lex_type:
target_name = target_token.value['name']
self._copy_control_sequence(target_name, new_name)
elif target_token.value['lex_type'] == char_cat_lex_type:
self._set_let_character(new_name, target_token)
else:
raise ValueError(f'Let target does not look like a token: '
f'{target_token}')
def _set_primitive(self, name, instruction):
# Get a route from the name to a primitive.
route_id = self._set_route_token(name, ControlSequenceType.primitive)
# Make that route resolve to the instruction token.
token = make_primitive_control_sequence_instruction(
name=name, instruction=instruction)
self.primitives[route_id] = token
def _set_parameter(self, name, parameter, instr):
# Get a route from the name to a parameter.
route_id = self._set_route_token(name, ControlSequenceType.parameter)
# Make that route resolve to the parameter token.
token = make_parameter_control_sequence_instruction(
name=name, parameter=parameter, instruction=instr)
self.parameters[route_id] = token
def _set_special(self, name, special, instr):
# Get a route from the name to a special.
route_id = self._set_route_token(name, ControlSequenceType.special)
# Make that route resolve to the special token.
token = make_special_control_sequence_instruction(
name=name, special=special, instruction=instr)
self.specials[route_id] = token
def _copy_control_sequence(self, target_name, new_name):
# Make a new control sequence that is routed to the same spot as the
# current one.
target_route_token = self._lookup_route_token(target_name)
self.control_sequences[new_name] = target_route_token
def _set_let_character(self, name, char_cat_token):
route_id = self._set_route_token(name,
ControlSequenceType.let_character)
self.let_chars[route_id] = char_cat_token
def _set_route_token(self, name, cs_type):
route_id = get_unique_id()
route_token = RouteToken(cs_type, route_id)
self.control_sequences[name] = route_token
return route_id
def _lookup_route_token(self, name):
# If the route token exists in this scope, return it.
if name in self.control_sequences:
route_token = self.control_sequences[name]
# Otherwise, if there's an enclosing scope, ask it for it.
elif self.enclosing_scope is not None:
route_token = self.enclosing_scope._lookup_route_token(name)
# If we are the outermost scope, the control sequence is unknown.
else:
raise NoSuchControlSequence(name)
return route_token
def _resolve_route_token_to_raw_value(self, r):
type_ = r.type
route_id = r.value
value_maps_map = {
ControlSequenceType.parameter: self.parameters,
ControlSequenceType.special: self.specials,
ControlSequenceType.primitive: self.primitives,
ControlSequenceType.macro: self.macros,
ControlSequenceType.let_character: self.let_chars,
ControlSequenceType.font: self.font_ids,
}
value_map = value_maps_map[type_]
try:
v = value_map[route_id]
except KeyError:
v = self.enclosing_scope._resolve_route_token_to_raw_value(r)
return v
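# Editor's note (illustrative sketch, not from the original source): lookup is
# deliberately two-step. A name maps to a RouteToken holding a unique route id,
# and the route id maps to the real value in a per-type table. Copying only the
# route (as _copy_control_sequence does for \let) makes two names resolve to the
# same underlying object without duplicating it:
#   router.set_macro('foo', replacement_text=[], parameter_text=[],
#                    def_type='def', prefixes=None, parents=[])
#   router._copy_control_sequence('foo', 'bar')
#   # 'foo' and 'bar' now route to the same entry in router.macros.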
class Instructioner:
def __init__(self, lexer, resolve_cs_func):
self.lexer = lexer
self.resolve_control_sequence = resolve_cs_func
# TODO: Use GetBuffer.
self.output_buffer = deque()
@classmethod
def from_string(cls, resolve_cs_func, *args, **kwargs):
lexer = Lexer.from_string(*args, **kwargs)
return cls(lexer, resolve_cs_func=resolve_cs_func)
def replace_tokens_on_input(self, tokens):
if logger.isEnabledFor(logging.DEBUG):
if len(tokens) == 1:
s = tokens[0]
elif len(tokens) > 3:
s = f'[{tokens[0]} … {tokens[-1]}]'
else:
s = tokens
logger.debug(f'Replacing "{s}" on input instruction queue')
self.output_buffer.extendleft(reversed(tokens))
def iter_unexpanded(self):
while True:
yield self.next_unexpanded()
def next_unexpanded(self):
retrieving = self.output_buffer
if retrieving:
t = self.output_buffer.popleft()
else:
new_lex_token = next(self.lexer)
t = lex_token_to_instruction_token(new_lex_token)
# if t.char_nr is not None and logger.isEnabledFor(logging.INFO):
# source = 'Retrieved' if retrieving else 'Read'
# if self.lexer.reader.current_buffer.name != 'plain.tex':
# logger.info(f'{source}: {t.get_position_str(self.lexer.reader)}')
return t
def next_expanded(self):
instr_tok = self.next_unexpanded()
# If the token is an unexpanded control sequence call, and expansion is
# not suppressed, then we must resolve the call:
# - A user control sequence will become a macro instruction token.
# - A \let character will become its character instruction token.
# - A primitive control sequence will become its instruction token.
# NOTE: I've made this mistake twice now: we can't make this resolution
# into a two-call process, where we resolve the token, put the resolved
# token on the input, then handle it in the next call. This is because,
# for example, \expandafter expects a single call to the banisher to
# both resolve *and* expand a macro. Basically this method must do a
# certain amount to a token in each call.
if instr_tok.instruction in unexpanded_cs_instructions:
name = instr_tok.value['name']
try:
instr_tok = self.resolve_control_sequence(name,
parents=[instr_tok])
except NoSuchControlSequence:
# Might be that we are parsing too far in a chunk, and just
# need to execute a command before this can be understood. Put
# the token back on the input, potentially to read again.
self.replace_tokens_on_input([instr_tok])
raise
return instr_tok
def advance_to_end(self, expand=True):
while True:
try:
if expand:
yield self.next_expanded()
else:
yield self.next_unexpanded()
except EOFError:
return
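# Illustrative usage sketch (editor's addition): a typical driver builds an
# Instructioner from a string and a resolver callable, then drains it. The
# resolver stands in for CSRouter.lookup_control_sequence, and the way the
# source string is passed through to Lexer.from_string is assumed here:
#   instructioner = Instructioner.from_string(
#       router.lookup_control_sequence, r'\input story')
#   for instruction_token in instructioner.advance_to_end(expand=True):
#       ...  # hand each instruction token on to the parser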
| 39.570144 | 83 | 0.66447 | 12,925 | 0.58742 | 359 | 0.016316 | 1,189 | 0.054038 | 0 | 0 | 3,806 | 0.172976 |
4fd795c999e925984e4dfb7c769e57a42211546b | 1,409 | py | Python | nd-coursework/courses/computationalChemistry/scripts/plotEnergies.py | crdrisko/nd-grad | f1765e4f24d7a4b1b3a76c64eb8d88bcca0eaa44 | [
"MIT"
] | 1 | 2020-09-26T12:38:55.000Z | 2020-09-26T12:38:55.000Z | nd-coursework/courses/computationalChemistry/scripts/plotEnergies.py | crdrisko/nd-research | f1765e4f24d7a4b1b3a76c64eb8d88bcca0eaa44 | [
"MIT"
] | null | null | null | nd-coursework/courses/computationalChemistry/scripts/plotEnergies.py | crdrisko/nd-research | f1765e4f24d7a4b1b3a76c64eb8d88bcca0eaa44 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Name: plotEnergies.py - Version 1.0.0
# Author: Cody R. Drisko (crdrisko)
# Date: 10/18/2019-08:02:13
# Description: Plotting the relevant data for HW 4
import numpy as np
import matplotlib.pyplot as plt
### Results of the Vibrational Analysis ###
# Each spectrum file provides two columns: scaled frequency (cm^-1) and intensity.
data_va = np.loadtxt("vinylAlcohol/vinylAlcohol_spec.jdx", dtype=float, comments='#')
frec_va = data_va[:, 0]
int_va = data_va[:, 1]
data_ts = np.loadtxt("TS/TS_spec.jdx", dtype=float, comments='#')
frec_ts = data_ts[:, 0]
int_ts = data_ts[:, 1]
data_a = np.loadtxt("acetaldehyde/acetaldehyde_spec.jdx", dtype=float, comments='#')
frec_a = data_a[:, 0]
int_a = data_a[:, 1]
plt.figure(1)
plt.plot(frec_a, int_a, label="Acetaldehyde")
plt.plot(frec_ts, int_ts, label="Transition State")
plt.plot(frec_va, int_va, label="Vinyl Alcohol")
plt.xlabel("Scaled Frequencies (cm$^{-1}$) ")
plt.ylabel("Intensities")
plt.legend()
plt.savefig("Frequencies.png")
### Results of the Scanning Technique ###
data_scan = np.loadtxt("scan/allEnergies.dat", dtype=float)
angle = data_scan[:, 0]
energy = data_scan[:, 1]
plt.figure(2)
plt.plot(angle, energy)
plt.xlabel("Dihedral Angle (Degrees)")
plt.ylabel("Energy (Hartrees)")
plt.savefig("Energies.png")
| 24.719298 | 85 | 0.710433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 568 | 0.403123 |
4fd88fa848745173132ac1e20520e01408473d33 | 22,130 | py | Python | src/plugin.py | BradB111/galaxy_blizzard_plugin | 4386ee2902cfaf4f7871613d419ad6f2ddad6e77 | [
"MIT"
] | 67 | 2020-01-05T23:23:35.000Z | 2022-03-22T01:03:22.000Z | src/plugin.py | BradB111/galaxy_blizzard_plugin | 4386ee2902cfaf4f7871613d419ad6f2ddad6e77 | [
"MIT"
] | 49 | 2019-12-20T15:08:06.000Z | 2021-11-18T22:29:48.000Z | src/plugin.py | BradB111/galaxy_blizzard_plugin | 4386ee2902cfaf4f7871613d419ad6f2ddad6e77 | [
"MIT"
] | 24 | 2019-12-20T16:52:38.000Z | 2022-03-08T02:22:56.000Z | import asyncio
import json
import os
import sys
import multiprocessing
import webbrowser
from collections import defaultdict
import requests
import requests.cookies
import logging as log
import subprocess
import time
import re
from typing import Union, Dict
from galaxy.api.consts import LocalGameState, Platform
from galaxy.api.plugin import Plugin, create_and_run_plugin
from galaxy.api.types import Achievement, Game, LicenseInfo, LocalGame, GameTime, LicenseType
from galaxy.api.errors import (
AuthenticationRequired, BackendTimeout, BackendNotAvailable, BackendError,
NetworkError, UnknownError, InvalidCredentials, UnknownBackendResponse
)
from version import __version__ as version
from process import ProcessProvider
from local_client_base import ClientNotInstalledError
from local_client import LocalClient
from osutils import get_directory_size
from backend import BackendClient, AccessTokenExpired
from definitions import Blizzard, DataclassJSONEncoder, BlizzardGame, ClassicGame
from consts import SYSTEM
from consts import Platform as pf
from http_client import AuthenticatedHttpClient
class BNetPlugin(Plugin):
def __init__(self, reader, writer, token):
super().__init__(Platform.Battlenet, version, reader, writer, token)
self.local_client = LocalClient(self._update_statuses)
self.authentication_client = AuthenticatedHttpClient(self)
self.backend_client = BackendClient(self, self.authentication_client)
self.watched_running_games = set()
def handshake_complete(self):
self.create_task(self.__delayed_handshake(), 'delayed handshake')
async def __delayed_handshake(self):
"""
Adds some minimal delay on Galaxy start before registering local data watchers.
        Apparently Galaxy may not be ready to receive notifications even after handshake_complete.
"""
await asyncio.sleep(1)
self.create_task(self.local_client.register_local_data_watcher(), 'local data watcher')
self.create_task(self.local_client.register_classic_games_updater(), 'classic games updater')
async def _notify_about_game_stop(self, game, starting_timeout):
id_to_watch = game.info.uid
if id_to_watch in self.watched_running_games:
log.debug(f'Game {id_to_watch} is already watched. Skipping')
return
try:
self.watched_running_games.add(id_to_watch)
await asyncio.sleep(starting_timeout)
ProcessProvider().update_games_processes([game])
            log.info(f'Setting up process watcher for {game._processes}')
loop = asyncio.get_event_loop()
await loop.run_in_executor(None, game.wait_until_game_stops)
finally:
self.update_local_game_status(LocalGame(id_to_watch, LocalGameState.Installed))
self.watched_running_games.remove(id_to_watch)
def _update_statuses(self, refreshed_games, previous_games):
for blizz_id, refr in refreshed_games.items():
prev = previous_games.get(blizz_id, None)
if prev is None:
if refr.has_galaxy_installed_state:
log.debug('Detected playable game')
state = LocalGameState.Installed
else:
log.debug('Detected not-fully installed game')
continue
elif refr.has_galaxy_installed_state and not prev.has_galaxy_installed_state:
log.debug('Detected playable game')
state = LocalGameState.Installed
elif refr.last_played != prev.last_played:
log.debug('Detected launched game')
state = LocalGameState.Installed | LocalGameState.Running
self.create_task(self._notify_about_game_stop(refr, 5), 'game stop waiter')
else:
continue
log.info(f'Changing game {blizz_id} state to {state}')
self.update_local_game_status(LocalGame(blizz_id, state))
for blizz_id, prev in previous_games.items():
refr = refreshed_games.get(blizz_id, None)
if refr is None:
log.debug('Detected uninstalled game')
state = LocalGameState.None_
self.update_local_game_status(LocalGame(blizz_id, state))
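    # Editor's summary (sketch) of the notifications emitted above:
    #   newly seen and playable           -> Installed
    #   partially installed -> playable   -> Installed
    #   last_played changed               -> Installed | Running (plus a stop watcher)
    #   previously tracked, now missing   -> None_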
def log_out(self):
if self.backend_client:
asyncio.create_task(self.authentication_client.shutdown())
self.authentication_client.user_details = None
async def open_battlenet_browser(self):
url = self.authentication_client.blizzard_battlenet_download_url
log.info(f'Opening battle.net website: {url}')
loop = asyncio.get_running_loop()
await loop.run_in_executor(None, lambda x: webbrowser.open(x, autoraise=True), url)
async def install_game(self, game_id):
if not self.authentication_client.is_authenticated():
raise AuthenticationRequired()
installed_game = self.local_client.get_installed_games().get(game_id, None)
if installed_game and os.access(installed_game.install_path, os.F_OK):
log.warning("Received install command on an already installed game")
return await self.launch_game(game_id)
if game_id in [classic.uid for classic in Blizzard.CLASSIC_GAMES]:
if SYSTEM == pf.WINDOWS:
platform = 'windows'
elif SYSTEM == pf.MACOS:
platform = 'macos'
webbrowser.open(f"https://www.blizzard.com/download/confirmation?platform={platform}&locale=enUS&version=LIVE&id={game_id}")
return
try:
self.local_client.refresh()
log.info(f'Installing game of id {game_id}')
self.local_client.install_game(game_id)
except ClientNotInstalledError as e:
log.warning(e)
await self.open_battlenet_browser()
except Exception as e:
log.exception(f"Installing game {game_id} failed: {e}")
def _open_battlenet_at_id(self, game_id):
try:
self.local_client.refresh()
self.local_client.open_battlenet(game_id)
except Exception as e:
log.exception(f"Opening battlenet client on specific game_id {game_id} failed {e}")
try:
self.local_client.open_battlenet()
except Exception as e:
log.exception(f"Opening battlenet client failed {e}")
async def uninstall_game(self, game_id):
if not self.authentication_client.is_authenticated():
raise AuthenticationRequired()
if game_id == 'wow_classic':
# attempting to uninstall classic wow through protocol gives you a message that the game cannot
# be uninstalled through protocol and you should use battle.net
return self._open_battlenet_at_id(game_id)
if SYSTEM == pf.MACOS:
self._open_battlenet_at_id(game_id)
else:
try:
installed_game = self.local_client.get_installed_games().get(game_id, None)
if installed_game is None or not os.access(installed_game.install_path, os.F_OK):
log.error(f'Cannot uninstall {game_id}')
self.update_local_game_status(LocalGame(game_id, LocalGameState.None_))
return
if not isinstance(installed_game.info, ClassicGame):
if self.local_client.uninstaller is None:
raise FileNotFoundError('Uninstaller not found')
uninstall_tag = installed_game.uninstall_tag
client_lang = self.local_client.config_parser.locale_language
self.local_client.uninstaller.uninstall_game(installed_game, uninstall_tag, client_lang)
except Exception as e:
log.exception(f'Uninstalling game {game_id} failed: {e}')
async def launch_game(self, game_id):
try:
game = self.local_client.get_installed_games().get(game_id, None)
if game is None:
log.error(f'Launching game that is not installed: {game_id}')
return await self.install_game(game_id)
if isinstance(game.info, ClassicGame):
log.info(f'Launching game of id: {game_id}, {game} at path {os.path.join(game.install_path, game.info.exe)}')
if SYSTEM == pf.WINDOWS:
subprocess.Popen(os.path.join(game.install_path, game.info.exe))
elif SYSTEM == pf.MACOS:
if not game.info.bundle_id:
                        log.warning(f"{game.name} has no bundle id; please help us by reporting this game's bundle id")
subprocess.Popen(['open', '-b', game.info.bundle_id])
self.update_local_game_status(LocalGame(game_id, LocalGameState.Installed | LocalGameState.Running))
asyncio.create_task(self._notify_about_game_stop(game, 6))
return
self.local_client.refresh()
log.info(f'Launching game of id: {game_id}, {game}')
await self.local_client.launch_game(game, wait_sec=60)
self.update_local_game_status(LocalGame(game_id, LocalGameState.Installed | LocalGameState.Running))
self.local_client.close_window()
asyncio.create_task(self._notify_about_game_stop(game, 3))
except ClientNotInstalledError as e:
log.warning(e)
await self.open_battlenet_browser()
except TimeoutError as e:
log.warning(str(e))
except Exception as e:
log.exception(f"Launching game {game_id} failed: {e}")
async def authenticate(self, stored_credentials=None):
try:
if stored_credentials:
auth_data = self.authentication_client.process_stored_credentials(stored_credentials)
try:
await self.authentication_client.create_session()
await self.backend_client.refresh_cookies()
auth_status = await self.backend_client.validate_access_token(auth_data.access_token)
except (BackendNotAvailable, BackendError, NetworkError, UnknownError, BackendTimeout) as e:
raise e
except Exception:
raise InvalidCredentials()
if self.authentication_client.validate_auth_status(auth_status):
self.authentication_client.user_details = await self.backend_client.get_user_info()
return self.authentication_client.parse_user_details()
else:
return self.authentication_client.authenticate_using_login()
except Exception as e:
raise e
async def pass_login_credentials(self, step, credentials, cookies):
if "logout&app=oauth" in credentials['end_uri']:
# 2fa expired, repeat authentication
return self.authentication_client.authenticate_using_login()
if self.authentication_client.attempted_to_set_battle_tag:
self.authentication_client.user_details = await self.backend_client.get_user_info()
return self.authentication_client.parse_auth_after_setting_battletag()
cookie_jar = self.authentication_client.parse_cookies(cookies)
auth_data = await self.authentication_client.get_auth_data_login(cookie_jar, credentials)
try:
await self.authentication_client.create_session()
await self.backend_client.refresh_cookies()
except (BackendNotAvailable, BackendError, NetworkError, UnknownError, BackendTimeout) as e:
raise e
except Exception:
raise InvalidCredentials()
auth_status = await self.backend_client.validate_access_token(auth_data.access_token)
if not ("authorities" in auth_status and "IS_AUTHENTICATED_FULLY" in auth_status["authorities"]):
raise InvalidCredentials()
self.authentication_client.user_details = await self.backend_client.get_user_info()
self.authentication_client.set_credentials()
return self.authentication_client.parse_battletag()
async def get_owned_games(self):
if not self.authentication_client.is_authenticated():
raise AuthenticationRequired()
def _parse_battlenet_games(standard_games: dict, cn: bool) -> Dict[BlizzardGame, LicenseType]:
licenses = defaultdict(lambda: LicenseType.Unknown, {
"Trial": LicenseType.OtherUserLicense,
"Good": LicenseType.SinglePurchase,
"Inactive": LicenseType.SinglePurchase,
"Banned": LicenseType.SinglePurchase,
"Free": LicenseType.FreeToPlay,
"Suspended": LicenseType.SinglePurchase,
"AccountLock": LicenseType.SinglePurchase
})
games = {}
for standard_game in standard_games["gameAccounts"]:
title_id = standard_game['titleId']
try:
game = Blizzard.game_by_title_id(title_id, cn)
except KeyError:
log.warning(f"Skipping unknown game with titleId: {title_id}")
else:
games[game] = licenses[standard_game.get("gameAccountStatus")]
# Add wow classic if retail wow is present in owned games
wow_license = games.get(Blizzard['wow'])
if wow_license is not None:
games[Blizzard['wow_classic']] = wow_license
return games
def _parse_classic_games(classic_games: dict) -> Dict[ClassicGame, LicenseType]:
games = {}
for classic_game in classic_games["classicGames"]:
sanitized_name = classic_game["localizedGameName"].replace(u'\xa0', ' ')
for cg in Blizzard.CLASSIC_GAMES:
if cg.name == sanitized_name:
games[cg] = LicenseType.SinglePurchase
break
else:
log.warning(f"Skipping unknown classic game with name: {sanitized_name}")
return games
cn = self.authentication_client.region == 'cn'
battlenet_games = _parse_battlenet_games(await self.backend_client.get_owned_games(), cn)
classic_games = _parse_classic_games(await self.backend_client.get_owned_classic_games())
owned_games: Dict[BlizzardGame, LicenseType] = {**battlenet_games, **classic_games}
for game in Blizzard.try_for_free_games(cn):
if game not in owned_games:
owned_games[game] = LicenseType.FreeToPlay
return [
Game(game.uid, game.name, None, LicenseInfo(license_type))
for game, license_type in owned_games.items()
]
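    # Editor's note (sketch): the merge above gives battle.net and classic
    # licenses priority over the try-for-free defaults, so an owned title keeps
    # its real license while unowned free-to-try titles are reported as
    # FreeToPlay.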
async def get_local_games(self):
timeout = time.time() + 2
try:
translated_installed_games = []
while not self.local_client.games_finished_parsing():
await asyncio.sleep(0.1)
if time.time() >= timeout:
break
running_games = self.local_client.get_running_games()
installed_games = self.local_client.get_installed_games()
log.info(f"Installed games {installed_games.items()}")
log.info(f"Running games {running_games}")
for uid, game in installed_games.items():
if game.has_galaxy_installed_state:
state = LocalGameState.Installed
if uid in running_games:
state |= LocalGameState.Running
translated_installed_games.append(LocalGame(uid, state))
self.local_client.installed_games_cache = installed_games
return translated_installed_games
except Exception as e:
log.exception(f"failed to get local games: {str(e)}")
raise
async def get_local_size(self, game_id: str, context) -> int:
install_path = self.local_client.installed_games_cache[game_id].install_path
loop = asyncio.get_event_loop()
return await loop.run_in_executor(None, get_directory_size, install_path)
async def get_game_time(self, game_id, context):
total_time = None
last_played_time = None
blizzard_game = Blizzard[game_id]
if blizzard_game.name == "Overwatch":
total_time = await self._get_overwatch_time()
log.debug(f"Gametime for Overwatch is {total_time} minutes.")
for config_info in self.local_client.config_parser.games:
if config_info.uid == blizzard_game.uid:
if config_info.last_played is not None:
last_played_time = int(config_info.last_played)
break
return GameTime(game_id, total_time, last_played_time)
async def _get_overwatch_time(self) -> Union[None, int]:
log.debug("Fetching playtime for Overwatch...")
player_data = await self.backend_client.get_ow_player_data()
if 'message' in player_data: # user not found
log.error('No Overwatch profile found.')
return None
        if player_data['private']:
log.info('Unable to get data as Overwatch profile is private.')
return None
qp_time = player_data['playtime'].get('quickplay')
if qp_time is None: # user has not played quick play
return 0
if qp_time.count(':') == 1: # minutes and seconds
match = re.search('(?:(?P<m>\\d+):)(?P<s>\\d+)', qp_time)
if match:
return int(match.group('m'))
elif qp_time.count(':') == 2: # hours, minutes and seconds
match = re.search('(?:(?P<h>\\d+):)(?P<m>\\d+)', qp_time)
if match:
return int(match.group('h')) * 60 + int(match.group('m'))
raise UnknownBackendResponse(f'Unknown Overwatch API playtime format: {qp_time}')
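    # Illustrative examples for the parsing above (editor's addition; the exact
    # strings returned by the backend are an assumption):
    #   "37:14"    -> 37   minutes (m:s form, seconds discarded)
    #   "5:02:41"  -> 302  minutes (h:m:s form, 5 * 60 + 2)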
async def _get_wow_achievements(self):
achievements = []
try:
characters_data = await self.backend_client.get_wow_character_data()
characters_data = characters_data["characters"]
wow_character_data = await asyncio.gather(
*[
self.backend_client.get_wow_character_achievements(character["realm"], character["name"])
for character in characters_data
],
return_exceptions=True,
)
for data in wow_character_data:
if isinstance(data, requests.Timeout) or isinstance(data, requests.ConnectionError):
raise data
wow_achievement_data = [
list(
zip(
data["achievements"]["achievementsCompleted"],
data["achievements"]["achievementsCompletedTimestamp"],
)
)
for data in wow_character_data
if type(data) is dict
]
already_in = set()
for char_ach in wow_achievement_data:
for ach in char_ach:
if ach[0] not in already_in:
achievements.append(Achievement(achievement_id=ach[0], unlock_time=int(ach[1] / 1000)))
already_in.add(ach[0])
except (AccessTokenExpired, BackendError) as e:
log.exception(str(e))
with open('wow.json', 'w') as f:
f.write(json.dumps(achievements, cls=DataclassJSONEncoder))
return achievements
async def _get_sc2_achievements(self):
account_data = await self.backend_client.get_sc2_player_data(self.authentication_client.user_details["id"])
# TODO what if more sc2 accounts?
assert len(account_data) == 1
account_data = account_data[0]
profile_data = await self.backend_client.get_sc2_profile_data(
account_data["regionId"], account_data["realmId"],
account_data["profileId"]
)
sc2_achievement_data = [
Achievement(achievement_id=achievement["achievementId"], unlock_time=achievement["completionDate"])
for achievement in profile_data["earnedAchievements"]
if achievement["isComplete"]
]
with open('sc2.json', 'w') as f:
f.write(json.dumps(sc2_achievement_data, cls=DataclassJSONEncoder))
return sc2_achievement_data
# async def get_unlocked_achievements(self, game_id):
# if not self.website_client.is_authenticated():
# raise AuthenticationRequired()
# try:
# if game_id == "21298":
# return await self._get_sc2_achievements()
# elif game_id == "5730135":
# return await self._get_wow_achievements()
# else:
# return []
# except requests.Timeout:
# raise BackendTimeout()
# except requests.ConnectionError:
# raise NetworkError()
# except Exception as e:
# log.exception(str(e))
# return []
async def launch_platform_client(self):
if self.local_client.is_running():
log.info("Launch platform client called but client is already running")
return
self.local_client.open_battlenet()
await self.local_client.prevent_battlenet_from_showing()
async def shutdown_platform_client(self):
await self.local_client.shutdown_platform_client()
async def shutdown(self):
log.info("Plugin shutdown.")
await self.authentication_client.shutdown()
def main():
multiprocessing.freeze_support()
create_and_run_plugin(BNetPlugin, sys.argv)
if __name__ == "__main__":
main()
| 43.648915 | 136 | 0.632671 | 20,877 | 0.94338 | 0 | 0 | 0 | 0 | 17,508 | 0.791143 | 3,466 | 0.15662 |
4fd92af7e0206cf0333bb60a3ec9bad3624fcb60 | 845 | py | Python | kronos_executor/kronos_executor/execution_contexts/trivial.py | ecmwf/kronos | 4f8c896baa634fc937f866d2bd05b438106c1663 | [
"Apache-2.0"
] | 4 | 2020-09-15T15:16:17.000Z | 2021-08-17T14:02:28.000Z | kronos_executor/kronos_executor/execution_contexts/trivial.py | ecmwf/kronos | 4f8c896baa634fc937f866d2bd05b438106c1663 | [
"Apache-2.0"
] | 4 | 2020-09-12T07:22:35.000Z | 2020-10-13T17:08:35.000Z | kronos_executor/kronos_executor/execution_contexts/trivial.py | ecmwf/kronos | 4f8c896baa634fc937f866d2bd05b438106c1663 | [
"Apache-2.0"
] | null | null | null |
import pathlib
from kronos_executor.execution_context import ExecutionContext
run_script = pathlib.Path(__file__).parent / "trivial_run.sh"
class TrivialExecutionContext(ExecutionContext):
scheduler_directive_start = ""
scheduler_directive_params = {}
scheduler_use_params = []
scheduler_cancel_head = "#!/bin/bash\nkill "
scheduler_cancel_entry = "{sequence_id} "
launcher_command = "mpirun"
launcher_params = {"num_procs": "-np "}
launcher_use_params = ["num_procs"]
def env_setup(self, job_config):
return "module load openmpi"
def submit_command(self, job_config, job_script_path, deps=[]):
return [str(run_script),
job_config['job_output_file'],
job_config['job_error_file'],
job_script_path]
Context = TrivialExecutionContext
| 28.166667 | 67 | 0.695858 | 665 | 0.786982 | 0 | 0 | 0 | 0 | 0 | 0 | 144 | 0.170414 |