# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the load_projects_iam_policies_pipeline."""
import unittest
import mock
import ratelimiter
from tests.inventory.pipelines.test_data import fake_configs
from tests.inventory.pipelines.test_data import fake_iam_policies
from tests.unittest_utils import ForsetiTestCase
from google.cloud.security.common.data_access import errors as data_access_errors
from google.cloud.security.common.data_access import project_dao as proj_dao
from google.cloud.security.common.gcp_api import cloud_resource_manager as crm
from google.cloud.security.common.gcp_api import errors as api_errors
from google.cloud.security.common.util import log_util
from google.cloud.security.inventory import errors as inventory_errors
from google.cloud.security.inventory.pipelines import load_projects_iam_policies_pipeline
class LoadProjectsIamPoliciesPipelineTest(ForsetiTestCase):
"""Tests for the load_org_iam_policies_pipeline."""
FAKE_PROJECT_NUMBERS = ['11111', '22222']
def setUp(self):
"""Set up."""
self.cycle_timestamp = '20001225T120000Z'
self.configs = fake_configs.FAKE_CONFIGS
self.mock_crm = mock.create_autospec(crm.CloudResourceManagerClient)
self.mock_dao = mock.create_autospec(proj_dao.ProjectDao)
self.pipeline = (
load_projects_iam_policies_pipeline.LoadProjectsIamPoliciesPipeline(
self.cycle_timestamp,
self.configs,
self.mock_crm,
self.mock_dao))
def test_can_transform_project_iam_policies(self):
"""Test that project iam policies can be tranformed."""
loadable_iam_policies = list(self.pipeline._transform(
fake_iam_policies.FAKE_PROJECT_IAM_POLICY_MAP))
        self.assertEqual(
            fake_iam_policies.EXPECTED_LOADABLE_PROJECT_IAM_POLICY,
            loadable_iam_policies)
    def test_api_is_called_to_retrieve_project_policies(self):
        """Test that the api is called to retrieve project policies."""
self.pipeline.dao.get_project_numbers.return_value = (
self.FAKE_PROJECT_NUMBERS)
self.pipeline._retrieve()
self.pipeline.dao.get_project_numbers.assert_called_once_with(
self.pipeline.RESOURCE_NAME, self.pipeline.cycle_timestamp)
        self.assertEqual(
            2, self.pipeline.api_client.get_project_iam_policies.call_count)
called_args, called_kwargs = (
self.pipeline.api_client.get_project_iam_policies.call_args_list[0])
expected_args = (self.pipeline.RESOURCE_NAME,
self.FAKE_PROJECT_NUMBERS[0])
        self.assertEqual(expected_args, called_args)
called_args, called_kwargs = (
self.pipeline.api_client.get_project_iam_policies.call_args_list[1])
expected_args = (self.pipeline.RESOURCE_NAME,
self.FAKE_PROJECT_NUMBERS[1])
        self.assertEqual(expected_args, called_args)
def test_dao_error_is_handled_when_retrieving(self):
"""Test that exceptions are handled when retrieving."""
self.pipeline.dao.get_project_numbers.side_effect = (
data_access_errors.MySQLError('error error', mock.MagicMock()))
with self.assertRaises(inventory_errors.LoadDataPipelineError):
self.pipeline._retrieve()
@mock.patch.object(
load_projects_iam_policies_pipeline.base_pipeline, 'LOGGER')
def test_api_error_is_handled_when_retrieving(self, mock_logger):
"""Test that exceptions are handled when retrieving.
We don't want to fail the pipeline when any one project's policies
can not be retrieved. We just want to log the error, and continue
with the other projects.
"""
self.pipeline.dao.get_project_numbers.return_value = (
self.FAKE_PROJECT_NUMBERS)
self.pipeline.api_client.get_project_iam_policies.side_effect = (
api_errors.ApiExecutionError('error error', mock.MagicMock()))
results = self.pipeline._retrieve()
self.assertEqual([], results)
self.assertEqual(2, mock_logger.error.call_count)
@mock.patch.object(
load_projects_iam_policies_pipeline.LoadProjectsIamPoliciesPipeline,
'_get_loaded_count')
@mock.patch.object(
load_projects_iam_policies_pipeline.LoadProjectsIamPoliciesPipeline,
'_load')
@mock.patch.object(
load_projects_iam_policies_pipeline.LoadProjectsIamPoliciesPipeline,
'_transform')
@mock.patch.object(
load_projects_iam_policies_pipeline.LoadProjectsIamPoliciesPipeline,
'_retrieve')
def test_subroutines_are_called_by_run(self, mock_retrieve, mock_transform,
mock_load, mock_get_loaded_count):
"""Test that the subroutines are called by run."""
mock_retrieve.return_value = (
fake_iam_policies.FAKE_PROJECT_IAM_POLICY_MAP)
mock_transform.return_value = (
fake_iam_policies.EXPECTED_LOADABLE_PROJECT_IAM_POLICY)
self.pipeline.run()
mock_retrieve.assert_called_once_with()
mock_transform.assert_called_once_with(
fake_iam_policies.FAKE_PROJECT_IAM_POLICY_MAP)
        self.assertEqual(2, mock_load.call_count)
# The regular data is loaded.
called_args, called_kwargs = mock_load.call_args_list[0]
expected_args = (
self.pipeline.RESOURCE_NAME,
fake_iam_policies.EXPECTED_LOADABLE_PROJECT_IAM_POLICY)
        self.assertEqual(expected_args, called_args)
# The raw json data is loaded.
called_args, called_kwargs = mock_load.call_args_list[1]
expected_args = (
self.pipeline.RAW_RESOURCE_NAME,
fake_iam_policies.FAKE_PROJECT_IAM_POLICY_MAP)
        self.assertEqual(expected_args, called_args)
        mock_get_loaded_count.assert_called_once_with()
if __name__ == '__main__':
unittest.main()
import numpy as np
import torch
import torch.nn as nn
IMSIZE = 64
class VAE(nn.Module):
"""Multimodal Variational Autoencoder.
@param n_latents: integer
number of latent dimensions
"""
def __init__(self, n_latents, batch_size, training, imsize, use_cuda):
super(VAE, self).__init__()
self.batch_size = batch_size
self.device = "cuda" if use_cuda is True else "cpu"
self.use_cuda = True if self.device == "cuda" else False
self.n_latents = n_latents
self.training = training
self.bidirectional = False
self.imsize = imsize
        if imsize == 128:
            self.image_encoder = ImageEncoder128(self.n_latents)
            self.image_decoder = ImageDecoder128(self.n_latents)
        elif imsize == 64:
            self.image_encoder = ImageEncoder64(self.n_latents)
            self.image_decoder = ImageDecoder64(self.n_latents)
        else:
            raise ValueError("imsize must be 64 or 128, got %s" % imsize)
def reparametrize(self, mu, logvar):
        # z = mu + eps * std with eps ~ N(0, I): differentiable sampling from q(z|x)
        if self.training:
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps*std
else: # return mean during inference
return mu
def forward(self, image=None):
mu, logvar = self.infer(image)
# reparametrization trick to sample
z = self.reparametrize(mu, logvar)
# reconstruct inputs based on that gaussian
image_recon = self.image_decoder(z)
return image_recon, mu, logvar
def infer(self, image=None):
        # encode the image into the parameters of q(z|x)
        try:
            img_mu, img_logvar = self.image_encoder(image.to(self.device))
        except AttributeError:
            # fall back if the input cannot be moved to the device
            img_mu, img_logvar = self.image_encoder(image)
return img_mu, img_logvar
class ImageEncoder128(nn.Module):
"""Parametrizes q(z|x).
This is the standard DCGAN architecture.
@param n_latents: integer
number of latent variable dimensions.
"""
def __init__(self, n_latents):
super(ImageEncoder128, self).__init__()
hid_channels = 32
kernel_size = 4
hidden_dim = 256
self.latent_dim = n_latents
# Shape required to start transpose convs
self.reshape = (hid_channels, kernel_size, kernel_size)
n_chan = 3
# Convolutional layers
cnn_kwargs = dict(stride=2, padding=1)
self.conv1 = nn.Conv2d(n_chan, hid_channels, kernel_size, **cnn_kwargs)
self.conv2 = nn.Conv2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)
self.conv3 = nn.Conv2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)
self.conv_128 = nn.Conv2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)
# Fully connected layers
        self.lin1 = nn.Linear(np.prod(self.reshape), hidden_dim)
self.lin2 = nn.Linear(hidden_dim, hidden_dim)
# Fully connected layers for mean and variance
self.mu_logvar_gen = nn.Linear(hidden_dim, self.latent_dim * 2)
def forward(self, x):
batch_size = x.size(0)
if len(x.shape) < 4:
x = x.unsqueeze(0)
# Convolutional layers with ReLu activations
x = torch.relu(self.conv1(x))
x = torch.relu(self.conv2(x))
x = torch.relu(self.conv3(x))
        x = torch.relu(self.conv_128(x))
        x = torch.relu(self.conv_128(x))  # the same conv layer is applied twice, so its weights are shared
# Fully connected layers with ReLu activations
x = x.view((batch_size, -1 ))
x = torch.relu(self.lin1(x))
x = torch.relu(self.lin2(x))
# Fully connected layer for log variance and mean
# Log std-dev in paper (bear in mind)
mu_logvar = self.mu_logvar_gen(x)
mu, logvar = mu_logvar.view(-1, self.latent_dim, 2).unbind(-1)
return mu, logvar
class ImageDecoder128(nn.Module):
"""Parametrizes p(x|z).
This is the standard DCGAN architecture.
@param n_latents: integer
number of latent variable dimensions.
"""
def __init__(self, n_latents):
super(ImageDecoder128, self).__init__()
latent_dim = n_latents
# Layer parameters
hid_channels = 32
kernel_size = 4
hidden_dim = 256
# Shape required to start transpose convs
self.reshape = (hid_channels, kernel_size, kernel_size)
n_chan = 3
# Fully connected layers
self.lin1 = nn.Linear(latent_dim, hidden_dim)
self.lin2 = nn.Linear(hidden_dim, hidden_dim)
        self.lin3 = nn.Linear(hidden_dim, np.prod(self.reshape))
# Convolutional layers
cnn_kwargs = dict(stride=2, padding=1)
        # Extra transposed convolution to reach 128x128 output
self.convT_128 = nn.ConvTranspose2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)
self.convT1 = nn.ConvTranspose2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)
self.convT2 = nn.ConvTranspose2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)
self.convT3 = nn.ConvTranspose2d(hid_channels, n_chan, kernel_size, **cnn_kwargs)
def forward(self, z):
batch_size = z.size(0)
# Fully connected layers with ReLu activations
x = torch.relu(self.lin1(z))
x = torch.relu(self.lin2(x))
x = torch.relu(self.lin3(x))
x = x.view(batch_size, *self.reshape)
# Convolutional layers with ReLu activations
        x = torch.relu(self.convT_128(x))
        x = torch.relu(self.convT_128(x))  # the same transposed conv is applied twice, so its weights are shared
x = torch.relu(self.convT1(x))
x = torch.relu(self.convT2(x))
# Sigmoid activation for final conv layer
x = torch.sigmoid(self.convT3(x))
return x
class ImageEncoder64(nn.Module):
"""Parametrizes q(z|x).
This is the standard DCGAN architecture.
@param n_latents: integer
number of latent variable dimensions.
"""
def __init__(self, n_latents):
super(ImageEncoder64, self).__init__()
hid_channels = 32
kernel_size = 4
hidden_dim = 256
self.latent_dim = n_latents
# Shape required to start transpose convs
self.reshape = (hid_channels, kernel_size, kernel_size)
n_chan = 3
# Convolutional layers
cnn_kwargs = dict(stride=2, padding=1)
self.conv1 = nn.Conv2d(n_chan, hid_channels, kernel_size, **cnn_kwargs)
self.conv2 = nn.Conv2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)
self.conv3 = nn.Conv2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)
# If input image is 64x64 do fourth convolution
self.conv_64 = nn.Conv2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)
# Fully connected layers
        self.lin1 = nn.Linear(np.prod(self.reshape), hidden_dim)
self.lin2 = nn.Linear(hidden_dim, hidden_dim)
# Fully connected layers for mean and variance
self.mu_logvar_gen = nn.Linear(hidden_dim, self.latent_dim * 2)
def forward(self, x):
batch_size = x.size(0)
if len(x.shape) < 4:
x = x.unsqueeze(0)
# Convolutional layers with ReLu activations
x = torch.relu(self.conv1(x))
x = torch.relu(self.conv2(x))
x = torch.relu(self.conv3(x))
x = torch.relu(self.conv_64(x))
# Fully connected layers with ReLu activations
x = x.view((batch_size, -1 ))
x = torch.relu(self.lin1(x))
x = torch.relu(self.lin2(x))
# Fully connected layer for log variance and mean
# Log std-dev in paper (bear in mind)
mu_logvar = self.mu_logvar_gen(x)
mu, logvar = mu_logvar.view(-1, self.latent_dim, 2).unbind(-1)
return mu, logvar
class ImageDecoder64(nn.Module):
"""Parametrizes p(x|z).
This is the standard DCGAN architecture.
@param n_latents: integer
number of latent variable dimensions.
"""
def __init__(self, n_latents):
super(ImageDecoder64, self).__init__()
latent_dim = n_latents
# Layer parameters
hid_channels = 32
kernel_size = 4
hidden_dim = 256
# Shape required to start transpose convs
self.reshape = (hid_channels, kernel_size, kernel_size)
n_chan = 3
# Fully connected layers
self.lin1 = nn.Linear(latent_dim, hidden_dim)
self.lin2 = nn.Linear(hidden_dim, hidden_dim)
        self.lin3 = nn.Linear(hidden_dim, np.prod(self.reshape))
# Convolutional layers
cnn_kwargs = dict(stride=2, padding=1)
self.convT_128 = nn.ConvTranspose2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)
self.convT1 = nn.ConvTranspose2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)
self.convT2 = nn.ConvTranspose2d(hid_channels, hid_channels, kernel_size, **cnn_kwargs)
self.convT3 = nn.ConvTranspose2d(hid_channels, n_chan, kernel_size, **cnn_kwargs)
def forward(self, z):
batch_size = z.size(0)
# Fully connected layers with ReLu activations
x = torch.relu(self.lin1(z))
x = torch.relu(self.lin2(x))
x = torch.relu(self.lin3(x))
x = x.view(batch_size, *self.reshape)
# Convolutional layers with ReLu activations
x = torch.relu(self.convT_128(x))
x = torch.relu(self.convT1(x))
x = torch.relu(self.convT2(x))
# Sigmoid activation for final conv layer
x = torch.sigmoid(self.convT3(x))
return x
class Swish(nn.Module):
"""https://arxiv.org/abs/1710.05941"""
def forward(self, x):
return x * torch.sigmoid(x)
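# A minimal smoke test (an illustrative sketch, not part of the original module):
# push a random 64x64 RGB batch through the VAE and check the output shapes.
# The constructor arguments below are assumptions chosen only for demonstration.
if __name__ == "__main__":
    vae = VAE(n_latents=10, batch_size=8, training=True, imsize=64, use_cuda=False)
    x = torch.rand(8, 3, 64, 64)  # fake image batch in [0, 1]
    recon, mu, logvar = vae(x)
    print(recon.shape, mu.shape, logvar.shape)  # (8, 3, 64, 64), (8, 10), (8, 10)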
import os, glob
from uuid import uuid4
from flask import Flask, request, render_template, send_from_directory
import numpy as np
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import load_model
#loading and running the model
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
app = Flask(__name__,static_url_path='/static',
static_folder=os.path.join(APP_ROOT,'templates/static'))
#setting the target folder for saving images
classes = ['This is a JackFruit','This is a Mango','Sorry, I don\'t recognize this']
target_folder = APP_ROOT + "/images/"
if not os.path.isdir(target_folder):
os.mkdir(target_folder)
target = os.path.join(APP_ROOT, 'images')
@app.route("/")
def index():
filelist = glob.glob(os.path.join(target, "*"))
for f in filelist:
os.remove(f)
return render_template("index.html")
#saving the image from the user to target folder.
@app.route("/upload", methods=["POST"])
def upload():
    if not os.path.isdir(target):
        try:
            os.mkdir(target)
        except OSError:
            print("Couldn't create upload directory: {}".format(target))
print(request.files.getlist("file"))
for upload in request.files.getlist("file"):
print(upload)
print("{} is the file name".format(upload.filename))
filename = upload.filename
destination = "/".join([target, filename])
print ("Accept incoming file:", filename)
print ("Save it to:", destination)
upload.save(destination)
new_model = load_model(os.path.join(APP_ROOT, 'models/chamasifier_1.h5'))
new_model.summary()
test_image = image.load_img(os.path.join(APP_ROOT,'images/'+filename),target_size=(150,150))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
result = new_model.predict(test_image)
    # NOTE: assumes the model emits an exact 0/1 label; probabilistic outputs would need a threshold
    if result[0][0] == 0:
prediction = classes[0]
elif result[0][0] == 1:
prediction = classes[1]
else:
prediction = classes[2]
return render_template("result.html",image_name=filename, text=prediction)
#function for loading image from the directory.
@app.route('/upload/<filename>')
def send_image(filename):
return send_from_directory("images", filename)
if __name__ == "__main__":
    app.run(debug=False)
from flask import Flask
app = Flask(__name__)
import logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
@app.route('/hello')
def hello_world():
    return 'Hello from Python'
if __name__ == '__main__':
    app.run(port=8080)
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from appcenter_sdk.api.distribute import distributeApi
from appcenter_sdk.api.alerting import alertingApi
from appcenter_sdk.api.account import accountApi
from appcenter_sdk.api.gdpr import gdprApi
from appcenter_sdk.api.build import buildApi
from appcenter_sdk.api.codepush import codepushApi
from appcenter_sdk.api.billing import billingApi
from appcenter_sdk.api.crash import crashApi
from appcenter_sdk.api.test import testApi
from appcenter_sdk.api.push import pushApi
from appcenter_sdk.api.export import exportApi
from appcenter_sdk.api.errors import errorsApi
from appcenter_sdk.api.analytics import analyticsApi
from appcenter_sdk.api.mbaas import mbaasApi
import svgwrite
if __name__ == "__main__":
drawing = svgwrite.Drawing(filename="text_on_line.svg", size=("100%", "100%"))
path = drawing.add(drawing.path(d="M 0 50 L 100 50", stroke="red", fill="none"))
text = drawing.add(drawing.text(""))
text_path = text.add(
svgwrite.text.TextPath(path.get_iri(), "This text is on a line!")
)
drawing.viewbox(0, 0, 100, 100)
drawing.save()
# CHALLENGE 014: Write a program that reads a temperature in °C and converts it to °F.
celsius = float(input('Enter the temperature in °C: '))
fahrenheit = ((celsius * 9) / 5) + 32
msg = f'The temperature {celsius:.2f} °C corresponds to {fahrenheit:.2f} °F!'
print(msg)
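# Worked check: 25.00 °C -> ((25 * 9) / 5) + 32 = 77.00 °F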
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 23 08:58:37 2021
@author: michp-ai
"""
# This script is web automation for the Capstone project on ML rapid text labeling
# Before running this script in a different console start the web server by running main.py for the web app
# This is a simple demo script to illustrate how selenium interacts with the web app
#%%
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import pandas as pd
import os.path
import datetime
from time import sleep
import sys
sys.path.insert(1, '../baseline-classifier/utilities')
import dt_utilities as utils
from utilities import search_exclude_labeling, label_all, clear_model_output, clear_output, load_navigate
#%%
#set a timer
starttime = datetime.datetime.now()
#%%
# Get the data we'll need for evaluation
consolidated_disaster_tweet_data_df = \
utils.get_consolidated_disaster_tweet_data(root_directory="../baseline-classifier/data/",
event_type_directory="HumAID_data_event_type",
events_set_directories=["HumAID_data_events_set1_47K",
"HumAID_data_events_set2_29K"],
include_meta_data=True)
train_df = consolidated_disaster_tweet_data_df[consolidated_disaster_tweet_data_df["data_type"]=="train"].reset_index(drop=True)
test_df = consolidated_disaster_tweet_data_df[consolidated_disaster_tweet_data_df["data_type"]=="test"].reset_index(drop=True)
vectorizer_needs_transform = True
#%%
download_dir = os.path.join(os.getcwd(), "models")
chrome_options = Options()
chrome_options.add_experimental_option('prefs', {
"download.default_directory": download_dir,
"download.prompt_for_download": False,
"download.directory_upgrade": True,
}
)
#%%
# PARAMETERS
mpath = os.path.join(os.getcwd(), "chromedriver.exe")
wait_time = 0  # 0.75
scroll_wait_seconds = 0  # 1.75
#%%
driver = webdriver.Chrome(mpath, options = chrome_options)
#%%
# load the webpage
load_navigate(driver)
#%%
sectionstarttime = datetime.datetime.now()
label_type = "AllTexts_search_exclude" # list of valid values ["SimilarTexts", "RecommendedTexts, AllTexts_search_exclude"]
df_test_data = pd.read_csv("test_data_search_exclude_short.csv")
clear_model_output()
clear_output()
df_tracker = search_exclude_labeling(driver, test_df, starttime, df_test_data, vectorizer_needs_transform)
# finish by labeling all remaining unlabeled examples
df_tracker = label_all(driver, test_df, starttime, df_tracker, vectorizer_needs_transform)
sectionendtime = datetime.datetime.now()
elapsedsectiontime = sectionendtime - sectionstarttime
print("Elapsed section time", elapsedsectiontime)
#%%
df_tracker.to_csv("tracker_output.csv")
print(df_tracker.head(20))
print(df_tracker.tail(20))
#%%
driver.close()
#%%
endtime = datetime.datetime.now()
elapsedtime = endtime - starttime
print("Elapsed time", elapsedtime) |
from data_structures.doubly_linked_list_node import DoublyLinkedListNode
def test_doubly_linked_list_node():
node = DoublyLinkedListNode()
assert node.data is None
assert node.next is None
assert node.previous is None
node = DoublyLinkedListNode(1)
assert node.data == 1
assert node.previous is None
assert node.next is None
node = DoublyLinkedListNode(2, DoublyLinkedListNode(1), DoublyLinkedListNode(3))
assert node.data == 2
assert node.previous.data == 1
assert node.next.data == 3
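# For reference, a minimal sketch of the node class these tests assume
# (hypothetical; the real implementation lives in
# data_structures/doubly_linked_list_node.py):
#
# class DoublyLinkedListNode:
#     def __init__(self, data=None, previous=None, next=None):
#         self.data = data
#         self.previous = previous
#         self.next = next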
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import re
from minicluster_testbase import MiniClusterTestBase
class DfTest(MiniClusterTestBase):
def test_df(self):
client_output = self.client.df()
expected_output = self.cluster.df("/").split("\n")[1]
        (filesystem, capacity, used, remaining, pct) = re.split(r"\s+", expected_output)
self.assertEqual(filesystem, client_output["filesystem"])
self.assertEqual(long(capacity), client_output["capacity"])
self.assertEqual(long(used), client_output["used"])
"""
This module defines core training functions for DeepD
"""
import tensorflow as tf
import numpy as np
import time
def get_mse(x, xhat):
mse = tf.reduce_mean(tf.square(x - xhat), name='MSE')
return mse
def get_xent(x, xhat):
xent = tf.compat.v1.losses.softmax_cross_entropy(x, xhat)
return xent
def get_loss(x, xhat, fn=get_mse, l1=0, l2=0, var_list=None):
if var_list is None:
var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
n_params = sum([np.prod(p.shape) for p in var_list]).value
l1 = l1 * tf.reduce_sum([tf.reduce_sum(tf.abs(p)) for p in var_list]) / n_params
l2 = l2 * tf.reduce_sum([tf.reduce_sum(tf.square(p)) for p in var_list]) / n_params
raw_loss = fn(x, xhat)
full_loss = tf.add(raw_loss, l1 + l2, name='loss')
return raw_loss, full_loss
def session_setup():
saver = tf.compat.v1.train.Saver()
sess = tf.compat.v1.Session()
for i in tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope='initialization'):
print(i)
tf.compat.v1.summary.FileWriter("tensorboard", sess.graph)
return sess, saver
def session_init(sess, seed):
sess.run(tf.compat.v1.global_variables_initializer())
np.random.seed(seed), tf.compat.v1.set_random_seed(seed)
def pretrain(model, data, n_iter=1000, n_iter_patience=100):
"""
For layerwise pretraining of the autoencoders
Args
:param model: (deepD.DeepT2cec) Constructed autoencoder models
:param data: (dict) training data dictionary
:param n_iter: (int) maximum number of iterations allowed
    :param n_iter_patience: (int) tolerance for iterations with no decrease in training loss
Mutates:
model: (Deep.DeepT2vec)
"""
sess, screenshot = model.sess, model.screenshot
if model.screenshot:
print("[Training] Pretrained params detected. Skipping...")
screenshot.load_model("Tp2vec_layer_wise_retrain.ckpt")
return 1
x_train_gold, x_valid_gold, x_test_gold = (data[key]['value'] for key in ['train', 'valid', 'test'])
# Training on train set batches with early stopping on valid set batched
for k, (mse, opt_op) in enumerate(zip(model.pretrain_mses, model.pretrain_optimizer_ops)):
print('[Training] Pre-training on train set at {}...'.format(opt_op[1].name))
n_unchanged = 0
idx_iter = 0
while True:
if idx_iter > n_iter or n_unchanged > n_iter_patience:
break
            t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
pos_train = np.random.choice(range(x_train_gold.shape[0]), model.pretrain_batch_size)
pos_valid = np.random.choice(range(x_valid_gold.shape[0]), model.pretrain_batch_size)
_, mse_train_i = sess.run((opt_op[1], mse), feed_dict={model.x: x_train_gold[pos_train]})
loss_train_i = mse_train_i
# record training
mse_valid_i = sess.run(mse, feed_dict={model.x: x_valid_gold[pos_valid]})
loss_valid_i = mse_valid_i
new_loss = screenshot.avg_n_iters_loss(loss_valid_i)
            screenshot.log(filename="training.log", iteration=(idx_iter, n_iter),
                           unchanged=(n_unchanged, n_iter_patience), t=time.perf_counter() - t0,
                           loss=(loss_train_i, loss_valid_i, mse_train_i, mse_valid_i, np.nan))
# early stopping
idx_iter += 1
if new_loss < screenshot.loss_min:
n_unchanged = 0
screenshot.screenshot(loss_min=new_loss)
else:
n_unchanged += 1
print('[Training] Saving pre-train results at layer {}...'.format(k + 1))
screenshot.save_model("models/Tp2vec_layer_wise_retrain.ckpt")
screenshot.save_params()
sess.run(tf.compat.v1.variables_initializer(opt_op[0].variables())) # refresh optimizer states
screenshot.reset() # refresh best_loss saved in screenshot
def train(model, optimizer_op, data, full_loss, raw_loss, output, n_iter=1000, n_iter_patience=100, model_name="model"):
"""
Core training function for all DeepD models
Args
:param model: (object) Constructed DeepD models
:param optimizer_op: (tf.train.Optimizer, tf.train.Optimizer.minimize()) Tensorflow operation for each iteration
:param data: (dict) training data dictionary
:param raw_loss: (tf.Tensor) graph loss to optimize, e.g. Mean Squared Error for autoencoders and
Cross Entropy Loss for classifiers
:param full_loss: (tf.Tensor) total graph loss with raw_loss and regularization penalty
:param n_iter: (int) maximum number of iterations allowed
    :param n_iter_patience: (int) tolerance for iterations with no decrease in training loss
:param output: (list of tf.Tensor) desired outputs tensors in a list
    :param model_name: filename for model saving (default: 'model'). Overridden if using a low verbose.
Returns
A list of numpy.array as specified in the param "output"
"""
sess, screenshot = model.sess, model.screenshot
n_unchanged = 0
idx_iter = 0
x_train_gold, x_valid_gold, x_test_gold = (data[key]['value'] for key in ['train', 'valid', 'test'])
y_train_gold, y_valid_gold, y_test_gold = (data[key]['class_annot'] for key in ['train', 'valid', 'test'])
sampler_train, sampler_valid = (data[key]['p_sampler'] for key in ['train', 'valid'])
# Training on train set batches with early stopping on valid set batched
print('[Training] Training on train set...')
while True:
if idx_iter > n_iter or n_unchanged > n_iter_patience:
break
        t0 = time.perf_counter()
pos_train = np.random.choice(range(x_train_gold.shape[0]), model.batch_size, p=sampler_train)
pos_valid = np.random.choice(range(x_valid_gold.shape[0]), model.batch_size, p=sampler_valid)
_, loss_train_i, mse_train_i = sess.run((optimizer_op[1], full_loss, raw_loss), feed_dict={
model.x: x_train_gold[pos_train], model.y: y_train_gold[pos_train]})
# record training
loss_valid_i, mse_valid_i = sess.run((full_loss, raw_loss), feed_dict={model.x: x_valid_gold[pos_valid],
model.y: y_valid_gold[pos_valid]})
new_loss = screenshot.avg_n_iters_loss(loss_valid_i)
        screenshot.log(filename="training.log", iteration=(idx_iter, n_iter),
                       unchanged=(n_unchanged, n_iter_patience), t=time.perf_counter() - t0,
                       loss=(loss_train_i, loss_valid_i, mse_train_i, mse_valid_i, np.nan))
# early stopping
idx_iter += 1
if new_loss < screenshot.loss_min:
n_unchanged = 0
screenshot.screenshot(loss_min=new_loss)
else:
n_unchanged += 1
# Evaluation on entire valid set
print('[Training] Evaluating on valid set... {}'.format(x_valid_gold.shape))
    t0 = time.perf_counter()
loss_valid_i, mse_valid_i = sess.run((full_loss, raw_loss),
feed_dict={model.x: x_valid_gold, model.y: y_valid_gold})
    screenshot.log(filename="training.log", iteration=(-1, -1),
                   unchanged=(-1, -1), t=time.perf_counter() - t0,
                   loss=(np.nan, loss_valid_i, np.nan, mse_valid_i, np.nan))
# Evaluation on test set
print('[Training] Evaluating on test set... {}'.format(x_test_gold.shape))
    t0 = time.perf_counter()
mse_test = sess.run(raw_loss, feed_dict={model.x: x_test_gold, model.y: y_test_gold})
    screenshot.log(filename="training.log", iteration=(-1, -1),
                   unchanged=(-1, -1), t=time.perf_counter() - t0,
                   loss=(np.nan, np.nan, np.nan, np.nan, mse_test))
# Save model
outputs = sess.run(output, feed_dict={model.x: data['test']['value'], model.y: data['test']['class_annot']})
screenshot.save_params()
screenshot.save_model('models/' + model_name + '.ckpt')
    sess.run(tf.compat.v1.variables_initializer(optimizer_op[0].variables()))  # refresh optimizer states
screenshot.reset() # refresh best_loss saved in screenshot
return outputs
def get_optimizer(loss_in, lr, optimizer="tf.compat.v1.train.AdamOptimizer({})", var_list=None, scope="Optimization"):
if var_list is None:
var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
with tf.compat.v1.variable_scope(scope, reuse=tf.compat.v1.AUTO_REUSE):
opt = eval(optimizer.format(lr))
opt_op = opt.minimize(loss_in, var_list=var_list)
print("[Construct] Successfully generated an operation {} for optimizing: {}.".format(opt_op.name, loss_in))
return opt, opt_op
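# Illustrative wiring of the helpers above (a sketch under assumptions, not
# part of the original module; `x` and `xhat` stand for already-built graph
# tensors, e.g. an autoencoder's input placeholder and its reconstruction):
#
# raw_loss, full_loss = get_loss(x, xhat, fn=get_mse, l1=1e-4, l2=1e-4)
# opt, opt_op = get_optimizer(full_loss, lr=1e-3)
# sess, saver = session_setup()
# session_init(sess, seed=42)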
import win32con
import win32gui
import ctypes
from ctypes import wintypes
# container class for global hook
# this will store the HHOOK id and mouse information
class Hook:
def __init__(self):
self.hook = 0
self.m_struct = None
class MSLLHOOKSTRUCT(ctypes.Structure):
_fields_ = [("pt", wintypes.POINT),
("mouseData", ctypes.c_long),
("flags", ctypes.c_long),
("time", ctypes.c_long),
("dwExtraInfo", ctypes.POINTER(ctypes.c_ulong) )]
def CopyMemory( Destination, Source ):
Source = ctypes.c_void_p(Source)
ctypes.windll.kernel32.RtlMoveMemory(ctypes.addressof(Destination), Source, ctypes.sizeof(Destination))
def PostQuitMessage( nMsg ):
return ctypes.windll.user32.PostQuitMessage(nMsg)
def GetModuleHandle( lpModuleName ):
return ctypes.windll.kernel32.GetModuleHandleA(lpModuleName)
def CallNextHookEx( hhk, nCode, wParam, lParam ):
return ctypes.windll.user32.CallNextHookEx(hhk, nCode, wParam, lParam)
def SetWindowsHookEx( idHook, lpFunc, hMod, dwThreadId ):
    WINFUNC = ctypes.WINFUNCTYPE(ctypes.c_long, ctypes.c_long, ctypes.c_long, ctypes.c_long)
    # keep a reference to the C callback so it is not garbage collected while the hook is installed
    SetWindowsHookEx._callback = WINFUNC(lpFunc)
    return ctypes.windll.user32.SetWindowsHookExA(idHook, SetWindowsHookEx._callback, hMod, dwThreadId)
def UnhookWindowsHookEx( hhk ):
    return ctypes.windll.user32.UnhookWindowsHookEx(hhk)
# create instance of global mouse hook class
mll_hook = Hook()
mll_hook.m_struct = MSLLHOOKSTRUCT()
# mouse hook callback. intercept mouse events
def LowLevelMouseProc( nCode, wParam, lParam ):
print("something...")
if nCode == win32con.HC_ACTION:
# lparam holds the starting address of the mouse hook structure
# call copymemory so that m_struct class points to the mouse structure pool
CopyMemory( mll_hook.m_struct, lParam )
# print out the cursors x and y screen position
print("x = [%d]\ty = [%d]" % (mll_hook.m_struct.pt.x,mll_hook.m_struct.pt.y))
# wparam contains the windows message id
# if middle mouse button is pressed close the program
if wParam == win32con.WM_MBUTTONUP:
PostQuitMessage(0)
return CallNextHookEx( mll_hook.hook, nCode, wParam, lParam )
if __name__ == '__main__':
print("Press the middle mouse button to exit ")
try:
mll_hook.hook = SetWindowsHookEx(win32con.WH_MOUSE_LL,
LowLevelMouseProc,
GetModuleHandle(0),
0)
except Exception as err:
print(err)
print("got this far...")
    # set up a message queue; any valid message loop (tkinter, pygtk, wxpython) also works
    # win32gui.PumpMessages()
    msg = wintypes.MSG()  # was missing: GetMessageA needs a MSG struct to fill
    while ctypes.windll.user32.GetMessageA(ctypes.byref(msg), None, 0, 0) != 0:
print("got message...")
ctypes.windll.user32.TranslateMessage (ctypes.byref (msg))
ctypes.windll.user32.DispatchMessageA (ctypes.byref (msg))
print("done?")
# unhook the mouse hook
UnhookWindowsHookEx(mll_hook.hook)
def y():
for i in range(10):
if i == 5:
return
yield i
for i in y():
    print(i)
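# Because `return` inside a generator raises StopIteration, the loop above
# prints only 0 1 2 3 4; the values 5..9 are never yielded.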
import re
# Remove HTML tags
def replace_tag(content):
cleaner = re.compile('<.*?>')
cleantext = re.sub(cleaner, '', content)
    return cleantext
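# Example: replace_tag('<p>hello <b>world</b></p>') returns 'hello world'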
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: jeremy.zhang(szujeremy@gmail.com, Shenzhen University, China)
import numpy as np
import SimpleITK as sitk
from skradiomics.utils.modules import FILTER_CLASSES
@FILTER_CLASSES.register_module(name='original')
def original_image(image, mask, settings, **kwargs):
yield image, mask
@FILTER_CLASSES.register_module(name='LoG')
def LoG_image(image, mask, settings, **kwargs):
size = np.array(image.GetSize())
spacing = np.array(image.GetSpacing())
if np.min(size) < 4:
raise ValueError('Image too small to apply LoG filter, size: %s' % size)
sigma_values = settings.get('LoGSigma', [])
for sigma in sigma_values:
if np.all(size >= np.ceil(sigma / spacing) + 1):
LoG_filter = sitk.LaplacianRecursiveGaussianImageFilter()
LoG_filter.SetNormalizeAcrossScale(True)
LoG_filter.SetSigma(sigma)
            image_name = ('log-sigma-%s-mm' % sigma).replace('.', '-')  # computed but unused in this snippet
yield LoG_filter.Execute(image), mask
@FILTER_CLASSES.register_module(name='wavelet')
def wavelet_image(image, mask, settings, **kwargs):
pass
@FILTER_CLASSES.register_module(name='square')
def square_image(image, mask, settings, **kwargs):
np_image = sitk.GetArrayFromImage(image).astype(np.float64)
coeff = 1 / np.sqrt(np.max(np.abs(np_image)))
np_image = (coeff * np_image) ** 2
sitk_image = sitk.GetImageFromArray(np_image)
sitk_image.CopyInformation(image)
yield sitk_image, mask
@FILTER_CLASSES.register_module(name='square_root')
def square_root_image(image, mask, settings, **kwargs):
np_image = sitk.GetArrayFromImage(image).astype(np.float64)
coeff = np.max(np.abs(np_image))
np_image[np_image > 0] = np.sqrt(np_image[np_image > 0] * coeff)
np_image[np_image < 0] = np.sqrt(-np_image[np_image < 0] * coeff)
sitk_image = sitk.GetImageFromArray(np_image)
sitk_image.CopyInformation(image)
yield sitk_image, mask
@FILTER_CLASSES.register_module(name='logarithm')
def logarithm_image(image, mask, settings, **kwargs):
np_image = sitk.GetArrayFromImage(image).astype(np.float64)
max_value = np.max(np.abs(np_image))
np_image[np_image > 0] = np.log(np_image[np_image > 0] + 1)
np_image[np_image < 0] = - np.log(-np_image[np_image < 0] + 1)
np_image = np_image * (max_value / np.max(np.abs(np_image)))
sitk_image = sitk.GetImageFromArray(np_image)
sitk_image.CopyInformation(image)
yield sitk_image, mask
@FILTER_CLASSES.register_module(name='exponential')
def exponential_image(image, mask, settings, **kwargs):
np_image = sitk.GetArrayFromImage(image).astype(np.float64)
max_value = np.max(np.abs(np_image))
coeff = np.log(max_value) / max_value
np_image = np.exp(coeff * np_image)
sitk_image = sitk.GetImageFromArray(np_image)
sitk_image.CopyInformation(image)
yield sitk_image, mask
@FILTER_CLASSES.register_module(name='gradient')
def gradient_image(image, mask, settings, **kwargs):
gradient_filter = sitk.GradientMagnitudeImageFilter()
gradient_filter.SetUseImageSpacing(settings.get('gradientUseSpacing', True))
yield gradient_filter.Execute(image), mask
# @FILTER_CLASSES.register_module(name='gabor')
# def gabor_image(image, mask, settings, **kwargs):
# pass
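# Illustrative consumption of one of the generator filters above (a sketch,
# not from the original module; assumes `image` and `mask` are SimpleITK
# images that are already loaded):
#
# for filtered_image, filtered_mask in square_image(image, mask, settings={}):
#     print(filtered_image.GetSize())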
import os.path
from typing import *
import matplotlib.colors
import matplotlib.pyplot as plt
import numpy as np
class Visualizer:
def __init__(
self,
show_figures: bool, save_directory: Optional[str],
figure_size: Tuple[float, float] = (4, 4),
color_dead: str = "white",
color_live: str = "black",
color_true_dead: str = "lightgrey",
color_true_live: str = "lightgreen",
color_false_dead: str = "blue",
color_false_live: str = "red",
binary_threshold: float = 0.5
):
self._show_figures = show_figures
self._save_directory = save_directory
self._figure_size = figure_size
self._binary_threshold = float(binary_threshold)
self._cmap_binary = matplotlib.colors.ListedColormap([color_dead, color_live])
self._norm_binary = matplotlib.colors.BoundaryNorm([-0.5, 0.5, 1.5], self._cmap_binary.N, clip=True)
self._cmap_comparison = matplotlib.colors.ListedColormap(
[color_true_dead, color_false_dead, color_false_live, color_true_live])
self._norm_comparison = matplotlib.colors.BoundaryNorm(
[-0.5, 0.5, 1.5, 2.5, 3.5], self._cmap_comparison.N, clip=True)
def draw_board(self, board: np.ndarray, title: str):
fig, ax = plt.subplots(figsize=self._figure_size)
board = board.astype(float)
board = 3.0 * np.greater_equal(board, self._binary_threshold)
ax.imshow(board, interpolation="nearest", cmap=self._cmap_binary, norm=self._norm_binary)
self._finalize_and_output_current_fig(title)
def draw_board_comparison(self, board_true: np.ndarray, board_pred: np.ndarray, title: str):
fig, ax = plt.subplots(figsize=self._figure_size)
board_true = board_true.astype(float)
board_pred = board_pred.astype(float)
board_true = np.greater_equal(board_true, self._binary_threshold)
board_pred = np.greater_equal(board_pred, self._binary_threshold)
board_combined = (board_pred * 2.0) + board_true
ax.imshow(board_combined, interpolation="nearest", cmap=self._cmap_comparison, norm=self._norm_comparison)
self._finalize_and_output_current_fig(title)
def _finalize_and_output_current_fig(self, title: str):
plt.title(title)
plt.tight_layout()
if self._save_directory is not None:
file_name = title + ".png"
plt.savefig(os.path.join(self._save_directory, file_name))
if self._show_figures:
plt.show()
plt.close()
    # A convenience method taking a board of, e.g., shape (1, 25, 25, 1).
def draw_board_arr(self, board, title):
mid2d = (board.shape[1], board.shape[2])
brd = board.reshape(mid2d)
self.draw_board(brd, title)
    # A convenience method taking a board of, e.g., shape (1, 25, 25, 1).
def draw_board_comparison_arr(self, board_true, board_pred, title):
mid2d = (board_true.shape[1], board_true.shape[2])
self.draw_board_comparison(
board_true.reshape(mid2d), board_pred.reshape(mid2d), title)
if __name__ == "__main__":
from data.pretty_test_target import pretty_test_target
test_board = pretty_test_target.squeeze()
visualizer = Visualizer(show_figures=True, save_directory=None)
visualizer.draw_board(test_board, "test_board")
from copy import copy
# def get_values(variables, seed=None, solution=None):
# if solution is not None:
# try:
# return [solution[x - 1] for x in variables]
# except IndexError:
# raise Exception('Solution have too few variables: %d' % len(solution))
# else:
# values = RandomState(seed=seed).randint(2, size=len(variables))
# return [x if values[i] else -x for i, x in enumerate(variables)]
class Variables:
slug = 'variables'
name = 'Variables'
def __init__(self, _list):
self._list = _list
self.length = len(self._list)
def __len__(self):
return len(self.variables())
@staticmethod
def _to_str(variables):
try:
strings, i, j = [], 0, 1
while i < len(variables):
if j == len(variables) or variables[j] - variables[i] != j - i:
if j - i > 2:
strings.append(f'{variables[i]}..{variables[j - 1]}')
else:
strings.extend(f'{variables[k]}' for k in range(i, j))
i, j = j, j + 1
else:
j += 1
return ' '.join(strings)
except TypeError:
return str(variables)
@staticmethod
def _from_str(string):
variables = []
for lit in string.split(' '):
if '..' in lit:
var = lit.split('..')
variables.extend(range(int(var[0]), int(var[1]) + 1))
else:
variables.append(int(lit))
return variables
def __repr__(self):
return self._to_str(self.variables())
def __str__(self):
variables = self.variables()
return f'[{self._to_str(variables)}]({len(variables)})'
def __iter__(self):
return self.variables().__iter__()
def __hash__(self):
return hash(tuple(self.variables()))
def __contains__(self, item):
return item in self.variables()
def __copy__(self):
return Variables(copy(self._list))
def variables(self):
return self._list
@staticmethod
def parse(string):
raise NotImplementedError
# def values(self, **kwargs):
# return get_values(self.variables(), **kwargs)
def __info__(self):
return {
'slug': self.slug,
'name': self.name,
}
__all__ = [
'Variables'
]
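# Illustrative round trip through the helpers above (a sketch, not from the
# original module): consecutive runs longer than two are compressed.
#
# Variables._to_str([1, 2, 3, 4, 7, 9])  -> '1..4 7 9'
# Variables._from_str('1..4 7 9')        -> [1, 2, 3, 4, 7, 9]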
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'ArcSqlManagedInstanceLicenseType',
'ArcSqlServerLicenseType',
'ConnectionStatus',
'DefenderStatus',
'EditionType',
'ExtendedLocationTypes',
'Infrastructure',
'SqlManagedInstanceSkuTier',
'SqlVersion',
]
class ArcSqlManagedInstanceLicenseType(str, Enum):
"""
The license type to apply for this managed instance.
"""
BASE_PRICE = "BasePrice"
LICENSE_INCLUDED = "LicenseIncluded"
class ArcSqlServerLicenseType(str, Enum):
"""
SQL Server license type.
"""
PAID = "Paid"
FREE = "Free"
HADR = "HADR"
UNDEFINED = "Undefined"
class ConnectionStatus(str, Enum):
"""
The cloud connectivity status.
"""
CONNECTED = "Connected"
DISCONNECTED = "Disconnected"
UNKNOWN = "Unknown"
class DefenderStatus(str, Enum):
"""
Status of Azure Defender.
"""
PROTECTED = "Protected"
UNPROTECTED = "Unprotected"
UNKNOWN = "Unknown"
class EditionType(str, Enum):
"""
SQL Server edition.
"""
EVALUATION = "Evaluation"
ENTERPRISE = "Enterprise"
STANDARD = "Standard"
WEB = "Web"
DEVELOPER = "Developer"
EXPRESS = "Express"
class ExtendedLocationTypes(str, Enum):
"""
The type of the extended location.
"""
CUSTOM_LOCATION = "CustomLocation"
class Infrastructure(str, Enum):
"""
The infrastructure the data controller is running on.
"""
AZURE = "azure"
GCP = "gcp"
AWS = "aws"
ALIBABA = "alibaba"
ONPREMISES = "onpremises"
OTHER = "other"
class SqlManagedInstanceSkuTier(str, Enum):
"""
The pricing tier for the instance.
"""
GENERAL_PURPOSE = "GeneralPurpose"
BUSINESS_CRITICAL = "BusinessCritical"
class SqlVersion(str, Enum):
"""
SQL Server version.
"""
SQ_L_SERVER_2019 = "SQL Server 2019"
SQ_L_SERVER_2017 = "SQL Server 2017"
SQ_L_SERVER_2016 = "SQL Server 2016"
import tensorflow as tf
import numpy as np
import data_reader
import modeleag as M
import network
import cv2
import os
MAXITER = 100000
class AIM(M.Model):
def initialize(self):
self.encoder = network.EncoderNet()
self.decoder = network.DecoderNet()
self.dis_z = network.DiscriminatorZ()
self.age_classifier = network.AgeClassifier()
self.dis_img = network.DiscriminatorPatch()
def generate(self, x, age_batch, gender_batch, img):
res, res_att, _ = self.decoder(self.encoder(x), age_batch, gender_batch, img)
# can choose either res or res_att as output
res = (res_att.numpy() + 1) * 127.5
res = np.uint8(res)
return res
if __name__=='__main__':
AIM_model = AIM()
optim = tf.train.AdamOptimizer(0.0001)
saver = M.Saver(AIM_model, optim)
saver.restore('./model/')
reader = data_reader.data_reader()
# create result folder
if not os.path.exists('./results/'):
os.mkdir('./results/')
# start training
eta = M.ETA(MAXITER+1)
for i in range(MAXITER+1):
img_batch, age_batch, gender_batch = reader.get_next_batch(100)
losses, loss_grad, tape = network.lossFunc(img_batch, age_batch, gender_batch, AIM_model)
network.applyGrad(loss_grad, AIM_model, optim, tape)
if i%10==0:
network.printLosses(losses, i, eta)
if i%1000==0:
# visualize every 1000 iters
for k in range(10):
                age_batch = np.zeros([age_batch.shape[0], 10], np.float32)
age_batch[:,k] = 1
res = AIM_model.generate(img_batch, age_batch, gender_batch, img_batch)
print(res.max())
print(res.min())
img_r = np.uint8((img_batch+1.)*127.5)
for j in range(len(res)):
cv2.imwrite('./results/%d_%d_r.jpg'%(i,j), img_r[j])
for j in range(len(res)):
cv2.imwrite('./results/%d_%d_%d.jpg'%(i,j,k), res[j])
if i%2000==0 and i>0:
saver.save('./model/%d.ckpt'%(i))
from django.urls import path
from . import views
urlpatterns = [
path('othersclosets/', views.othersclosets, name="othersclosets"),
path('othersclosets_t/', views.subchoice, name="othersclosets_t"),
path('othersclosets_s/', views.search, name="othersclosets_s"),
]
#!interpreter [optional-arg]
# -*- coding: utf-8 -*-
"""
file: tracklog.py
created: 1/1/2019
module: tracklog.py
project: pins_tiles
author: Dennis Schmitz
loads and saves tracklogs
"""
import logging
import os
import time
import dateutil.parser
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from .pins import Pin
from .tiles import *
logger = logging.getLogger('tracklog')
__author__ = 'Dennis Schmitz'
__copyright__ = 'Copyright 2020, Dennis Schmitz'
__version__ = '0.0'
__status__ = ''
# Column names from the Torque App output and their datatypes.
# All-float is boring. Also there are issues with the pandas read_csv,
# so these aren't used. The dataset uses the string "-" for no-data: a straight import without
# specifying dtype gives those columns dtype 'object', where some data is strings. With na_values='-'
# the parser replaces it with an ascii '∞' rather than a numeric NaN, and then the float conversion barfs.
# So they're not used right now. They're all floats anyway, and saying dtype=float in read_csv has the same effect.
"""
coltypes = {'Longitude': float, 'Latitude': float, 'GPS Speed (Meters/second)': float,
'Horizontal Dilution of Precision': float,
'Altitude': float, 'Bearing': float, 'G(x)': float, 'G(y)': float, 'G(z)': float, 'G(calibrated)': float,
'Absolute Throttle Position B(%)': float, 'Acceleration Sensor(Total)(g)': float,
'Acceleration Sensor(X axis)(g)': float,
'Acceleration Sensor(Y axis)(g)': float, 'Acceleration Sensor(Z axis)(g)': float,
'Accelerator PedalPosition D(%)': float,
'Accelerator PedalPosition E(%)': float, 'Ambient air temp(°F)': float,
'Average trip speed(whilst moving only)(mph)': float,
'Barometer (on Android device)(mb)': float, 'Barometric pressure (from vehicle)(psi)': float,
'Catalyst Temperature (Bank 1 Sensor 1)(°F)': float, 'Cost per mile/km (Instant)($/m)': float,
'Cost per mile/km (Trip)($/m)': float,
'Engine Coolant Temperature(°F)': float, 'Engine kW (At the wheels)(kW)': float, 'Engine Load(%)': float,
'Engine Load(Absolute)(%)': float, 'Engine RPM(rpm)': float, 'Evap System Vapour Pressure(Pa)': float,
'Fuel flow rate/minute(gal/min)': float, 'Fuel Level (From Engine ECU)(%)': float,
'GPS Accuracy(ft)': float,
'GPS Altitude(ft)': float, 'GPS Bearing(°)': float, 'GPS Latitude(°)': float, 'GPS Longitude(°)': float,
'GPS Satellites': float,
'Horsepower (At the wheels)(hp)': float, 'Intake Air Temperature(°F)': float,
'Intake Manifold Pressure(psi)': float,
'Miles Per Gallon(Instant)(mpg)': float, 'Miles Per Gallon(Long Term Average)(mpg)': float,
'Speed (GPS)(mph)': float,
'Speed (OBD)(mph)': float, 'Torque(Nm)': float, 'Transmission Temperature(Method 1)(°F)': float,
'Trip average MPG(mpg)': float,
'Trip Distance(miles)': float, 'Voltage (OBD Adapter)(V)': float, }
"""
# these are used in read_csv to force datetime conversion on these two columns
datecols = ['GPS Time', 'Device Time']
def check_for_strings(tracklist): # in a list of tracklogs
"""Check for string data in a list of dataframes"""
if not isinstance(tracklist, list):
tracklist = [tracklist]
found = False
for tl in tracklist:
df = tl.df
for col in df:
if df[col].dtype == "object":
print("{}:{}".format(df[col].name, df[col].dtype))
found = True
if found:
break
if found:
break
if not found:
print("No strings in data.")
else:
print("Strings found:")
print(df[col].name)
print(vars(df[col]))
def load_all_csv_torque_logs(folder, limit=None):
"""Loads all the Torque logs from a given folder and returns a list of the data."""
folder = os.path.normpath(folder)
assert os.path.isdir(folder), f"Folder {folder} does not exist"
logger.debug(folder)
logs = []
# get list of csv tracklogs
for the_file in os.scandir(folder):
if os.path.splitext(the_file.path)[1].lower() == '.csv':
logs.append(Tracklog(the_file))
if limit is not None and len(logs) >= limit:
logger.warning(f"Reached limit ({limit})")
break
return logs
def is_csv(fn):
if os.path.isfile(fn):
ext = os.path.splitext(fn)[1]
if ext.lower() == '.csv':
return True
return False
def is_h5(fn):
if os.path.isfile(fn):
ext = os.path.splitext(fn)[1]
if ext.lower() == '.h5':
return True
return False
# noinspection PyAttributeOutsideInit
class Tracklog:
"""Loads a single torque log from a directory entry object and returns a Tracklog object with the data"""
def __init__(self, some_file):
assert os.path.isfile(some_file)
self.fn = some_file
@property
def metadata(self):
return {'fdate': self.fdate, 'units': ','.join(self.units)}
@metadata.setter
def metadata(self, the_metadata):
assert isinstance(the_metadata, dict)
@property
def units(self):
if not hasattr(self, '_units'):
# noinspection PyStatementEffect
self.df # forces loading data
assert hasattr(self, '_units')
return self._units
def _from_csv(self):
# get column names
with open(self.fn, 'r', encoding='utf-8') as f:
self.colnames = list(map(str.strip, f.readline().strip().split(',')))
# get the units and remove from colnames
# anything in the list and contained in parentheses is a unit for that column
# after finding, then remove from the column name.
import re
# noinspection PyPep8Naming
TORQUE_UNITS = ['$/m', '%', 'Meters/second', 'Nm', 'Pa', 'V', 'ft', 'g', 'gal/min',
'hp', 'kW', 'mb', 'miles', 'mpg', 'mph', 'psi', 'rpm', '°', '°F', '°C']
units = []
new_colnames = []
for name in self.colnames:
# print(re.findall('\((.*?)\)',s)[-1])
stuff = re.findall(r'\((.*?)\)', name) # a list of parenthesized things in the string
if len(stuff) == 0:
units.append(None)
new_colnames.append(name)
elif stuff[-1] in TORQUE_UNITS:
units.append(stuff[-1])
new_colnames.append(name.replace(f'({stuff[-1]})', ''))
else:
units.append(None)
new_colnames.append(name)
self._units = units
self.colnames = new_colnames
# start reading the dataframe skipping row 0 and using our colnames as header. parse the datecols explicitly.
self._df = pd.read_csv(self.fn, na_values='-', skiprows=[0], header=None, names=self.colnames,
parse_dates=datecols, index_col='GPS Time')
for col in self._df: # check all the columns for stuff
if self._df[col].dtype == "object": # still getting the ascii '∞' instead of NAN so kludge fix this
self._df[col] = pd.to_numeric(self._df[col], 'coerce')
def _from_h5(self):
pass # stub
@property
def df(self):
if not hasattr(self, '_df'):
start = time.time()
if is_csv(self.fn):
self._from_csv()
elif is_h5(self.fn):
self._from_h5()
self.loadtime = time.time() - start # not important, using to eval methods to load the dataframe
return self._df
@property
def fdate(self): # get the date encoded in the filename
timestamp = None
path, fn = os.path.split(self.fn)
fn = os.path.splitext(fn)[0]
if fn[0:8] == "trackLog":
the_date, the_time = fn[9:].split("_")
# print(theDate, theTime)
the_time = the_time.replace("-", ":")
the_date_time = "{} {}".format(the_date, the_time)
timestamp = dateutil.parser.parse(the_date_time)
return timestamp
def map_plot(self, *a, **kwargs):
"""
Plots a track column on a map
Maps the track's longitude and latitude on a scatter plot with a map background.
track value shown as color of the plot point
"""
def rectify(ax, width=15, dpi=100):
"""Change axis aspect for square x and y"""
# fig.set_dpi(dpi)
aspect = np.ptp(ax.get_ylim()) / np.ptp(ax.get_xlim()) # y/x
ax.figure.set_size_inches((width, width * aspect))
ax = kwargs.get('ax')
if not kwargs.get('ax'):
fig, ax = plt.subplots()
kwargs['ax'] = ax # pass it down to df.plot()
else:
fig = ax.figure
column = kwargs.get('column')
if not column:
column = 'Speed (OBD)'
else:
del kwargs['column'] # this is a private param
if isinstance(column, str):
column = [column, ] # turn into list
detail = kwargs.get('detail')
if not detail:
detail = 1
else:
del kwargs['detail']
if not kwargs.get('cmap'): # The default is bad for speed tracks
kwargs['cmap'] = 'hot'
add_colorbar = kwargs.get('add_colorbar')
if add_colorbar:
del kwargs['add_colorbar']
# plot the trace as latitude, longitude scatter with 'column' as color
kwargs['kind'] = 'scatter'
kwargs['x'] = 'Longitude'
kwargs['y'] = 'Latitude'
kwargs['c'] = column
self.df.plot(**kwargs)
add_map_to_ax(ax=ax, detail=detail) # add a map png as axis background
rectify(ax) # fix the screen aspect to unwarp the map
if not add_colorbar: # then delete the colorbar created by dataframe.plot
if len(fig.axes) > 1:
fig.delaxes(fig.axes[-1]) # last axis is the colorbar
# plt.axis('off')
return # fig, ax
def plot_on_map(self, param='Speed (OBD)', detail=1, cmap='hot'):
"""@deprecated Plots a trace parameter on a map."""
lats, lngs = self.df['Latitude'], self.df['Longitude']
# plot longitude vs latitude using speed as color
track_bounds = Pin(np.min(lats), np.min(lngs)), Pin(np.max(lats), np.max(lngs))
track_center = Pin(np.median(lats), np.median(lngs))
fig = Figure() # create new figure outside pyplot
# A canvas must be manually attached to the figure (pyplot would automatically
# do it). This is done by instantiating the canvas with the figure as
# argument.
FigureCanvas(fig)
ax = fig.add_subplot(111)
# ax.set_aspect('equal')
# plot the trace as latitude, longitude scatter with 'param' as color
colors = self.df[param] # .values
self.df.plot(kind='scatter', x='Longitude', y='Latitude', ax=ax, c=colors, cmap=cmap)
# delete the colorbar created by dataframe.plot
if len(fig.axes) > 1:
fig.delaxes(fig.axes[-1]) # delete the last one
# save limits and use them to get a map
xlim = ax.get_xlim()
ylim = ax.get_ylim()
width = np.ptp(xlim)
height = np.ptp(ylim)
self.aspect = height / width
# fig.set_dpi(100)
screen_width = 15.
size = (screen_width, screen_width * self.aspect)
fig.set_size_inches(size)
# make lat, lng pins for bounds to get_bounded_map
# get a map matching the bounds of the plot
sw = Pin(ylim[0], xlim[0])
ne = Pin(ylim[1], xlim[1])
bounds = (sw, ne)
mapfield = Mapfield(bounds=bounds, detail=detail)
mapimage = mapfield.image
ext = *xlim, *ylim # left, right, bottom, top
ax.imshow(mapimage, zorder=0, extent=ext, aspect='auto', interpolation='bicubic')
# plt.axis('off')
return fig
@property
def h5fn(self):
return os.path.splitext(self.fn)[0] + ".h5"
def h5store(self, filename=None):
if not filename:
filename = self.h5fn
# df.to_hdf(filentame, key='torque_df', mode='w')
store = pd.HDFStore(filename)
store.put('torque_df', self.df)
store.get_storer('torque_df').attrs.metadata = self.metadata
store.close()
def h5load(self, filename=None):
if not filename:
filename = self.h5fn
# self._df = pd.read_hdf(filename)
store = pd.HDFStore(filename)
self._df = store['torque_df']
self.metadata = store.get_storer('torque_df').attrs.metadata
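# Illustrative usage (a sketch, not from the original module; assumes a
# folder of Torque CSV logs named like trackLog-YYYY-MMM-DD_HH-MM-SS.csv):
#
# logs = load_all_csv_torque_logs('tracklogs/', limit=5)
# logs[0].map_plot(column='Engine RPM', detail=2)
# logs[0].h5store()  # cache the parsed dataframe alongside the csv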
#!/usr/bin/env python2
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import ctypes
import torch
# load seam-finding library:
FINDSEAM_LIB = ctypes.cdll.LoadLibrary(
'libexperimental_deeplearning_lvdmaaten_adversarial_findseam.so')
# other globals:
LATTICE_CACHE = {} # cache lattices here
# function that constructs a four-connected lattice:
def __four_lattice__(height, width, use_cache=True):
# try the cache first:
if use_cache and (height, width) in LATTICE_CACHE:
return LATTICE_CACHE[(height, width)]
# assertions and initialization:
assert type(width) == int and type(height) == int and \
width > 0 and height > 0, 'height and width should be positive integers'
N = height * width
height, width = width, height # tensors are in row-major format
graph = {
'from': torch.LongTensor(4 * N - (height + width) * 2),
'to': torch.LongTensor(4 * N - (height + width) * 2),
}
# closure that copies stuff in:
def add_edges(i, j, offset):
graph['from'].narrow(0, offset, i.nelement()).copy_(i)
graph['from'].narrow(0, offset + i.nelement(), j.nelement()).copy_(j)
graph['to'].narrow(0, offset, j.nelement()).copy_(j)
graph['to'].narrow(0, offset + j.nelement(), i.nelement()).copy_(i)
# add vertical connections:
i = torch.arange(0, N).squeeze().long()
mask = torch.ByteTensor(N).fill_(1)
mask.index_fill_(0, torch.arange(height - 1, N, height).squeeze().long(), 0)
i = i[mask]
add_edges(i, torch.add(i, 1), 0)
# add horizontal connections:
offset = 2 * i.nelement()
i = torch.arange(0, N - height).squeeze().long()
add_edges(i, torch.add(i, height), offset)
# cache and return graph:
if use_cache:
LATTICE_CACHE[(height, width)] = graph
return graph
# utility function for checking inputs:
def __assert_inputs__(im1, im2, mask=None):
assert type(im1) == torch.ByteTensor or type(im1) == torch.FloatTensor, \
'im1 should be a ByteTensor or FloatTensor'
assert type(im2) == torch.ByteTensor or type(im2) == torch.FloatTensor, \
'im2 should be a ByteTensor or FloatTensor'
assert im1.dim() == 3, 'im1 should be three-dimensional'
assert im2.dim() == 3, 'im2 should be three-dimensional'
assert im1.size() == im2.size(), 'im1 and im2 should have same size'
if mask is not None:
assert mask.dim() == 2, 'mask should be two-dimensional'
assert type(mask) == torch.ByteTensor, 'mask should be torch.ByteTensor'
assert mask.size(0) == im1.size(1) and mask.size(1) == im1.size(2), \
'mask should have same height and width as images'
# function that finds seam between two images:
def find_seam(im1, im2, mask):
# assertions:
__assert_inputs__(im1, im2, mask)
im1 = im1.float()
im2 = im2.float()
# construct edge weights:
graph = __four_lattice__(im1.size(1), im1.size(2))
values = torch.FloatTensor(graph['from'].size(0)).fill_(0.)
for c in range(im1.size(0)):
im1c = im1[c].contiguous().view(im1.size(1) * im1.size(2))
im2c = im2[c].contiguous().view(im2.size(1) * im2.size(2))
values.add_(torch.abs(
im2c.index_select(0, graph['to']) -
im1c.index_select(0, graph['from'])
))
# construct terminal weights:
idxim = torch.arange(0, mask.nelement()).long().view(mask.size())
tvalues = torch.FloatTensor(mask.nelement(), 2).fill_(0)
for c in range(2):
select_c = (mask == (c + 1))
if select_c.any():
tvalues.select(1, c).index_fill_(0, idxim[select_c], float('inf'))
# convert graph to IntTensor (make sure this is not GC'ed):
graph_from = graph['from'].int()
graph_to = graph['to'].int()
# run the Boykov algorithm to obtain stitching mask:
labels = torch.IntTensor(mask.nelement())
FINDSEAM_LIB.findseam(
ctypes.c_int(mask.nelement()),
ctypes.c_int(values.nelement()),
ctypes.c_void_p(graph_from.data_ptr()),
ctypes.c_void_p(graph_to.data_ptr()),
ctypes.c_void_p(values.data_ptr()),
ctypes.c_void_p(tvalues.data_ptr()),
ctypes.c_void_p(labels.data_ptr()),
)
mask = labels.resize_(mask.size()).byte()
return mask
# function that performs the stitch:
def __stitch__(im1, im2, overlap, y, x):
# assertions:
__assert_inputs__(im1, im2)
# construct mask:
patch_size = im1.size(1)
mask = torch.ByteTensor(patch_size, patch_size).fill_(2)
    if y > 0:  # no overlap along the top edge when y == 0
        mask.narrow(0, 0, overlap).fill_(0)
    if x > 0:  # no overlap along the left edge when x == 0
        mask.narrow(1, 0, overlap).fill_(0)
# seam the two patches:
seam_mask = find_seam(im1, im2, mask)
stitched_im = im1.clone()
for c in range(stitched_im.size(0)):
        stitched_im[c][seam_mask == 1] = im2[c][seam_mask == 1]
return stitched_im
# main quilting function:
def quilting(img, faiss_index, patch_dict, patch_size=5, overlap=2,
graphcut=False, patch_transform=None):
# assertions:
assert torch.is_tensor(img)
assert torch.is_tensor(patch_dict) and patch_dict.dim() == 2
assert type(patch_size) == int and patch_size > 0
assert type(overlap) == int and overlap > 0
assert patch_size > overlap
if patch_transform is not None:
assert callable(patch_transform)
# gather all image patches:
patches = []
y_range = range(0, img.size(1) - patch_size, patch_size - overlap)
x_range = range(0, img.size(2) - patch_size, patch_size - overlap)
for y in y_range:
        for x in x_range:
patch = img[:, y:y + patch_size, x:x + patch_size]
if patch_transform is not None:
patch = patch_transform(patch)
patches.append(patch)
# find nearest patches in faiss index:
patches = torch.stack(patches, dim=0)
patches = patches.view(patches.size(0), int(patches.nelement() / patches.size(0)))
faiss_index.nprobe = 5
_, neighbors = faiss_index.search(patches.numpy(), 1)
neighbors = torch.LongTensor(neighbors).squeeze()
if (neighbors == -1).any():
print('WARNING: %d out of %d neighbor searches failed.' %
((neighbors == -1).sum(), neighbors.nelement()))
# piece the image back together:
n = 0
quilt_img = img.clone().fill_(0)
for y in y_range:
for x in x_range:
if neighbors[n] != -1:
# get current image and new patch:
patch = patch_dict[neighbors[n]].view(
img.size(0), patch_size, patch_size
)
cur_img = quilt_img[:, y:y + patch_size, x:x + patch_size]
# compute graph cut if requested:
if graphcut:
patch = __stitch__(cur_img, patch, overlap, y, x)
# copy the patch into the image:
cur_img.copy_(patch)
n += 1
# return the quilted image:
return quilt_img
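# A minimal usage sketch (not part of the original module). It assumes a
# trained faiss IVF index over the float32 rows of `patch_dict`, whose width
# must be C * patch_size * patch_size; quilting() sets nprobe, which only
# applies to IVF-style indexes:
#
#   import faiss
#   d = patch_dict.size(1)
#   quantizer = faiss.IndexFlatL2(d)
#   index = faiss.IndexIVFFlat(quantizer, d, 100)  # 100 clusters, illustrative
#   index.train(patch_dict.numpy())
#   index.add(patch_dict.numpy())
#   quilted = quilting(img, index, patch_dict, patch_size=5, overlap=2,
#                      graphcut=True)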
|
# allow-warning: converting unicode literal to str
# currently broken:
# import os.path
import os
r1 = os.urandom(8)
r2 = os.urandom(8)
print(len(r1), len(r2), type(r1), type(r2), r1 == r2)
|
import boto3
import os.path
from flask import current_app as app
from werkzeug.utils import secure_filename
def s3_upload(source_file):
source_filename = secure_filename(source_file.data.filename)
# Connect to S3 and upload file.
s3 = boto3.client(
"s3",
aws_access_key_id=app.config["AWS_KEY"],
aws_secret_access_key=app.config["AWS_SECRET"],
aws_session_token=app.config["AWS_SESSION"]
)
s3.put_object(
Bucket=app.config["S3_BUCKET"],
Key=source_filename,
Body=source_file.data
)
return source_filename
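# A minimal usage sketch (the route and form class are hypothetical; only the
# helper above is part of the original file). `form.photo` is assumed to be a
# Flask-WTF FileField, which provides the `.data.filename` the helper expects:
#
#   @app.route("/upload", methods=["POST"])
#   def upload():
#       form = PhotoForm()
#       if form.validate_on_submit():
#           key = s3_upload(form.photo)
#           return "stored as " + key
#       return "invalid", 400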
|
import io
import os
import re
import textwrap
import typing as tp
from statistics import mean
from string import ascii_letters
from subprocess import check_output
from PIL import Image, ImageDraw, ImageFont
from PIL.ImageFont import FreeTypeFont
from brother_ql import BrotherQLRaster, conversion
from brother_ql.backends.helpers import send
from loguru import logger
from ..shared.Singleton import SingletonMeta
from ..shared.config import config
class Printer(metaclass=SingletonMeta):
"""a printing task for the label printer. executed at init"""
def __init__(self) -> None:
self._paper_width: str = str(config.printer.paper_width)
self._model: str = config.printer.printer_model
self._enabled: bool = config.printer.enable
@property
def _address(self) -> tp.Optional[str]:
"""Get printer USB bus address"""
try:
command: str = f'lsusb | grep "{self._model}"'
output: str = check_output(command, shell=True, text=True)
addresses: tp.List[str] = re.findall("[0-9a-fA-F]{4}:[0-9a-fA-F]{4}", output)
address: tp.List[str] = addresses[0].split(":")
bus_address: str = f"usb://0x{address[0]}:0x{address[1]}"
return bus_address
except Exception as e:
logger.warning("Could not get the printer USB bus address. The printer may be disconnected.")
logger.debug(f"An error occurred while parsing USB address: {e}")
return None
def print_image(self, image_data: tp.Union[str, bytes], annotation: tp.Optional[str] = None) -> None:
"""execute the task"""
if not all((self._enabled, self._address)):
message = "Printer disabled in config or disconnected. Task dropped."
            logger.info(message)
raise BrokenPipeError(message)
logger.info("Printing task created for image")
        image: Image.Image = self._get_image(image_data)
if annotation:
image = self._annotate_image(image, annotation)
self._print_image(image)
logger.info("Printing task done")
    def _get_image(self, image_data: tp.Union[str, bytes]) -> Image.Image:
        """Prepare and resize the image before printing."""
        if isinstance(image_data, str):
            image: Image.Image = Image.open(image_data)
else:
image = Image.open(io.BytesIO(image_data))
w, h = image.size
target_w = 696 if self._paper_width == "62" else 554
target_h = int(h * (target_w / w))
image = image.resize((target_w, target_h))
return image
    def _print_image(self, image: Image.Image) -> None:
        """Print the provided image."""
logger.info(f"Printing image of size {image.size}")
qlr: BrotherQLRaster = BrotherQLRaster(self._model)
red: bool = config.printer.red
conversion.convert(qlr, [image], self._paper_width, red=red)
        # provide multiple fallbacks, as the QL library is pretty unstable
        # and the printer keeps getting different addresses, so we try them all
directory = "/dev/usb"
usb_devices = (f"{directory}/{desc}" for desc in os.listdir(directory) if desc.startswith("lp"))
backends = [("linux_kernel", dev) for dev in usb_devices]
backends.insert(0, ("pyusb", str(self._address)))
success = False
for backend, address in backends:
try:
status = send(
instructions=qlr.data,
backend_identifier=backend,
printer_identifier=address,
)
logger.debug(f"Printing succeeded using {backend=}, {address=}.")
logger.debug(f"Job status: {status}")
success = True
break
except Exception as e:
logger.warning(f"Execution of 'brother_ql.backends.helpers.send()' failed. {backend=}, {address=}. {e}")
if not success:
            raise BrokenPipeError("Printing failed. No backend was able to handle the task.")
    @staticmethod
    def _annotate_image(image: Image.Image, text: str) -> Image.Image:
        """Add an annotation to the bottom of the image."""
# wrap the message
font: FreeTypeFont = ImageFont.truetype("src/printing/fonts/helvetica-cyrillic-bold.ttf", 24)
avg_char_width: float = mean((font.getsize(char)[0] for char in ascii_letters))
img_w, img_h = image.size
max_chars_in_line: int = int(img_w * 0.95 / avg_char_width)
wrapped_text: str = textwrap.fill(text, max_chars_in_line)
# get message size
        sample_draw: ImageDraw.ImageDraw = ImageDraw.Draw(image)
_, txt_h = sample_draw.textsize(wrapped_text, font)
# https://stackoverflow.com/questions/59008322/pillow-imagedraw-text-coordinates-to-center/59008967#59008967
txt_h += font.getoffset(text)[1]
# draw the message
        annotated_image: Image.Image = Image.new(mode="RGB", size=(img_w, img_h + txt_h + 5), color=(255, 255, 255))
annotated_image.paste(image, (0, 0))
new_img_w, new_img_h = annotated_image.size
        txt_draw: ImageDraw.ImageDraw = ImageDraw.Draw(annotated_image)
text_pos: tp.Tuple[int, int] = (
int(new_img_w / 2),
int((new_img_h - img_h) / 2 + img_h),
)
txt_draw.text(
text_pos,
wrapped_text,
font=font,
fill=(0, 0, 0),
anchor="mm",
align="center",
)
return annotated_image
|
from datetime import datetime
import re
from sqlalchemy.dialects.postgresql import HSTORE
from sqlalchemy.schema import UniqueConstraint, CheckConstraint
from sqlalchemy.orm import validates
from validators import url as url_validator
from core.database import (
relationship,
Boolean,
Column,
DateTime,
ForeignKey,
Integer,
Model,
String,
)
class UriPrefix(Model):
"""Common prefix amount various URIs (only DOI for now).
Columns
-------
value:
String representation of the doi/uri prefix.
"""
__tablename__ = 'uri_prefix'
id = Column(Integer, primary_key=True)
value = Column(String, unique=True, nullable=False)
last_checked = Column(DateTime)
# # TODO: replace last_checked with another table - one day.
uris = relationship('Uri', backref='uri_prefix') # Child Relationships
errors = relationship('Error', backref='uri_prefix')
class Uri(Model):
"""DOI to be scraped.
Columns
-------
raw:
String representation of doi/uri.
    last_checked:
        datetime when a scrape was last triggered for this uri.
"""
__tablename__ = 'uri'
id = Column(Integer, primary_key=True)
raw = Column(String, unique=True, nullable=False)
last_checked = Column(DateTime)
errors = relationship('Error', backref='uri') # Child Relationships
events = relationship('Event', backref='uri')
urls = relationship('Url', backref='uri')
metrics = relationship('Metric', uselist=False, backref='uri')
prefix = Column(
String,
ForeignKey('uri_prefix.value', name='uri_prefix_fkey')
)
    # TODO: Add validator for DOIs.
@validates('raw')
def valid_uri(self, key, uri):
pattern = re.compile(r'10\.\d{4,9}/[-._;()/:A-Z0-9]+', re.I)
assert pattern.match(uri)
return uri
# FIXME: this only validates DOIs.
def __str__(self):
return self.raw
@property
def owners(self):
return self.users.all()
class Url(Model):
"""Url associated with DOI."""
__tablename__ = 'url'
id = Column(Integer, primary_key=True)
url = Column(String)
uri_id = Column(Integer, ForeignKey('uri.id'), nullable=False)
@validates('url')
def url_validator(self, key, url):
assert url_validator(url.replace('_', '-'))
return url
def __str__(self):
return self.url
class Scrape(Model):
"""Keep track of when DOIs were checked for new events."""
__tablename__ = 'scrape'
id = Column(Integer, primary_key=True)
start_date = Column(DateTime, default=datetime.utcnow)
end_date = Column(DateTime, nullable=True)
# Child Relationships
errors = relationship('Error', backref='scrape')
raw_events = relationship('RawEvent', backref='scrape')
def __str__(self):
return f'<Scrape on: {self.start_date}>'
class Error(Model):
""" Keep track of failed scrapes for a given doi and origin. Only created
when scrape task exceeds max retries.
Columns:
--------
description:
Description of error.
    last_successful_scrape_at:
        When the last successful scrape occurred. Used when looking for new events.
"""
__tablename__ = 'error'
__table_args__ = (
(
CheckConstraint(
'(uri_id IS NULL) <> (uri_prefix_id IS NULL)',
name='uri_xor_uri_prefix_required'
)
),
)
id = Column(Integer, primary_key=True)
uri_id = Column(Integer, ForeignKey('uri.id'), nullable=True)
uri_prefix_id = Column(
Integer,
ForeignKey('uri_prefix.id', name='error_uri_prefix_id_fkey'),
nullable=True,
)
scrape_id = Column(Integer, ForeignKey('scrape.id'), nullable=False)
origin = Column(Integer, nullable=True)
provider = Column(Integer, nullable=False)
description = Column(String(100))
last_successful_scrape_at = Column(DateTime, nullable=False)
def __str__(self):
return f'<Error: {self.id} - {self.description}>'
class Event(Model):
""" Hold data related to the events with unique subject ids.
Columns:
--------
origin:
The service where the event originated (e.g. Twitter).
subject_id:
identifier of the event, e.g. url of a tweet, doi of citing article.
created_at:
When this event first occurred on the origin service (specified by
the provider).
"""
__tablename__ = 'event'
id = Column(Integer, primary_key=True)
uri_id = Column(Integer, ForeignKey('uri.id'), nullable=False)
last_updated = Column(DateTime, default=datetime.utcnow)
subject_id = Column(String, nullable=False)
origin = Column(Integer, nullable=False)
created_at = Column(DateTime, nullable=False)
is_deleted = Column(Boolean, default=False)
raw_events = relationship('RawEvent', backref='event')
__table_args__ = (
UniqueConstraint('uri_id', 'subject_id'),
)
def __str__(self):
return f'<Event: {self.id} - {self.uri}>'
class RawEvent(Model): # May want to rename this (and the Event table)
""" Hold raw event data. This may be duplicated for what we would consider
to be a single 'event'. For example, if a Wikipedia page is updated it
creates an event on Crossref Event Data, but this should not add to the
metrics count. This will also store data related to event deletions.
Columns:
--------
provider:
The service we are talking to in order to retrieve the event
(e.g. Crossref Event Data API).
origin:
The service where the event originated (e.g. Twitter).
external_id:
id of the event as specified by the provider (e.g. UUID of Crossref
Event data event)
created_at:
When this event occurred on the origin service (specified by the
provider).
reason_for_deletion:
Description of why the event was deleted (applies only to RawEvent
entries that mark an Event deletion).
"""
__tablename__ = 'raw_event'
id = Column(Integer, primary_key=True)
event_id = Column(Integer, ForeignKey('event.id'), nullable=False)
scrape_id = Column(Integer, ForeignKey('scrape.id'), nullable=True)
external_id = Column(String, unique=True, nullable=True)
origin = Column(Integer, nullable=False)
provider = Column(Integer, nullable=False)
created_at = Column(DateTime, nullable=False)
reason_for_deletion = Column(String, nullable=True)
def __str__(self):
return f'<Raw Event: {self.id} - {self.event.uri}>'
class Metric(Model):
"""Sum of events for a given doi for each origin."""
__tablename__ = 'metric'
id = Column(Integer, primary_key=True)
uri_id = Column(Integer, ForeignKey('uri.id'), nullable=False)
data = Column(HSTORE)
last_updated = Column(DateTime, default=datetime.utcnow)
def __str__(self):
return f'<Metric: {self.uri}: {self.data}>'
|
import torch
import torch.nn as nn
class Lovasz(nn.Module):
"""Lovasz Loss. Cf: https://arxiv.org/abs/1705.08790 """
def __init__(self):
super().__init__()
def forward(self, inputs, targets, classes_weights, tiles_weights, config):
N, C, H, W = inputs.size()
        assert C >= 2, "Classification implies at least two classes"
        assert len(classes_weights) == C, "classes_weights length must match the number of classes"
loss = 0.0
non_empty_C = 0
for c in range(C):
if classes_weights[c] == 0.0:
continue
inputs_class = inputs[:, c]
masks = (targets == c).float()
for mask, input_class, tile_weight in zip(masks.view(N, -1), inputs_class.view(N, -1), tiles_weights):
if mask.sum() == 0 and (input_class > 0.25).sum() == 0:
continue
distance = (mask - input_class).abs()
distance_sorted, indices = torch.sort(distance, 0, descending=True)
mask_sorted = mask[indices.data]
inter = mask_sorted.sum() - mask_sorted.cumsum(0)
union = mask_sorted.sum() + (1.0 - mask_sorted).cumsum(0)
iou = 1.0 - inter / union
p = len(mask_sorted)
iou[1:p] = iou[1:p] - iou[0:-1]
loss += torch.dot(distance_sorted, iou) * tile_weight * classes_weights[c]
non_empty_C += 1
return loss / N / non_empty_C
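# A minimal usage sketch (shapes and weights are illustrative; `config` is
# accepted but unused by forward, so None is passed):
#
#   loss_fn = Lovasz()
#   inputs = torch.softmax(torch.randn(2, 3, 8, 8), dim=1)  # N=2, C=3
#   targets = torch.randint(0, 3, (2, 8, 8))
#   loss = loss_fn(inputs, targets,
#                  classes_weights=[1.0, 1.0, 1.0],  # one weight per class
#                  tiles_weights=[1.0, 1.0],         # one weight per sample
#                  config=None)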
|
from datetime import datetime
from core.config import SQLITE_URL
from models.models import LinkedLists, User
from tortoise.query_utils import Q
from tortoise.contrib.fastapi import register_tortoise
def init(app):
register_tortoise(
app,
db_url=SQLITE_URL,
modules={'models': ['models.models']},
generate_schemas=True,
add_exception_handlers=True,
)
async def link_lists(sync_from, sync_to):
    await LinkedLists.create(sync_from=sync_from, sync_to=sync_to)
async def unlink_list(sync_from=None, sync_to=None):
    if sync_from is not None and sync_to is not None:
        return await LinkedLists.filter(Q(sync_from=sync_from) & Q(sync_to=sync_to)).delete()
    elif sync_from is not None:
        return await LinkedLists.filter(sync_from=sync_from).delete()
    elif sync_to is not None:
        return await LinkedLists.filter(sync_to=sync_to).delete()
    else:
        return await LinkedLists.all().delete()
async def get_linked_lists():
linked_list = await LinkedLists.all()
return list(linked_list)
async def set_synced_date(sync_from):
await LinkedLists.filter(sync_from=sync_from).update(last_synced=datetime.now())
async def set_user(client_id, client_secret):
    await User.create(client_id=client_id, client_secret=client_secret)
async def set_username(client_id, username):
    await User.filter(client_id=client_id).update(client_username=username)
async def get_all_users():
    users = await User.all()
    return users
async def get_user_credentials(client_id, client_secret):
    return await User.filter(Q(client_id=client_id) & Q(client_secret=client_secret)).first()
async def reset_user():
await User.all().delete()
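# A minimal wiring sketch (the entry point is hypothetical; SQLITE_URL and the
# model module come from the imports above):
#
#   from fastapi import FastAPI
#   app = FastAPI()
#   init(app)  # registers Tortoise ORM and generates schemas on startup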
|
# -*- coding: utf8 -*-
# 0123456789012345678901234567890123456789012345678901234567890123456789012345678
# The digit ruler above is used to check whether a line of code exceeds 80 characters; wrap it if so
import tkinter as tk
import random
import codecs
root = tk.Tk(className='Example Loop Model')
root.geometry('800x400')
count = 0  # two clicks of the show button complete one EN->CN or CN->EN round; count (0 or 1) tracks which click this is
e_or_c = 0  # flags the direction: 0 = English-to-Chinese, 1 = Chinese-to-English
keys = list()  # stores all the English sentences
key_num = 0  # index of the current English sentence within keys
example = {}  # stores the English/Chinese sentence pairs as a dict
## On every launch, read the data file, process the strings, and store the pairs in the dict `example`
f = codecs.open("elm.txt", encoding="utf8")
data = f.read()
f.close()
data = data.rstrip('\n')  # prepare for the split below
datas = data.split('\n')
for elm in datas:
elm_element = elm.split('%')
example[elm_element[0]] = elm_element[1]
## On the first click of the show button, decide the direction, pick a random pair from the dict `example`,
## and show the English or Chinese side; the second click reveals the answer.
def show_call_back():
"""点击show按钮时执行的函数,每点击一次显示一条英语或汉语 """
global count, keys, key_num, e_or_c
if count == 0:
keys = list(example.keys())
key_num = random.randint(0, len(keys)-1)
count += 1
if e_or_c == 0:
var_chinese.set('')
else:
var_english.set('')
else:
count -= 1
if e_or_c == 0:
var_english.set(keys[key_num])
e_or_c += 1
elif e_or_c == 1:
var_chinese.set(example[keys[key_num]])
e_or_c -= 1
## Toggle between EN->CN and CN->EN by flipping e_or_c
def change_call_back():
"""点击change按钮调用的函数,在英译汉与汉译英模式直接切换"""
global e_or_c
var_english.set("English")
var_chinese.set("Chinese")
if e_or_c == 0:
e_or_c = 1
else:
e_or_c = 0
## Save the contents of the entry boxes into the file
def insert_call_back():
"""点击insert按钮调用的函数,将输入的范例插入文件"""
global elmdict
entry_english_str = entry_english.get()
entry_chinese_stry = entry_chinese.get()
example[entry_english_str] = entry_chinese_stry
f = codecs.open("elm.txt", "a", encoding = "utf8")
f.write(entry_english_str + '%' + entry_chinese_stry + '\n')
f.close()
entry_english.delete(0, 'end')
entry_chinese.delete(0, 'end')
## StringVars backing the English and Chinese labels; call set() on them to change the displayed text
var_english = tk.StringVar()
var_chinese = tk.StringVar()
## Define and place the labels that display the English and Chinese sentences
english_label = tk.Label(textvariable=var_english, font=("黑体", 24))
english_label.pack()
chinese_label = tk.Label(textvariable=var_chinese, font=("宋体", 14))
chinese_label.pack()
## Define and place the show and change buttons
show_button = tk.Button(root, text='show', command=show_call_back,
                        background='#00ff00')
change_button = tk.Button(root, text='change', command=change_call_back,
                          background='#0000ff')
show_button.pack()
change_button.pack()
## Define and place the entry boxes for English and Chinese input
entry_english = tk.Entry(root, width=80, font=("黑体", 24))
entry_english.pack()
entry_chinese = tk.Entry(root, width=50, font=("宋体", 14))
entry_chinese.pack()
insert_button = tk.Button(root, text='insert', command=insert_call_back,
                          bg="#ff0000")  # define and place the insert button
insert_button.pack()
root.mainloop()  # start the main event loop
## problems:
## 1. blank entries may be inserted into the file
## 2. consecutive clicks can show the same sentence again, which is a poor experience
## 3. the change button is best clicked only after two show clicks, otherwise the experience suffers
######################################################################
## ideas:
## 1. each collaborator gets their own file, selectable in the UI via a dropdown menu or radio buttons
## Simulating that I have now built a new feature on the login-feature branch
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
import logging
import sys
from xmind2testcase.zentao import xmind_to_zentao_csv_file
from xmind2testcase.testlink import xmind_to_testlink_xml_file
from xmind2testcase.utils import get_absolute_path, xmind_testcase_to_json_file
from webtool.application import launch
"""
A tool to parse xmind file into testcase file, which will help you generate a testlink recognized
xml file or a zentao recognized cvs file, then you can import it into testlink or zentao.
Usage:
xmind2testcase [path_to_xmind_file] [-csv] [-xml] [-json]
xmind2testcase [webtool] [port_num]
Example:
xmind2testcase /path/to/testcase.xmind => output testcase.csv、testcase.xml、testcase.json
xmind2testcase /path/to/testcase.xmind -csv => output testcase.csv
xmind2testcase /path/to/testcase.xmind -xml => output testcase.xml
xmind2testcase /path/to/testcase.xmind -json => output testcase.json
xmind2testcase webtool => launch the web testcase conversion tool locally: 127.0.0.1:5001
xmind2testcase webtool 8000 => launch the web testcase conversion tool locally: 127.0.0.1:8000
"""
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)s %(levelname)s [%(module)s - %(funcName)s]: %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
def cli_main():
if len(sys.argv) > 1 and sys.argv[1].endswith('.xmind'):
xmind_file = sys.argv[1]
xmind_file = get_absolute_path(xmind_file)
logging.info('Start to convert XMind file: %s', xmind_file)
if len(sys.argv) == 3 and sys.argv[2] == '-json':
testlink_json_file = xmind_testcase_to_json_file(xmind_file)
logging.info('Convert XMind file to testcase json file successfully: %s', testlink_json_file)
elif len(sys.argv) == 3 and sys.argv[2] == '-xml':
testlink_xml_file = xmind_to_testlink_xml_file(xmind_file)
logging.info('Convert XMind file to testlink xml files successfully: %s', testlink_xml_file)
elif len(sys.argv) == 3 and sys.argv[2] == '-csv':
zentao_csv_file = xmind_to_zentao_csv_file(xmind_file)
logging.info('Convert XMind file to zentao csv file successfully: %s', zentao_csv_file)
else:
testlink_json_file = xmind_testcase_to_json_file(xmind_file)
testlink_xml_file = xmind_to_testlink_xml_file(xmind_file)
zentao_csv_file = xmind_to_zentao_csv_file(xmind_file)
            logging.info('Convert XMind file successfully: \n'
                         '1. testcase json file(%s)\n'
                         '2. testlink xml file(%s)\n'
                         '3. zentao csv file(%s)',
testlink_json_file,
testlink_xml_file,
zentao_csv_file)
elif len(sys.argv) > 1 and sys.argv[1] == 'webtool':
if len(sys.argv) == 3:
try:
port = int(sys.argv[2])
launch(port=port)
except ValueError:
launch()
else:
launch()
else:
print(__doc__)
logging.error('%s', __doc__)
if __name__ == '__main__':
cli_main()
|
import logging
from elasticsearch import Elasticsearch
from crawlerx_server.settings import ELASTIC_SEARCH_USERNAME, ELASTIC_SEARCH_PASSWORD, ELASTIC_SEARCH_HOSTNAME, \
ELASTIC_SEARCH_PORT
class ELKConnection:
elk_uri = 'http://' + ELASTIC_SEARCH_USERNAME + ':' + ELASTIC_SEARCH_PASSWORD + '@' + ELASTIC_SEARCH_HOSTNAME \
+ ':' + ELASTIC_SEARCH_PORT
def __init__(self):
self.es = Elasticsearch(hosts=self.elk_uri)
logging.info("Connection between ElasticSearch has been established")
def close_connection(self):
# clean up when db is closed
self.es.close()
def get_data_from_query(self, req_index, body):
try:
if self.es.indices.exists(index=req_index):
res = self.es.search(index=req_index, body=body)
return res
        except Exception as e:
            logging.error("Error while getting data from Elasticsearch: %s", e)
|
import os
PROJECT_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
DATA_DIR = os.path.join(PROJECT_ROOT, "data")
SCRAP_TMP = os.path.join(DATA_DIR, "db_dump.tmp")
def split_dict(target_dict, nums):
"""
Split a dictionary whose value is a list into multiple smaller dictionary with same key but shorter list.
E.g origin = {k1:[1,2,3,4], k2:[5,6,7]} may be splitted as p1 = {k1:[1,3],ke:[5,7]} and p2{k1:[2,4],k2:[6]}
:param target_dict:
:param nums:
:return:
"""
if nums <= 1:
return target_dict
sub_dicts = []
for i in range(nums):
sub_dicts.append(dict())
for key in target_dict:
origin_list = target_dict[key]
for item_index, item in enumerate(origin_list):
sub_dict_index = item_index % nums
sub_dict = sub_dicts[sub_dict_index]
if key not in sub_dict:
sub_dict[key] = []
sub_dict[key].append(item)
return sub_dicts
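# A quick illustration of the round-robin split performed above (matches the
# example in the docstring):
#
#   parts = split_dict({'k1': [1, 2, 3, 4], 'k2': [5, 6, 7]}, 2)
#   # parts == [{'k1': [1, 3], 'k2': [5, 7]}, {'k1': [2, 4], 'k2': [6]}]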
|
import time
from concurrent.futures import ThreadPoolExecutor
import buzzard as buzz
import numpy as np
def work(i):
j = i % 9 + 1
assert buzz.env.significant == 42
with buzz.Env(significant=j):
assert buzz.env.significant == j
time.sleep(np.random.rand() / 100)
assert buzz.env.significant == j
assert buzz.env.significant == 42
def test_thread_pool():
with buzz.Env(significant=42):
with ThreadPoolExecutor(10) as ex:
it = ex.map(
work,
range(100),
)
list(it)
|
from django import forms
from .models import Transaction
class TransactionForm(forms.Form):
name = forms.CharField(label='Your First name', max_length=20)
surname = forms.CharField(label='Your Last name', max_length=20)
email = forms.EmailField(label='Your e-mail', max_length=20)
    price_range = forms.IntegerField(max_value=5, min_value=0)
    preferences = forms.CharField(label="Your Preferences", max_length=1000)
    deals = forms.IntegerField(max_value=1, min_value=0)  # boolean flag: 0 or 1
    location = forms.CharField(label="Your Location", max_length=200, initial="Mclean, Virginia")
# listing_date = forms.DateTimeField()
    def __init__(self, *args, **kwargs):
        super(TransactionForm, self).__init__(*args, **kwargs)
        # these fields are optional
        for key in ('name', 'surname', 'email', 'deals'):
            self.fields[key].required = False
|
import warnings
import re
from xigt.mixins import (
XigtContainerMixin,
XigtAttributeMixin
)
from xigt.errors import XigtError
# name_re approximately follows the XML 1.0 spec for Name productions,
# so long as the re character classes correspond to XML productions.
# This also excludes colons in the name (because of potential trouble
# with xml processors treating them as namespace delimiters)
# http://www.w3.org/TR/2000/WD-xml-2e-20000814#NT-Name
name_re = re.compile(r'^[^\W\d][-.\w]*$')
class XigtMetadataMixin(object):
"""
Enables the management of metadata.
"""
def __init__(self, metadata=None):
self._md = XigtContainerMixin(container=self, contained_type=Metadata)
if metadata is not None:
self.metadata = metadata
def __eq__(self, other):
try:
return self._md == other._md
except AttributeError:
return False
@property
def metadata(self):
return self._md
@metadata.setter
def metadata(self, value):
if isinstance(value, Metadata):
raise XigtError('The metadata attribute must be a sequence '
'of Metadata objects.')
self._md.clear()
self._md.extend(value)
# possibly pending deprecation
def get_meta(self, key, conditions=None, default=None, inherit=True):
if conditions is None:
conditions = []
metas = []
for metadata in self.metadata:
if metadata.type != 'xigt-meta':
continue
for meta in metadata.metas:
if meta.type == key and all(c(meta) for c in conditions):
metas.append(meta)
if metas:
return metas
elif inherit and hasattr(self, '_parent') and self._parent is not None:
return self._parent.get_meta(key, conditions, default, inherit)
else:
return default
def metadata_text_warning():
warnings.warn(
'Metadata.text is deprecated; use Metadata.metas instead.',
DeprecationWarning
)
class Metadata(XigtContainerMixin, XigtAttributeMixin):
"""
A container for metadata on XigtCorpus, Igt, or Tier objects.
Extensions may place constraints on the allowable metadata.
"""
def __init__(self, id=None, type=None, attributes=None,
text=None, metas=None, namespace=None, nsmap=None):
XigtContainerMixin.__init__(self, contained_type=Meta)
XigtAttributeMixin.__init__(
self, id=id, type=type, attributes=attributes,
namespace=namespace, nsmap=nsmap
)
if text is not None:
metadata_text_warning()
if metas is not None:
raise XigtError(
'text and metas cannot both be specified.'
)
if isinstance(text, str):
warnings.warn(
'String values of Metadata are deprecated; '
'it will be put in an untyped Meta object.',
DeprecationWarning
)
text = [Meta(text=text)]
metas = text
self.extend(metas or [])
def __repr__(self):
return '<Metadata object (id: {}) with {} Metas at {}>'.format(
str(self.id or '--'), len(self), str(id(self))
)
def __eq__(self, other):
return (
XigtContainerMixin.__eq__(self, other)
and XigtAttributeMixin.__eq__(self, other)
)
@property
def metas(self):
return list(self)
@metas.setter
def metas(self, value):
self.clear()
self.extend(value or [])
# deprecated properties
@property
def text(self):
metadata_text_warning()
return self.metas
@text.setter
def text(self, value):
metadata_text_warning()
self.metas = value
class Meta(XigtContainerMixin, XigtAttributeMixin):
def __init__(self, id=None, type=None, attributes=None, text=None,
children=None, metadata=None, namespace=None, nsmap=None):
XigtContainerMixin.__init__(self, contained_type=MetaChild)
XigtAttributeMixin.__init__(
self, id=id, type=type, attributes=attributes,
namespace=namespace, nsmap=nsmap
)
self._parent = metadata
self.text = text
self.extend(children or [])
def __repr__(self):
return '<Meta object (id: {}) at {}>'.format(
str(self.id or '--'), str(id(self))
)
def __eq__(self, other):
try:
return (
self.text == other.text
and XigtContainerMixin.__eq__(self, other)
and XigtAttributeMixin.__eq__(self, other)
)
except AttributeError:
return False
@property
def children(self):
return list(self)
@children.setter
def children(self, value):
self.clear()
self.extend(value or [])
class MetaChild(XigtContainerMixin, XigtAttributeMixin):
def __init__(self, name, attributes=None, text=None,
children=None, parent=None, namespace=None, nsmap=None):
XigtContainerMixin.__init__(self, contained_type=MetaChild)
XigtAttributeMixin.__init__(
self, id=None, type=None, attributes=attributes,
namespace=namespace, nsmap=nsmap
)
if not name_re.match(name):
raise ValueError('Invalid name for MetaChild: {}'.format(name))
self.name = name
self._parent = parent
self.text = text
self.extend(children or [])
def __repr__(self):
return '<MetaChild object (name: {}) at {}>'.format(
str(self.name or '--'), str(id(self))
)
def __eq__(self, other):
try:
return (
self.name == other.name
and self.text == other.text
and XigtContainerMixin.__eq__(self, other)
and XigtAttributeMixin.__eq__(self, other)
)
except AttributeError:
return False
@property
def children(self):
return list(self)
@children.setter
def children(self, value):
self.clear()
self.extend(value or [])
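# A small construction sketch (uses only the signatures defined above; the
# type and text values are illustrative):
#
#   md = Metadata(type='xigt-meta', metas=[Meta(type='language', text='eng')])
#   md.metas[0].text  # -> 'eng'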
|
##########################################################################
#
# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import imath
import os
import IECore
import Gaffer
import GafferUI
##########################################################################
# MetadataValueWidgets. These display metadata values, allowing the user
# to edit them.
##########################################################################
class MetadataWidget( GafferUI.Widget ) :
def __init__( self, topLevelWidget, key, target = None, defaultValue = None, **kw ) :
GafferUI.Widget.__init__( self, topLevelWidget, **kw )
self.__key = key
self.__target = None
self.__defaultValue = defaultValue
self.setTarget( target )
def setTarget( self, target ) :
assert( isinstance( target, ( Gaffer.Node, Gaffer.Plug, type( None ) ) ) )
self.__target = target
self.setEnabled( self.__target is not None )
if isinstance( self.__target, Gaffer.Node ) :
self.__metadataChangedConnection = Gaffer.Metadata.nodeValueChangedSignal( self.__target ).connect(
Gaffer.WeakMethod( self.__metadataChanged ),
scoped = True
)
elif isinstance( self.__target, Gaffer.Plug ) :
self.__metadataChangedConnection = Gaffer.Metadata.plugValueChangedSignal( self.__target.node() ).connect(
Gaffer.WeakMethod( self.__metadataChanged ),
scoped = True
)
else :
self.__metadataChangedConnection = None
self.__update()
def getTarget( self ) :
return self.__target
def setKey( self, key ) :
if key == self.__key :
return
self.__key = key
self.__update()
	def getKey( self ) :
		return self.__key
def defaultValue( self ) :
return self.__defaultValue
## Must be implemented in derived classes to update
# the widget from the value.
def _updateFromValue( self, value ) :
raise NotImplementedError
## Must be called by derived classes to update
# the Metadata value when the widget value changes.
def _updateFromWidget( self, value ) :
if self.__target is None :
return
with Gaffer.UndoScope( self.__target.ancestor( Gaffer.ScriptNode ) ) :
Gaffer.Metadata.registerValue( self.__target, self.__key, value )
## May be called by derived classes to deregister the
# metadata value.
def _deregisterValue( self ) :
if self.__target is None :
return
with Gaffer.UndoScope( self.__target.ancestor( Gaffer.ScriptNode ) ) :
Gaffer.Metadata.deregisterValue( self.__target, self.__key )
def __update( self ) :
if self.__target is None :
self._updateFromValue( self.defaultValue() )
return
v = Gaffer.Metadata.value( self.__target, self.__key )
if v is None :
k = self.__fallbackKey( self.__key )
if k is not None :
v = Gaffer.Metadata.value( self.__target, k )
self._updateFromValue( v if v is not None else self.defaultValue() )
def __metadataChanged( self, target, key, reason ) :
if key == self.__key and target == self.__target :
self.__update()
@staticmethod
def __fallbackKey( k ) :
for oldPrefix, newPrefix in [
( "pathPlugValueWidget:", "path:" ),
( "fileSystemPathPlugValueWidget:", "fileSystemPath:" ),
] :
if k.startswith( newPrefix ) :
return k.replace( newPrefix, oldPrefix )
return None
class BoolMetadataWidget( MetadataWidget ) :
def __init__( self, key, target = None, defaultValue = False, **kw ) :
self.__boolWidget = GafferUI.BoolWidget()
MetadataWidget.__init__( self, self.__boolWidget, key, target, defaultValue = defaultValue, **kw )
self.__boolWidget.stateChangedSignal().connect(
Gaffer.WeakMethod( self.__stateChanged ), scoped = False
)
def _updateFromValue( self, value ) :
self.__boolWidget.setState( value )
def __stateChanged( self, *unused ) :
self._updateFromWidget( self.__boolWidget.getState() )
class StringMetadataWidget( MetadataWidget ) :
def __init__( self, key, target = None, defaultValue = "", acceptEmptyString = True, **kw ) :
self.__textWidget = GafferUI.TextWidget()
MetadataWidget.__init__( self, self.__textWidget, key, target, defaultValue = defaultValue, **kw )
self.__acceptEmptyString = acceptEmptyString
self.__textWidget.editingFinishedSignal().connect(
Gaffer.WeakMethod( self.__editingFinished ), scoped = False
)
def textWidget( self ) :
return self.__textWidget
def _updateFromValue( self, value ) :
self.__textWidget.setText( str( value ) )
def __editingFinished( self, *unused ) :
text = self.__textWidget.getText()
if text or self.__acceptEmptyString :
self._updateFromWidget( text )
else :
self._deregisterValue()
class MultiLineStringMetadataWidget( MetadataWidget ) :
def __init__( self, key, target = None, defaultValue = "", role = GafferUI.MultiLineTextWidget.Role.Text, **kw ) :
self.__textWidget = GafferUI.MultiLineTextWidget( role = role )
MetadataWidget.__init__( self, self.__textWidget, key, target, defaultValue = defaultValue, **kw )
self.__textWidget.editingFinishedSignal().connect(
Gaffer.WeakMethod( self.__editingFinished ), scoped = False
)
def textWidget( self ) :
return self.__textWidget
def _updateFromValue( self, value ) :
self.__textWidget.setText( str( value ) )
def __editingFinished( self, *unused ) :
self._updateFromWidget( self.__textWidget.getText() )
class ColorSwatchMetadataWidget( MetadataWidget ) :
def __init__( self, key, target = None, defaultValue = imath.Color4f( 0, 0, 0, 0 ), **kw ) :
self.__swatch = GafferUI.ColorSwatch( useDisplayTransform = False )
MetadataWidget.__init__( self, self.__swatch, key, target, defaultValue = defaultValue, **kw )
self.__swatch._qtWidget().setFixedHeight( 18 )
self.__swatch._qtWidget().setMaximumWidth( 40 )
self.__swatch.buttonReleaseSignal().connect( Gaffer.WeakMethod( self.__buttonRelease ), scoped = False )
def _updateFromValue( self, value ) :
self.__swatch.setColor( value )
def __buttonRelease( self, swatch, event ) :
if event.button != event.Buttons.Left :
return False
dialogue = GafferUI.ColorChooserDialogue( color = self.__swatch.getColor(), useDisplayTransform = False )
color = dialogue.waitForColor( parentWindow = self.ancestor( GafferUI.Window ) )
if color is not None :
self._updateFromWidget( color )
class MenuMetadataWidget( MetadataWidget ) :
def __init__( self, key, labelsAndValues, target = None, defaultValue = None, **kw ) :
self.__menuButton = GafferUI.MenuButton(
menu = GafferUI.Menu( Gaffer.WeakMethod( self.__menuDefinition ) )
)
self.__labelsAndValues = labelsAndValues
self.__currentValue = None
MetadataWidget.__init__( self, self.__menuButton, key, target, defaultValue = defaultValue, **kw )
def _updateFromValue( self, value ) :
self.__currentValue = value
buttonText = str( value )
for label, value in self.__labelsAndValues :
if value == self.__currentValue :
buttonText = label
break
self.__menuButton.setText( buttonText )
def __menuDefinition( self ) :
result = IECore.MenuDefinition()
for label, value in self.__labelsAndValues :
result.append(
"/" + label,
{
"command" : functools.partial( Gaffer.WeakMethod( self.__setValue ), value = value ),
"checkBox" : value == self.__currentValue
}
)
return result
def __setValue( self, unused, value ) :
self._updateFromWidget( value )
class FileSystemPathMetadataWidget( MetadataWidget ) :
def __init__( self, key, target = None, acceptEmptyString = True, defaultValue = "", **kw ) :
self.__row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 )
self.__path = Gaffer.FileSystemPath()
self.__pathWidget = GafferUI.PathWidget( self.__path )
MetadataWidget.__init__( self, self.__row, key, target, defaultValue = defaultValue, **kw )
self.__row.append( self.__pathWidget )
button = GafferUI.Button( image = "pathChooser.png", hasFrame=False )
button.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ), scoped = False )
self.__row.append( button )
self.__acceptEmptyString = acceptEmptyString
self.__pathWidget.editingFinishedSignal().connect(
Gaffer.WeakMethod( self.__editingFinished ), scoped = False
)
def _updateFromValue( self, value ) :
self.__path.setFromString( str( value ) )
def __editingFinished( self, *unused ) :
text = str( self.__path )
if text or self.__acceptEmptyString :
self._updateFromWidget( text )
else :
self._deregisterValue()
def __buttonClicked( self, widget ) :
path = str( self.__path )
path = path if os.path.exists( path ) else os.path.expanduser( "~" )
dialogue = GafferUI.PathChooserDialogue( Gaffer.FileSystemPath( path ) )
chosenPath = dialogue.waitForPath( parentWindow = self.ancestor( GafferUI.Window ) )
if chosenPath is not None :
self.__path.setFromString( str( chosenPath ) )
self.__editingFinished()
|
# encoding: utf-8
#
# munki.py
# Managed Software Center
#
# Created by Greg Neagle on 2/11/10.
# Copyright 2010-2014 Greg Neagle.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''munki-specific code for use with Managed Software Center'''
import os
import stat
import subprocess
import random
import sys
import FoundationPlist
import msclog
import Foundation
from Foundation import NSDate
from Foundation import NSFileManager
from Foundation import CFPreferencesCopyAppValue
from Foundation import CFPreferencesAppSynchronize
INSTALLATLOGOUTFILE = "/private/tmp/com.googlecode.munki.installatlogout"
UPDATECHECKLAUNCHFILE = \
"/private/tmp/.com.googlecode.munki.updatecheck.launchd"
INSTALLWITHOUTLOGOUTFILE = \
"/private/tmp/.com.googlecode.munki.managedinstall.launchd"
def call(cmd):
'''Convenience function; works around an issue with subprocess.call
in PyObjC in Snow Leopard'''
proc = subprocess.Popen(cmd, bufsize=1, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(output, err) = proc.communicate()
return proc.returncode
def osascript(osastring):
"""Wrapper to run AppleScript commands"""
cmd = ['/usr/bin/osascript', '-e', osastring]
proc = subprocess.Popen(cmd, shell=False, bufsize=1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
if proc.returncode != 0:
print >> sys.stderr, 'Error: ', err
if out:
return str(out).decode('UTF-8').rstrip('\n')
def restartNow():
'''Trigger a restart'''
osascript('tell application "System Events" to restart')
BUNDLE_ID = u'ManagedInstalls'
def reload_prefs():
"""Uses CFPreferencesAppSynchronize(BUNDLE_ID)
to make sure we have the latest prefs. Call this
if another process may have modified ManagedInstalls.plist,
this needs to be run after returning from MunkiStatus"""
CFPreferencesAppSynchronize(BUNDLE_ID)
DEFAULT_GUI_CACHE_AGE_SECS = 600
def pref(pref_name):
"""Return a preference. Since this uses CFPreferencesCopyAppValue,
Preferences can be defined several places. Precedence is:
- MCX
- ~/Library/Preferences/ManagedInstalls.plist
- /Library/Preferences/ManagedInstalls.plist
- default_prefs defined here.
"""
default_prefs = {
'ManagedInstallDir': '/Library/Managed Installs',
'InstallAppleSoftwareUpdates': False,
'AppleSoftwareUpdatesOnly': False,
'ShowRemovalDetail': False,
'InstallRequiresLogout': False,
'CheckResultsCacheSeconds': DEFAULT_GUI_CACHE_AGE_SECS,
}
pref_value = CFPreferencesCopyAppValue(pref_name, BUNDLE_ID)
    if pref_value is None:
pref_value = default_prefs.get(pref_name)
#if type(pref_value).__name__ in ['__NSCFDate', '__NSDate', '__CFDate']:
# convert NSDate/CFDates to strings
#pref_value = str(pref_value)
return pref_value
WRITEABLE_SELF_SERVICE_MANIFEST_PATH = "/Users/Shared/.SelfServeManifest"
def readSelfServiceManifest():
'''Read the SelfServeManifest if it exists'''
# read our working copy if it exists
SelfServeManifest = WRITEABLE_SELF_SERVICE_MANIFEST_PATH
if not os.path.exists(SelfServeManifest):
# no working copy, look for system copy
managedinstallbase = pref('ManagedInstallDir')
SelfServeManifest = os.path.join(managedinstallbase, "manifests",
"SelfServeManifest")
if os.path.exists(SelfServeManifest):
try:
return FoundationPlist.readPlist(SelfServeManifest)
except FoundationPlist.NSPropertyListSerializationException:
return {}
else:
return {}
def writeSelfServiceManifest(optional_install_choices):
'''Write out our self-serve manifest
so managedsoftwareupdate can use it'''
usermanifest = WRITEABLE_SELF_SERVICE_MANIFEST_PATH
try:
FoundationPlist.writePlist(optional_install_choices, usermanifest)
except FoundationPlist.FoundationPlistException:
pass
def userSelfServiceChoicesChanged():
'''Is WRITEABLE_SELF_SERVICE_MANIFEST_PATH different from
the 'system' version of this file?'''
if not os.path.exists(WRITEABLE_SELF_SERVICE_MANIFEST_PATH):
return False
user_choices = FoundationPlist.readPlist(WRITEABLE_SELF_SERVICE_MANIFEST_PATH)
managedinstallbase = pref('ManagedInstallDir')
system_path = os.path.join(managedinstallbase, "manifests",
"SelfServeManifest")
if not os.path.exists(system_path):
return True
system_choices = FoundationPlist.readPlist(system_path)
return (user_choices != system_choices)
def getRemovalDetailPrefs():
'''Returns preference to control display of removal detail'''
return pref('ShowRemovalDetail')
def installRequiresLogout():
'''Returns preference to force logout for all installs'''
return pref('InstallRequiresLogout')
def getInstallInfo():
'''Returns the dictionary describing the managed installs and removals'''
managedinstallbase = pref('ManagedInstallDir')
plist = {}
installinfo = os.path.join(managedinstallbase, 'InstallInfo.plist')
if os.path.exists(installinfo):
try:
plist = FoundationPlist.readPlist(installinfo)
except FoundationPlist.NSPropertyListSerializationException:
pass
return plist
def munkiUpdatesContainAppleItems():
"""Return True if there are any Apple items in the list of updates"""
installinfo = getInstallInfo()
# check managed_installs
for item in installinfo.get('managed_installs', []):
if item.get('apple_item'):
return True
# check removals
for item in installinfo.get('removals', []):
if item.get('apple_item'):
return True
return False
def thereAreUpdatesToBeForcedSoon(hours=72):
'''Return True if any updates need to be installed within the next
X hours, false otherwise'''
installinfo = getInstallInfo().get('managed_installs', [])
installinfo.extend(getAppleUpdates().get('AppleUpdates', []))
if installinfo:
now = NSDate.date()
now_xhours = NSDate.dateWithTimeIntervalSinceNow_(hours * 3600)
for item in installinfo:
force_install_after_date = item.get('force_install_after_date')
if force_install_after_date:
try:
force_install_after_date = discardTimeZoneFromDate(
force_install_after_date)
if now_xhours >= force_install_after_date:
return True
except BadDateError:
# some issue with the stored date
pass
return False
def earliestForceInstallDate(installinfo=None):
"""Check installable packages for force_install_after_dates
Returns None or earliest force_install_after_date converted to local time
"""
earliest_date = None
if not installinfo:
installinfo = getInstallInfo().get('managed_installs', [])
installinfo.extend(getAppleUpdates().get('AppleUpdates', []))
for install in installinfo:
this_force_install_date = install.get('force_install_after_date')
if this_force_install_date:
try:
this_force_install_date = discardTimeZoneFromDate(this_force_install_date)
if not earliest_date or this_force_install_date < earliest_date:
earliest_date = this_force_install_date
except BadDateError:
# some issue with the stored date
pass
return earliest_date
class BadDateError(Exception):
pass
def discardTimeZoneFromDate(the_date):
"""Input: NSDate object
Output: NSDate object with same date and time as the UTC.
In Los Angeles (PDT), '2011-06-20T12:00:00Z' becomes
'2011-06-20 12:00:00 -0700'.
In New York (EDT), it becomes '2011-06-20 12:00:00 -0400'.
"""
try:
# get local offset
offset = the_date.descriptionWithCalendarFormat_timeZone_locale_('%z', None, None)
except:
raise BadDateError()
hour_offset = int(offset[0:3])
minute_offset = int(offset[0] + offset[3:])
seconds_offset = 60 * 60 * hour_offset + 60 * minute_offset
# return new NSDate minus local_offset
return the_date.dateByAddingTimeInterval_(-seconds_offset)
def stringFromDate(nsdate):
"""Input: NSDate object
Output: unicode object, date and time formatted per system locale.
"""
df = Foundation.NSDateFormatter.alloc().init()
df.setFormatterBehavior_(Foundation.NSDateFormatterBehavior10_4)
df.setDateStyle_(Foundation.kCFDateFormatterLongStyle)
df.setTimeStyle_(Foundation.kCFDateFormatterShortStyle)
return unicode(df.stringForObjectValue_(nsdate))
def shortRelativeStringFromDate(nsdate):
"""Input: NSDate object
Output: unicode object, date and time formatted per system locale.
"""
df = Foundation.NSDateFormatter.alloc().init()
df.setDateStyle_(Foundation.kCFDateFormatterShortStyle)
df.setTimeStyle_(Foundation.kCFDateFormatterShortStyle)
df.setDoesRelativeDateFormatting_(True)
return unicode(df.stringFromDate_(nsdate))
def startUpdateCheck(suppress_apple_update_check=False):
'''Does launchd magic to run managedsoftwareupdate as root.'''
try:
if not os.path.exists(UPDATECHECKLAUNCHFILE):
plist = {}
plist['SuppressAppleUpdateCheck'] = suppress_apple_update_check
try:
FoundationPlist.writePlist(plist, UPDATECHECKLAUNCHFILE)
except FoundationPlist.FoundationPlistException:
# problem creating the trigger file
return 1
return 0
except (OSError, IOError):
return 1
def getAppleUpdates():
'''Returns any available Apple updates'''
managedinstallbase = pref('ManagedInstallDir')
plist = {}
appleUpdatesFile = os.path.join(managedinstallbase, 'AppleUpdates.plist')
if (os.path.exists(appleUpdatesFile) and
(pref('InstallAppleSoftwareUpdates') or
pref('AppleSoftwareUpdatesOnly'))):
try:
plist = FoundationPlist.readPlist(appleUpdatesFile)
except FoundationPlist.NSPropertyListSerializationException:
pass
return plist
def humanReadable(kbytes):
"""Returns sizes in human-readable units."""
units = [(" KB", 2**10), (" MB", 2**20), (" GB", 2**30), (" TB", 2**40)]
    for suffix, limit in units:
        if kbytes > limit:
            continue
        else:
            return str(round(kbytes/float(limit/2**10), 1)) + suffix
    # larger than the biggest unit: fall back to TB so we never return None
    return str(round(kbytes/float(2**30), 1)) + " TB"
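# e.g. humanReadable(2048) returns '2.0 MB' (the input is in kilobytes)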
def trimVersionString(version_string):
"""Trims all lone trailing zeros in the version string after major/minor.
Examples:
10.0.0.0 -> 10.0
10.0.0.1 -> 10.0.0.1
10.0.0-abc1 -> 10.0.0-abc1
10.0.0-abc1.0 -> 10.0.0-abc1
"""
    if not version_string:
return ''
version_parts = version_string.split('.')
# strip off all trailing 0's in the version, while over 2 parts.
while len(version_parts) > 2 and version_parts[-1] == '0':
del(version_parts[-1])
return '.'.join(version_parts)
def getconsoleuser():
'''Get current GUI user'''
from SystemConfiguration import SCDynamicStoreCopyConsoleUser
cfuser = SCDynamicStoreCopyConsoleUser( None, None, None )
return cfuser[0]
def currentGUIusers():
'''Gets a list of GUI users by parsing the output of /usr/bin/who'''
gui_users = []
proc = subprocess.Popen("/usr/bin/who", shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(output, err) = proc.communicate()
lines = str(output).splitlines()
for line in lines:
if "console" in line:
parts = line.split()
gui_users.append(parts[0])
return gui_users
def logoutNow():
'''Uses oscascript to run an AppleScript
to tell loginwindow to logout.
Ugly, but it works.'''
script = """
ignoring application responses
tell application "loginwindow"
«event aevtrlgo»
end tell
end ignoring
"""
cmd = ['/usr/bin/osascript', '-e', script]
result = call(cmd)
def logoutAndUpdate():
'''Touch a flag so the process that runs after
logout knows it's OK to install everything'''
try:
if not os.path.exists(INSTALLATLOGOUTFILE):
open(INSTALLATLOGOUTFILE, 'w').close()
logoutNow()
except (OSError, IOError):
return 1
def clearLaunchTrigger():
'''Clear the trigger file that fast-launches us at loginwindow.
typically because we have been launched in statusmode at the
loginwindow to perform a logout-install.'''
try:
if os.path.exists(INSTALLATLOGOUTFILE):
os.unlink(INSTALLATLOGOUTFILE)
except (OSError, IOError):
return 1
def justUpdate():
    '''Trigger managedinstaller via launchd KeepAlive path trigger.
    We touch a file that launchd is watching;
    launchd, in turn,
    launches managedsoftwareupdate --installwithnologout as root'''
try:
if not os.path.exists(INSTALLWITHOUTLOGOUTFILE):
open(INSTALLWITHOUTLOGOUTFILE, 'w').close()
return 0
except (OSError, IOError):
return 1
def pythonScriptRunning(scriptname):
"""Returns Process ID for a running python script"""
cmd = ['/bin/ps', '-eo', 'pid=,command=']
proc = subprocess.Popen(cmd, shell=False, bufsize=1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, dummy_err) = proc.communicate()
mypid = os.getpid()
lines = str(out).splitlines()
for line in lines:
try:
(pid, process) = line.split(None, 1)
except ValueError:
# funky process line, so we'll skip it
pass
else:
args = process.split()
try:
# first look for Python processes
if (args[0].find('MacOS/Python') != -1 or
args[0].find('python') != -1):
# look for first argument being scriptname
if args[1].find(scriptname) != -1:
try:
if int(pid) != int(mypid):
return pid
except ValueError:
# pid must have some funky characters
pass
except IndexError:
pass
# if we get here we didn't find a Python script with scriptname
# (other than ourselves)
return 0
def getRunningProcessesWithUsers():
"""Returns a list of usernames and paths of running processes"""
proc_list = []
    proc = subprocess.Popen(['/bin/ps', '-axo', 'user=,comm='],
shell=False, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(output, dummy_err) = proc.communicate()
if proc.returncode == 0:
        proc_lines = output.splitlines()
LaunchCFMApp = ('/System/Library/Frameworks/Carbon.framework'
'/Versions/A/Support/LaunchCFMApp')
saw_launch_cfmapp = False
for line in proc_lines:
# split into max two parts on whitespace
parts = line.split(None, 1)
if len(parts) > 1 and parts[1] == LaunchCFMApp:
saw_launch_cfmapp = True
elif len(parts) > 1:
info = {'user': parts[0],
'pathname': parts[1]}
proc_list.append(info)
if saw_launch_cfmapp:
# look at the process table again with different options
# and get the arguments for LaunchCFMApp instances
            proc = subprocess.Popen(['/bin/ps', '-axo', 'user=,command='],
shell=False, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(output, dummy_err) = proc.communicate()
if proc.returncode == 0:
                proc_lines = output.splitlines()
for line in proc_lines:
# split into max three parts on whitespace
parts = line.split(None, 2)
if len(parts) > 2 and parts[1] == LaunchCFMApp:
info = {'user': parts[0],
'pathname': parts[2]}
proc_list.append(info)
return proc_list
else:
return []
def getRunningBlockingApps(appnames):
"""Given a list of app names, return a list of dicts for apps in the list
that are running. Each dict contains username, pathname, display_name"""
proc_list = getRunningProcessesWithUsers()
running_apps = []
filemanager = NSFileManager.alloc().init()
for appname in appnames:
matching_items = []
if appname.startswith('/'):
# search by exact path
matching_items = [item for item in proc_list
if item['pathname'] == appname]
elif appname.endswith('.app'):
# search by filename
matching_items = [item for item in proc_list
if '/'+ appname + '/Contents/MacOS/' in item['pathname']]
else:
# check executable name
matching_items = [item for item in proc_list
if item['pathname'].endswith('/' + appname)]
if not matching_items:
# try adding '.app' to the name and check again
matching_items = [item for item in proc_list
if '/' + appname + '.app/Contents/MacOS/' in item['pathname']]
#matching_items = set(matching_items)
for item in matching_items:
path = item['pathname']
while '/Contents/' in path or path.endswith('/Contents'):
path = os.path.dirname(path)
# ask NSFileManager for localized name since end-users
# will see this name
item['display_name'] = filemanager.displayNameAtPath_(path)
running_apps.append(item)
return running_apps
def getPowerInfo():
'''Returns power info in a dictionary'''
power_dict = {}
power_dict['PowerSource'] = 'Unknown Power'
power_dict['BatteryCharge'] = -1
power_dict['ChargingStatus'] = 'unknown'
power_dict['TimeRemaining'] = -1
cmd = ['/usr/bin/pmset', '-g', 'ps']
proc = subprocess.Popen(cmd, bufsize=-1, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(output, dummy_error) = proc.communicate()
if proc.returncode:
# handle error
return power_dict
#
# output from `pmset -g ps` looks like:
#
# Currently drawing from 'AC Power'
# -InternalBattery-0 100%; charged; 0:00 remaining
#
# or
#
# Currently drawing from 'AC Power'
# -InternalBattery-0 98%; charging; 0:08 remaining
#
# or
#
# Currently drawing from 'Battery Power'
# -InternalBattery-0 100%; discharging; (no estimate)
#
# or
#
# Currently drawing from 'Battery Power'
# -InternalBattery-0 100%; discharging; 5:55 remaining
#
    lines = output.splitlines()
    if 'AC Power' in lines[0]:
        power_dict['PowerSource'] = 'AC Power'
        power_dict['ChargingStatus'] = 'not applicable'
    elif 'Battery Power' in lines[0]:
        power_dict['PowerSource'] = 'Battery Power'
    if len(lines) > 1:
        part = lines[1].split()
try:
power_dict['BatteryCharge'] = int(part[1].rstrip('%;'))
except (IndexError, ValueError):
pass
try:
power_dict['ChargingStatus'] = part[2].rstrip(';')
except IndexError:
pass
try:
time_remaining_text = part[3]
time_part = time_remaining_text.split(':')
minutes = 60 * int(time_part[0]) + int(time_part[1])
power_dict['TimeRemaining'] = minutes
except (IndexError, ValueError):
pass
return power_dict
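# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how the helpers above fit together; assumes macOS with
# PyObjC available (NSFileManager is imported elsewhere in this module), and
# the app names below are made-up examples.
if __name__ == '__main__':
    power = getPowerInfo()
    print('Power source: %s (battery at %s%%)'
          % (power['PowerSource'], power['BatteryCharge']))
    # Report a couple of example apps if they are currently running.
    for app in getRunningBlockingApps(['Safari.app', 'Keynote.app']):
        print('%s is running %s' % (app['user'], app['display_name']))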
|
'''
Created on Oct 30, 2018
@author: yinyayun
'''
from loader.hook.DynamicLoadByHooks import DynamicLoadByHooks
if __name__ == '__main__':
base = '../package/demo'
dynamic = DynamicLoadByHooks(base)
print("-----create 001.ImportDemo")
ins1 = dynamic.create_ins('001', 'ImportDemo', 'ImportDemo')
print(ins1.zz())
print("-----create 002.ImportDemo")
ins2 = dynamic.create_ins('002', 'ImportDemo', 'ImportDemo')
print(ins2.zz())
print('run ins1:', ins1.zz())
print('run ins2:', ins2.zz())
|
import argparse
class VerboseStore(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError('nargs not allowed')
super(VerboseStore, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
print('Here I am, setting the '
'values %r for the %r option...' % (values, option_string))
setattr(namespace, self.dest, values)
my_parser = argparse.ArgumentParser()
my_parser.add_argument('-i', '--input', action=VerboseStore, type=int)
args = my_parser.parse_args()
print(vars(args))
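# Illustrative run (hypothetical file name `verbose_store.py`):
#   $ python verbose_store.py -i 42
#   Here I am, setting the values 42 for the '-i' option...
#   {'input': 42}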
|
# author: Bartlomiej "furas" Burek (https://blog.furas.pl)
# date: 2022.03.11
# [Scraping Investopedia using selenium python - Stack Overflow](https://stackoverflow.com/questions/71443533/scraping-investopedia-using-selenium-python/71444720#71444720)
from selenium import webdriver
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
#from webdriver_manager.firefox import GeckoDriverManager
import time
# --- functions --- # PEP8: `lower_case_names`
def login():
driver.get(r'https://www.investopedia.com/simulator/home.aspx')
driver.implicitly_wait(10)
driver.find_element(By.ID, 'username').send_keys('xxx@xxx.com')
time.sleep(0.5)
driver.find_element(By.ID, 'password').send_keys('hello-world')
time.sleep(0.5)
driver.find_element(By.ID, 'login').click()
def get_trade_page():
url = 'https://www.investopedia.com/simulator/trade/stocks'
driver.get(url)
def set_stock(ticker):
driver.find_element(By.XPATH, '//input[@placeholder="Look up Symbol/Company Name"]').send_keys(ticker)
#driver.find_element(By.XPATH, '//div[@role="option"]').click()
option = driver.find_element(By.XPATH, '//div[@role="option"]')
driver.execute_script('arguments[0].click()', option)
# --- main ---
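# Note: Selenium 4 deprecates `executable_path`; the newer equivalent is
# webdriver.Chrome(service=Service(ChromeDriverManager().install()))
# with `from selenium.webdriver.chrome.service import Service`.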
driver = webdriver.Chrome(executable_path=ChromeDriverManager().install())
#driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())
login()
get_trade_page()
set_stock('hvt')
#driver.close()
|
zhihu_headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
'referer': 'https://www.zhihu.com/',
    'cookie': 'REPLACE_WITH_A_REAL_COOKIE'  # placeholder; the original Chinese note says "this will cause an error"
}
|
"""
title : node.py
description : Model of the node
author : Amanda Garcia-Garcia
version : 0
python_version : 3.6.1
"""
import Crypto.Random
import binascii
from db import db
from Crypto.Hash import SHA, SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5, PKCS1_PSS
from flask import request
NodeBlockchain = db.Table('nodes_blockchains',
db.Column('id', db.Integer, primary_key=True),
db.Column('node_id', db.Integer, db.ForeignKey('nodes.id')),
db.Column('blockchain_id', db.Integer,
db.ForeignKey('blockchains.id_blockchain')))
class NodeModel(db.Model):
__tablename__ = 'nodes'
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
ip = db.Column(db.String, nullable=False)
#password = db.Column(db.String)
nonce_challenge = db.Column(db.String)
challenge_created = db.Column(db.Boolean)
node_token = db.Column(db.String)
node_refresh_token = db.Column(db.String)
node_private_key = db.Column(db.String)
node_public_key = db.Column(db.String)
blockchains = db.relationship("BlockchainModel", secondary=NodeBlockchain,
backref=db.backref('nodesin', lazy='dynamic'))
def create_key_pair(self):
random_gen = Crypto.Random.new().read
node_private_key = RSA.generate(1024, random_gen)
node_public_key = node_private_key.publickey()
self.node_public_key = binascii.hexlify(node_public_key.exportKey(format='DER')).decode('ascii')
self.node_private_key = binascii.hexlify(node_private_key.exportKey(format='DER')).decode('ascii')
def sign_response(self, response):
"""
Sign transaction with private key
"""
private_key = RSA.importKey(binascii.unhexlify(self.node_private_key))
signer = PKCS1_v1_5.new(private_key)
h = SHA.new(str(response).encode('utf8'))
return binascii.hexlify(signer.sign(h)).decode('ascii')
@classmethod
def find_by_id(cls, _id):
return cls.query.filter_by(id=_id).first()
@classmethod
def find_by_ip(cls, _ip):
return cls.query.filter_by(ip=_ip).first()
@classmethod
def find_by_public_key(cls, node_public_key):
return cls.query.filter_by(node_public_key=node_public_key).first()
@classmethod
def nodes_in_a_blockchain_2(cls, blockchain_name):
result = db.session.query(cls.ip).filter(cls.blockchains.any(blockchain_name=blockchain_name)).all()
message = {'nodes': [ips[0] for ips in result]}
return message
@classmethod
def verify_public_key(cls, node_public_key):
query_result = cls.query.filter_by(node_public_key=node_public_key).first()
if query_result is None:
return False
else:
return True
def save_to_db(self):
db.session.add(self)
db.session.commit()
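# --- Illustrative verification sketch (not part of the original model) ---
# Shows how a peer could check sign_response() output against the stored hex
# public key, using the same PyCrypto primitives imported above.
def verify_response(node_public_key, response, signature_hex):
    # Rebuild the RSA key from its hex-encoded DER form.
    public_key = RSA.importKey(binascii.unhexlify(node_public_key))
    verifier = PKCS1_v1_5.new(public_key)
    # Hash the response exactly the way sign_response() does.
    h = SHA.new(str(response).encode('utf8'))
    return verifier.verify(h, binascii.unhexlify(signature_hex))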
|
from tests import ScraperTest
from recipe_scrapers.nihhealthyeating import NIHHealthyEating
# test recipe's URL
# https://healthyeating.nhlbi.nih.gov/recipedetail.aspx?linkId=17&cId=10&rId=247
class TestNIHHealthyEatingRecipesScraper(ScraperTest):
scraper_class = NIHHealthyEating
def test_host(self):
self.assertEqual(
'healthyeating.nhlbi.nih.gov',
self.harvester_class.host()
)
def test_title(self):
self.assertEqual(
self.harvester_class.title(),
'Vietnamese Fresh Spring Rolls (Gỏi Cuốn)'
)
def test_total_time(self):
self.assertEqual(
15,
self.harvester_class.total_time()
)
def test_yields(self):
self.assertEqual(
"8 serving(s)",
self.harvester_class.yields()
)
def test_ingredients(self):
self.assertCountEqual(
[
'1 C carrots, cut into long, thin strips',
'2 C bean sprouts',
'2 C cucumber, seeded and cut into long, thin strips',
'1 C minced scallions',
'½ C chopped fresh cilantro',
'¼ C chopped fresh mint',
'8 rice paper wrappers'
],
self.harvester_class.ingredients()
)
def test_instructions(self):
        self.assertEqual(
'Toss first six ingredients in a large bowl.\nSoak one rice paper wrapper in warm water until soft (1 to 2 minutes). Shake off excess water.\nPlace vegetable filling off-center on rice paper, and fold like an egg roll (tuck in the sides to keep the filling inside).\nRepeat with remaining vegetable filling and rice paper wrappers.\nOnce you have assembled all of the spring rolls, serve immediately.',
self.harvester_class.instructions()
)
|
# -*- coding: utf-8 -*-
"""
The socket api module.
======================
The module provides the SocketApi class, a thin wrapper around a socket.socket object.
"""
import socket
import struct
from . import protocol
from .utils import to_bytes
class SocketApi(object):
"""
    The API that handles socket operations:
* properly sending/receiving data.
* properly close the connection.
* verify if connection is alive.
* receive data with timeout.
"""
def __init__(self, sock):
"""
Instantiate the api.
:param sock: The socket underlay.
"""
        if not isinstance(sock, socket.socket):
            raise TypeError('Argument sock must be a socket, not {}'.format(sock.__class__.__name__))
self.__socket = sock
def send(self, data):
"""
Send data through socket.
:param data: The data content as bytes-like object.
"""
# Convert data to bytes.
data = to_bytes(data)
# Get the total length in bytes of the request.
        length = struct.pack('>Q', len(data))
        # Send the length of the request.
        self.__socket.send(length)
        # Actually send the request.
        self.__socket.sendall(data)
        # Receive socket ACK.
        self.__recv_ack()
def recv(self, timeout=None):
"""
Receive response from socket.
:param timeout: The max time in seconds the socket will listen.
:return: The response class.
"""
def reader():
"""
Read data from socket and return the result.
:return: The content in bytes.
"""
is_syn_request = True
while is_syn_request:
                # Read the length in bytes of the response.
                (length,) = struct.unpack('>Q', self.__socket.recv(8))
                # The amount of data in bytes.
                data = b''
                while length > len(data):
                    # Bytes left.
                    to_read = length - len(data)
                    # Append data to socket. The buffer size of the data received depends on how
                    # much data we have to receive. Once the data to receive is lower than the
                    # default buffer size, we will only receive the remaining data.
                    data += self.__socket.recv(to_read if to_read < self.get_socket_buffer()[1]
                                               else self.get_socket_buffer()[1])
if data.decode() == str(protocol.EXIT):
                    # Send ACK to socket.
self.__socket.sendall(struct.pack('>Q', protocol.ACK))
return None
if data.decode() != str(protocol.SYN):
# The request is not SYN.
is_syn_request = False
                # Send ACK to socket.
self.__socket.sendall(struct.pack('>Q', protocol.ACK))
return data
if timeout:
# Attempt to read content from socket with a timeout set.
content = self._wrap_timeout(reader, timeout)
            # Socket has timed out.
if not content:
return False
        else:
            # Read the content.
            content = reader()
        if content is None:
            # Peer sent EXIT; there is no payload to decode.
            return None
        return content.decode()
    def get_socket_buffer(self):
        """
        Return the socket buffer sizes.
        :return: A tuple with the send and receive buffer size.
"""
return (self.__socket.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF),
self.__socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF))
def close(self):
"""Close the socket connection."""
if self.is_alive():
exit_code = str(protocol.EXIT).encode()
            # Get the close-connection message length.
            length = struct.pack('>Q', len(exit_code))
            # Send the signal to close the connection.
            self.__socket.send(length)
            # Actually send the exit message.
            self.__socket.sendall(exit_code)
            # Should receive an acknowledgment.
            self.__recv_ack()
# Actually close the socket file descriptor.
self.__socket.close()
def is_alive(self):
"""
        Determine whether the socket connection is alive.
:return: A boolean value.
"""
def checker():
"""
Verify whether the socket connection is good.
"""
# Encode the SYN code.
            syn = str(protocol.SYN).encode()
            # Get the total length of the SYN request.
            length = struct.pack('>Q', len(syn))
            try:
                # Send the length of the request.
                self.__socket.send(length)
                # Actually send the request content.
                self.__socket.sendall(syn)
                # Receive acknowledgment.
                self.__recv_ack()
            except (BrokenPipeError, ConnectionError, AcknowledgeNotReceivedException):
return False
return True
return self._wrap_timeout(checker, 5)
def _wrap_timeout(self, function, timeout):
"""
Execute a socket operation with timeout.
:param function: The function to call.
:param timeout: The socket timeout in seconds.
:return: The content of operation. Returns false if socket timed out.
"""
self.__socket.settimeout(timeout)
try:
content = function()
except (socket.timeout, struct.error):
# Socket has timed out.
content = False
# Adjust socket to no timeout.
self.__socket.settimeout(None)
return content
    def __recv_ack(self):
        """Receive acknowledgment from socket."""
        # Receive socket acknowledgment.
        (ack,) = struct.unpack('>Q', self.__socket.recv(8))
        if ack != protocol.ACK:
            raise AcknowledgeNotReceivedException('Socket returned invalid ACK.')
class AcknowledgeNotReceivedException(socket.error):
def __init__(self, message):
self.message = message
def __repr__(self):
return str(self.message)
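# --- Illustrative framing demo (not part of the original module) ---
# A self-contained sketch of the 8-byte big-endian length prefix this class
# puts on the wire; the SYN/ACK/EXIT handshake from the `protocol` module is
# omitted here, and the payload is made up.
if __name__ == '__main__':
    left, right = socket.socketpair()
    payload = b'hello framing'
    # Length prefix followed by the payload, as in SocketApi.send().
    left.sendall(struct.pack('>Q', len(payload)) + payload)
    (length,) = struct.unpack('>Q', right.recv(8))
    print(right.recv(length))  # b'hello framing'
    left.close()
    right.close()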
|
import traceback
class ActionBase(object):
def __init__(self, *args, **kwargs):
pass
def _setup(self):
pass
def _execute(self):
pass
def _cleanup(self):
pass
def run(self):
try:
self._setup()
result = self._execute()
self._cleanup()
except:
result = False
traceback.print_exc()
        return result
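# --- Illustrative subclass (not part of the original file) ---
# Shows the template-method flow of ActionBase.run(): _setup, _execute,
# _cleanup, with the broad except turning any failure into False.
class EchoAction(ActionBase):
    def _setup(self):
        self.message = 'hello'
    def _execute(self):
        print(self.message)
        return True
if __name__ == '__main__':
    assert EchoAction().run() is True
|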
# pylint: disable=unused-variable
from cv2 import cv2
import numpy as np
import os
import time
import random
import sys
from tqdm import tqdm
import json
from core.match_func import sift
from core.match_func import match_template as match
import threading
class base_unit():
def __init__(self, device, templates):
self.device = device
self.templates = templates
self.screenshot = None
        self.screen_multi = int(self.get_screen(detect=True))
def get_screen(self, mode=0, detect=False):
        # Decode the raw screencap bytes into a BGR image.
        image = cv2.imdecode(np.frombuffer(
            bytes(self.device.screencap()), np.uint8), cv2.IMREAD_COLOR)
if detect:
return image.shape[0]/540
else:
            if self.screen_multi != 1:
self.screenshot = cv2.resize(image, (960, 540))
else:
self.screenshot = image
if mode == 1:
self.screenshot = cv2.cvtColor(self.screenshot, cv2.COLOR_BGR2GRAY)
def compare(self, imgs, screenshot=None, crop=None, mode=0, debug=False):
if screenshot is None:
if mode == 1:
self.get_screen(mode=1)
else:
self.get_screen()
if crop is not None:
self.screenshot = self.screenshot[int(crop['y']):int(crop['y'])+int(crop['height']),
int(crop['x']):int(crop['x'])+int(crop['width'])]
if isinstance(imgs, dict):
if mode == 1:
for name in imgs:
pos = sift(imgs[name], self.screenshot, debug=debug)
if isinstance(pos, list):
return [name, np.int32(pos)]
else:
for name in imgs:
pos = match(imgs[name], self.screenshot, debug=debug)
if isinstance(pos, list):
return [name, np.int32(pos)]
else:
if mode == 1:
pos = sift(imgs, self.screenshot, debug=debug)
if isinstance(pos, list):
pos = np.int32(pos)
return ["", [pos[0], pos[1]]]
else:
pos = match(imgs, self.screenshot, debug=debug)
if isinstance(pos, list):
pos = np.int32(pos)
return ["", [pos[0], pos[1]]]
return False
    def standby(self, img, coordinate=None, mode=0, tap=True, disappear=False, debug=False):
flag = False
if isinstance(img, list):
template = dict()
for name in img:
template[name] = self.templates[name]
else:
template = self.templates[img]
        if not disappear:
while True:
flag = self.compare(template, mode=mode, debug=debug)
if not isinstance(flag, bool):
break
if coordinate is not None:
self.tap([coordinate[0], coordinate[1]])
if tap:
self.tap(flag[1])
else:
return flag
else:
while True:
flag = self.compare(template, mode=mode, debug=debug)
if isinstance(flag, bool):
break
if coordinate is not None:
self.tap([coordinate[0], coordinate[1]])
def tap(self, pos):
        pos = np.multiply(self.screen_multi, pos)
self.device.shell("input tap {} {}".format(pos[0], pos[1]))
def swipe(self, pos1, pos2, delay=1000):
        pos1 = np.multiply(self.screen_multi, pos1)
        pos2 = np.multiply(self.screen_multi, pos2)
self.device.shell(
"input swipe {} {} {} {} {}".format(pos1[0], pos1[1], pos2[0], pos2[1], delay))
class worker(base_unit):
def __init__(self, root, device, templates, name, times: int, apple: str, count: int, team: int, support, recover: int, progress_bar):
self.root = root
super().__init__(device, templates)
self.name = name
self.max_times = int(times)
self.runtimes = 0
self.apple = apple
if apple != '':
self.maxapple = int(count)
self.count = int(count)
else:
self.maxapple = 0
self.count = 0
self.use = 0
self.team = team
self.recover = recover
self.friend = self.get_friend(support)
self.button = self.get_button()
self.pbar = progress_bar
def get_button(self):
with open('{}/UserData/button.json'.format(self.root), newline='') as jsonfile:
return json.load(jsonfile)
def timecal(self, time):
        if time > 60:
            return "{} minutes".format(round(time/60, 1))
        else:
            return "{} seconds".format(round(time, 1))
    def enter_stage(self, total, single, quit=False):
os.system("cls")
result = self.standby(["continue", "menu"], tap=False)
if self.apple == "quartz":
applestr = "聖晶石"
elif self.apple == "goldden":
applestr = "金蘋果"
elif self.apple == "silver":
applestr = "銀蘋果"
elif self.apple == "copper":
applestr = "銅蘋果"
elif self.apple == "":
applestr = "自然回體"
print("使用中的腳本:{}".format(self.name))
with tqdm(total=self.max_times, desc="腳本進度", bar_format="{{desc:}}{{percentage:3.0f}}%|{{bar:20}}|完成: {}/ {}".format(self.runtimes-1, self.max_times)) as rbar:
rbar.update(self.runtimes-1)
print("回體方式: {} /額度: {} /已使用: {} /剩餘: {}".format(
applestr, self.maxapple, self.use, self.count))
print("總運行時間為: {} /上回循環時間為: {}".format(
self.timecal(total), self.timecal(singel)))
        if quit:
            if result[0] == "continue":
                self.standby("close")
            print("====================================================")
            print("[EXIT] Script finished!!")
            exit()
        else:
            print("===============================================")
            print("[INFO] Starting round {}".format(self.runtimes))
self.pbar.display()
if result[0] == "menu":
state = result[0]
self.tap((750, 160))
else:
state = result[0]
self.tap(result[1])
print("\r\x1b[2K", end='')
print("[INFO]嘗試進入關卡")
self.pbar.reset()
result = self.standby(["noap", "select_friend"], tap=False)
if result[0] == "noap":
print("\r\x1b[2K", end='')
print("[Waring]體力耗盡")
self.pbar.display()
if self.count > 0:
self.tap(self.button["apple"][self.apple])
self.standby("confirm")
self.count -= 1
self.use += 1
print("\r\x1b[2K", end='')
if self.apple == "quartz":
print("[INFO]使用聖晶石回體!")
elif self.apple == "goldden":
print("[INFO]使用金蘋果回體!")
elif self.apple == "silver":
print("[INFO]使用銀蘋果回體!")
elif self.apple == "copper":
print("[INFO]使用銅蘋果回體!")
else:
self.tap((470, 470))
print("\r\x1b[2K", end='')
print("[INFO]等待回體中...")
start_time = time.time()
end_time = time.time()
while not int(end_time-start_time) >= int(self.recover)*60:
remain = round(
int(self.recover) - float(int(end_time-start_time)/60), 1)
if remain >= 60:
print("\x1b[1A\r\x1b[2K", end='')
print("[INFO]等待回體中...,剩餘 {} 分鐘".format(remain))
else:
print("\x1b[1A\r\x1b[2K", end='')
print("[INFO]等待回體中...,剩餘 {} 秒鐘".format(remain))
for i in range(30):
end_time = time.time()
if int(end_time-start_time) >= int(self.recover)*60:
break
time.sleep(1)
            state = self.enter_stage(total, single)
self.pbar.update(1)
return state
def skill(self, position: int, skill: int, target=None):
self.standby("attack", tap=False, coordinate=(670, 25))
self.tap(self.button["servert{}".format(position)]
["skill{}".format(skill)])
print("\r\x1b[2K", end='')
print("[BATTLE]使用從者 {} 技能 {} ".format(
position, skill), end='')
if target is not None:
print("附加給從者 {}".format(target))
self.standby("select", tap=False)
self.tap(self.button["servert{}".format(target)]["locate"])
else:
print("")
self.pbar.update(1)
def attack(self, first=None, second=None, third=None):
self.standby("attack", coordinate=(670, 25))
print("\r\x1b[2K", end='')
print("[BATTLE]準備使用指令卡")
self.pbar.display()
time.sleep(2)
select = [first, second, third]
card = ""
for i in range(len(select)):
if select[i] is None:
rnd = random.randrange(1, 6)
while rnd in select:
rnd = random.randrange(1, 6)
select[i] = rnd
card += "指令卡 {}/".format(rnd)
self.tap(self.button["card"]["{}".format(rnd)])
else:
if select[i] > 5:
card += "寶具 {}/".format(int(select[i])-5)
else:
card += "指令卡 {}/".format(select[i])
self.tap(self.button["card"]["{}".format(select[i])])
print("\r\x1b[2K", end='')
print("[BATTLE]使用 {}".format(card))
self.pbar.update(1)
def master(self, skill, target=None):
self.standby("attack", tap=False, coordinate=(670, 25))
print("\r\x1b[2K", end='')
print("[MASTER]準備使用御主技能")
self.pbar.display()
self.tap(self.button["master"]["locate"])
time.sleep(1)
print("\r\x1b[2K", end='')
print("[MASTER]使用御主技能 {}".format(skill), end='')
self.tap(self.button["master"]["skill{}".format(skill)])
if target is not None:
print("附加給從者 {}".format(target))
self.standby("select", tap=False)
self.tap(self.button["servert{}".format(target)]["locate"])
else:
print("")
self.pbar.update(1)
def change(self, front: int, back: int):
self.standby("attack", tap=False, coordinate=(670, 25))
print("\r\x1b[2K", end='')
print("[Change]準備更換角色")
self.pbar.display()
self.tap(self.button["master"]["locate"])
time.sleep(1)
self.tap(self.button["master"]["skill3"])
self.standby("order_change", tap=False)
print("\r\x1b[2K", end='')
print("[Change]前排 {} 號從者,更換成後排 {} 號從者".format(front, back))
self.tap(self.button["change"]["{}".format(front)])
self.tap(self.button["change"]["{}".format(back+3)])
self.tap(self.button["change"]["confirm"])
self.pbar.update(1)
    def start_battle(self, total, single):
self.runtimes += 1
if self.runtimes > self.max_times:
            self.enter_stage(total, single, quit=True)
else:
            state = self.enter_stage(total, single)
self.select_friend()
if state == "menu":
start = self.standby("start", tap=False)
select = self.compare(self.templates["yello_dot"], self.screenshot,
crop=self.button["team_select"]["{}".format(self.team)])
if isinstance(select, bool):
self.tap(self.button["team"]["{}".format(self.team)])
time.sleep(1)
self.standby("start")
print("\r\x1b[2K", end='')
print("[BATTLE]進入關卡")
self.pbar.update(1)
def finish_stage(self):
print("\r\x1b[2K", end='')
print("[Finish]等待下一步...")
self.pbar.update(1)
self.standby("next", coordinate=(670, 25))
result = self.standby(["continue", "menu", "friendrequest"], tap=False)
if result[0] == "friendrequest":
print("\r\x1b[2K", end='')
print("[Finish]拒絕好友申請")
self.pbar.display()
self.tap([250, 465])
print("\r\x1b[2K", end='')
print("[Finish]完成關卡!!")
def get_friend(self, support):
support_path = os.path.join(self.root, "UserData")
support_path = os.path.join(support_path, "support")
if os.path.isfile(os.path.join(support_path, support)):
return cv2.imread(os.path.join(support_path, support))
else:
friend = dict()
support_dict = os.path.join(support_path, support)
for img in os.listdir(support_dict):
friend[img.split('.')[0]] = cv2.imread(
os.path.join(support_dict, img))
return friend
def select_friend(self):
have_bar = False
found = False
bar_crop = {'x': 910, 'y': 140, 'width': 50, 'height': 400}
print("\r\x1b[2K", end='')
print("[Support]開始選擇Support角色")
self.pbar.display()
while not found:
have_bar = self.compare(self.templates["bar"], crop=bar_crop)
if isinstance(have_bar, list):
have_bar = False
else:
have_bar = True
result = self.standby(["no_friend", "friend_bar"], tap=False)
if result[0] == "no_friend":
print("\r\x1b[2K", end='')
print("[Support]沒有符合條件好友,將更新列表")
self.pbar.display()
self.standby("update")
self.standby("refresh")
self.standby("dis_refresh", disapper=True)
else:
result = self.compare(self.friend)
if isinstance(result, list):
print("\r\x1b[2K", end='')
print("[Support]發現符合好友角色!")
self.tap(result[1])
self.pbar.update(1)
found = True
else:
if have_bar:
while True:
self.swipe((100, 440), (100, 180))
time.sleep(1)
result = self.compare(self.friend)
if isinstance(result, list):
print("\r\x1b[2K", end='')
print("[Support]發現符合好友角色!")
self.tap(result[1])
self.pbar.update(1)
found = True
break
else:
end_crop = {'x': 925, 'y': 520,
'width': 35, 'height': 20}
result = self.compare(
self.templates["friendEnd"], self.screenshot, crop=end_crop)
if isinstance(result, list):
print("\r\x1b[2K", end='')
print("[Support]好友列表已經至底,將更新列表")
self.pbar.display()
self.standby("update")
self.standby("refresh")
self.standby("dis_refresh", disapper=True)
break
else:
print("\r\x1b[2K", end='')
print("[Support]沒有符合條件好友,將更新列表")
self.pbar.display()
self.standby("update")
self.standby("refresh")
self.standby("dis_refresh", disapper=True)
class box(base_unit):
def __init__(self, device, templates):
super().__init__(device, templates)
self.status = "stop"
self.result = None
def gacha_tap(self):
while True:
if self.status not in ["full", "complete"]:
self.tap([300, 330])
else:
break
def box_gacha(self):
self.status = "start"
job = threading.Thread(target=self.gacha_tap)
job.start()
self.result = self.standby(["reset", "box_full"], tap=False)
if self.result[0] == "box_full":
self.status = "full"
self.tap(self.result[1])
print("禮物箱已滿!!")
job.join()
return True
else:
self.status = "complete"
time.sleep(0.5)
# self.tap(self.result[1])
print("完成!!")
# self.standby("execute")
# self.standby("close")
job.join()
return False
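# --- Illustrative wiring (not part of the original script) ---
# These classes expect an adb device handle plus a dict of cv2 template
# images; the names below are assumptions for the sketch, not the project's
# real entry point.
# from ppadb.client import Client as AdbClient   # assumed dependency
# device = AdbClient(host='127.0.0.1', port=5037).devices()[0]
# templates = {name: cv2.imread(path) for name, path in {...}.items()}
# unit = base_unit(device, templates)
# unit.tap([480, 270])  # taps the centre of a 960x540-scaled screen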
|
import logging
from os import getenv, environ
import boto3
from dotenv import load_dotenv
load_dotenv() # take environment variables from .env.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
DYNAMODB_TABLE: str = environ["DYNAMODB_TABLE"]
AWS_DEFAULT_REGION: str = getenv("AWS_DEFAULT_REGION", "us-east-2")
LIMIT_OUTPUT: int = int(getenv("LIMIT_OUTPUT", 8))
LATITUDE: float = float(getenv("LATITUDE")) if getenv("LATITUDE") else None
LONGITUDE: float = float(getenv("LONGITUDE")) if getenv("LONGITUDE") else None
APPID: str = getenv("APPID")
dynamodb = boto3.resource("dynamodb", region_name=AWS_DEFAULT_REGION)
table = dynamodb.Table(DYNAMODB_TABLE)
DATE_FORMAT = "%Y-%m-%d"
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
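# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of reading from the table configured above; the key
# schema ("date") is an assumption, not taken from the source.
def get_forecast(day: str) -> dict:
    # Return the stored item for a given day, or an empty dict if absent.
    response = table.get_item(Key={"date": day})
    return response.get("Item", {})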
|
from django.db import models
from django.core.validators import MinValueValidator
class Topping(models.Model):
name = models.CharField(max_length=255, unique=True)
def __str__(self):
return self.name
class Pizza(models.Model):
name = models.CharField(max_length=255, unique=True)
price = models.DecimalField(max_digits=5, decimal_places=2,
validators=[MinValueValidator(0)])
toppings = models.ManyToManyField(Topping)
votes = models.IntegerField(default=0)
def count_toppings(self):
return self.toppings.count()
def __str__(self):
return self.name
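# --- Illustrative ORM usage (not part of the original models file) ---
# Run inside `python manage.py shell` once migrations exist; the data below
# is made up.
# cheese = Topping.objects.create(name='cheese')
# margherita = Pizza.objects.create(name='Margherita', price='7.50')
# margherita.toppings.add(cheese)
# margherita.count_toppings()                              # -> 1
# Pizza.objects.filter(toppings__name='cheese').count()    # -> 1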
|
# Copyright (c) 2020 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import logging
from os import getenv
from datetime import datetime
from retrying import retry
from rest3client import RESTclient
from requests.exceptions import HTTPError
from requests.exceptions import ChunkedEncodingError
logger = logging.getLogger(__name__)
logging.getLogger('urllib3.connectionpool').setLevel(logging.CRITICAL)
HOSTNAME = 'api.github.com'
VERSION = 'v3'
DEFAULT_PAGE_SIZE = 30
DEFAULT_GRAPHQL_PAGE_SIZE = 100
class GraphqlRateLimitError(Exception):
""" GraphQL Rate Limit Error
"""
pass
class GraphqlError(Exception):
""" GraphQL Error
"""
pass
class GitHubAPI(RESTclient):
""" An advanced REST client for the GitHub API
"""
def __init__(self, **kwargs):
logger.debug('executing GitHubAPI constructor')
hostname = kwargs.pop('hostname', HOSTNAME)
self.version = kwargs.pop('version', VERSION)
super(GitHubAPI, self).__init__(hostname, **kwargs)
def get_response(self, response, **kwargs):
""" subclass override to including logging of ratelimits
"""
ratelimit = GitHubAPI.get_ratelimit(response.headers)
if ratelimit:
self.log_ratelimit(ratelimit)
return super(GitHubAPI, self).get_response(response, **kwargs)
def get_headers(self, **kwargs):
""" return headers to pass to requests method
"""
headers = super(GitHubAPI, self).get_headers(**kwargs)
headers['Accept'] = f'application/vnd.github.{self.version}+json'
return headers
def total(self, endpoint):
""" return total number of resources
"""
# logger.debug(f'get total number of resources at endpoint {endpoint}')
if 'per_page' in endpoint:
raise ValueError(f'endpoint {endpoint} with per_page argument is not supported')
if '?' in endpoint:
endpoint = f'{endpoint}&per_page=1'
else:
endpoint = f'{endpoint}?per_page=1'
response = self.get(endpoint, raw_response=True)
if response.links:
last_url = response.links['last']['url']
endpoint = self._get_endpoint_from_url(last_url)
items = self.get(endpoint)
per_page = GitHubAPI.get_per_page_from_url(last_url)
last_page = GitHubAPI.get_page_from_url(last_url)
total = per_page * (last_page - 1) + len(items)
else:
items = response.json()
total = len(items)
return total
@staticmethod
def get_page_from_url(url):
""" get page query parameter form url
"""
regex = r'^.*page=(?P<value>\d+).*$'
match = re.match(regex, url)
if match:
return int(match.group('value'))
@staticmethod
def get_per_page_from_url(url):
""" get per_page query parameter from url
"""
per_page = DEFAULT_PAGE_SIZE
regex = r'^.*per_page=(?P<value>\d+).*$'
match = re.match(regex, url)
if match:
per_page = int(match.group('value'))
return per_page
@classmethod
def get_client(cls):
""" return instance of GitHubAPI
"""
return GitHubAPI(
hostname=getenv('GH_BASE_URL', HOSTNAME),
bearer_token=getenv('GH_TOKEN_PSW'))
@staticmethod
def get_ratelimit(headers):
""" get rate limit data
"""
reset = headers.get('X-RateLimit-Reset')
if not reset:
return {}
remaining = headers.get('X-RateLimit-Remaining')
limit = headers.get('X-RateLimit-Limit')
delta = datetime.fromtimestamp(int(reset)) - datetime.now()
minutes = str(delta.total_seconds() / 60).split('.')[0]
return {
'remaining': remaining,
'limit': limit,
'minutes': minutes
}
@staticmethod
def log_ratelimit(ratelimit):
""" log rate limit data
"""
logger.debug(f"{ratelimit['remaining']}/{ratelimit['limit']} resets in {ratelimit['minutes']} min")
@staticmethod
def retry_ratelimit_error(exception):
""" return True if exception is 403 HTTPError, False otherwise
retry:
wait_fixed:60000
stop_max_attempt_number:60
"""
logger.debug(f"checking if '{type(exception).__name__}' exception is a ratelimit error")
if isinstance(exception, HTTPError):
if exception.response.status_code == 403:
logger.info('ratelimit error encountered - retrying request in 60 seconds')
return True
logger.debug(f'exception is not a ratelimit error: {exception}')
return False
@staticmethod
def clear_cursor(query, cursor):
""" return query with all cursor references removed if no cursor
"""
if not cursor:
query = query.replace('after: $cursor', '')
query = query.replace('$cursor: String!', '')
return query
@staticmethod
def sanitize_query(query):
""" sanitize query
"""
return query.replace('\n', ' ').replace(' ', '').strip()
@staticmethod
def raise_if_error(response):
""" raise GraphqlRateLimitError if error exists in errors
"""
if 'errors' in response:
logger.debug(f'errors detected in graphql response: {response}')
for error in response['errors']:
if error.get('type', '') == 'RATE_LIMITED':
raise GraphqlRateLimitError(error.get('message', ''))
raise GraphqlError(response['errors'][0]['message'])
@staticmethod
def get_value(data, keys):
""" return value represented by keys dot notated string from data dictionary
"""
if '.' in keys:
key, rest = keys.split('.', 1)
if key in data:
return GitHubAPI.get_value(data[key], rest)
raise KeyError(f'dictionary does not have key {key}')
else:
return data[keys]
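    # Example (illustrative): GitHubAPI.get_value({'repo': {'stars': 7}}, 'repo.stars') -> 7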
def _get_graphql_page(self, query, variables, keys):
""" return generator that yields page from graphql response
"""
variables['page_size'] = DEFAULT_GRAPHQL_PAGE_SIZE
variables['cursor'] = ''
while True:
updated_query = GitHubAPI.clear_cursor(query, variables['cursor'])
response = self.post('/graphql', json={'query': updated_query, 'variables': variables})
GitHubAPI.raise_if_error(response)
yield GitHubAPI.get_value(response, f'{keys}.edges')
page_info = GitHubAPI.get_value(response, f'{keys}.pageInfo')
has_next_page = page_info['hasNextPage']
if not has_next_page:
logger.debug('no more pages')
break
variables['cursor'] = page_info['endCursor']
def check_graphqlratelimiterror(exception):
""" return True if exception is GraphQL Rate Limit Error, False otherwise
"""
logger.debug(f"checking if '{type(exception).__name__}' exception is a GraphqlRateLimitError")
if isinstance(exception, (GraphqlRateLimitError, TypeError)):
logger.debug('exception is a GraphqlRateLimitError - retrying request in 60 seconds')
return True
logger.debug(f'exception is not a GraphqlRateLimitError: {exception}')
return False
@retry(retry_on_exception=check_graphqlratelimiterror, wait_fixed=60000, stop_max_attempt_number=60)
def graphql(self, query, variables, page=False, keys=None):
""" execute graphql query and return response or paged response if page is True
"""
query = GitHubAPI.sanitize_query(query)
if page:
response = self._get_graphql_page(query, variables, keys)
else:
updated_query = GitHubAPI.clear_cursor(query, variables.get('cursor'))
response = self.post('/graphql', json={'query': updated_query, 'variables': variables})
GitHubAPI.raise_if_error(response)
return response
check_graphqlratelimiterror = staticmethod(check_graphqlratelimiterror)
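# --- Illustrative usage (not part of the original module) ---
# A minimal sketch; requires a valid token in GH_TOKEN_PSW, and the repo
# endpoint below is an example, not taken from the source.
if __name__ == '__main__':
    client = GitHubAPI.get_client()
    print(client.total('/repos/octocat/Hello-World/issues'))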
|
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from django.core.management.base import BaseCommand
from datawinners.main.couchdb.utils import all_db_names
from datawinners.main.ut_couch_fixtures import load_data, load_ft_data
from datawinners.main.database import get_db_manager
from datawinners.main.management.sync_changed_views import SyncOnlyChangedViews
from datawinners.main.management.commands.utils import document_stores_to_process
from mangrove.datastore.cache_manager import get_cache_manager
from mangrove.datastore.database import _delete_db_and_remove_db_manager
class Command(BaseCommand):
def handle(self, *args, **options):
get_cache_manager().flush_all()
for database_name in all_db_names():
print ("Database %s") % (database_name,)
print 'Deleting...'
manager = get_db_manager(database_name)
_delete_db_and_remove_db_manager(manager)
load_ft_data()
for database_name in document_stores_to_process(args):
recreated_manager = get_db_manager(database_name)
print "Syncing Views....."
SyncOnlyChangedViews().sync_view(recreated_manager)
print "Loading data....."
load_data()
print "Done."
|
###
# Copyright (c) 2002-2004, Jeremiah Fincher
# Copyright (c) 2010-2011, 2013, James McCoy
# Copyright (c) 2010-2021, The Limnoria Contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import re
import sys
import time
import supybot.log as log
import supybot.conf as conf
import supybot.utils as utils
import supybot.world as world
import supybot.ircdb as ircdb
from supybot.commands import *
import supybot.irclib as irclib
import supybot.utils.minisix as minisix
import supybot.ircmsgs as ircmsgs
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('Seen')
class IrcStringAndIntDict(utils.InsensitivePreservingDict):
def key(self, x):
if isinstance(x, int):
return x
else:
return ircutils.toLower(x)
class SeenDB(plugins.ChannelUserDB):
IdDict = IrcStringAndIntDict
def serialize(self, v):
return list(v)
def deserialize(self, channel, id, L):
(seen, saying) = L
return (float(seen), saying)
def update(self, channel, nickOrId, saying):
seen = time.time()
self[channel, nickOrId] = (seen, saying)
self[channel, '<last>'] = (seen, saying)
def seenWildcard(self, channel, nick):
nicks = ircutils.IrcSet()
nickRe = re.compile('^%s$' % '.*'.join(nick.split('*')), re.I)
for (searchChan, searchNick) in self.keys():
#print 'chan: %s ... nick: %s' % (searchChan, searchNick)
if isinstance(searchNick, int):
                # We need to skip the responses that are keyed by id, as they
                # apparently duplicate the responses for the same person that
                # are keyed by nick-string
continue
if ircutils.strEqual(searchChan, channel):
if nickRe.search(searchNick) is not None:
nicks.add(searchNick)
L = [[nick, self.seen(channel, nick)] for nick in nicks]
def negativeTime(x):
return -x[1][0]
utils.sortBy(negativeTime, L)
return L
def seen(self, channel, nickOrId):
return self[channel, nickOrId]
filename = conf.supybot.directories.data.dirize('Seen.db')
anyfilename = conf.supybot.directories.data.dirize('Seen.any.db')
class Seen(callbacks.Plugin):
"""This plugin allows you to see when and what someone last said and
what you missed since you left a channel."""
noIgnore = True
def __init__(self, irc):
self.__parent = super(Seen, self)
self.__parent.__init__(irc)
self.db = SeenDB(filename)
self.anydb = SeenDB(anyfilename)
self.lastmsg = {}
world.flushers.append(self.db.flush)
world.flushers.append(self.anydb.flush)
def die(self):
if self.db.flush in world.flushers:
world.flushers.remove(self.db.flush)
else:
self.log.debug('Odd, no flush in flushers: %r', world.flushers)
self.db.close()
if self.anydb.flush in world.flushers:
world.flushers.remove(self.anydb.flush)
else:
self.log.debug('Odd, no flush in flushers: %r', world.flushers)
self.anydb.close()
self.__parent.die()
def __call__(self, irc, msg):
self.__parent.__call__(irc, msg)
def doPrivmsg(self, irc, msg):
if ircmsgs.isCtcp(msg) and not ircmsgs.isAction(msg):
return
if msg.channel:
channel = msg.channel
said = ircmsgs.prettyPrint(msg)
self.db.update(channel, msg.nick, said)
self.anydb.update(channel, msg.nick, said)
try:
id = ircdb.users.getUserId(msg.prefix)
self.db.update(channel, id, said)
self.anydb.update(channel, id, said)
except KeyError:
pass # Not in the database.
def doPart(self, irc, msg):
channel = msg.args[0]
said = ircmsgs.prettyPrint(msg)
self.anydb.update(channel, msg.nick, said)
try:
id = ircdb.users.getUserId(msg.prefix)
self.anydb.update(channel, id, said)
except KeyError:
pass # Not in the database.
doJoin = doPart
doKick = doPart
def doQuit(self, irc, msg):
said = ircmsgs.prettyPrint(msg)
try:
id = ircdb.users.getUserId(msg.prefix)
except KeyError:
id = None # Not in the database.
for channel in msg.tagged('channels'):
self.anydb.update(channel, msg.nick, said)
if id is not None:
self.anydb.update(channel, id, said)
doNick = doQuit
def doMode(self, irc, msg):
# Filter out messages from network Services
if msg.nick:
try:
id = ircdb.users.getUserId(msg.prefix)
except KeyError:
id = None # Not in the database.
channel = msg.args[0]
said = ircmsgs.prettyPrint(msg)
self.anydb.update(channel, msg.nick, said)
if id is not None:
self.anydb.update(channel, id, said)
doTopic = doMode
def _seen(self, irc, channel, name, any=False):
if any:
db = self.anydb
else:
db = self.db
try:
results = []
if '*' in name:
if (len(name.replace('*', '')) <
self.registryValue('minimumNonWildcard',
channel, irc.network)):
irc.error(_('Not enough non-wildcard characters.'),
Raise=True)
results = db.seenWildcard(channel, name)
else:
results = [[name, db.seen(channel, name)]]
if len(results) == 1:
(nick, info) = results[0]
(when, said) = info
reply = format(_('%s was last seen in %s %s ago'),
nick, channel,
utils.timeElapsed(time.time()-when))
if self.registryValue('showLastMessage', channel, irc.network):
if minisix.PY2:
said = said.decode('utf8')
reply = _('%s: %s') % (reply, said)
irc.reply(reply)
elif len(results) > 1:
L = []
for (nick, info) in results:
(when, said) = info
L.append(format(_('%s (%s ago)'), nick,
utils.timeElapsed(time.time()-when)))
irc.reply(format(_('%s could be %L'), name, (L, _('or'))))
else:
irc.reply(format(_('I haven\'t seen anyone matching %s.'), name))
except KeyError:
irc.reply(format(_('I have not seen %s.'), name))
def _checkChannelPresence(self, irc, channel, target, you):
if channel not in irc.state.channels:
irc.error(_("I'm not in %s." % channel), Raise=True)
if target not in irc.state.channels[channel].users:
if you:
msg = format(_('You must be in %s to use this command.'), channel)
else:
msg = format(_('%s must be in %s to use this command.'),
target, channel)
irc.error(msg, Raise=True)
@internationalizeDocstring
def seen(self, irc, msg, args, channel, name):
"""[<channel>] <nick>
Returns the last time <nick> was seen and what <nick> was last seen
saying. <channel> is only necessary if the message isn't sent on the
channel itself. <nick> may contain * as a wildcard.
"""
if name and ircutils.strEqual(name, irc.nick):
irc.reply(_("You've found me!"))
return
self._checkChannelPresence(irc, channel, msg.nick, True)
self._seen(irc, channel, name)
seen = wrap(seen, ['channel', 'something'])
@internationalizeDocstring
def any(self, irc, msg, args, channel, optlist, name):
"""[<channel>] [--user <name>] [<nick>]
Returns the last time <nick> was seen and what <nick> was last seen
doing. This includes any form of activity, instead of just PRIVMSGs.
If <nick> isn't specified, returns the last activity seen in
<channel>. If --user is specified, looks up name in the user database
and returns the last time user was active in <channel>. <channel> is
only necessary if the message isn't sent on the channel itself.
"""
if name and ircutils.strEqual(name, irc.nick):
irc.reply(_("You've found me!"))
return
self._checkChannelPresence(irc, channel, msg.nick, True)
if name and optlist:
raise callbacks.ArgumentError
elif name:
self._seen(irc, channel, name, any=True)
elif optlist:
for (option, arg) in optlist:
if option == 'user':
user = arg
self._user(irc, channel, user, any=True)
else:
self._last(irc, channel, any=True)
any = wrap(any, ['channel', getopts({'user': 'otherUser'}),
additional('something')])
def _last(self, irc, channel, any=False):
if any:
db = self.anydb
else:
db = self.db
try:
(when, said) = db.seen(channel, '<last>')
reply = format(_('Someone was last seen in %s %s ago'),
channel, utils.timeElapsed(time.time()-when))
if self.registryValue('showLastMessage', channel, irc.network):
reply = _('%s: %s') % (reply, said)
irc.reply(reply)
except KeyError:
irc.reply(_('I have never seen anyone.'))
@internationalizeDocstring
def last(self, irc, msg, args, channel):
"""[<channel>]
Returns the last thing said in <channel>. <channel> is only necessary
if the message isn't sent in the channel itself.
"""
self._checkChannelPresence(irc, channel, msg.nick, True)
self._last(irc, channel)
last = wrap(last, ['channel'])
def _user(self, irc, channel, user, any=False):
if any:
db = self.anydb
else:
db = self.db
try:
(when, said) = db.seen(channel, user.id)
reply = format(_('%s was last seen in %s %s ago'),
user.name, channel,
utils.timeElapsed(time.time()-when))
if self.registryValue('showLastMessage', channel, irc.network):
reply = _('%s: %s') % (reply, said)
irc.reply(reply)
except KeyError:
irc.reply(format(_('I have not seen %s.'), user.name))
@internationalizeDocstring
def user(self, irc, msg, args, channel, user):
"""[<channel>] <name>
Returns the last time <name> was seen and what <name> was last seen
saying. This looks up <name> in the user seen database, which means
that it could be any nick recognized as user <name> that was seen.
<channel> is only necessary if the message isn't sent in the channel
itself.
"""
self._checkChannelPresence(irc, channel, msg.nick, True)
self._user(irc, channel, user)
user = wrap(user, ['channel', 'otherUser'])
@internationalizeDocstring
def since(self, irc, msg, args, channel, nick):
"""[<channel>] [<nick>]
Returns the messages since <nick> last left the channel.
If <nick> is not given, it defaults to the nickname of the person
calling the command.
"""
if nick is None:
nick = msg.nick
you = True
else:
you = False
self._checkChannelPresence(irc, channel, nick, you)
if nick is None:
nick = msg.nick
end = None # By default, up until the most recent message.
for (i, m) in utils.seq.renumerate(irc.state.history):
if end is None and m.command == 'JOIN' and \
ircutils.strEqual(m.args[0], channel) and \
ircutils.strEqual(m.nick, nick):
end = i
if m.command == 'PART' and \
ircutils.strEqual(m.nick, nick) and \
ircutils.strEqual(m.args[0], channel):
break
elif m.command == 'QUIT' and ircutils.strEqual(m.nick, nick):
# XXX We assume the person was in-channel at this point.
break
elif m.command == 'KICK' and \
ircutils.strEqual(m.args[1], nick) and \
ircutils.strEqual(m.args[0], channel):
break
        else:  # for-else: only runs when the loop exits without a break.
irc.error(format(_('I couldn\'t find in my history of %s messages '
'where %r last left %s'),
len(irc.state.history), nick, channel))
return
msgs = [m for m in irc.state.history[i:end]
if m.command == 'PRIVMSG' and ircutils.strEqual(m.args[0], channel)]
if msgs:
irc.reply(format('%L', list(map(ircmsgs.prettyPrint, msgs))))
else:
irc.reply(format(_('Either %s didn\'t leave, '
'or no messages were sent while %s was gone.'), nick, nick))
since = wrap(since, ['channel', additional('nick')])
Class = Seen
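# --- Illustrative interaction (not part of the original plugin) ---
#   <alice> @seen bob
#   <bot> bob was last seen in #chan 1 hour and 3 minutes ago: <bob> bye!
#   <alice> @seen b*b
#   <bot> b*b could be bob (1 hour and 3 minutes ago) or bub (2 days ago)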
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import ReadWordDict
import Lemmatizer
from nltk.stem.wordnet import WordNetLemmatizer
lmzer = WordNetLemmatizer()
lmdb = Lemmatizer.LemmaDB()
lmdb.load("../../DataBase/English/ECDICTData/lemma.en.txt")
def lemmatize(word):
res = lmdb.word_stem(word)
if res is None:
return lmzer.lemmatize(word)
return res[0]
rd = ReadWordDict.Dict("../../DataBase/English/ECDICTData/readword.db").dumps()
rd.extend({"uh-huh", "um-hum"})
out = open("Output/ANCCount.txt", "w", encoding="utf-8")
with open("Input/ANC-token-count.txt", encoding="utf-8") as f:
count = 0
for line in f.readlines():
pos = line.find(" ")
word = line[:pos]
lem = lemmatize(word)
if len(word) > 2 and lem not in rd and word not in rd:
rd.append(lem)
out.write(line)
count = count+1
if count > 100:
break
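# --- Illustrative behaviour (not part of the original script) ---
# lemmatize() prefers the ECDICT lemma DB and falls back to WordNet, e.g.:
#   lemmatize('studies')  # -> 'study' (exact result depends on lemma.en.txt)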
|
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lingvo.core.predictor."""
import lingvo.compat as tf
from lingvo.core import base_input_generator
from lingvo.core import base_model
from lingvo.core import inference_graph_exporter
from lingvo.core import inference_graph_pb2
from lingvo.core import predictor
from lingvo.core import test_utils
class DummyModel(base_model.BaseTask):
def Inference(self):
with tf.name_scope('inference'):
feed1 = tf.placeholder(name='feed1_node', dtype=tf.float32, shape=[1])
fetch1 = tf.identity(feed1, name='fetch1_node')
inference_graph = inference_graph_pb2.InferenceGraph()
subgraph = inference_graph.subgraphs['default']
subgraph.feeds['feed1'] = feed1.name
subgraph.fetches['fetch1'] = fetch1.name
return inference_graph
class PredictorTest(test_utils.TestCase):
def _testInferenceGraph(self):
p = base_model.SingleTaskModel.Params(DummyModel.Params().Set(name='test'))
p.input = base_input_generator.BaseInputGenerator.Params().Set(name='test')
inference_graph = inference_graph_exporter.InferenceGraphExporter.Export(p)
return inference_graph
def testPredictor(self):
pred = predictor.Predictor(self._testInferenceGraph())
[fetch1] = pred.Run(['fetch1'], feed1=[12345])
self.assertEqual(12345, fetch1)
def testMissingFeedRaisesInvalidArgumentError(self):
pred = predictor.Predictor(self._testInferenceGraph())
with self.assertRaisesRegex(tf.errors.InvalidArgumentError, 'feed1'):
pred.Run(['fetch1'])
def testInvalidFetchRaisesKeyError(self):
pred = predictor.Predictor(self._testInferenceGraph())
with self.assertRaisesRegex(KeyError, 'nonexistent'):
pred.Run(['fetch1', 'nonexistent'], feed1=[12345])
def testInvalidFetchWithoutValidateFetchesReturnsNone(self):
pred = predictor.Predictor(self._testInferenceGraph())
fetch1, nonexistent = pred.Run(['fetch1', 'nonexistent'],
feed1=[12345],
validate_fetches=False)
self.assertEqual(12345, fetch1)
self.assertIsNone(nonexistent)
if __name__ == '__main__':
tf.test.main()
|
#!/usr/bin/env python3
# coding: utf-8
import unittest
from .base_test import BaseTest
from .base_test import ThreadQueryExecutor
from .base_test import wait_checkpointer_stopevent
from .base_test import wait_bgwriter_stopevent
class EvictionTest(BaseTest):
def test_eviction_txn(self):
node = self.node
node.append_conf('postgresql.conf',
"orioledb.main_buffers = 8MB\n")
node.start()
node.safe_psql('postgres',
"CREATE EXTENSION IF NOT EXISTS orioledb;\n"
"CREATE TABLE IF NOT EXISTS o_test (\n"
" id integer NOT NULL,\n"
" PRIMARY KEY (id)\n"
") USING orioledb;\n"
"CREATE TABLE IF NOT EXISTS o_eviction (\n"
" id integer NOT NULL,\n"
" PRIMARY KEY (id)\n"
") USING orioledb;\n")
node.safe_psql('postgres',
"INSERT INTO o_test\n"
" (SELECT id FROM generate_series(501, 1500, 1) id);")
n = 30000
node.safe_psql('postgres',
"INSERT INTO o_eviction\n"
" (SELECT id FROM generate_series(%d, %d, 1) id);" % (1, n))
con1 = node.connect()
con1.execute("BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ;")
self.assertEqual(con1.execute("SELECT COUNT(*) FROM o_test;")[0][0], 1000)
con2 = node.connect()
con2.begin()
con2.execute("INSERT INTO o_test\n"
" (SELECT id FROM generate_series(1, 500, 1) id);")
con2.commit()
con2.close()
self.assertEqual(node.execute("SELECT COUNT(*) FROM o_eviction;")[0][0], 30000)
self.assertEqual(con1.execute("SELECT COUNT(*) FROM o_test;")[0][0], 1000)
con1.commit()
con1.close()
node.stop()
def test_eviction_tree(self):
INDEX_NOT_LOADED = "Index o_evicted_pkey: not loaded"
node = self.node
node.append_conf('postgresql.conf',
"shared_preload_libraries = orioledb\n"
"orioledb.main_buffers = 8MB\n"
"checkpoint_timeout = 86400\n"
"max_wal_size = 1GB\n"
"orioledb.debug_disable_bgwriter = true\n")
node.start()
node.safe_psql('postgres',
"CREATE EXTENSION IF NOT EXISTS orioledb;\n"
"CREATE TABLE IF NOT EXISTS o_test (\n"
" key SERIAL NOT NULL,\n"
" val int NOT NULL,\n"
" PRIMARY KEY (key)\n"
") USING orioledb;\n"
"CREATE UNIQUE INDEX o_test_ix2 ON o_test (key);\n"
"CREATE UNIQUE INDEX o_test_ix3 ON o_test (key);\n"
"CREATE UNIQUE INDEX o_test_ix4 ON o_test (key);\n"
"CREATE TABLE IF NOT EXISTS o_evicted (\n"
" key SERIAL NOT NULL,\n"
" val int NOT NULL,\n"
" PRIMARY KEY (key)\n"
") USING orioledb;\n"
"CREATE UNIQUE INDEX o_evicted_ix2 ON o_evicted (key);\n")
con1 = node.connect()
con1.execute("INSERT INTO o_evicted (val) SELECT val id FROM generate_series(1001, 1500, 1) val;\n")
self.assertEqual(con1.execute("SELECT count(*) FROM o_evicted;")[0][0], 500)
n = 250000
step = 1000
for i in range(1, n, step):
con1.execute("INSERT INTO o_test (val)\n"
" (SELECT val FROM generate_series(%d, %d, 1) val);\n" %
(i, i + step - 1))
con1.commit()
con1.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY key) x;")
con1.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY key) x;")
con2 = node.connect()
try:
self.assertEqual(
con1.execute("SELECT orioledb_tbl_structure('o_evicted'::regclass, 'e');")[0][0].split('\n')[0],
INDEX_NOT_LOADED)
self.assertEqual(
con2.execute("SELECT orioledb_tbl_structure('o_evicted'::regclass, 'e');")[0][0].split('\n')[0],
INDEX_NOT_LOADED)
self.assertEqual(con2.execute("SELECT count(*) FROM o_evicted;")[0][0], 500)
con2.commit()
self.assertEqual(con1.execute("SELECT count(*) FROM o_evicted;")[0][0], 500)
con1.commit()
con2.execute("INSERT INTO o_evicted (val) SELECT val id FROM generate_series(1, 500, 1) val;")
con2.commit()
self.assertEqual(con2.execute("SELECT count(*) FROM o_evicted;")[0][0], 1000)
self.assertEqual(con2.execute("SELECT val FROM o_evicted WHERE key = 500")[0][0], 1500)
self.assertEqual(con2.execute("SELECT count(*) FROM o_evicted WHERE val > 1500 LIMIT 1;")[0][0], 0)
con2.commit()
self.assertNotEqual(
con1.execute("SELECT orioledb_tbl_structure('o_evicted'::regclass, 'e');")[0][0].split('\n')[0],
INDEX_NOT_LOADED)
self.assertNotEqual(
con2.execute("SELECT orioledb_tbl_structure('o_evicted'::regclass, 'e');")[0][0].split('\n')[0],
INDEX_NOT_LOADED)
con1.execute("INSERT INTO o_test (val)\n"
" (SELECT val FROM generate_series(%d, %d, 1) val);\n" %
(1, n))
con1.commit()
con1.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY key) x;")
con1.execute("SELECT COUNT(*) FROM (SELECT * FROM o_test ORDER BY key) x;")
self.assertEqual(
con1.execute("SELECT orioledb_tbl_structure('o_evicted'::regclass, 'e');")[0][0].split('\n')[0],
INDEX_NOT_LOADED)
self.assertEqual(
con2.execute("SELECT orioledb_tbl_structure('o_evicted'::regclass, 'e');")[0][0].split('\n')[0],
INDEX_NOT_LOADED)
con3 = node.connect()
self.assertEqual(
con3.execute("SELECT orioledb_tbl_structure('o_evicted'::regclass, 'e');")[0][0].split('\n')[0],
INDEX_NOT_LOADED)
con3.close()
finally:
con1.close()
con2.close()
def eviction_after_checkpoint_base(self, compressed):
node = self.node
node.append_conf('postgresql.conf',
"shared_preload_libraries = orioledb\n"
"orioledb.main_buffers = 8MB\n")
node.start()
arg1 = "WITH (primary_compress)" if compressed else ""
arg2 = "WITH (compress)" if compressed else ""
node.safe_psql('postgres',
"""
CREATE EXTENSION IF NOT EXISTS orioledb;
CREATE TABLE IF NOT EXISTS o_test (
key SERIAL NOT NULL,
val int NOT NULL,
PRIMARY KEY (key)
) USING orioledb;
CREATE UNIQUE INDEX o_test_ix2 ON o_test (key);
CREATE UNIQUE INDEX o_test_ix3 ON o_test (key);
CREATE UNIQUE INDEX o_test_ix4 ON o_test (key);
CREATE TABLE IF NOT EXISTS o_evicted (
key SERIAL NOT NULL,
val int NOT NULL,
PRIMARY KEY (key)
) USING orioledb %s;
CREATE UNIQUE INDEX o_evicted_ix2 ON o_evicted (key) %s;
"""
% (arg1, arg2)
)
con1 = node.connect()
con1.execute("INSERT INTO o_evicted (val) SELECT val id FROM generate_series(1001, 1500, 1) val;\n")
        # different errors occur depending on whether CHECKPOINT has run an even or odd number of times
node.safe_psql("CHECKPOINT;")
con1.execute("INSERT INTO o_test (val)\n"
" (SELECT val FROM generate_series(%s, %s, 1) val);\n" %
(1, 999))
node.safe_psql("CHECKPOINT;")
node.safe_psql("CHECKPOINT;")
node.safe_psql("CHECKPOINT;")
con1.execute("SELECT * FROM o_evicted;")
self.assertEqual(con1.execute("SELECT count(*) FROM o_evicted;")[0][0], 500)
n = 20000
con1.execute("INSERT INTO o_test (val)\n"
" (SELECT val FROM generate_series(%s, %s, 1) val);\n" %
(str(1), str(n)))
con1.commit()
con1.close()
node.stop()
def test_eviction_after_checkpoint(self):
self.eviction_after_checkpoint_base(False)
def test_eviction_compress_after_checkpoint(self):
self.eviction_after_checkpoint_base(True)
def test_eviction_after_checkpoint_con1(self):
node = self.node
node.append_conf('postgresql.conf',
"shared_preload_libraries = orioledb\n"
"orioledb.main_buffers = 8MB\n")
node.start()
node.safe_psql('postgres',
"CREATE EXTENSION IF NOT EXISTS orioledb;\n"
"CREATE TABLE IF NOT EXISTS o_test (\n"
" key SERIAL NOT NULL,\n"
" val int NOT NULL\n"
") USING orioledb;\n")
con1 = node.connect()
        # different errors occur depending on whether CHECKPOINT has run an even or odd number of times
con1.execute("CHECKPOINT;")
con1.begin()
con1.execute("INSERT INTO o_test (val)\n"
" (SELECT val FROM generate_series(%s, %s, 1) val);\n" %
(1, 999))
con1.commit()
con1.execute("CHECKPOINT;")
n = 20000
con1.execute("INSERT INTO o_test (val)\n"
" (SELECT val FROM generate_series(%s, %s, 1) val);\n" %
(str(1), str(n)))
con1.commit()
con1.close()
node.stop()
def test_eviction_concurrent_checkpoint_next_tbl(self):
node = self.node
node.append_conf('postgresql.conf',
"shared_preload_libraries = orioledb\n"
"orioledb.main_buffers = 8MB\n"
"bgwriter_delay = 400\n"
"orioledb.enable_stopevents = true\n")
node.start()
node.safe_psql('postgres',
"CREATE EXTENSION IF NOT EXISTS orioledb;\n"
"CREATE TABLE IF NOT EXISTS o_first (\n"
" id int NOT NULL,\n"
" PRIMARY KEY (id)"
") USING orioledb;\n"
"CREATE TABLE IF NOT EXISTS o_second (\n"
" id text NOT NULL\n"
") USING orioledb;\n"
"CREATE TABLE IF NOT EXISTS o_test (\n"
" key SERIAL NOT NULL,\n"
" val int NOT NULL\n"
") USING orioledb;\n")
con1 = node.connect()
con2 = node.connect()
con1.execute("CHECKPOINT;")
con2.execute("SELECT pg_stopevent_set('checkpoint_table_start',\n"
"format(E'$.table.reloid == \\045s',\n"
"'o_second'::regclass::oid)::jsonpath);")
t1 = ThreadQueryExecutor(con1, "CHECKPOINT;")
t1.start()
wait_checkpointer_stopevent(node)
con2.execute("INSERT INTO o_first VALUES (0);")
con2.execute("INSERT INTO o_second VALUES ('ajnslajslkdlaksjdlkajlsdkjlakjsdl')")
con2.execute("SELECT * FROM o_first;")
con2.execute("SELECT * FROM o_second;")
n = 50000
con2.execute("INSERT INTO o_test (val)\n"
" (SELECT val FROM generate_series(%s, %s, 1) val);\n" %
(str(1), str(n)))
con2.commit()
con2.execute("SELECT * FROM o_first;")
con2.execute("SELECT pg_stopevent_reset('checkpoint_table_start')")
t1.join()
con1.execute("CHECKPOINT")
con1.execute("CHECKPOINT")
con1.close()
con2.close()
node.stop()
def eviction_concurrent_checkpoint_base(self, compressed):
node = self.node
node.append_conf('postgresql.conf',
"shared_preload_libraries = orioledb\n"
"orioledb.main_buffers = 8MB\n"
"bgwriter_delay = 400\n"
"orioledb.enable_stopevents = true\n")
node.start()
node.safe_psql('postgres',
"""
CREATE EXTENSION IF NOT EXISTS orioledb;
CREATE TABLE IF NOT EXISTS o_checkpoint (
id text NOT NULL,
PRIMARY KEY (id) %s
) USING orioledb;
CREATE TABLE IF NOT EXISTS o_test (
key SERIAL NOT NULL,
val int NOT NULL
) USING orioledb;
"""
% ("WITH (compress)" if compressed else "")
)
con1 = node.connect()
con2 = node.connect()
con3 = node.connect()
con1.begin()
con1.execute("INSERT INTO o_test (val)\n"
" (SELECT val FROM generate_series(%s, %s, 1) val);\n" %
(1, 7999))
con1.commit()
con3.execute("SELECT pg_stopevent_set('checkpoint_step',\n"
"'$.action == \"walkDownwards\" && "
"$.treeName == \"ctid_primary\" && "
"$.lokey.ctid[0] >= 2');")
t1 = ThreadQueryExecutor(con1, "CHECKPOINT;")
t1.start()
wait_checkpointer_stopevent(node)
n = 20000
con2.execute("INSERT INTO o_test (val)\n"
" (SELECT val FROM generate_series(%s, %s, 1) val);\n" %
(str(1), str(n)))
con2.commit()
con2.execute("SELECT * FROM o_checkpoint;")
con3.execute("SELECT pg_stopevent_reset('checkpoint_step')")
t1.join()
con1.execute("CHECKPOINT;")
con1.execute("CHECKPOINT;")
con1.execute("CHECKPOINT;")
con1.close()
con2.close()
con3.close()
node.stop()
def test_eviction_concurrent_checkpoint(self):
self.eviction_concurrent_checkpoint_base(False)
def test_eviction_compress_concurrent_checkpoint(self):
self.eviction_concurrent_checkpoint_base(True)
def test_eviction_concurrent_drop(self):
node = self.node
node.append_conf('postgresql.conf',
"shared_preload_libraries = orioledb\n"
"orioledb.main_buffers = 8MB\n"
"bgwriter_delay = 200\n"
"orioledb.enable_stopevents = true\n"
"checkpoint_timeout = 86400\n"
"max_wal_size = 1GB\n")
node.start()
node.safe_psql('postgres',
"CREATE EXTENSION IF NOT EXISTS orioledb;\n"
"CREATE TABLE IF NOT EXISTS o_evicted (\n"
" id int8 NOT NULL,\n"
" val int8 NOT NULL,\n"
" PRIMARY KEY (id, val)\n"
") USING orioledb;\n"
"CREATE TABLE IF NOT EXISTS o_test (\n"
" id int8 NOT NULL,\n"
" val int8 NOT NULL,\n"
" PRIMARY KEY (id, val)\n"
") USING orioledb;\n")
con1 = node.connect()
con2 = node.connect()
con2.execute("SELECT pg_stopevent_set('after_write_page', '$backendType == \"orioledb background writer\"');")
n = 150000
con1.execute("INSERT INTO o_evicted (id, val)\n"
" (SELECT id, id + 1 FROM generate_series(%s, %s, 1) id);\n" %
(str(1), str(n)))
con1.commit()
wait_bgwriter_stopevent(node)
n = 150000
con1.execute("INSERT INTO o_test (id, val)\n"
" (SELECT id, id + 1 FROM generate_series(%s, %s, 1) id);\n" %
(str(1), str(n)))
con1.commit()
t1 = ThreadQueryExecutor(con1, "DROP TABLE o_evicted;")
t1.start()
self.assertTrue(con2.execute("SELECT pg_stopevent_reset('after_write_page');")[0][0])
t1.join()
con1.commit()
con1.close()
con2.close()
node.stop()
def test_eviction_and_change_main_buffers_size(self):
node = self.node
node.append_conf('postgresql.conf',
"orioledb.main_buffers = 8MB\n"
"log_min_messages = notice\n")
node.start() # start PostgreSQL
node.safe_psql('postgres', """
CREATE EXTENSION IF NOT EXISTS orioledb;
CREATE TABLE IF NOT EXISTS o_test (
key SERIAL NOT NULL,
val int NOT NULL,
PRIMARY KEY (key)
) USING orioledb;
CREATE TABLE IF NOT EXISTS o_evicted (
id int NOT NULL,
val int NOT NULL,
PRIMARY KEY (id)
) USING orioledb;""")
con1 = node.connect()
con2 = node.connect()
node.execute("CHECKPOINT;")
con1.execute("INSERT INTO o_evicted (SELECT id, id + 1 FROM generate_series(0, 75000, 5) id);\n")
con1.commit()
n = 200000
con2.execute("INSERT INTO o_test (val)\n"
" (SELECT val FROM generate_series(%s, %s, 1) val);\n" %
(str(1), str(n)))
con2.commit()
con1.execute("INSERT INTO o_evicted (SELECT id, id + 1 FROM generate_series(1, 15000, 5) id);\n")
con1.commit()
node.execute("SELECT * FROM o_test;")
con1.execute("INSERT INTO o_evicted (SELECT id, id + 1 FROM generate_series(2, 15000, 5) id);\n")
con1.commit()
node.execute("SELECT * FROM o_test;")
con1.execute("INSERT INTO o_evicted (SELECT id, id + 1 FROM generate_series(3, 15000, 5) id);\n")
con1.commit()
node.execute("SELECT * FROM o_test;")
con1.execute("INSERT INTO o_evicted (SELECT id, id + 1 FROM generate_series(4, 10000, 5) id);\n")
con1.commit()
node.execute("SELECT * FROM o_test;")
con1.close()
con2.close()
node.stop()
node.append_conf('postgresql.conf',
"orioledb.main_buffers = 10MB\n")
node.start()
self.assertEqual(node.execute("SELECT COUNT(*) FROM o_evicted;")[0][0], 26001)
self.assertTrue(node.execute("SELECT orioledb_tbl_check('o_evicted'::regclass)")[0][0])
|
### import from ml_pipeline
import ml_pipeline
import py_starter as ps
import dir_ops as do
import os
file_Path = do.Path( os.path.abspath(__file__) )
name = file_Path.root.split( ml_pipeline.ML_Models.Models.SUFFIX )[0]
repo_Dir = file_Path.ascend()
custom_Model_module = do.Path.import_module_path( repo_Dir.join( name + ml_pipeline.ML_Model.Model.SUFFIX + '.py' ) )
custom_Input_Files_module = do.Path.import_module_path( repo_Dir.join( name + ml_pipeline.ML_Input_Files.Input_Files.SUFFIX + '.py' ) )
custom_Input_File_module = do.Path.import_module_path( repo_Dir.join( name + ml_pipeline.ML_Input_File.Input_File.SUFFIX + '.py' ) )
custom_Features_module = do.Path.import_module_path( repo_Dir.join( name + ml_pipeline.ML_Features.Features.SUFFIX + '.py' ) )
custom_Feature_module = do.Path.import_module_path( repo_Dir.join( name + ml_pipeline.ML_Feature.Feature.SUFFIX + '.py' ) )
### Edit your own database params here
database_conn_params = {}
class Models( ml_pipeline.ML_Models.Models ):
OVERRIDE_KWARGS = {
'Model_class_pointer': custom_Model_module.Model,
'Input_Files_class_pointer': custom_Input_Files_module.Input_Files,
'Input_File_class_pointer': custom_Input_File_module.Input_File,
'Features_class_pointer': custom_Features_module.Features,
'Feature_class_pointer': custom_Feature_module.Feature,
'database_conn_params': database_conn_params
}
def __init__( self, **supplemental_kwargs ):
kwargs = ps.merge_dicts( Models.OVERRIDE_KWARGS, supplemental_kwargs )
ml_pipeline.ML_Models.Models.__init__( self, name, repo_Dir, **kwargs )
if __name__ == '__main__':
Models_inst = Models()
Models_inst.run()
|
m = 1
num1 = float(input('Enter the first value: '))
num2 = float(input('Enter the second value: '))
while m != 0:
    print('''
    Would you like to:
    [1] Add the values
    [2] Multiply the values
    [3] Show the larger value
    [4] Enter new values
    [5] Quit the program
    ''', end='')
    escolha = int(input('''
    Choice: '''))
    if escolha == 1:
        print('The sum of the values is: {}'.format(num1 + num2))
    elif escolha == 2:
        print('The product of the values is: {}'.format(num1 * num2))
    elif escolha == 3:
        if num1 > num2:
            print('The larger of the two values is: {}'.format(num1))
        else:
            print('The larger of the two values is: {}'.format(num2))
    elif escolha == 4:
        num1 = float(input('Enter the first value: '))
        num2 = float(input('Enter the second value: '))
    elif escolha == 5:
        m = 0
|
from empire.python.typings import *
from tokenize import tokenize
from io import BytesIO
from empire.betsiamites.python.lexer.specialized_token import SpecializedToken
class PythonLexer:
def __init__(self, raw_code: str):
self._tokenized_lines: List[SpecializedToken] = list()
token_lines = tokenize(BytesIO(raw_code.encode('utf-8')).readline)
for token_line in token_lines:
print(token_line)
            # self._tokenized_lines.append(SpecializedToken(token_line))
PythonLexer('a = 1j')
# PythonLexer("""from typing import Tuple, Any, List
#
# from tokenize \
# import tokenize
# from io import BytesIO
#
# from rhetorik.tasks.analyzer.python.lexer.specializedToken import SpecializedToken
# from rhetorik.tasks.analyzer.python.lexer.tokenTypes import PythonTokenTypes
#
# a_dict = {
# 'zz': {
# 'patate': True
# }
# }
#
# class Interpreter:
# def __init__(self,
# raw_code: str):
# self._tokenized_lines: List[List[SpecializedToken]] = list()
# self._raw_code: str = raw_code
# self._current_line_number: int = 1
#
# self._is_defining_collection: bool = False
# self._is_in_parameter_list: bool = False
#
# self._last_non_empty_line: str = ''
# self._last_non_empty_line_number: int = 1
#
# self._current_scope: Tuple[int, str] = (0, '')
#
# def interpret(self):
# raw_token_generator = tokenize(BytesIO(self._raw_code.encode('utf-8')).readline)
#
# self._tokenized_lines.append(list())
# current_line_number: int = 1
#
# for raw_token in raw_token_generator:
# if raw_token[0] == PythonTokenTypes.NEWLINE:
# if len(self._tokenized_lines) > 0:
# self._last_non_empty_line = self._tokenized_lines[current_line_number - 1][0].raw_line
# self._last_non_empty_line_number = current_line_number
#
# current_line_number += 1
# self._tokenized_lines.append(list())
# elif raw_token[0] == PythonTokenTypes.ENDMARKER:
# break
# elif raw_token[0] == PythonTokenTypes.INDENT and (not self._is_in_parameter_list and not self._is_defining_collection):
#
# else:
# self._tokenized_lines[current_line_number - 1].append(SpecializedToken(raw_token))
#
#
# """)
|
import random
import math
from vmengine import Program
chance0 = 0.08
chance1 = 0.1
chance2 = 0.01
chance3 = 0.008
defaultLength = 5
class Evolving(Program):
'''This class allows regular programs to be mutated and evaluated.'''
def __init__(self, code, fitness):
if code == []:
Program.__init__(self, Evolving.newCode())
else:
Program.__init__(self, code)
# fitness is a function taking an Evolving object
# and returning a number, smaller is better
self.fitness = fitness
self.score = math.inf
def evaluate(self, timeLimit):
self.runProgram(timeLimit)
self.score = self.fitness(self)
def mutateNew(self):
l = []
for i in range(len(self.code)):
            if random.random() < chance3:
                # take one away: skip copying this instruction
                pass
            elif random.random() < chance1:
                # change it a bit
                l.append(Evolving.change(self.code[i], self.bits))
            else:
                # plain copy
                l.append(self.code[i])
        if random.random() < chance2:
            # add a new one
            l.append(Evolving.randomBits(Program.bits))
return Evolving(l, self.fitness)
    @staticmethod
    def newCode(length=defaultLength):
l = []
for i in range(length):
l.append(Evolving.randomBits(Program.bits))
return l
    @staticmethod
    def randomBits(n):
s = ''
for i in range(n):
s += random.choice(['0', '1'])
return s
    @staticmethod
    def change(s, bits):
        # build a random bit mask; each bit is set with probability chance0
        mask = 0
        if random.random() < chance0:
            mask = mask | 1
        for i in range(bits):
            mask = mask << 1
            if random.random() < chance0:
                mask = mask | 1
        # s is a string of '0'/'1' characters (see randomBits), so convert it
        # to an int for the XOR, truncate the mask to the string's width, and
        # format the result back as a fixed-width bit string
        mask &= (1 << len(s)) - 1
        return format(int(s, 2) ^ mask, '0{}b'.format(len(s)))
    @staticmethod
    def populate(number, fitness):
l = []
for i in range(number):
l.append(Evolving([], fitness))
return l
def __lt__(self, other):
return self.score < other.score
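# A minimal usage sketch (illustrative only; assumes vmengine.Program exposes
# `bits` and `runProgram` as used above, and that `my_fitness` is a
# user-supplied fitness function taking an Evolving object):
#
#   population = Evolving.populate(20, my_fitness)
#   for generation in range(100):
#       for p in population:
#           p.evaluate(timeLimit=1000)
#       population.sort()              # __lt__ orders by score, lower is better
#       survivors = population[:10]
#       population = survivors + [p.mutateNew() for p in survivors]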
|
# Copyright (c) 2011-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from StringIO import StringIO
import random
import sys
import time
import unittest
from lunr.api.console import Console, TypeConsole, NodeConsole, AccountConsole
from testlunr.integration import IetTest
def _parse_long(value):
"""
Attempts to parse the long-handed output console commands output, usually
in a form similar to::
status: ACTIVE
name: zzzd
created_at: 2013-08-23 17:04:46
min_size: 1
last_modified: 2013-08-23 17:04:46
read_iops: 0
write_iops: 0
max_size: 1024
Returns a dictionary of a value similar to that example, parsed.
Lines without a colon will be ignored.
"""
dict_value = {}
for line in value.split('\n'):
if ':' in line:
k, v = line.split(':', 1)
dict_value[k.strip()] = v.strip()
return dict_value
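# A minimal sketch of _parse_long in action (values illustrative):
#
#   >>> _parse_long("status: ACTIVE\nname: zzzd\nno colon here")
#   {'status': 'ACTIVE', 'name': 'zzzd'}
#
# The third line contains no colon, so it is ignored.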
def _parse_table(value):
"""
Parses a textual table that the console commands generate.
Each row is converted into a dictionary.
"""
lines = value.split('\n')
header = None
rows = []
for l in lines:
if l.startswith('+-'):
pass
elif l.startswith('|'):
columns = [c.strip() for c in l.split('|')[1:-1]]
if header is None:
header = columns
else:
row = {}
for i, c in enumerate(columns):
if len(header)-1 <= i:
row[i] = c
else:
row[header[i]] = c
rows.append(row)
return rows
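# A minimal sketch of _parse_table in action (table content illustrative):
#
#   +------+--------+
#   | name | status |
#   +------+--------+
#   | zzzd | ACTIVE |
#   +------+--------+
#
# parses to [{'name': 'zzzd', 'status': 'ACTIVE'}]; '+-' border lines are
# skipped and the first '|' row is taken as the header.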
def _generate_name(name):
"""
Generate names for tested objects where you can probably tell at a glance
which objects were created by which executions of the unittests.
"""
return 'test-%s-%s-%s' % (time.strftime('%Y%m%d%H%M%S'),
random.randint(0, 999), name)
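# e.g. _generate_name('type_create') -> 'test-20130823170446-42-type_create'
# (the timestamp and the random suffix vary per call)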
@contextmanager
def captured(stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
"""
Used to capture standard input/output for testing I/O on console commands.
Intended to be used with the "with" statement:
>>> from StringIO import StringIO
>>> with captured(stdout=StringIO()) as (stdin, stdout, stderr):
... print 'foo'
...
>>>
>>> stdout.getvalue()
'foo\n'
>>>
"""
original_streams = {}
try:
for stream in ('stdin', 'stdout', 'stderr'):
original_streams[stream] = getattr(sys, stream)
setattr(sys, stream, locals()[stream])
yield (stdin, stdout, stderr)
finally:
for stream_name, original_stream in original_streams.items():
setattr(sys, stream_name, original_stream)
class TestConsoleType(IetTest):
"""
Tests the TypeConsole class.
"""
def setUp(self):
c = TypeConsole()
c.verbose = c.config = c.url = None
self.c = c
def tearDown(self):
pass
def test_type_create(self):
name = _generate_name('type_create')
with captured(stdout=StringIO(), stderr=StringIO()) as \
(stdin, stdout, stderr):
self.c.create({'name': name, 'url': None, 'min_size': None,
'read_iops': None, 'config': None,
'write_iops': None, 'max_size': None,
'verbose': None})
self.failIf(stderr.getvalue())
output_values = _parse_long(stdout.getvalue())
self.failUnlessEqual(output_values['name'], name)
# TODO: Will this always be active?
self.failUnlessEqual(output_values['status'], 'ACTIVE')
self.c.delete(name)
def test_type_list(self):
name = _generate_name('type_list')
self.c.create({'name': name, 'url': None, 'min_size': None,
'read_iops': None, 'config': None,
'write_iops': None, 'max_size': None, 'verbose': None})
with captured(stdout=StringIO(), stderr=StringIO()) as \
(stdin, stdout, stderr):
self.c.list()
self.failIf(stderr.getvalue())
data = _parse_table(stdout.getvalue())
for row in data:
if row['name'] == name and row['status'] == 'ACTIVE':
break
else:
self.fail('Failed to find created row in list.')
self.c.delete(name)
def test_type_get(self):
name = _generate_name('type_get')
self.c.create({'name': name, 'url': None, 'min_size': None,
'read_iops': None, 'config': None,
'write_iops': None, 'max_size': None, 'verbose': None})
with captured(stdout=StringIO(), stderr=StringIO()) as \
(stdin, stdout, stderr):
self.c.get(name=name)
self.failIf(stderr.getvalue())
value = _parse_long(stdout.getvalue())
self.failUnlessEqual(value['name'], name)
self.c.delete(name)
def test_type_delete(self):
name = _generate_name('type_delete')
with captured(stdout=StringIO(), stderr=StringIO()) as \
(stdin, stdout, stderr):
self.c.create({'name': name, 'url': None, 'min_size': None,
'read_iops': None, 'config': None,
'write_iops': None, 'max_size': None,
'verbose': None})
self.failIf(stderr.getvalue())
with captured(stdout=StringIO(), stderr=StringIO()) as \
(stdin, stdout, stderr):
self.c.delete(name=name)
self.failIf(stderr.getvalue())
with captured(stdout=StringIO(), stderr=StringIO()) as \
(stdin, stdout, stderr):
self.c.get(name=name)
self.failIf(stderr.getvalue())
value = _parse_long(stdout.getvalue())
self.failUnlessEqual(value['status'], 'DELETED')
self.c.delete(name)
class TestConsoleNode(IetTest):
"""
Tests the NodeConsole class.
"""
def setUp(self):
c = NodeConsole()
c.verbose = c.config = c.url = None
self.vtc = TypeConsole()
self.vtc.config = self.vtc.url = self.vtc.verbose = None
self.c = c
def tearDown(self):
pass
def _create_node(self, node_name, volume_name):
self.vtc.create({'name': volume_name, 'url': None, 'min_size': None,
'read_iops': None, 'config': None, 'write_iops': None,
'max_size': None, 'verbose': None})
with captured(stdout=StringIO(), stderr=StringIO()) as \
(stdin, stdout, stderr):
self.c.create({
'volume_type_name': volume_name, 'verbose': None, 'url': None,
'hostname': 'localhost', 'storage_hostname': 'localhost',
'size': '1', 'config': None, 'port': '1234',
'name': node_name})
self.failIf(stderr.getvalue())
return _parse_long(stdout.getvalue())
def test_node_create(self):
volume_name = _generate_name('node_create_volume')
node_name = _generate_name('node_create_node')
node_info = self._create_node(node_name, volume_name)
self.failUnlessEqual(node_info['name'], node_name)
self.c.delete(node_info['id'])
self.vtc.delete(volume_name)
def test_node_list(self):
volume_name = _generate_name('node_list_volume')
node_name = _generate_name('node_list_node')
node_info = self._create_node(node_name, volume_name)
with captured(stdout=StringIO(), stderr=StringIO()) as \
(stdin, stdout, stderr):
self.c.list()
self.failIf(stderr.getvalue())
data = _parse_table(stdout.getvalue())
for row in data:
if (row['name'] == node_name and
row['volume_type_name'] == volume_name):
break
else:
self.fail('Did not find created node in node list')
self.c.delete(node_info['id'])
self.vtc.delete(volume_name)
def test_node_get(self):
volume_name = _generate_name('node_get_volume')
node_name = _generate_name('node_get_node')
node_info = self._create_node(node_name, volume_name)
with captured(stdout=StringIO(), stderr=StringIO()) as \
(stdin, stdout, stderr):
self.c.get(id=node_info['id'])
self.failIf(stderr.getvalue())
node_info2 = _parse_long(stdout.getvalue())
self.failUnlessEqual(node_info2['id'], node_info['id'])
self.failUnlessEqual(node_info2['name'], node_info['name'])
self.c.delete(node_info['id'])
self.vtc.delete(volume_name)
def test_node_update(self):
volume_name = _generate_name('node_get_volume')
node_name = _generate_name('node_get_node')
node_name2 = _generate_name('node_get_node2')
node_info = self._create_node(node_name, volume_name)
with captured(stdout=StringIO(), stderr=StringIO()) as \
(stdin, stdout, stderr):
self.c.get(id=node_info['id'])
self.failIf(stderr.getvalue())
self.c.update(node_info['id'], {'name': node_name2})
with captured(stdout=StringIO(), stderr=StringIO()) as \
(stdin, stdout, stderr):
self.c.get(id=node_info['id'])
self.failIf(stderr.getvalue())
data = _parse_long(stdout.getvalue())
self.failUnlessEqual(data['name'], node_name2)
self.c.delete(node_info['id'])
self.vtc.delete(volume_name)
def test_node_delete(self):
volume_name = _generate_name('node_get_volume')
node_name = _generate_name('node_get_node')
node_info = self._create_node(node_name, volume_name)
with captured(stdout=StringIO(), stderr=StringIO()) as \
(stdin, stdout, stderr):
self.c.get(id=node_info['id'])
self.failIf(stderr.getvalue())
self.c.delete(node_info['id'])
with captured(stdout=StringIO(), stderr=StringIO()) as \
(stdin, stdout, stderr):
self.c.get(id=node_info['id'])
self.failIf(stderr.getvalue())
data = _parse_long(stdout.getvalue())
self.failUnlessEqual(data['status'], 'DELETED')
self.vtc.delete(volume_name)
if __name__ == "__main__":
unittest.main()
|
from __future__ import absolute_import
#
# Partnerbox E2
#
# $Id$
#
# Coded by Dr.Best (c) 2009
# Support: www.dreambox-tools.info
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
from Components.EpgList import EPGList
from enigma import eEPGCache, eListbox, eListboxPythonMultiContent, loadPNG, gFont, getDesktop, eRect, eSize, RT_HALIGN_LEFT, RT_HALIGN_RIGHT, RT_HALIGN_CENTER, RT_VALIGN_CENTER, RT_VALIGN_TOP, RT_WRAP, BT_SCALE, BT_KEEP_ASPECT_RATIO
from Components.config import config
from time import localtime, strftime, ctime, time
from skin import parameters as skinparameter
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN, SCOPE_ACTIVE_SKIN
from Tools.LoadPixmap import LoadPixmap
from . import PartnerboxFunctions as partnerboxfunctions
from .PartnerboxFunctions import getServiceRef
baseEPGList__init__ = None
basebuildSingleEntry = None
basebuildSimilarEntry = None
basebuildMultiEntry = None
picDY = 0
sf = 1
def Partnerbox_EPGListInit():
global baseEPGList__init__, basebuildSingleEntry, basebuildSimilarEntry, basebuildMultiEntry
if baseEPGList__init__ is None:
baseEPGList__init__ = EPGList.__init__
if basebuildSingleEntry is None:
basebuildSingleEntry = EPGList.buildSingleEntry
if basebuildSimilarEntry is None:
basebuildSimilarEntry = EPGList.buildSimilarEntry
if basebuildMultiEntry is None:
basebuildMultiEntry = EPGList.buildMultiEntry
if partnerboxfunctions.remote_timer_list is None:
partnerboxfunctions.remote_timer_list = []
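	# Monkey-patch EPGList so every EPG view uses the Partnerbox-aware
	# entry builders and timer checks defined below.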
EPGList.__init__ = Partnerbox_EPGList__init__
EPGList.buildSingleEntry = Partnerbox_SingleEntry
EPGList.buildSimilarEntry = Partnerbox_SimilarEntry
EPGList.buildMultiEntry = Partnerbox_MultiEntry
EPGList.getClockTypesEntry = getClockTypesEntry
EPGList.isInTimer = isInTimer
EPGList.iconSize = 0
EPGList.space = 0
EPGList.iconDistance = 0
def Partnerbox_EPGList__init__(self, type=0, selChangedCB=None, timer=None, time_epoch=120, overjump_empty=False, graphic=False):
baseEPGList__init__(self, type, selChangedCB, timer, time_epoch, overjump_empty, graphic)
self.clocks = [ LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_zap.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_post.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_pre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_zaprec.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_prepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_post.png'))]
self.selclocks = [ LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_selpre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_selprepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_selpost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_selpre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_zap.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_selprepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_selpost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_add.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_selpre.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_zaprec.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_selprepost.png')),
LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_selpost.png'))]
self.autotimericon = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/epgclock_autotimer.png'))
self.nowEvPix = None
self.nowSelEvPix = None
self.othEvPix = None
self.selEvPix = None
self.othServPix = None
self.nowServPix = None
self.recEvPix = None
self.recSelEvPix = None
	self.recordingEvPix = None
self.zapEvPix = None
self.zapSelEvPix = None
self.borderTopPix = None
self.borderBottomPix = None
self.borderLeftPix = None
self.borderRightPix = None
self.borderSelectedTopPix = None
self.borderSelectedLeftPix = None
self.borderSelectedBottomPix = None
self.borderSelectedRightPix = None
self.InfoPix = None
self.selInfoPix = None
self.graphicsloaded = False
self.borderColor = 0xC0C0C0
self.borderColorService = 0xC0C0C0
self.foreColor = 0xffffff
self.foreColorSelected = 0xffffff
self.backColor = 0x2D455E
self.backColorSelected = 0xd69600
self.foreColorService = 0xffffff
self.backColorService = 0x2D455E
self.foreColorNow = 0xffffff
self.foreColorNowSelected = 0xffffff
self.backColorNow = 0x00825F
self.backColorNowSelected = 0xd69600
self.foreColorPast = 0x808080
self.foreColorPastSelected = 0x808080
self.backColorPast = 0x2D455E
self.backColorPastSelected = 0xd69600
self.foreColorServiceNow = 0xffffff
self.backColorServiceNow = 0x00825F
self.foreColorRecord = 0xffffff
self.backColorRecord = 0xd13333
self.foreColorRecordSelected = 0xffffff
self.backColorRecordSelected = 0x9e2626
self.foreColorZap = 0xffffff
self.backColorZap = 0x669466
self.foreColorZapSelected = 0xffffff
self.backColorZapSelected = 0x436143
self.serviceFontNameGraph = "Regular"
self.eventFontNameGraph = "Regular"
self.eventFontNameSingle = "Regular"
self.eventFontNameMulti = "Regular"
self.serviceFontNameInfobar = "Regular"
self.eventFontNameInfobar = "Regular"
if self.screenwidth and self.screenwidth == 1920:
global sf
sf = 1.5
self.posx, self.posy, self.picx, self.picy, self.gap = skinparameter.get("EpgListIcon", (2, 13, 25, 25, 2))
else:
self.posx, self.posy, self.picx, self.picy, self.gap = skinparameter.get("EpgListIcon", (1, 11, 23, 23, 1))
self.serviceFontSizeGraph = int(20 * sf)
self.eventFontSizeGraph = int(18 * sf)
self.eventFontSizeSingle = int(22 * sf)
self.eventFontSizeMulti = int(22 * sf)
self.serviceFontSizeInfobar = int(20 * sf)
self.eventFontSizeInfobar = int(22 * sf)
self.listHeight = None
self.listWidth = None
self.serviceBorderWidth = 1
self.serviceNamePadding = 3
self.eventBorderWidth = 1
self.eventNamePadding = 3
self.eventNameAlign = 'left'
self.eventNameWrap = 'yes'
self.NumberOfRows = None
def Partnerbox_SingleEntry(self, service, eventId, beginTime, duration, EventName):
if self.listSizeWidth != self.l.getItemSize().width(): #recalc size if scrollbar is shown
self.recalcEntrySize()
if (beginTime is not None) and (beginTime+duration < time()):
foreColor = self.foreColorPast
backColor = self.backColorPast
foreColorSel = self.foreColorPastSelected
backColorSel = self.backColorPastSelected
elif (beginTime is not None) and (beginTime < time()):
foreColor = self.foreColorNow
backColor = self.backColorNow
foreColorSel = self.foreColorNowSelected
backColorSel = self.backColorNowSelected
else:
foreColor = self.foreColor
backColor = self.backColor
foreColorSel = self.foreColorSelected
backColorSel = self.backColorSelected
#don't apply new defaults to old skins:
if not self.skinUsingForeColorByTime:
foreColor = None
foreColorSel = None
if not self.skinUsingBackColorByTime:
backColor = None
backColorSel = None
clock_types = self.getPixmapForEntry(service, eventId, beginTime, duration)
r1 = self.weekday_rect
r2 = self.datetime_rect
r3 = self.descr_rect
t = localtime(beginTime)
res = [
None, # no private data needed
(eListboxPythonMultiContent.TYPE_TEXT, r1.x, r1.y, r1.w, r1.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, _(strftime(_("%a"), t)), foreColor, foreColorSel, backColor, backColorSel),
(eListboxPythonMultiContent.TYPE_TEXT, r2.x, r2.y, r2.w, r1.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, strftime(_("%e/%m, %-H:%M"), t), foreColor, foreColorSel, backColor, backColorSel)
]
if clock_types:
if self.wasEntryAutoTimer and clock_types in (2, 7, 12):
res.extend((
(eListboxPythonMultiContent.TYPE_PIXMAP_ALPHABLEND, r3.x+r3.w-self.picx - self.posx, (r3.h/2-self.posy), self.picx, self.picy, self.clocks[clock_types]),
(eListboxPythonMultiContent.TYPE_PIXMAP_ALPHABLEND, r3.x+r3.w-self.picx*2 - self.gap - self.posx, (r3.h/2-self.posy), self.picx, self.picy, self.autotimericon),
(eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, r3.w-self.picx*2 - (self.gap*2) - self.posx, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, EventName, foreColor, foreColorSel, backColor, backColorSel)
))
else:
res.extend((
(eListboxPythonMultiContent.TYPE_PIXMAP_ALPHABLEND, r3.x+r3.w-self.picx - self.posx, (r3.h/2-self.posy), self.picx, self.picy, self.clocks[clock_types]),
(eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, r3.w-self.picx - self.posx, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, EventName, foreColor, foreColorSel, backColor, backColorSel)
))
else:
res.append((eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, EventName, foreColor, foreColorSel, backColor, backColorSel))
return res
def Partnerbox_SimilarEntry(self, service, eventId, beginTime, service_name, duration):
if self.listSizeWidth != self.l.getItemSize().width(): #recalc size if scrollbar is shown
self.recalcEntrySize()
clock_types = self.getPixmapForEntry(service, eventId, beginTime, duration)
r1 = self.weekday_rect
r2 = self.datetime_rect
r3 = self.descr_rect
t = localtime(beginTime)
res = [
None, # no private data needed
(eListboxPythonMultiContent.TYPE_TEXT, r1.x, r1.y, r1.w, r1.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, _(strftime(_("%a"), t))),
(eListboxPythonMultiContent.TYPE_TEXT, r2.x, r2.y, r2.w, r1.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, strftime(_("%e/%m, %-H:%M"), t))
]
if clock_types:
if self.wasEntryAutoTimer and clock_types in (2, 7, 12):
res.extend((
(eListboxPythonMultiContent.TYPE_PIXMAP_ALPHABLEND, r3.x+r3.w-self.picx - self.posx, (r3.h/2-self.posy), self.picx, self.picy, self.clocks[clock_types]),
(eListboxPythonMultiContent.TYPE_PIXMAP_ALPHABLEND, r3.x+r3.w-self.picx*2 - self.gap - self.posx, (r3.h/2-self.posy), self.picx, self.picy, self.autotimericon),
(eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, r3.w-self.picx*2 - (self.gap*2) - self.posx, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, service_name)
))
else:
res.extend((
(eListboxPythonMultiContent.TYPE_PIXMAP_ALPHABLEND, r3.x+r3.w-self.picx - self.posx, (r3.h/2-self.posy), self.picx, self.picy, self.clocks[clock_types]),
(eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, r3.w-self.picx - (self.gap*2) - self.posx, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, service_name)
))
else:
res.append((eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, r3.w, r3.h, 0, RT_HALIGN_LEFT|RT_VALIGN_CENTER, service_name))
return res
def Partnerbox_MultiEntry(self, changecount, service, eventId, beginTime, duration, EventName, nowTime, service_name):
if self.listSizeWidth != self.l.getItemSize().width(): #recalc size if scrollbar is shown
self.recalcEntrySize()
r1 = self.service_rect
r2 = self.progress_rect
r3 = self.descr_rect
r4 = self.start_end_rect
fact1 = 70 * sf
fact2 = 90 * sf
fact3 = 20 * sf
fact4 = 90 * sf
borderw = 1 * sf
res = [None, (eListboxPythonMultiContent.TYPE_TEXT, r1.x, r1.y, r1.w, r1.h, 0, RT_HALIGN_LEFT | RT_VALIGN_CENTER, service_name)] # no private data needed
if beginTime is not None:
clock_types = self.getPixmapForEntry(service, eventId, beginTime, duration)
if nowTime < beginTime:
begin = localtime(beginTime)
end = localtime(beginTime+duration)
res.extend((
(eListboxPythonMultiContent.TYPE_TEXT, r4.x, r4.y, r4.w, r4.h, 1, RT_HALIGN_CENTER|RT_VALIGN_CENTER, _("%02d.%02d - %02d.%02d")%(begin[3], begin[4], end[3], end[4])),
(eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, fact1, r3.h, 1, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, _("%d min") % (duration / 60))
))
else:
percent = (nowTime - beginTime) * 100 / duration
prefix = "+"
remaining = ((beginTime+duration) - int(time())) / 60
if remaining <= 0:
prefix = ""
res.extend((
(eListboxPythonMultiContent.TYPE_PROGRESS, r2.x+fact3, r2.y, r2.w-fact3*2, r2.h, percent, borderw),
(eListboxPythonMultiContent.TYPE_TEXT, r3.x, r3.y, fact1, r3.h, 1, RT_HALIGN_RIGHT|RT_VALIGN_CENTER, _("%s%d min") % (prefix, remaining))
))
if clock_types:
pos = r3.x+r3.w
if self.wasEntryAutoTimer and clock_types in (2, 7, 12):
res.extend((
(eListboxPythonMultiContent.TYPE_TEXT, r3.x + fact4, r3.y, r3.w-fact4-self.picx*2 - (self.gap*2) - self.posx, r3.h, 1, RT_HALIGN_LEFT|RT_VALIGN_CENTER, EventName),
(eListboxPythonMultiContent.TYPE_PIXMAP_ALPHABLEND, pos-self.picx - self.posx, (r3.h/2-self.posy), self.picx, self.picy, self.clocks[clock_types]),
(eListboxPythonMultiContent.TYPE_PIXMAP_ALPHABLEND, pos-self.picx*2 - self.gap - self.posx, (r3.h/2-self.posy), self.picx, self.picy, self.autotimericon)
))
else:
res.extend((
(eListboxPythonMultiContent.TYPE_TEXT, r3.x + fact4, r3.y, r3.w-fact4-self.picx - (self.gap*2) - self.posx, r3.h, 1, RT_HALIGN_LEFT|RT_VALIGN_CENTER, EventName),
(eListboxPythonMultiContent.TYPE_PIXMAP_ALPHABLEND, pos-self.picx - self.posx, (r3.h/2-self.posy), self.picx, self.picy, self.clocks[clock_types])
))
else:
res.append((eListboxPythonMultiContent.TYPE_TEXT, r3.x + fact2, r3.y, r3.w-fact2, r3.h, 1, RT_HALIGN_LEFT|RT_VALIGN_CENTER, EventName))
return res
def getClockTypesEntry(self, service, eventId, beginTime, duration):
if not beginTime:
return None
rec = self.isInTimer(eventId, beginTime, duration, service)
if rec is not None:
return rec[1]
else:
return None
def isInTimer(self, eventid, begin, duration, service):
returnValue = None
type = 0
time_match = 0
bt = None
check_offset_time = not config.recording.margin_before.value and not config.recording.margin_after.value
end = begin + duration
refstr = ':'.join(service.split(':')[:11])
for x in self.timer.timer_list:
check = ':'.join(x.service_ref.ref.toString().split(':')[:11]) == refstr
if not check:
sref = x.service_ref.ref
parent_sid = sref.getUnsignedData(5)
parent_tsid = sref.getUnsignedData(6)
if parent_sid and parent_tsid:
sid = sref.getUnsignedData(1)
tsid = sref.getUnsignedData(2)
sref.setUnsignedData(1, parent_sid)
sref.setUnsignedData(2, parent_tsid)
sref.setUnsignedData(5, 0)
sref.setUnsignedData(6, 0)
check = sref.toCompareString() == refstr
num = 0
if check:
check = False
event = self.epgcache.lookupEventId(sref, eventid)
num = event and event.getNumOfLinkageServices() or 0
sref.setUnsignedData(1, sid)
sref.setUnsignedData(2, tsid)
sref.setUnsignedData(5, parent_sid)
sref.setUnsignedData(6, parent_tsid)
for cnt in list(range(num)):
subservice = event.getLinkageService(sref, cnt)
if sref.toCompareString() == subservice.toCompareString():
check = True
break
if check:
timer_end = x.end
timer_begin = x.begin
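			# type_offset selects the icon family in self.clocks (see
			# Partnerbox_EPGList__init__): 0 = record, 5 = zap (justplay),
			# 10 = zap+record (always_zap); disabled/repeat timers shift
			# the offset further in steps of 15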
type_offset = 0
if not x.repeated and check_offset_time:
if 0 < end - timer_end <= 59:
timer_end = end
elif 0 < timer_begin - begin <= 59:
timer_begin = begin
if x.justplay:
type_offset = 5
if (timer_end - x.begin) <= 1:
timer_end += 60
if x.always_zap:
type_offset = 10
timer_repeat = x.repeated
			# a repeat timer configured as "don't stop current event but
			# disable coming events" is treated as non-repeating below
running_only_curevent = x.disabled and x.isRunning() and timer_repeat
if running_only_curevent:
timer_repeat = 0
type_offset += 15
if timer_repeat != 0:
type_offset += 15
if bt is None:
bt = localtime(begin)
bday = bt.tm_wday
begin2 = 1440 + bt.tm_hour * 60 + bt.tm_min
end2 = begin2 + duration / 60
xbt = localtime(x.begin)
xet = localtime(timer_end)
offset_day = False
checking_time = x.begin < begin or begin <= x.begin <= end
if xbt.tm_yday != xet.tm_yday:
oday = bday - 1
if oday == -1: oday = 6
offset_day = x.repeated & (1 << oday)
xbegin = 1440 + xbt.tm_hour * 60 + xbt.tm_min
xend = xbegin + ((timer_end - x.begin) / 60)
if xend < xbegin:
xend += 1440
if x.repeated & (1 << bday) and checking_time:
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
type = type_offset + 3
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
type = type_offset + 1
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
type = type_offset + 4
else:
# recording whole event
time_match = (end2 - begin2) * 60
type = type_offset + 2
elif offset_day:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
type = type_offset + 3
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
type = type_offset + 1
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
type = type_offset + 4
else:
# recording whole event
time_match = (end2 - begin2) * 60
type = type_offset + 2
elif offset_day and checking_time:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
type = type_offset + 3
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
type = type_offset + 1
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
type = type_offset + 4
else:
# recording whole event
time_match = (end2 - begin2) * 60
type = type_offset + 2
else:
if begin < timer_begin <= end:
if timer_end < end:
# recording within event
time_match = timer_end - timer_begin
type = type_offset + 3
else:
# recording last part of event
time_match = end - timer_begin
type = type_offset + 1
elif timer_begin <= begin <= timer_end:
if timer_end < end:
# recording first part of event
time_match = timer_end - begin
type = type_offset + 4
else:
# recording whole event
time_match = end - begin
type = type_offset + 2
if time_match:
if not config.plugins.Partnerbox.allicontype.value:
if type in (2, 7, 12, 17, 22, 27):
# When full recording do not look further
returnValue = (time_match, [type])
break
elif returnValue:
if type not in returnValue[1]:
returnValue[1].append(type)
else:
returnValue = (time_match, [type])
else:
if returnValue:
if type not in returnValue[1]:
returnValue[1].append(type)
else:
returnValue = (time_match, [type])
return returnValue
def isInRemoteTimer(self, begin, duration, service):
time_match = 0
chktime = None
chktimecmp = None
chktimecmp_end = None
end = begin + duration
service = getServiceRef(service)
service_str = ':'.join(str(service).split(':')[:11])
for x in partnerboxfunctions.remote_timer_list:
servicereference_str = ':'.join(str(x.servicereference).split(':')[:11])
if servicereference_str.upper() == service_str.upper():
if x.repeated != 0:
if chktime is None:
chktime = localtime(begin)
chktimecmp = chktime.tm_wday * 1440 + chktime.tm_hour * 60 + chktime.tm_min
chktimecmp_end = chktimecmp + (duration / 60)
time = localtime(x.timebegin)
for y in list(range(7)):
if x.repeated & (2 ** y):
timecmp = y * 1440 + time.tm_hour * 60 + time.tm_min
if timecmp <= chktimecmp < (timecmp + ((x.timeend - x.timebegin) / 60)):
time_match = ((timecmp + ((x.timeend - x.timebegin) / 60)) - chktimecmp) * 60
elif chktimecmp <= timecmp < chktimecmp_end:
time_match = (chktimecmp_end - timecmp) * 60
else:
if begin <= x.timebegin <= end:
diff = end - x.timebegin
if time_match < diff:
time_match = diff
elif x.timebegin <= begin <= x.timeend:
diff = x.timeend - begin
if time_match < diff:
time_match = diff
if time_match:
break
return time_match
def getRemoteClockPixmap(self, refstr, beginTime, duration, eventId):
pre_clock = 1
post_clock = 2
clock_type = 0
endTime = beginTime + duration
refstr = getServiceRef(refstr)
ref_str = ':'.join(str(refstr).split(':')[:11])
for x in partnerboxfunctions.remote_timer_list:
servicereference_str = ':'.join(str(x.servicereference).split(':')[:11])
if servicereference_str.upper() == ref_str.upper():
if x.eventId == eventId:
return self.remote_clock_pixmap
beg = x.timebegin
end = x.timeend
if beginTime > beg and beginTime < end and endTime > end:
clock_type |= pre_clock
elif beginTime < beg and endTime > beg and endTime < end:
clock_type |= post_clock
if clock_type == 0:
return self.remote_clock_add_pixmap
elif clock_type == pre_clock:
return self.remote_clock_pre_pixmap
elif clock_type == post_clock:
return self.remote_clock_post_pixmap
else:
return self.remote_clock_prepost_pixmap
def getRemoteClockZapPixmap(self, refstr, beginTime, duration, eventId):
type = 0
time_match = 0
justplay = 0
repeated = 0
endTime = beginTime + duration
ref_str = ':'.join(str(refstr).split(':')[:11])
for x in partnerboxfunctions.remote_timer_list:
servicereference_str = ':'.join(str(x.servicereference).split(':')[:11])
if servicereference_str.upper() == ref_str.upper():
justplay = x.justplay
repeated = x.repeated
beg = x.timebegin
end = x.timeend
if x.justplay:
if (end - beg) <= 1:
end += 60
if beginTime < beg <= endTime:
if end < endTime:
# recording within event
time_match = end - beg
type = 3
else:
# recording last part of event
time_match = endTime - beg
type = 1
elif beg <= beginTime <= end:
if end < endTime:
# recording first part of event
time_match = end - beginTime
type = 4
else:
# recording whole event
time_match = endTime - beginTime
type = 2
if time_match:
if type == 2:
if justplay:
if repeated != 0:
return self.remote_repzapclock_pixmap
else:
return self.remote_zapclock_pixmap
else:
if repeated != 0:
return self.remote_repclock_pixmap
else:
return self.remote_clock_pixmap
elif type == 3:
if justplay:
if repeated != 0:
return self.remote_repzapclock_prepost_pixmap
else:
return self.remote_zapclock_prepost_pixmap
else:
if repeated != 0:
return self.remote_repclock_prepost_pixmap
else:
return self.remote_clock_prepost_pixmap
elif type == 4:
if justplay:
if repeated != 0:
return self.remote_repzapclock_post_pixmap
else:
return self.remote_zapclock_post_pixmap
else:
if repeated != 0:
return self.remote_repclock_post_pixmap
else:
return self.remote_clock_post_pixmap
elif type == 1:
if justplay:
if repeated != 0:
return self.remote_repzapclock_pre_pixmap
else:
return self.remote_zapclock_pre_pixmap
else:
if repeated != 0:
return self.remote_repclock_pre_pixmap
else:
return self.remote_clock_pre_pixmap
if justplay:
if repeated != 0:
return self.remote_repzapclock_add_pixmap
else:
return self.remote_zapclock_add_pixmap
else:
if repeated != 0:
return self.remote_repclock_add_pixmap
else:
return self.remote_clock_add_pixmap
|
import logging
from typing import List, Literal, Optional
from pydantic import Field
from pydantic.class_validators import validator
from pydantic.types import NonNegativeInt
from hydrolib.core.io.ini.models import INIBasedModel, INIGeneral, INIModel
from hydrolib.core.io.ini.util import (
get_split_string_on_delimiter_validator,
make_list_length_root_validator,
make_list_validator,
)
logger = logging.getLogger(__name__)
class OneDFieldGeneral(INIGeneral):
"""The 1D field file's `[General]` section with file meta data."""
class Comments(INIBasedModel.Comments):
fileversion: Optional[str] = Field(
"File version. Do not edit this.", alias="fileVersion"
)
filetype: Optional[str] = Field(
"File type. Should be '1dField'. Do not edit this.",
alias="fileType",
)
comments: Comments = Comments()
_header: Literal["General"] = "General"
fileversion: str = Field("2.00", alias="fileVersion")
filetype: Literal["1dField"] = Field("1dField", alias="fileType")
class OneDFieldGlobal(INIBasedModel):
"""The `[Global]` block with a uniform value for use inside a 1D field file."""
class Comments(INIBasedModel.Comments):
quantity: Optional[str] = Field("The name of the quantity", alias="quantity")
unit: Optional[str] = Field("The unit of the quantity", alias="unit")
value: Optional[str] = Field(
"The global default value for this quantity", alias="value"
)
comments: Comments = Comments()
_header: Literal["Global"] = "Global"
quantity: str = Field(alias="quantity")
unit: str = Field(alias="unit")
value: float = Field(alias="value")
class OneDFieldBranch(INIBasedModel):
"""
A `[Branch]` block for use inside a 1D field file.
Each block can define value(s) on a particular branch.
"""
class Comments(INIBasedModel.Comments):
branchid: Optional[str] = Field("The name of the branch", alias="branchId")
numlocations: Optional[str] = Field(
"Number of locations on branch. The default 0 value implies branch uniform values.",
alias="numLocations",
)
        chainage: Optional[str] = Field(
            "Space separated list of locations on the branch (m). Locations sorted by increasing chainage. The keyword must be specified if numLocations > 0.",
            alias="chainage",
        )
        values: Optional[str] = Field(
            "Space separated list of numLocations values; one for each chainage specified. One value is required if numLocations = 0.",
            alias="values",
        )
comments: Comments = Comments()
_header: Literal["Branch"] = "Branch"
branchid: str = Field(alias="branchId")
numlocations: Optional[NonNegativeInt] = Field(0, alias="numLocations")
chainage: Optional[List[float]] = Field(alias="chainage")
values: List[float] = Field(alias="values")
_split_to_list = get_split_string_on_delimiter_validator(
"chainage",
"values",
delimiter=" ",
)
    _check_list_length_chainage = make_list_length_root_validator(
        "chainage",
        length_name="numlocations",
        list_required_with_length=True,
    )
    _check_list_length_values = make_list_length_root_validator(
        "values",
        length_name="numlocations",
        list_required_with_length=True,
        min_length=1,
    )
def _get_identifier(self, data: dict) -> Optional[str]:
return data.get("branchid")
class OneDFieldModel(INIModel):
"""
The overall 1D field model that contains the contents of a 1D field file.
This model is typically used when a [FMModel][hydrolib.core.io.mdu.models.FMModel]`.geometry.inifieldfile[..].initial[..].datafiletype==DataFileType.onedfield`.
Attributes:
general (OneDFieldGeneral): `[General]` block with file metadata.
global_ (Optional[OneDFieldGlobal]): Optional `[Global]` block with uniform value.
branch (List[OneDFieldBranch]): Definitions of `[Branch]` field values.
"""
general: OneDFieldGeneral = OneDFieldGeneral()
global_: Optional[OneDFieldGlobal] = Field(
alias="global"
) # to circumvent built-in kw
branch: List[OneDFieldBranch] = []
_split_to_list = make_list_validator(
"branch",
)
@classmethod
def _ext(cls) -> str:
return ".ini"
@classmethod
def _filename(cls) -> str:
return "1dfield"
|
import mlflow
import click
import logging
import pandas as pd
import time
import numpy as np
import torch
import pytorch_lightning as pl
from pathlib import Path
@click.command(help="Make predictions")
@click.option("--model_name", default='mnist_cnn', type=str)
@click.option("--model_version", default=1, type=int)
@click.option("--model_stage", default=None, type=str)
@click.option("--x_inference_path", help="New data",
default='./mnist_sample/test_images.npy', type=str)
def inference(model_name, model_version, model_stage, x_inference_path):
with mlflow.start_run(run_name='inference_batch') as mlrun:
img_rows, img_cols = 28, 28
x_test = np.load(x_inference_path)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols)
if model_stage:
model = mlflow.pytorch.load_model(
model_uri=f"models:/{model_name}/{model_stage}"
)
print(f"Loading model: {model_name}:{model_stage}")
else:
model = mlflow.pytorch.load_model(
model_uri=f"models:/{model_name}/{model_version}"
)
print(f"Loading model: {model_name}:{model_version}")
predictions = predict(model, x_test)
d_preds = {"predictions": predictions}
df_preds = pd.DataFrame(d_preds)
df_preds.to_csv("y_inference.csv", index=False)
with open('x_inference.npy', 'wb') as f:
np.save(f, x_test)
with open('y_inference.npy', 'wb') as f:
np.save(f, predictions)
mlflow.log_param(key='n_predictions', value=len(df_preds))
print(df_preds.head(10))
mlflow.log_artifact('x_inference.npy')
mlflow.log_artifact('y_inference.npy')
mlflow.log_artifact('y_inference.csv')
def predict(model, x_test):
    x_test_tensor = torch.Tensor(x_test)
    model.eval()  # switch off dropout/batch-norm updates for inference
    with torch.no_grad():  # no autograd graph needed for prediction
        logits = model(x_test_tensor)
    preds = torch.argmax(logits, dim=1)
    return preds.cpu().numpy()
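# Example invocations (the script name is illustrative; the options are the
# click options defined above):
#
#   python inference.py --model_name mnist_cnn --model_version 2
#   python inference.py --model_name mnist_cnn --model_stage Production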
if __name__ == '__main__':
pl.seed_everything(42)
inference()
|
'''
Product Grad_Cam Heatmap
Paper https://arxiv.org/abs/1610.02391
Copyright (c) Xiangzi Dai, 2020
'''
import os
import cv2
import numpy as np
import torch
from torch.autograd import Function
from torchvision import models
import sys
from model import *
from Resnet import *
from matplotlib import pyplot as plt
from matplotlib.font_manager import *
myfont = FontProperties(fname='/usr/share/fonts/opentype/noto/NotoSansCJK-Medium.ttc')
plt.rcParams['axes.unicode_minus']=False
def get_last_conv(m):
"""
Get the last conv layer in an Module.
"""
convs = filter(lambda k: isinstance(k, torch.nn.Conv2d), m.modules())
# print('convs:', convs)
# print('list(convs)[-1]:', list(convs)[-1])
return list(convs)[-1]
class Grad_Cam:
def __init__(self, model, target_layer_names, use_cuda):
self.model = model
self.target = target_layer_names
self.use_cuda = use_cuda
self.grad_val = []
self.feature = [] # feature dim is same as grad_val
self.hook = []
self.img = []
self.inputs = None
self._register_hook()
def get_grad(self, module, input, output):
self.grad_val.append(output[0].detach())
def get_feature(self, module, input, output):
self.feature.append(output.detach())
def _register_hook(self):
for i in self.target:
self.hook.append(i.register_forward_hook(self.get_feature))
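            # note: register_backward_hook is deprecated in newer PyTorch
            # releases in favor of register_full_backward_hook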
self.hook.append(i.register_backward_hook(self.get_grad))
def _normalize(self, cam, img, img_path, pred_str):
h, w, c = self.inputs.shape
# h, w, c = img.shape
        cam = (cam - np.min(cam)) / (np.max(cam) - np.min(cam))  # min-max rescale to [0, 1]
cam = cv2.resize(cam, (w, h))
heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_JET)
heatmap = np.float32(heatmap) / 255
cam = heatmap + np.float32(self.inputs)
# cam = heatmap + np.float32(img) / 255
# plt.imshow(cam)
# plt.show()
cam = cam / np.max(cam)
cam = np.uint8(255 * cam)
        # draw the predicted label on the image
text = 'predict:' + pred_str
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(cam, text, (50, 50), font, 1.0, (255, 255, 255), 2)
        # CAM on the left, original image on the right
hmerge = np.hstack((cam, np.float32(self.inputs)*255))
# return np.uint8(255 * cam)
return hmerge
def remove_hook(self):
for i in self.hook:
i.remove()
def _preprocess_image(self, img):
# means = [0.485, 0.456, 0.406]
# stds = [0.229, 0.224, 0.225]
means = [0.5, 0.5, 0.5]
stds = [0.5, 0.5, 0.5]
preprocessed_img = img.copy()[:, :, ::-1]
for i in range(3):
preprocessed_img[:, :, i] = preprocessed_img[:, :, i] - means[i]
preprocessed_img[:, :, i] = preprocessed_img[:, :, i] / stds[i]
preprocessed_img = \
np.ascontiguousarray(np.transpose(preprocessed_img, (2, 0, 1)))
preprocessed_img = torch.from_numpy(preprocessed_img)
preprocessed_img.unsqueeze_(0)
input = preprocessed_img.requires_grad_(True)
return input
def __call__(self, img, idx=None):
"""
:param inputs: [w,h,c]
:param idx: class id
:return: grad_cam img list
"""
self.model.zero_grad()
# self.inputs = np.float32(cv2.resize(img, (224, 224))) / 255
# self.inputs = np.float32(cv2.resize(img, (448, 448))) / 255
self.inputs = np.float32(img) / 255
inputs = self._preprocess_image(self.inputs)
if self.use_cuda:
inputs = inputs.cuda()
self.model = self.model.cuda()
# inputs = Varible(inputs)
# output = self.model(inputs)
output_1, output_2, output_3, output_concat = self.model(inputs)
outputs_com = output_1 + output_2 + output_3 + output_concat
# _, predicted = torch.max(output_concat.data, 1)
# _, predicted_com = torch.max(outputs_com.data, 1)
if idx is None:
idx = np.argmax(outputs_com.detach().cpu().numpy()) # predict id
# idx = np.argmax(output.cpu().numpy()) # predict id
target = outputs_com[0][idx]
        # map the predicted index back to its class name
class_names = {'bg': 0, 'ti-1': 1, 'ti-13': 4, 'ti-5': 2, 'ti-9': 3}
pred_str = list(class_names.keys())[list(class_names.values()).index(int(idx))]
print("index:", pred_str)
target.backward()
# predicted_com.backward()
        # compute channel weights: global-average-pool each layer's gradients (Grad-CAM)
weights = []
for i in self.grad_val[::-1]: # i dim: [1,512,7,7]
weights.append(np.mean(i.squeeze().cpu().numpy(), axis=(1, 2)))
for index, j in enumerate(self.feature): # j dim:[1,512,7,7]
cam = (j.squeeze().cpu().numpy() * weights[index][:, np.newaxis, np.newaxis]).sum(axis=0)
cam = np.maximum(cam, 0) # relu
self.img.append(self._normalize(cam, img, img_path, pred_str))
return self.img
def addimg(imgs, size, layout):
    """
    Display multiple images as a single grid image.
    """
    # imgs: list of images to display
    # size: (width, height) each image is resized to
    # layout: grid layout; layout[0] is the number of columns and
    # layout[1] the number of rows, e.g. (3, 3) for a 3x3 grid
    w = layout[0]
    h = layout[1]
x = imgs[0].shape[2]
if w * h - len(imgs) > 0:
null_img = np.zeros((size[0], size[1], x), dtype='uint8')
            # note: the dtype must be 'uint8' here, otherwise concatenating
            # with the image matrices distorts them
null_img = null_img * 255
            # null_img fills the grid slots left empty when there are fewer
            # images than slots
for i in range(len(imgs)):
            # images with missing channels (e.g. grayscale) break the merge,
            # so expand them to 3 channels before resizing
if len(imgs[i].shape) < 3:
imgs[i] = np.expand_dims(imgs[i], axis=2)
imgs[i] = np.concatenate((imgs[i], imgs[i], imgs[i]), axis=-1)
imgs[i] = cv2.resize(imgs[i], size)
for j in range(h):
for k in range(w):
if j * w + k > len(imgs) - 1:
f = k
while f < w:
if f == 0:
imgw = null_img
else:
imgw = np.hstack((imgw, null_img))
f = f + 1
break
if k == 0:
imgw = imgs[j * w]
else:
imgw = np.hstack((imgw, imgs[j * w + k]))
print(j * w + k)
if j == 0:
imgh = imgw
else:
imgh = np.vstack((imgh, imgw))
return imgh
if __name__ == '__main__':
# model_path = sys.argv[1]
# img_path = sys.argv[2]
# BASE_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = '/media/hz/A2/visual_result'
model_path = '/media/hz/A12/1-1/fish_5_model.pth'
# img_path = 'E:/ubuntu20210117/A1/A1-dataset/mushroom_group/280_Mycena_galericulata_072657/32.jpg'
# img_path = 'E:/ubuntu20210117/1-1/1-10_datasets/mushroom599/mushroom_split/train/class_125'
# img_path = 'E:/ubuntu20210117/1-1/1-10_datasets/mushroom599/test/10'
img_path = '/media/hz/A2/A2-datasets/_split/valid/鳀-1'
output_dir = os.path.join(BASE_DIR, '鳀-1')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
use_cuda = torch.cuda.is_available()
# load model
# checkpoint = torch.load(model_path)
# model = models.resnet50(pretrained=False, num_classes=2)
# model.load_state_dict(checkpoint['state_dict'])
# model.eval()
# checkpoint = torch.load(model_path)
# model = resnet50()
# model = PMG(model, 512, 598)
# model.load_state_dict(checkpoint['model_state_dict'])
# print('loading model...')
# model.eval()
print('loading model...')
model = torch.load(model_path)
model.eval()
count = 1
out_img = []
for pathi in os.listdir(img_path):
pathi = os.path.join(img_path, pathi)
        img = cv2.imread(pathi, 1)  # load as a color image (flag 1 is the default)
        # TODO: preprocess before passing to the model
img = cv2.resize(img, (550, 550))
# cv2.imwrite('r_0.jpg', img)
img = img[51:51 + 448, 51:51 + 448]
# cv2.imwrite('r.jpg',img)
m = get_last_conv(model)
target_layer = [m]
Grad_cams = Grad_Cam(model, target_layer, use_cuda)
grad_cam_list = Grad_cams(img)
img_name = os.path.join(output_dir, 'out' + str(count) + '.jpg')
out_img.append(str(img_name))
count += 1
# target_layer corresponding grad_cam_list
cv2.imwrite(img_name, grad_cam_list[0])
# stitch and visualize the results with OpenCV
files = os.listdir(output_dir)
imgs = []
for file in files:
imgs.append(cv2.imread(os.path.join(output_dir, str(file))))
imgout = addimg(imgs, (448, 224), (5, 20))
# stitch into one large image and save it
# cv2.imshow('out', imgout)
# cv2.waitKey()
output_img = os.path.join(output_dir, 'output_img.jpg')
cv2.imwrite(output_img, imgout)
# python test_grad_cam.py D:/1-2-code/Grad_Cam-pytorch-resnet50\best_model.pth E:\ubuntu20210117\1-1\1-10_datasets\kagglecatsanddogs\PetImages\Cat\166.jpg
|
from rlpyt.utils.collections import namedarraytuple
AgentInfo = namedarraytuple("AgentInfo", ["dist_info", "value"])
AgentInfoRnn = namedarraytuple("AgentInfoRnn", ["dist_info", "value", "prev_rnn_state"])
|
from models.modeling.deeplab import DeepLab as DeepLabV3Plus
from models import CAC
|
"""module for strict modifier."""
from .modifier import Modifier
from ..fdef import FDef, Strictness
class StrictModifier(Modifier):
"""A strict modifier marks object to disallow undefined keys."""
def define(self, fdef: FDef) -> None:
fdef._strictness = Strictness.STRICT
|
from django.urls import path
from api.views import category_views as views
urlpatterns = [
path('', views.getCategories, name="categories"),
path('create/', views.createCategory, name="category-create"),
path('<str:category_id>/', views.getCategory, name="category"),
path('update/<str:category_id>/', views.updateCategory, name="category-update"),
path('archive/<str:category_id>/', views.archiveCategory, name="category-archive"),
]
|
import unittest
from pymote.algorithms.niculescu2003.trilaterate import Trilaterate
from pymote.simulation import Simulation
from pymote.sensor import TruePosSensor
from pymote.networkgenerator import NetworkGenerator
from pymote.algorithms.niculescu2003.dvhop import DVHop
class TestNiculescu2003(unittest.TestCase):
def setUp(self):
net_gen = NetworkGenerator(100)
self.net = net_gen.generate_random_network()
self.net.algorithms = ((DVHop, {'truePositionKey': 'tp',
'hopsizeKey': 'hs',
'dataKey': 'I'
}),
(Trilaterate, {'truePositionKey': 'tp',
'hopsizeKey': 'hs',
'positionKey': 'pos',
'dataKey': 'I'}),
)
for node in self.net.nodes()[:10]:
node.compositeSensor = (TruePosSensor,)
def test_niculescu2003_sim(self):
"""Test niculescu2003 default simulation."""
sim = Simulation(self.net)
sim.run()
for node in self.net.nodes():
self.assertTrue(len(node.memory.get('pos', [None, None])) == 2\
or 'tp' in node.memory)
|
import unittest
import sympy
from means.util.sympyhelpers import to_sympy_matrix, assert_sympy_expressions_equal, sympy_expressions_equal
from means.util.sympyhelpers import substitute_all
class TestSympyHelpers(unittest.TestCase):
def test_substitute_all_on_matrix(self):
to_substitute = to_sympy_matrix(["a*b","c*d","d*e","e*f"])
pairs = zip(to_sympy_matrix(["a","d","e","c","b"]),
to_sympy_matrix(["z","w","v","x","y"]))
expected = sympy.Matrix(["z*y","x*w","w*v","v*f"])
answer = substitute_all(to_substitute, pairs)
self.assertEqual(answer, expected)
def test_substitute_all_on_expression(self):
to_substitute = sympy.sympify("a*b + c*d + d*e + e*f")
pairs = zip(to_sympy_matrix(["a","d","e","c","b"]),
to_sympy_matrix(["z","w","v","x","y"]))
expected = sympy.sympify("z*y + x*w + w*v + v*f")
answer = substitute_all(to_substitute, pairs)
self.assertEqual(answer, expected)
class TestSympyExpressionsEqual(unittest.TestCase):
def test_matrices(self):
"""
Given two equal matrices, `sympy_expressions_equal` should correctly identify them as equivalent.
Given two different matrices, it should correctly call them different
"""
m1 = sympy.Matrix([[sympy.sympify('x'), 1, 2], [3, 4, 5]])
m2 = sympy.Matrix([[sympy.sympify('x'), 1, 2], [sympy.sympify('3 + x - x'), 4, 5]])
m3 = sympy.Matrix([[sympy.sympify('x'), 1, 2], [sympy.sympify('3 + x - y'), 4, 5]])
self.assertTrue(sympy_expressions_equal(m1, m2))
self.assertFalse(sympy_expressions_equal(m1, m3))
self.assertFalse(sympy_expressions_equal(m2, m3))
def test_expressions(self):
"""
Given two equivalent expressions, `sympy_expressions_equal` should correctly call them equal.
Given two different expressions, the function should say they are not equal.
"""
e1 = sympy.sympify('x+y+z')
e2 = sympy.sympify('0.5 * (2*x + 2*y + 2*z)')
e3 = sympy.sympify('x+y+z - y')
self.assertTrue(sympy_expressions_equal(e1, e2))
self.assertFalse(sympy_expressions_equal(e1, e3))
self.assertFalse(sympy_expressions_equal(e2, e3))
class TestToSympyMatrix(unittest.TestCase):
def test_creation_from_matrix_returns_itself(self):
"""
Given a `sympy.Matrix`, `to_sympy_matrix` should return the said matrix.
"""
m = sympy.Matrix([[1, 2, 3], [4, 5, 6]])
assert_sympy_expressions_equal(m, to_sympy_matrix(m))
def test_creation_from_list_of_integers_returns_matrix(self):
"""
Given a list of integers, to_sympy_matrix should be able to convert it to a matrix of these integers.
"""
m = sympy.Matrix([[1, 2, 3], [4, 5, 6]])
m_as_list = [[1, 2, 3], [4, 5, 6]]
assert_sympy_expressions_equal(m, to_sympy_matrix(m_as_list))
def test_creation_from_list_of_strings_returns_matrix(self):
"""
Given a list of strings, to_sympy_matrix should be able to convert them into a matrix of expressions.
"""
m = sympy.Matrix([[sympy.sympify('x+y+3'), sympy.sympify('x+3')],
[sympy.sympify('y-x'), sympy.sympify('x+y+166')]])
m_as_string = [['x+y+3', 'x+3'], ['y-x', 'x+y+166']]
matrix = to_sympy_matrix(m_as_string)
assert_sympy_expressions_equal(m, matrix)
def test_creation_of_column_matrix_from_list_of_strings(self):
"""
Given a list of strings, to_sympy_matrix should be able to convert them into a column matrix of expressions.
"""
m = sympy.Matrix([sympy.sympify('x+y+3'), sympy.sympify('x+3'), sympy.sympify('y-x'),
sympy.sympify('x+y+166')])
m_as_string = ['x+y+3', 'x+3', 'y-x', 'x+y+166']
matrix = to_sympy_matrix(m_as_string)
assert_sympy_expressions_equal(m, matrix)
|
from dataclasses import dataclass
from datetime import date, datetime
from enum import Enum
from typing import List, Optional
from uuid import UUID
from jsonmarshal import json_field
class EntityType(Enum):
INDIVIDUAL = "INDIVIDUAL"
COMPANY = "COMPANY"
class CountryOfIncorporation(Enum):
AFG = "AFG"
GBR = "GBR"
JPN = "JPN"
USA = "USA"
@dataclass
class PreviousName:
start: date
end: date
name: str
@dataclass
class Metadata:
country_of_incorporation: CountryOfIncorporation
name: str
number: str
previous_names: Optional[List[PreviousName]] = json_field(omitempty=True)
class ProfileCategory(Enum):
CUSTOMER = "CUSTOMER"
PREVIOUS_CUSTOMER = "PREVIOUS_CUSTOMER"
APPLICANT = "APPLICANT"
INACTIVE_APPLICANT = "INACTIVE_APPLICANT"
class Role(Enum):
INDIVIDUAL_CUSTOMER = "INDIVIDUAL_CUSTOMER"
INDIVIDUAL_ASSOCIATED = "INDIVIDUAL_ASSOCIATED"
COMPANY_CUSTOMER = "COMPANY_CUSTOMER"
COMPANY_ASSOCIATED = "COMPANY_ASSOCIATED"
@dataclass
class Tag:
id: UUID
is_automatic: bool
name: str
@dataclass
class FullName:
family_name: str
given_names: List[str]
title: Optional[str] = json_field(omitempty=True)
alt_family_names: Optional[List[str]] = json_field(omitempty=True)
@dataclass
class PersonalDetails:
name: FullName
@dataclass
class ProfileCollectedData:
entity_type: EntityType
personal_details: PersonalDetails
class TaskState(Enum):
INCOMPLETE = "INCOMPLETE"
COMPLETED_PASS = "COMPLETED_PASS"
COMPLETED_FAIL = "COMPLETED_FAIL"
class TaskType(Enum):
INDIVIDUAL_VERIFY_IDENTITY = "INDIVIDUAL_VERIFY_IDENTITY"
INDIVIDUAL_VERIFY_ADDRESS = "INDIVIDUAL_VERIFY_ADDRESS"
INDIVIDUAL_VERIFY_SOURCE_OF_FUNDS = "INDIVIDUAL_VERIFY_SOURCE_OF_FUNDS"
INDIVIDUAL_ASSESS_MEDIA_AND_POLITICAL_AND_SANCTIONS_EXPOSURE = (
"INDIVIDUAL_ASSESS_MEDIA_AND_POLITICAL_AND_SANCTIONS_EXPOSURE"
)
INDIVIDUAL_ASSESS_POLITICAL_AND_SANCTIONS_EXPOSURE = "INDIVIDUAL_ASSESS_POLITICAL_AND_SANCTIONS_EXPOSURE"
INDIVIDUAL_ASSESS_POLITICAL_EXPOSURE = "INDIVIDUAL_ASSESS_POLITICAL_EXPOSURE"
INDIVIDUAL_ASSESS_SANCTIONS_EXPOSURE = "INDIVIDUAL_ASSESS_SANCTIONS_EXPOSURE"
INDIVIDUAL_VERIFY_BANK_ACCOUNT = "INDIVIDUAL_VERIFY_BANK_ACCOUNT"
INDIVIDUAL_VERIFY_IMMIGRATION_STATUS = "INDIVIDUAL_VERIFY_IMMIGRATION_STATUS"
INDIVIDUAL_MANUAL_TASK = "INDIVIDUAL_MANUAL_TASK"
INDIVIDUAL_ASSESS_DEVICE_REPUTATION = "INDIVIDUAL_ASSESS_DEVICE_REPUTATION"
INDIVIDUAL_FRAUD_SCREENING = "INDIVIDUAL_FRAUD_SCREENING"
INDIVIDUAL_VERIFY_TAX_STATUS = "INDIVIDUAL_VERIFY_TAX_STATUS"
COMPANY_VERIFY_IDENTITY = "COMPANY_VERIFY_IDENTITY"
COMPANY_VERIFY_ADDRESS = "COMPANY_VERIFY_ADDRESS"
COMPANY_VERIFY_CHARITY = "COMPANY_VERIFY_CHARITY"
COMPANY_IDENTIFY_AUTHORIZED_PERSONS = "COMPANY_IDENTIFY_AUTHORIZED_PERSONS"
COMPANY_IDENTIFY_OFFICERS = "COMPANY_IDENTIFY_OFFICERS"
COMPANY_IDENTIFY_TRUSTEES = "COMPANY_IDENTIFY_TRUSTEES"
COMPANY_IDENTIFY_BENEFICIAL_OWNERS = "COMPANY_IDENTIFY_BENEFICIAL_OWNERS"
COMPANY_REVIEW_FILINGS = "COMPANY_REVIEW_FILINGS"
COMPANY_ASSESS_SANCTIONS_EXPOSURE = "COMPANY_ASSESS_SANCTIONS_EXPOSURE"
COMPANY_ASSESS_MEDIA_EXPOSURE = "COMPANY_ASSESS_MEDIA_EXPOSURE"
COMPANY_ASSESS_MEDIA_AND_SANCTIONS_EXPOSURE = "COMPANY_ASSESS_MEDIA_AND_SANCTIONS_EXPOSURE"
COMPANY_MANUAL_TASK = "COMPANY_MANUAL_TASK"
COMPANY_VERIFY_BANK_ACCOUNT = "COMPANY_VERIFY_BANK_ACCOUNT"
COMPANY_VERIFY_TAX_STATUS = "COMPANY_VERIFY_TAX_STATUS"
COMPANY_ASSESS_FINANCIALS = "COMPANY_ASSESS_FINANCIALS"
COMPANY_FRAUD_SCREENING = "COMPANY_FRAUD_SCREENING"
COMPANY_MERCHANT_FRAUD_SCREENING = "COMPANY_MERCHANT_FRAUD_SCREENING"
@dataclass
class TaskVariant:
id: UUID
task_type: TaskType
alias: str
description: Optional[str] = json_field(omitempty=True)
name: Optional[str] = json_field(omitempty=True)
@dataclass
class Task:
creation_date: datetime
id: UUID
is_complete: bool
is_expired: bool
is_skipped: bool
state: TaskState
type: TaskType
variant: TaskVariant
class UnresolvedEventType(Enum):
PEP_FLAG = "PEP_FLAG"
SANCTION_FLAG = "SANCTION_FLAG"
DOCUMENT_EXPIRY = "DOCUMENT_EXPIRY"
TRANSACTION_ALERT = "TRANSACTION_ALERT"
REVIEW_NEEDED = "REVIEW_NEEDED"
ADVERSE_MEDIA_FLAG = "ADVERSE_MEDIA_FLAG"
REFER_FLAG = "REFER_FLAG"
CHECK_EXPIRY = "CHECK_EXPIRY"
FRAUD_FLAG = "FRAUD_FLAG"
@dataclass
class TaskProgress:
completed_count: int
total_count: int
class Status(Enum):
NORMAL = "NORMAL"
@dataclass
class LinkedProfile:
category: ProfileCategory
collected_data: ProfileCollectedData
creation_date: datetime
display_name: str
has_associates: bool
has_collection_steps: bool
id: str
role: Role
status: Status
tags: List[Tag]
task_progress: TaskProgress
tasks: List[Task]
unresolved_event_types: List[UnresolvedEventType]
@dataclass
class Shareholding:
amount: int
currency: str
percentage: float
provider_name: str
share_class: str
@dataclass
class Officer:
entity_type: EntityType
linked_profile: LinkedProfile
merged_resolver_ids: List[UUID] = json_field(omitempty=True)
resolver_id: UUID = json_field(omitempty=True)
task_variant_ids: List[UUID] = json_field(omitempty=True)
unverified_task_variant_ids: List[UUID] = json_field(omitempty=True)
natures_of_control: Optional[List[str]] = json_field(omitempty=True)
shareholdings: Optional[List[Shareholding]] = json_field(omitempty=True)
@dataclass
class Officers:
directors: Optional[List[Officer]] = json_field(omitempty=True)
other: Optional[List[Officer]] = json_field(omitempty=True)
partners: Optional[List[Officer]] = json_field(omitempty=True)
resigned: Optional[List[Officer]] = json_field(omitempty=True)
secretaries: Optional[List[Officer]] = json_field(omitempty=True)
trustees: Optional[List[Officer]] = json_field(omitempty=True)
@dataclass
class ShareClass:
name: str
currency: str
value: int
votes: int
@dataclass
class OwnershipStructure:
beneficial_owners: List[Officer]
share_classes: Optional[List[ShareClass]] = json_field(omitempty=True)
@dataclass
class Schema:
entity_type: EntityType
metadata: Metadata
officers: Officers
ownership_structure: OwnershipStructure
|
import sys
from time import sleep
import numpy as np
import matplotlib.pyplot as plt
from tiva import TivaController
Tiva = TivaController(units_per_cm=1, arm1_cm=45, arm2_cm=20,
x_offset_cm=33, y_offset_cm=-10, bufsize=8)
n_steps = 50
plot = 'point'
clear = True
if plot == 'random':
x = np.random.randint(10, 60, n_steps)
y = np.random.randint(20, 50, n_steps)
elif plot == 'point':
x = [5]
y = [20]
elif plot == 'line':
x = np.linspace(0, 15, n_steps // 2) # num must be an int; zip() below truncates to the shorter array
y = np.linspace(5, 20, n_steps)
elif plot == 'circle':
r = 15
u = np.linspace(0, np.pi, num=n_steps)
x = r * np.cos(u) + 30
y = r * np.sin(u) + 30
elif plot == 'weird':
t = np.linspace(15, 50, num=n_steps)
x = (t + 2 * np.sin(2 * t)) * 2
y = 12 * np.sin(t) + 30
fig, ax = plt.subplots()
for idx, point in enumerate(zip(x, y)):
# move the arm
Tiva.move_arm(point[0], point[1], negative=True)
sys.stdout.write("Computing step {0}/{1}\r".format(idx, n_steps))
sys.stdout.flush()
# optionally clear the plot of past positions
if clear:
plt.cla()
x1, y1 = [Tiva.x_offset, Tiva.x1], [Tiva.y_offset, Tiva.y1]
x2, y2 = [Tiva.x1, Tiva.x2], [Tiva.y1, Tiva.y2]
ax.plot(x1, y1, marker = 'o', color='b')
ax.plot(x2, y2, marker = 'o', color='b')
ax.plot(point[0], point[1], marker = 'o', color='r')
plt.gca().set_aspect('equal', adjustable='box')
ax.set_xlim(0, 66)
ax.set_ylim(0, 134)
ax.grid(True)
plt.savefig("img/{}.png".format(idx))
print(Tiva.q1)
#Tiva.send([Tiva.q1, Tiva.q2]) # send signal to the Tiva
#print(Tiva.receive())
#sleep(1)
|
import json
path = input("Path of the file (add .json): ")
print("Opening the json file")
config = json.load(open(path))
var_import = """from flask import Flask, jsonify
app = Flask(__name__)
"""
list_func = []
print("Creating the Flask APIs template")
for elem in config['api']:
parameters = []
for k,v in elem['func_input_parameters'].items():
parameters.append(f'{k}={v}')
var_func = f"""@app.route('{elem['route']}', methods = {elem['method']})
def {elem['func_name']}({','.join(parameters)}):
return jsonify(200)
"""
list_func.append(var_func)
var_exec = f"""if __name__ == '__main__':
app.run(host='{config['address']}', port={config['port']})
"""
print("Writing on disk")
with open(config['file']+'.py', 'w') as f:
f.write(var_import + '\n')
for x in list_func:
f.write(x + '\n')
f.write(var_exec + '\n')
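# Expected JSON config shape, inferred from the keys read above; values are
# illustrative only:
# {
#   "file": "generated_app",
#   "address": "0.0.0.0",
#   "port": 5000,
#   "api": [
#     {"route": "/users", "method": ["GET"], "func_name": "get_users",
#      "func_input_parameters": {"limit": 10}}
#   ]
# }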
|
import torch
from ..ops import sqrtm
from ..rand import definite
from unittest import TestCase
from torch.autograd import gradcheck
class MatrixSquareRootTest(TestCase):
'''Test sqrtm and MatrixSquareRoot.'''
seed = None
dims = 20
sigma = 10
dtype = torch.float64
device = torch.device('cpu')
def setUp(self):
cls = type(self)
if cls.seed is not None:
torch.manual_seed(cls.seed)
self.x = definite(cls.dims, norm=cls.sigma ** 2,
dtype=cls.dtype, device=cls.device)
def tearDown(self):
del self.x
def test_forward(self):
s = sqrtm(self.x)
y = s.mm(s)
self.assertTrue(
torch.allclose(y, self.x),
((self.x - y).norm() / self.x.norm()).item()
)
def test_backward(self):
msg = ''
try:
x = self.x.clone().detach().requires_grad_(True) # torch.autograd.Variable is deprecated
gradcheck(sqrtm, (x,), rtol=1e-2, atol=1 / type(self).sigma)
except RuntimeError as exc:
msg = str(exc)
if msg != '':
self.fail(msg)
|
from flask import Flask,request
import jwt
import os
app = Flask(__name__)
key ="this is a secret key for encryption"
@app.route('/')
def index():
return("<b>Welcome, send post request <a href='/login'>here</a> to check api")
@app.route("/login/", methods=["POST","GET"])
def login():
email ="dummy@email"
password= "dummy password"
if request.method == "POST":
if request.form['email']:
email = request.form['email']
if request.form['password']:
password = request.form['password']
else:
if request.args.get('email'):
email = request.args.get('email')
if request.args.get('password'):
password = request.args.get('password')
auth = jwt.encode({"email": email, "password": password}, key, algorithm="HS256")
# PyJWT < 2 returns bytes, PyJWT >= 2 returns str; normalize to str
value = auth.decode('utf-8') if isinstance(auth, bytes) else auth
return("<p>Authentication code is <br><b>{}</b><br>email:{}<br>password:{}</p> see it in verify as <a href='../verify?code={}'>here</a>".format(value,email,password,value))
@app.route('/verify')
def verify():
if not request.args.get('code'):
return("<b> pass token as code</b>")
auth = jwt.decode(request.args.get('code'), key, algorithms=["HS256"])
return(auth)
if __name__ == "__main__":
port = int(os.environ.get('PORT',5000))
app.run(port=port, debug=True)
|
from twitter import search_tweets
from slack import read_last_id, post_message
from const import post_message_template, result_header_template, query_strings
from secret_info import slack_channel_id, slack_direct_message_id
from jinja2 import Template
def post_tweets_to_slack(tweets):
"""
Search tweets and post them to a Slack channel
:param Status tweets: Status object (tweet) to post to Slack
:return: the id of the latest tweet among the tweets
:rtype: int
"""
max_id = 0
for tw in tweets:
tw_template = Template(post_message_template)
post_text = tw_template.render(tweet=tw)
post_message(post_text, slack_channel_id)
# Update max_id when the tweet has larger ID than current one.
if tw.id > max_id:
max_id = tw.id
return max_id
def main():
since_id_str = read_last_id()
# Initialize since_id
since_id = 1
if since_id_str.isdigit():
since_id = int(since_id_str)
# Initialize largest_id which will be new since_id
largest_id = since_id
for q in query_strings:
tweets = search_tweets(q, since_id)
if len(tweets) > 0:
# Post a header
header_template = Template(result_header_template)
header_text = header_template.render(
query=q, num_tweets=len(tweets)
)
post_message(header_text, slack_channel_id)
max_id = post_tweets_to_slack(tweets)
# Update largest_id when one of tweets has larger ID than the
# current.
if max_id > largest_id:
largest_id = max_id
post_message(str(largest_id), slack_direct_message_id)
if __name__ == '__main__':
main()
|
from dataclasses import dataclass
from typing import Dict, List, Optional
from .result_type import ResultType
@dataclass(frozen=True)
class Queries:
value: float
captured_queries: List[Dict[str, str]]
diff: Optional[float] = None
@staticmethod
def from_dict(d):
return Queries(value=d["value"], diff=None, captured_queries=[])
def pretty_diff(self) -> str:
if self.diff is None:
return "new"
elif self.diff == 0:
return "no change"
return f"{self.diff:+.1f}"
def result_type(self) -> ResultType:
if self.diff is None:
return ResultType.NO_PREVIOUS_DATA
elif self.diff == 0:
return ResultType.UNCHANGED
elif self.diff > 0:
return ResultType.REGRESSION
return ResultType.IMPROVEMENT
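# Usage sketch grounded in the methods above: a positive diff reads as a regression.
#   q = Queries(value=12.0, captured_queries=[], diff=2.0)
#   q.pretty_diff()   # -> "+2.0"
#   q.result_type()   # -> ResultType.REGRESSION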
|
# -*- coding: utf-8 -*-
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
from .configs.settings import DEBUG, MEDIA_ROOT, MEDIA_URL
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('djangosige.apps.base.urls')),
url(r'^login/', include('djangosige.apps.login.urls')),
url(r'^cadastro/', include('djangosige.apps.cadastro.urls')),
url(r'^fiscal/', include('djangosige.apps.fiscal.urls')),
url(r'^vendas/', include('djangosige.apps.vendas.urls')),
url(r'^compras/', include('djangosige.apps.compras.urls')),
url(r'^financeiro/', include('djangosige.apps.financeiro.urls')),
url(r'^estoque/', include('djangosige.apps.estoque.urls')),
url(r'^rh/', include('djangosige.apps.rh.urls')),
]
if DEBUG is True:
urlpatterns += static(MEDIA_URL, document_root=MEDIA_ROOT)
|
import os
def fileids(n):
"""
Function to edit test.fileids.
"""
os.system("truncate -s 0 test.fileids")
f = open("./test.fileids", "w+")
for i in range(1, (int(n) + 1)):
f.write("test" + str(i) + "\n")
f.close()
def transcription(n):
"""
Function to edit test.transcription.
"""
os.system("truncate -s 0 test.transcription")
t = open("./test.transcription", "w+")
for i in range(1, (int(n) + 1)):
val = input("Enter the transcription:\n")
t.write(str(val) + " (test" + str(i) + ")" + "\n")
t.close()
|
# coding=utf-8
# Copyright 2018-2020 EVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import pandas as pd
from eva.catalog.catalog_manager import CatalogManager
from eva.models.storage.batch import Batch
from eva.readers.opencv_reader import OpenCVReader
from eva.server.command_handler import execute_query_fetch_all
from test.util import create_sample_video, create_dummy_batches, file_remove
NUM_FRAMES = 10
class SelectExecutorTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
CatalogManager().reset()
create_sample_video(NUM_FRAMES)
load_query = """LOAD DATA INFILE 'dummy.avi' INTO MyVideo;"""
execute_query_fetch_all(load_query)
@classmethod
def tearDownClass(cls):
file_remove('dummy.avi')
def test_sort_on_nonprojected_column(self):
""" This tests doing an order by on a column
that is not projected. The orderby_executor currently
catches the KeyError, passes, and returns the untouched
data
"""
select_query = "SELECT data FROM MyVideo ORDER BY id;"
actual_batch = execute_query_fetch_all(select_query)
select_query = "SELECT data FROM MyVideo"
expected_batch = execute_query_fetch_all(select_query)
self.assertEqual(actual_batch.batch_size, expected_batch.batch_size)
def test_should_load_and_sort_in_table(self):
select_query = "SELECT data, id FROM MyVideo ORDER BY id;"
actual_batch = execute_query_fetch_all(select_query)
expected_rows = [{'id': i,
'data': np.array(np.ones((2, 2, 3)) *
float(i + 1) * 25, dtype=np.uint8)
} for i in range(NUM_FRAMES)]
expected_batch = Batch(frames=pd.DataFrame(expected_rows))
self.assertEqual(actual_batch, expected_batch)
select_query = "SELECT data, id FROM MyVideo ORDER BY id DESC;"
actual_batch = execute_query_fetch_all(select_query)
expected_batch.reverse()
self.assertEqual(actual_batch, expected_batch)
def test_should_load_and_select_in_table(self):
select_query = "SELECT id FROM MyVideo;"
actual_batch = execute_query_fetch_all(select_query)
actual_batch.sort()
expected_rows = [{"id": i} for i in range(NUM_FRAMES)]
expected_batch = Batch(frames=pd.DataFrame(expected_rows))
self.assertEqual(actual_batch, expected_batch)
select_query = "SELECT id,data FROM MyVideo;"
actual_batch = execute_query_fetch_all(select_query)
actual_batch.sort()
expected_batch = list(create_dummy_batches())
self.assertEqual([actual_batch], expected_batch)
@unittest.skip('Too slow when batch size is small.')
def test_should_load_and_select_real_video_in_table(self):
query = """LOAD DATA INFILE 'data/ua_detrac/ua_detrac.mp4'
INTO MyVideo;"""
execute_query_fetch_all(query)
select_query = "SELECT id,data FROM MyVideo;"
actual_batch = execute_query_fetch_all(select_query)
actual_batch.sort()
video_reader = OpenCVReader('data/ua_detrac/ua_detrac.mp4')
expected_batch = Batch(frames=pd.DataFrame())
for batch in video_reader.read():
expected_batch += batch
self.assertEqual(actual_batch, expected_batch)
def test_select_and_where_video_in_table(self):
select_query = "SELECT id,data FROM MyVideo WHERE id = 5;"
actual_batch = execute_query_fetch_all(select_query)
expected_batch = list(create_dummy_batches(filters=[5]))[0]
self.assertEqual(actual_batch, expected_batch)
select_query = "SELECT data FROM MyVideo WHERE id = 5;"
actual_batch = execute_query_fetch_all(select_query)
expected_rows = [{"data": np.array(
np.ones((2, 2, 3)) * float(5 + 1) * 25, dtype=np.uint8)}]
expected_batch = Batch(frames=pd.DataFrame(expected_rows))
self.assertEqual(actual_batch, expected_batch)
select_query = "SELECT id, data FROM MyVideo WHERE id >= 2;"
actual_batch = execute_query_fetch_all(select_query)
actual_batch.sort()
expected_batch = list(
create_dummy_batches(
filters=range(
2, NUM_FRAMES)))[0]
self.assertEqual(actual_batch, expected_batch)
select_query = "SELECT id, data FROM MyVideo WHERE id >= 2 AND id < 5;"
actual_batch = execute_query_fetch_all(select_query)
actual_batch.sort()
expected_batch = list(create_dummy_batches(filters=range(2, 5)))[0]
self.assertEqual(actual_batch, expected_batch)
def test_nested_select_video_in_table(self):
nested_select_query = """SELECT id, data FROM
(SELECT id, data FROM MyVideo WHERE id >= 2 AND id < 5)
WHERE id >= 3;"""
actual_batch = execute_query_fetch_all(nested_select_query)
actual_batch.sort()
expected_batch = list(create_dummy_batches(filters=range(3, 5)))[0]
self.assertEqual(actual_batch, expected_batch)
def test_select_and_union_video_in_table(self):
select_query = """SELECT id, data FROM MyVideo WHERE id < 3
UNION ALL SELECT id, data FROM MyVideo WHERE id > 7;"""
actual_batch = execute_query_fetch_all(select_query)
actual_batch.sort()
expected_batch = list(create_dummy_batches(
filters=[i for i in range(NUM_FRAMES) if i < 3 or i > 7]))[0]
self.assertEqual(actual_batch, expected_batch)
select_query = """SELECT id, data FROM MyVideo WHERE id < 2
UNION ALL SELECT id, data FROM MyVideo WHERE id > 4 AND id < 6
UNION ALL SELECT id, data FROM MyVideo WHERE id > 7;"""
actual_batch = execute_query_fetch_all(select_query)
actual_batch.sort()
expected_batch = list(create_dummy_batches(
filters=[i for i in range(NUM_FRAMES)
if i < 2 or i == 5 or i > 7]))[0]
self.assertEqual(actual_batch, expected_batch)
def test_select_and_limit(self):
select_query = "SELECT id,data FROM MyVideo ORDER BY id LIMIT 5;"
actual_batch = execute_query_fetch_all(select_query)
actual_batch.sort()
expected_batch = list(create_dummy_batches(
num_frames=10, batch_size=5))
self.assertEqual(actual_batch.batch_size, expected_batch[0].batch_size)
self.assertEqual(actual_batch, expected_batch[0])
def test_select_and_sample(self):
select_query = "SELECT id,data FROM MyVideo SAMPLE 7 ORDER BY id;"
actual_batch = execute_query_fetch_all(select_query)
actual_batch.sort()
expected_batch = list(create_dummy_batches(
filters=range(0, NUM_FRAMES, 7)))
self.assertEqual(actual_batch.batch_size, expected_batch[0].batch_size)
# Since frames are fetched in random order, this test might be flaky
# Disabling it for time being
# self.assertEqual(actual_batch, expected_batch[0])
|
from regex import (
extract_course_times,
split_on_multiple_chars,
get_all_hashtags_and_links,
match_first_paragraph,
find_double_words,
match_ip_v4_address,
)
def test_extract_course_times():
expected = ["01:47", "32:03", "41:51", "27:48", "05:02"]
assert extract_course_times() == expected
def test_split_on_multiple_chars():
expected = [
"2017-11-03T01:00:02",
"challenge time",
"regex!",
"hope you join ... soon",
]
assert split_on_multiple_chars() == expected
def test_get_all_hashtags_and_links():
expected = ["http://pybit.es/requests-cache.html", "#python", "#APIs"]
assert get_all_hashtags_and_links() == expected
def test_match_first_paragraph():
expected = "pybites != greedy"
assert match_first_paragraph() == expected
def test_find_double_words():
expected = "the the"
assert find_double_words() == expected
def test_match_ip_address():
valid_ips = [
"1.1.1.1",
"255.255.255.255",
"192.168.1.1",
"10.10.1.1",
"132.254.111.10",
"26.10.2.10",
"127.0.0.1",
]
bad_ips = ["10.10.10", "10.10", "10", "a.a.a.a", "10.0.0.a"]
for valid_ip in valid_ips:
assert match_ip_v4_address(valid_ip)
for bad_ip in bad_ips:
assert match_ip_v4_address(bad_ip) is None
|
"""
Calibration Plot Widget
-----------------------
"""
from collections import namedtuple
import numpy as np
from AnyQt.QtWidgets import QListWidget
import pyqtgraph as pg
import Orange
from Orange.widgets import widget, gui, settings
from Orange.widgets.evaluate.utils import check_results_adequacy
from Orange.widgets.utils import colorpalette, colorbrewer
from Orange.widgets.widget import Input
from Orange.widgets import report
Curve = namedtuple("Curve", ["x", "y"])
PlotCurve = namedtuple("PlotCurve", ["curve", "curve_item", "rug_item"])
class OWCalibrationPlot(widget.OWWidget):
name = "Calibration Plot"
description = "Calibration plot based on evaluation of classifiers."
icon = "icons/CalibrationPlot.svg"
priority = 1030
class Inputs:
evaluation_results = Input("Evaluation Results", Orange.evaluation.Results)
class Warning(widget.OWWidget.Warning):
empty_input = widget.Msg("Empty result on input. Nothing to display.")
target_index = settings.Setting(0)
selected_classifiers = settings.Setting([])
display_rug = settings.Setting(True)
graph_name = "plot"
def __init__(self):
super().__init__()
self.results = None
self.classifier_names = []
self.colors = []
self._curve_data = {}
box = gui.vBox(self.controlArea, "Plot")
tbox = gui.vBox(box, "Target Class")
tbox.setFlat(True)
self.target_cb = gui.comboBox(
tbox, self, "target_index", callback=self._replot, contentsLength=8
)
cbox = gui.vBox(box, "Classifier")
cbox.setFlat(True)
self.classifiers_list_box = gui.listBox(
box,
self,
"selected_classifiers",
"classifier_names",
selectionMode=QListWidget.MultiSelection,
callback=self._replot,
)
gui.checkBox(
box, self, "display_rug", "Show rug", callback=self._on_display_rug_changed
)
self.plotview = pg.GraphicsView(background="w")
self.plot = pg.PlotItem(enableMenu=False)
self.plot.setMouseEnabled(False, False)
self.plot.hideButtons()
axis = self.plot.getAxis("bottom")
axis.setLabel("Predicted Probability")
axis = self.plot.getAxis("left")
axis.setLabel("Observed Average")
self.plot.setRange(xRange=(0.0, 1.0), yRange=(0.0, 1.0), padding=0.05)
self.plotview.setCentralItem(self.plot)
self.mainArea.layout().addWidget(self.plotview)
@Inputs.evaluation_results
def set_results(self, results):
self.clear()
results = check_results_adequacy(results, self.Error)
if results is not None and not results.actual.size:
self.Warning.empty_input()
else:
self.Warning.empty_input.clear()
self.results = results
if self.results is not None:
self._initialize(results)
self._replot()
def clear(self):
self.plot.clear()
self.results = None
self.classifier_names = []
self.selected_classifiers = []
self.target_cb.clear()
self.target_index = 0
self.colors = []
self._curve_data = {}
def _initialize(self, results):
N = len(results.predicted)
names = getattr(results, "learner_names", None)
if names is None:
names = ["#{}".format(i + 1) for i in range(N)]
self.classifier_names = names
scheme = colorbrewer.colorSchemes["qualitative"]["Dark2"]
if N > len(scheme):
scheme = colorpalette.DefaultRGBColors
self.colors = colorpalette.ColorPaletteGenerator(N, scheme)
for i in range(N):
item = self.classifiers_list_box.item(i)
item.setIcon(colorpalette.ColorPixmap(self.colors[i]))
self.selected_classifiers = list(range(N))
self.target_cb.addItems(results.data.domain.class_var.values)
def plot_curve(self, clf_idx, target):
if (clf_idx, target) in self._curve_data:
return self._curve_data[clf_idx, target]
ytrue = self.results.actual == target
probs = self.results.probabilities[clf_idx, :, target]
sortind = np.argsort(probs)
probs = probs[sortind]
ytrue = ytrue[sortind]
if probs.size:
xmin, xmax = probs.min(), probs.max()
x = np.linspace(xmin, xmax, 100)
f = gaussian_smoother(probs, ytrue, sigma=0.15 * (xmax - xmin))
observed = f(x)
else:
x = np.array([])
observed = np.array([])
curve = Curve(x, observed)
curve_item = pg.PlotDataItem(
x,
observed,
pen=pg.mkPen(self.colors[clf_idx], width=1),
shadowPen=pg.mkPen(self.colors[clf_idx].lighter(160), width=2),
symbol="+",
symbolSize=4,
antialias=True,
)
rh = 0.025
rug_x = np.c_[probs, probs]
rug_x_true = rug_x[ytrue].ravel()
rug_x_false = rug_x[~ytrue].ravel()
rug_y_true = np.ones_like(rug_x_true)
rug_y_true[1::2] = 1 - rh
rug_y_false = np.zeros_like(rug_x_false)
rug_y_false[1::2] = rh
rug1 = pg.PlotDataItem(
rug_x_false,
rug_y_false,
pen=self.colors[clf_idx],
connect="pairs",
antialias=True,
)
rug2 = pg.PlotDataItem(
rug_x_true,
rug_y_true,
pen=self.colors[clf_idx],
connect="pairs",
antialias=True,
)
self._curve_data[clf_idx, target] = PlotCurve(curve, curve_item, (rug1, rug2))
return self._curve_data[clf_idx, target]
def _setup_plot(self):
target = self.target_index
selected = self.selected_classifiers
curves = [self.plot_curve(i, target) for i in selected]
for curve in curves:
self.plot.addItem(curve.curve_item)
if self.display_rug:
self.plot.addItem(curve.rug_item[0])
self.plot.addItem(curve.rug_item[1])
self.plot.plot([0, 1], [0, 1], antialias=True)
def _replot(self):
self.plot.clear()
if self.results is not None:
self._setup_plot()
def _on_display_rug_changed(self):
self._replot()
def send_report(self):
if self.results is None:
return
caption = report.list_legend(
self.classifiers_list_box, self.selected_classifiers
)
self.report_items((("Target class", self.target_cb.currentText()),))
self.report_plot()
self.report_caption(caption)
def gaussian_smoother(x, y, sigma=1.0):
x = np.asarray(x)
y = np.asarray(y)
gamma = 1.0 / (2 * sigma ** 2)
a = 1.0 / (sigma * np.sqrt(2 * np.pi))
if x.shape != y.shape:
raise ValueError
def smoother(xs):
W = a * np.exp(-gamma * ((xs - x) ** 2))
return np.average(y, weights=W)
return np.vectorize(smoother, otypes=[float]) # np.float was removed in NumPy 1.24
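# gaussian_smoother is a Nadaraya-Watson kernel regressor: each query point xs
# receives the average of y weighted by W_i = a * exp(-gamma * (xs - x_i)**2).
# A minimal sketch with made-up data (not part of the widget):
#   probs = np.random.rand(100)          # predicted probabilities
#   hits = np.random.rand(100) > 0.5     # observed outcomes
#   f = gaussian_smoother(probs, hits, sigma=0.15)
#   f(np.linspace(0, 1, 5))              # smoothed observed averages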
def main():
import sip
from AnyQt.QtWidgets import QApplication
from Orange.classification import (
LogisticRegressionLearner,
SVMLearner,
NuSVMLearner,
)
app = QApplication([])
w = OWCalibrationPlot()
w.show()
w.raise_()
data = Orange.data.Table("ionosphere")
results = Orange.evaluation.CrossValidation(
data,
[
LogisticRegressionLearner(penalty="l2"),
LogisticRegressionLearner(penalty="l1"),
SVMLearner(probability=True),
NuSVMLearner(probability=True),
],
store_data=True,
)
results.learner_names = ["LR l2", "LR l1", "SVM", "Nu SVM"]
w.set_results(results)
rval = app.exec_()
sip.delete(w)
del w
app.processEvents()
del app
return rval
if __name__ == "__main__":
main()
|
from ..utils import Object
class ChangeImportedContacts(Object):
"""
Changes imported contacts using the list of current user contacts saved on the device. Imports newly added contacts and, if at least the file database is enabled, deletes recently deleted contacts. Query result depends on the result of the previous query, so only one query is possible at a time
Attributes:
ID (:obj:`str`): ``ChangeImportedContacts``
Args:
contacts (List of :class:`telegram.api.types.contact`):
The new list of contacts; contacts' vCards are ignored and not imported
Returns:
ImportedContacts
Raises:
:class:`telegram.Error`
"""
ID = "changeImportedContacts"
def __init__(self, contacts, extra=None, **kwargs):
self.extra = extra
self.contacts = contacts # list of contact
@staticmethod
def read(q: dict, *args) -> "ChangeImportedContacts":
contacts = [Object.read(i) for i in q.get('contacts', [])]
return ChangeImportedContacts(contacts)
|
from PIL import Image
import numpy as NP
from matplotlib import pyplot as plt
i=Image.open('i5.jpg')
iar=NP.asarray(i)
#print iar
b= len(iar)
p=0
for ii in iar:
for jj in ii:
p+=1
#print p
'''a=[]
for j in iar:
for k in j:
a.append(k[0])
#print a
s=int(NP.sum(a)/len(a))'''
#print s
# build an image that keeps only the red channel (green and blue zeroed below)
ar = [[[0 for _ in range(3)] for _ in range(int(p / b))] for _ in range(b)]
for ii in range(b):
for jj in range (int(p/b)):
ar[ii][jj][2]=0
ar[ii][jj][1]=0
for ii in range(b):
for jj in range (int(p/b)):
ar[ii][jj][0]=iar[ii][jj][0]#-s
'''if(ar[ii][jj][0]<0):
ar[ii][jj][0]=int((ar[ii][jj][0]+s)*255/s)'''
#i.show()
plt.xticks([]),plt.yticks([])
plt.imshow(ar)
plt.show()
# print(len(ar))
|
import torch
import torch_sparse
from kplex_pool import kplex_cpu, cc_cpu
def connected_components(edge_index, num_nodes=None):
"""Find the connected components of a given graph.
Args:
edge_index (LongTensor): Edge coordinate matrix.
num_nodes (int, optional): Number of nodes. Defaults to None.
Returns:
LongTensor: Vector assigning each node to its component index.
"""
if num_nodes is None:
num_nodes = edge_index.max().item() + 1
device = edge_index.device
row, col = edge_index.cpu()
out = cc_cpu.connected_components(row, col, num_nodes)
return out.to(device)
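# Hedged usage sketch (a hypothetical 4-node graph where node 3 is isolated):
#   edge_index = torch.tensor([[0, 1], [1, 2]]) # row 0: sources, row 1: targets
#   labels = connected_components(edge_index, num_nodes=4)
#   # expect labels[0] == labels[1] == labels[2], with labels[3] in its own component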
|
import time
#from datetime import datetime as dt
from dateutil import tz
import datetime
dt = datetime.datetime
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# SETUP-UP
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
""" CHOOSE YOUR OS (Windows OR Linux) """
my_OS = "Windows" # write either "Windows" or "Linux"
""" DEFINE _START_ & _END_ HOURS OF WORKING DAY """
_START_ = 9 # 9 in the morning
_END_ = 17 # 5 in the afternoon
# _tz_ = "Europe/London"
""" INSERT WEBSITES YOU WANT TO BLOCK during working hours """
blocked_websites=["www.facebook.com", "facebook.com",
"www.corriere.it", "www.repubblica.it"]
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# PROGRAM
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# local host
redirect ="127.0.0.1"
if my_OS == "Windows": # 1. Windows: C:\\Windows\\System32\\drivers\\etc\\hosts
hosts_path = r"C:\Windows\System32\drivers\etc\hosts"
else: # 2. Linux: "/etc/hosts"
hosts_path = "/etc/hosts"
# tz = tz.gettz(_tz_)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# AUXILIARY FUNCTIONS (defined before the loop so they are reachable)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def utcnow():
return datetime.datetime.now(tz=tz.tzutc())
def tznow(time_zone="Europe/London"):
"""
# to get the list of time zones:
#from dateutil.zoneinfo import get_zonefile_instance
#print(list(get_zonefile_instance().zones)) # over 500 timezones
"""
my_tz = tz.gettz(time_zone) # or =tz.gettz() for local zone of computer
return datetime.datetime.now(tz=my_tz)
while True:
_now = dt.now() # use tznow(_tz_) rather than dt.now() for tz aware datetime
beginning = dt(_now.year, _now.month, _now.day, _START_) # ).replace(tzinfo=tz) # tz replaced
ending = dt(_now.year, _now.month, _now.day, _END_) # , tzinfo=tz) # inside
# WORKING HOURS
if beginning < _now < ending:
print("Time to work and study...")
with open(hosts_path, 'r+') as file:
content = file.read() # read the whole file
for website in blocked_websites:
if website in content: # already blocked, do nothing
pass
else: # write the website to block at the end
file.write(redirect + " " + website + "\n")
# FUN HOURS
else:
with open(hosts_path, 'r+') as file:
content = file.readlines() # read lines
file.seek(0) # go to the top (avoid appending and overwrite)
for line in content: # write a line that does not contain a blocked website
if not any(website in line for website in blocked_websites):
file.write(line)
file.truncate() # delete all remaining lines afterwards
print("Fun hours...")
# run it every 6 seconds
time.sleep(6)
|
import atexit
import numpy as np
import os
import cv2
import dlib
import keras
from keras import backend as K
dlib_detectors = []
keras_model = None
is_initialized = False
@atexit.register
def onExit():
global dlib_detectors
global keras_model
if keras_model is not None:
del keras_model
K.clear_session()
for detector in dlib_detectors:
del detector
class TorchBatchNorm2D(keras.engine.topology.Layer):
def __init__(self, axis=-1, momentum=0.99, epsilon=1e-3, **kwargs):
super(TorchBatchNorm2D, self).__init__(**kwargs)
self.supports_masking = True
self.axis = axis
self.momentum = momentum
self.epsilon = epsilon
def build(self, input_shape):
dim = input_shape[self.axis]
if dim is None:
raise ValueError('Axis ' + str(self.axis) + ' of ' 'input tensor should have a defined dimension ' 'but the layer received an input with shape ' + str(input_shape) + '.')
shape = (dim,)
self.gamma = self.add_weight(shape=shape, name='gamma', initializer='ones', regularizer=None, constraint=None)
self.beta = self.add_weight(shape=shape, name='beta', initializer='zeros', regularizer=None, constraint=None)
self.moving_mean = self.add_weight(shape=shape, name='moving_mean', initializer='zeros', trainable=False)
self.moving_variance = self.add_weight(shape=shape, name='moving_variance', initializer='ones', trainable=False)
self.built = True
def call(self, inputs, training=None):
input_shape = K.int_shape(inputs)
broadcast_shape = [1] * len(input_shape)
broadcast_shape[self.axis] = input_shape[self.axis]
broadcast_moving_mean = K.reshape(self.moving_mean, broadcast_shape)
broadcast_moving_variance = K.reshape(self.moving_variance, broadcast_shape)
broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
broadcast_beta = K.reshape(self.beta, broadcast_shape)
invstd = K.ones (shape=broadcast_shape, dtype='float32') / K.sqrt(broadcast_moving_variance + K.constant(self.epsilon, dtype='float32'))
return (inputs - broadcast_moving_mean) * invstd * broadcast_gamma + broadcast_beta
def get_config(self):
config = { 'axis': self.axis, 'momentum': self.momentum, 'epsilon': self.epsilon }
base_config = super(TorchBatchNorm2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def transform(point, center, scale, resolution):
pt = np.array ( [point[0], point[1], 1.0] )
h = 200.0 * scale
m = np.eye(3)
m[0,0] = resolution / h
m[1,1] = resolution / h
m[0,2] = resolution * ( -center[0] / h + 0.5 )
m[1,2] = resolution * ( -center[1] / h + 0.5 )
m = np.linalg.inv(m)
return np.matmul (m, pt)[0:2]
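# transform() maps a point from the (resolution x resolution) crop frame back to
# original image coordinates: m scales by resolution / (200 * scale) and recenters
# on `center`, and its inverse carries crop-space points into image space. crop()
# below uses it to locate the crop's corners in the source image.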
def crop(image, center, scale, resolution=256.0):
ul = transform([1, 1], center, scale, resolution).astype(int)
br = transform([resolution, resolution], center, scale, resolution).astype(int)
if image.ndim > 2:
newDim = np.array([br[1] - ul[1], br[0] - ul[0], image.shape[2]], dtype=np.int32)
newImg = np.zeros(newDim, dtype=np.uint8)
else:
newDim = np.array([br[1] - ul[1], br[0] - ul[0]], dtype=np.int32)
newImg = np.zeros(newDim, dtype=np.uint8)
ht = image.shape[0]
wd = image.shape[1]
newX = np.array([max(1, -ul[0] + 1), min(br[0], wd) - ul[0]], dtype=np.int32)
newY = np.array([max(1, -ul[1] + 1), min(br[1], ht) - ul[1]], dtype=np.int32)
oldX = np.array([max(1, ul[0] + 1), min(br[0], wd)], dtype=np.int32)
oldY = np.array([max(1, ul[1] + 1), min(br[1], ht)], dtype=np.int32)
newImg[newY[0] - 1:newY[1], newX[0] - 1:newX[1] ] = image[oldY[0] - 1:oldY[1], oldX[0] - 1:oldX[1], :]
newImg = cv2.resize(newImg, dsize=(int(resolution), int(resolution)), interpolation=cv2.INTER_LINEAR)
return newImg
def get_pts_from_predict(a, center, scale):
b = a.reshape ( (a.shape[0], a.shape[1]*a.shape[2]) )
c = b.argmax(1).reshape ( (a.shape[0], 1) ).repeat(2, axis=1).astype(np.float64)
c[:,0] %= a.shape[2]
c[:,1] = np.apply_along_axis ( lambda x: np.floor(x / a.shape[2]), 0, c[:,1] )
for i in range(a.shape[0]):
pX, pY = int(c[i,0]), int(c[i,1])
if pX > 0 and pX < 63 and pY > 0 and pY < 63:
diff = np.array ( [a[i,pY,pX+1]-a[i,pY,pX-1], a[i,pY+1,pX]-a[i,pY-1,pX]] )
c[i] += np.sign(diff)*0.25
c += 0.5
return [ transform (c[i], center, scale, a.shape[2]) for i in range(a.shape[0]) ]
def initialize(detector, scale_to=2048):
global dlib_detectors
global keras_model
global is_initialized
if not is_initialized:
dlib_cnn_face_detector_path = os.path.join(os.path.dirname(__file__), "mmod_human_face_detector.dat")
if not os.path.exists(dlib_cnn_face_detector_path):
raise Exception ("Error: Unable to find %s, reinstall the lib !" % (dlib_cnn_face_detector_path) )
if detector == 'cnn' or detector == "all":
dlib_cnn_face_detector = dlib.cnn_face_detection_model_v1(dlib_cnn_face_detector_path)
# dlib and TF compete for VRAM, so dlib must make the first allocation to prevent an OOM error
dlib_cnn_face_detector ( np.zeros ( (scale_to, scale_to, 3), dtype=np.uint8), 0 )
dlib_detectors.append(dlib_cnn_face_detector)
if detector == "hog" or detector == "all":
dlib_face_detector = dlib.get_frontal_face_detector()
dlib_face_detector ( np.zeros ( (scale_to, scale_to, 3), dtype=np.uint8), 0 )
dlib_detectors.append(dlib_face_detector)
keras_model_path = os.path.join( os.path.dirname(__file__) , "2DFAN-4.h5" )
if not os.path.exists(keras_model_path):
print ("Error: Unable to find %s, reinstall the lib !" % (keras_model_path) )
else:
print ("Info: initializing keras model...")
keras_model = keras.models.load_model (keras_model_path, custom_objects={'TorchBatchNorm2D': TorchBatchNorm2D} )
is_initialized = True
# scale_to=2048 with dlib upsamples=0 suits Windows 10 users with 3GB of VRAM
# do not re-extract landmarks from a pre-detected face crop: too much face data is
# lost, so the result differs greatly from extracting on the original full image
def extract(input_image_bgr, detector, verbose, all_faces=True, input_is_predetected_face=False, scale_to=2048):
initialize(detector, scale_to)
global dlib_detectors
global keras_model
(h, w, ch) = input_image_bgr.shape
detected_faces = []
if input_is_predetected_face:
input_scale = 1.0
detected_faces = [ dlib.rectangle(0, 0, w, h) ]
input_image = input_image_bgr[:,:,::-1].copy()
else:
input_scale = scale_to / (w if w > h else h)
input_image_bgr = cv2.resize (input_image_bgr, ( int(w*input_scale), int(h*input_scale) ), interpolation=cv2.INTER_LINEAR)
input_image = input_image_bgr[:,:,::-1].copy() #cv2 and numpy inputs differs in rgb-bgr order, this affects chance of dlib face detection
input_images = [input_image, input_image_bgr]
for current_detector, current_image in ((current_detector, current_image) for current_detector in dlib_detectors for current_image in input_images):
detected_faces = current_detector(current_image, 0)
if len(detected_faces) != 0:
break
landmarks = []
if len(detected_faces) > 0:
for i, d_rect in enumerate(detected_faces):
if i > 0 and not all_faces:
break
if type(d_rect) == dlib.mmod_rectangle:
d_rect = d_rect.rect
left, top, right, bottom = d_rect.left(), d_rect.top(), d_rect.right(), d_rect.bottom()
del d_rect
center = np.array( [ (left + right) / 2.0, (top + bottom) / 2.0] )
center[1] -= (bottom - top) * 0.12
scale = (right - left + bottom - top) / 195.0
image = crop(input_image, center, scale).transpose ( (2,0,1) ).astype(np.float32) / 255.0
image = np.expand_dims(image, 0)
pts_img = get_pts_from_predict ( keras_model.predict (image)[-1][0], center, scale)
pts_img = [ ( int(pt[0]/input_scale), int(pt[1]/input_scale) ) for pt in pts_img ]
landmarks.append ( (( int(left/input_scale), int(top/input_scale), int(right/input_scale), int(bottom/input_scale) ),pts_img) )
elif verbose:
print("Warning: No faces were detected.")
return landmarks
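# Hedged usage sketch (paths hypothetical; 2DFAN-4 is a 68-landmark FAN, so
# pts_img typically holds 68 (x, y) points per face):
#   img_bgr = cv2.imread("face.jpg")
#   faces = extract(img_bgr, detector="hog", verbose=True)
#   # -> [((left, top, right, bottom), [(x0, y0), ...]), ...] per detected face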
|
# -*- coding: utf-8 -*-
"""
@author: Aghiles Salah
"""
import numpy as np
from .base_method import BaseMethod
from ..utils.common import safe_indexing
from ..experiment.result import CVResult
class CrossValidation(BaseMethod):
"""Cross Validation Evaluation Method.
Parameters
----------
data: ... , required
Input data in the triplet format (user_id, item_id, rating_val).
n_folds: int, optional, default: 5
The number of folds for cross validation.
rating_threshold: float, optional, default: 1.
The minimum value that is considered to be a good rating used for ranking, \
e.g., if the ratings are in {1, ..., 5}, then rating_threshold = 4.
partition: array-like, shape (n_observed_ratings,), optional, default: None
The partition of ratings into n_folds (fold label of each rating). \
If None, random partitioning is performed to assign each rating into a fold.
exclude_unknowns: bool, optional, default: False
Ignore unknown users and items (cold-start) during evaluation and testing
verbose: bool, optional, default: False
Output running log
"""
def __init__(self, data, fmt='UIR', n_folds=5, rating_threshold=1., partition=None,
exclude_unknowns=True, verbose=False, **kwargs):
BaseMethod.__init__(self, data=data, fmt=fmt, rating_threshold=rating_threshold,
exclude_unknowns=exclude_unknowns, verbose=verbose, **kwargs)
self.n_folds = n_folds
self.current_fold = 0
self.current_split = None
self.n_ratings = len(self._data)
if partition is None:
self.partition_data()
else:
self.partition = self._validate_partition(partition)
# Partition ratings into n_folds
def partition_data(self):
fold_size = int(self.n_ratings / self.n_folds)
remain_size = self.n_ratings - fold_size * self.n_folds
self.partition = np.repeat(np.arange(self.n_folds), fold_size)
if remain_size > 0:
remain_partition = np.random.choice(self.n_folds, size=remain_size, replace=True, p=None)
self.partition = np.concatenate((self.partition, remain_partition))
np.random.shuffle(self.partition)
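# Worked example: with n_ratings = 11 and n_folds = 3, fold_size = 3 and
# remain_size = 2, so np.repeat yields [0,0,0,1,1,1,2,2,2], two extra fold
# labels are drawn at random, and the whole vector is then shuffled.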
def _validate_partition(self, partition):
if len(partition) != self.n_ratings:
raise Exception('The partition length must be equal to the number of ratings')
elif len(set(partition)) != self.n_folds:
raise Exception('Number of folds in given partition different from %s' % (self.n_folds))
return partition
def _get_train_test(self):
if self.verbose:
print('Fold: {}'.format(self.current_fold + 1))
test_idx = np.where(self.partition == self.current_fold)[0]
train_idx = np.where(self.partition != self.current_fold)[0]
train_data = safe_indexing(self._data, train_idx)
test_data = safe_indexing(self._data, test_idx)
self.build(train_data=train_data, test_data=test_data)
if self.verbose:
print('Total users = {}'.format(self.total_users))
print('Total items = {}'.format(self.total_items))
def _next_fold(self):
if self.current_fold < self.n_folds - 1:
self.current_fold = self.current_fold + 1
else:
self.current_fold = 0
def evaluate(self, model, metrics, user_based):
result = CVResult(model.name)
for fold in range(self.n_folds):
self._get_train_test()
fold_result = BaseMethod.evaluate(self, model, metrics, user_based)
result.append(fold_result)
self._next_fold()
result.organize()
return result
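# Hedged usage sketch (model and metrics are placeholders, not defined here):
#   cv = CrossValidation(data=uir_triplets, n_folds=5, verbose=True)
#   result = cv.evaluate(model=recommender, metrics=metric_list, user_based=True)
#   # result is a CVResult aggregating the per-fold evaluations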
|
import numpy as np
from sklearn.svm import SVC
from controller.c_stock_daily import CStockDaily
class StockDailySvm(object):
def __init__(self):
self.name = 'Svm'
@staticmethod
def train():
StockDailySvm.model = SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape='ovr', degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
StockDailySvm.model.fit(CStockDaily.train_x, CStockDaily.train_y)
@staticmethod
def predict(x):
return StockDailySvm.model.predict(x)
|
'''
Given a sorted linked list, delete all nodes that have duplicate numbers, leaving only distinct numbers from the original list.
Example 1:
Input: 1->2->3->3->4->4->5
Output: 1->2->5
Example 2:
Input: 1->1->1->2->3
Output: 2->3
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head:
return None
result = ListNode(0)
ans = result
curr = head
while curr:
value = curr.val
count = 0
# advance through the whole run of equal values, counting its length
while curr and curr.val == value:
curr = curr.next
count += 1
# keep the value only if it appeared exactly once in the run
if count == 1:
result.next = ListNode(value)
result = result.next
return ans.next
|
from captcha.fields import ReCaptchaField
from crispy_forms import helper
from crispy_forms.layout import Submit, Hidden
from django import forms
from django.contrib import auth
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from ET.models import Customer, Owner
class FormHelper(helper.FormHelper):
include_media = False
class FormMixin(object):
def __init__(self, *args, **kwargs):
redirect_field_name = kwargs.pop('redirect_field_name', None)
redirect_field_value = kwargs.pop('redirect_field_value', None)
super(FormMixin, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-3'
self.helper.field_class = 'col-lg-8'
if redirect_field_name:
self.helper.add_input(Hidden(redirect_field_name, redirect_field_value))
class RegisterForm(FormMixin, forms.Form):
first_name = forms.CharField(
label=_('Given Name'),
max_length=20,
widget=forms.TextInput(),
strip=True,
required=True
)
last_name = forms.CharField(
label=_('Family Name'),
max_length=20,
widget=forms.TextInput(),
strip=True,
required=True
)
phone_number = forms.CharField(
label=_('Phone Number'),
max_length=12,
strip=True,
required=True
)
password = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput(render_value=False)
)
password_confirm = forms.CharField(
label=_("Password (again)"),
widget=forms.PasswordInput(render_value=False)
)
captcha = ReCaptchaField(
label=_("Turin Test"),
)
group = None
def __init__(self, *args, **kwargs):
super(RegisterForm, self).__init__(*args, **kwargs)
self.helper.add_input(Submit('register', 'Register'))
def clean_phone_number(self):
g = self.get_group()
phone = self.cleaned_data['phone_number']
if g == 'customer':
qs = Customer.objects.filter(phone_number__iexact=phone)
elif g == 'owner':
qs = Owner.objects.filter(phone_number__iexact=phone)
else:
return self.cleaned_data['phone_number']
if not qs.exists():
return self.cleaned_data["phone_number"]
raise forms.ValidationError(_("This phone number is already taken. Please input another."))
def clean(self):
if "password" in self.cleaned_data and "password_confirm" in self.cleaned_data:
if self.cleaned_data["password"] != self.cleaned_data["password_confirm"]:
raise forms.ValidationError(_("You must type the same password each time."))
return self.cleaned_data
def get_group(self):
if self.group is None:
raise ImproperlyConfigured(
'{0} is missing the group attribute. Define {0}.group.'.format(self.__class__.__name__)
)
if isinstance(self.group, six.string_types):
return self.group
raise ImproperlyConfigured(
'{0}.group attribute must be a string. Define {0}.group, or override '
'{0}.get_group().'.format(self.__class__.__name__)
)
class LoginForm(FormMixin, forms.Form):
password = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput(render_value=False)
)
remember = forms.BooleanField(
label=_("Remember Me"),
required=False,
)
captcha = ReCaptchaField(
label=_("Turin Test"),
)
user = None
group = None
identifier_field = None
authentication_fail_message = _('The information you provided is not correct.')
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
self.helper.add_input(Submit('login', 'Login'))
def clean(self):
if self._errors:
return
user = auth.authenticate(**self.user_credentials())
if user:
if user.is_active:
self.user = user
else:
raise forms.ValidationError(_('This account is inactive.'))
else:
raise forms.ValidationError(self.authentication_fail_message)
return self.cleaned_data
def get_group(self):
if self.group is None:
raise ImproperlyConfigured(
'{0} is missing the group attribute. Define {0}.group.'.format(self.__class__.__name__)
)
if isinstance(self.group, six.string_types):
return self.group
raise ImproperlyConfigured(
'{0}.group attribute must be a string. Define {0}.group, or override '
'{0}.get_group().'.format(self.__class__.__name__)
)
def user_credentials(self):
if self.identifier_field is None:
raise ImproperlyConfigured(
'{0} is missing the identifier_field attribute. Define {0}.identifier_field. or override '
'{0}.user_credentials().'.format(self.__class__.__name__)
)
return {
"username": self.cleaned_data[self.identifier_field],
"password": self.cleaned_data["password"],
"group": self.get_group()
}
class LoginPhoneNumberForm(LoginForm):
phone_number = forms.CharField(label=_('Phone Number'), strip=True, max_length=12)
authentication_fail_message = _("The phone number and/or password you specified are not correct.")
field_order = ['phone_number', 'password', 'captcha', 'remember']
identifier_field = 'phone_number'
|
"""
This is default module controller
It define default routes
"""
from flask import render_template
from flask import request
from flask import flash
from flask import redirect
from flask import url_for
from flask_login import login_user
from flask_login import logout_user
from flask_login import current_user
from app.modules import default_module
from app.default.forms import LoginForm
from app.services.extension import login_manager
from app.models.core.user import User
from app.services.extension import sqlalchemy as db
@login_manager.user_loader
def load_user(id):
return User.query.filter(User.id==id, User.active==1).first()
@default_module.route('/', methods=['GET', 'POST', ])
def index():
form = LoginForm(request.form)
if current_user.is_authenticated:
return redirect(url_for('dashboard.controllers.index'))
if request.method == 'POST':
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data, password=form.password.data).first()
if user and user.active:
user.authenticated = True
db.session.add(user)
db.session.commit()
login_user(user)
return redirect(url_for('dashboard.controllers.index'))
else:
form.username.errors.append('Invalid Username/Password.')
flash(form.errors)
return redirect(url_for('default.controllers.index'))
else:
flash(form.errors)
return redirect(url_for('default.controllers.index'))
return render_template('default/index.html', form=form)
@default_module.route('/logout')
def logout():
user = current_user
user.authenticated = False
db.session.add(user)
db.session.commit()
logout_user()
return redirect(url_for('default.controllers.index')) |
# md5 : 2581a649cb309fd3ce31a3eec84539cb
# sha1 : fa0797344e6bb57e08eb6b59c2985462b340fa53
# sha256 : 2deaf7a4b008d62eeb1799576fe1faf311c0fc538622c63f4ceaee1d30b71a8b
ord_names = {
1: b'??0CStringManager@@QAE@ABV0@@Z',
2: b'??0CStringManager@@QAE@XZ',
3: b'??0DataStorageWrapper@@AAE@V?$shared_ptr@VDataStorageWrapperImpl@@@tr1@std@@@Z',
4: b'??0DataStorageWrapper@@QAE@ABV0@@Z',
5: b'??0Library@@QAE@PBD@Z',
6: b'??0LibraryException@@QAE@ABV0@@Z',
7: b'??0LibraryException@@QAE@J@Z',
8: b'??0RegistryStorage@@AAE@V?$shared_ptr@V?$vector@U?$pair@V?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@V12@@std@@V?$allocator@U?$pair@V?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@V12@@std@@@2@@std@@@tr1@std@@V?$shared_ptr@V?$vector@V?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@V?$allocator@V?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@@2@@std@@@23@@Z',
9: b'??0RegistryStorage@@QAE@ABV0@@Z',
10: b'??0StringLoader@@QAE@ABV0@@Z',
11: b'??0StringLoader@@QAE@XZ',
12: b'??0StringManager@@AAE@XZ',
13: b'??0WindowsStringLoader@@QAE@ABV0@@Z',
14: b'??0WindowsStringLoader@@QAE@XZ',
15: b'??1CStringManager@@UAE@XZ',
16: b'??1DataStorageWrapper@@UAE@XZ',
17: b'??1Library@@QAE@XZ',
18: b'??1LibraryException@@UAE@XZ',
19: b'??1RegistryStorage@@UAE@XZ',
20: b'??1StringLoader@@UAE@XZ',
21: b'??1WindowsStringLoader@@UAE@XZ',
22: b'??4CStringManager@@QAEAAV0@ABV0@@Z',
23: b'??4DataStorageWrapper@@QAEAAV0@ABV0@@Z',
24: b'??4Library@@QAEAAV0@ABV0@@Z',
25: b'??4LibraryException@@QAEAAV0@ABV0@@Z',
26: b'??4RegistryStorage@@QAEAAV0@ABV0@@Z',
27: b'??4StringLoader@@QAEAAV0@ABV0@@Z',
28: b'??4StringManager@@QAEAAV0@ABV0@@Z',
29: b'??4WindowsStringLoader@@QAEAAV0@ABV0@@Z',
30: b'??_7CStringManager@@6B@',
31: b'??_7DataStorageWrapper@@6B@',
32: b'??_7LibraryException@@6B@',
33: b'??_7RegistryStorage@@6B@',
34: b'??_7StringLoader@@6B@',
35: b'??_7WindowsStringLoader@@6B@',
36: b'?CleanDataStorage@DataStorageWrapper@@UAE_NXZ',
37: b'?CleanDataStorage@RegistryStorage@@UAE_NXZ',
38: b'?CreateIMSSShortcut@@YAJXZ',
39: b'?Crypt@@YA_N_NPBXPAKPAPAX@Z',
40: b'?Decrypt@@YA_NPBXPAKPAPAX@Z',
41: b'?DeleteDataVal@DataStorageWrapper@@UAE_NW4DATA_NAME@@@Z',
42: b'?DeleteDataVal@RegistryStorage@@UAE_NW4DATA_NAME@@@Z',
43: b'?DeleteRegEntry@RegistryStorage@@AAE_NAAU?$pair@V?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@V12@@std@@@Z',
44: b'?Encrypt@@YA_NPBXPAKPAPAX@Z',
45: b'?GetDataValue@DataStorageWrapper@@UAE_NW4DATA_NAME@@AAK_N@Z',
46: b'?GetDataValue@DataStorageWrapper@@UAE_NW4DATA_NAME@@AAV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@_N@Z',
47: b'?GetDataValue@DataStorageWrapper@@UAE_NW4DATA_NAME@@AAV?$basic_string@_WU?$char_traits@_W@std@@V?$allocator@_W@2@@std@@_N@Z',
48: b'?GetDataValue@DataStorageWrapper@@UAE_NW4DATA_NAME@@AAV?$vector@DV?$allocator@D@std@@@std@@W4ValueTypes@@_N@Z',
49: b'?GetDataValue@RegistryStorage@@UAE_NW4DATA_NAME@@AAK_N@Z',
50: b'?GetDataValue@RegistryStorage@@UAE_NW4DATA_NAME@@AAV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@_N@Z',
51: b'?GetDataValue@RegistryStorage@@UAE_NW4DATA_NAME@@AAV?$basic_string@_WU?$char_traits@_W@std@@V?$allocator@_W@2@@std@@_N@Z',
52: b'?GetDataValue@RegistryStorage@@UAE_NW4DATA_NAME@@AAV?$vector@DV?$allocator@D@std@@@std@@W4ValueTypes@@_N@Z',
53: b'?GetFromRegistry@@YA_NPBD0PAD@Z',
54: b'?GetFromRegistry@@YA_NPBD0PAEPAK@Z',
55: b'?GetFromRegistry@@YA_NPBD0PAF@Z',
56: b'?GetFromRegistry@@YA_NPBD0PAK@Z',
57: b'?GetFromRegistry@@YA_NPBD0PAPA_W@Z',
58: b'?GetFromRegistry@@YA_NPBDPB_WPA_WPAK@Z',
59: b'?GetFromRegistry@@YA_NPBDV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@PAKK@Z',
60: b'?GetLastError@LibraryException@@QAEJXZ',
61: b'?GetModule@Library@@QAEPAUHINSTANCE__@@XZ',
62: b'?GetRegistryData@RegistryStorage@@AAE_NPAXPAK1V?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@2_N@Z',
63: b'?GetServiceDirectory@@YA_NV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@AAV12@@Z',
64: b'?MacAddressToString@@YA?AV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@QAEI@Z',
65: b'?OsIsWin8OrHigher@@YA_NXZ',
66: b'?SetDataValue@DataStorageWrapper@@UAE_NW4DATA_NAME@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@_N@Z',
67: b'?SetDataValue@DataStorageWrapper@@UAE_NW4DATA_NAME@@ABV?$basic_string@_WU?$char_traits@_W@std@@V?$allocator@_W@2@@std@@_N@Z',
68: b'?SetDataValue@DataStorageWrapper@@UAE_NW4DATA_NAME@@ABV?$vector@DV?$allocator@D@std@@@std@@W4ValueTypes@@_N@Z',
69: b'?SetDataValue@DataStorageWrapper@@UAE_NW4DATA_NAME@@K_N@Z',
70: b'?SetDataValue@RegistryStorage@@UAE_NW4DATA_NAME@@ABV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@_N@Z',
71: b'?SetDataValue@RegistryStorage@@UAE_NW4DATA_NAME@@ABV?$basic_string@_WU?$char_traits@_W@std@@V?$allocator@_W@2@@std@@_N@Z',
72: b'?SetDataValue@RegistryStorage@@UAE_NW4DATA_NAME@@ABV?$vector@DV?$allocator@D@std@@@std@@W4ValueTypes@@_N@Z',
73: b'?SetDataValue@RegistryStorage@@UAE_NW4DATA_NAME@@K_N@Z',
74: b'?SetRegistryData@RegistryStorage@@AAE_NPBXKKV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@1_N@Z',
75: b'?ValueExists@DataStorageWrapper@@UAE_NW4DATA_NAME@@@Z',
76: b'?ValueExists@RegistryStorage@@UAE_NW4DATA_NAME@@@Z',
77: b'?getDateTime@@YA?AV?$basic_string@DU?$char_traits@D@std@@V?$allocator@D@2@@std@@XZ',
78: b'?getString@CStringManager@@QBE?BV?$basic_string@_WU?$char_traits@_W@std@@V?$allocator@_W@2@@std@@I@Z',
79: b'?insertValueToVectorBuffer@RegistryStorage@@AAEXAAV?$vector@DV?$allocator@D@std@@@std@@PADK@Z',
80: b'?instance@StringManager@@SAPAVCStringManager@@XZ',
81: b'?loadString@WindowsStringLoader@@AAE?AV?$basic_string@_WU?$char_traits@_W@std@@V?$allocator@_W@2@@std@@I@Z',
82: b'?loadStrings@CStringManager@@QAEXAAVStringLoader@@AAV?$vector@IV?$allocator@I@std@@@std@@@Z',
83: b'?loadStrings@WindowsStringLoader@@UAEXAAV?$vector@IV?$allocator@I@std@@@std@@AAV?$map@IV?$basic_string@_WU?$char_traits@_W@std@@V?$allocator@_W@2@@std@@U?$less@I@2@V?$allocator@U?$pair@$$CBIV?$basic_string@_WU?$char_traits@_W@std@@V?$allocator@_W@2@@std@@@std@@@2@@3@@Z',
84: b'?m_instance@StringManager@@0V?$shared_ptr@VCStringManager@@@tr1@std@@A',
} |
import os
import pkg_resources
import functools
import comtypes.client
import contextlib
def raises_error(func, exception=Exception):
try:
func()
except exception:
return True
return False
def typelibs_generated():
import_SG = functools.partial(
__import__, 'comtypes.gen.DexterLib', fromlist=['SampleGrabber']
)
return not raises_error(import_SG)
@contextlib.contextmanager
def directory_context(dir):
orig = os.getcwd()
os.chdir(dir)
try:
yield
finally:
os.chdir(orig)
def generate_typelibs():
fn = functools.partial(
pkg_resources.resource_filename,
__name__,
)
with directory_context(fn('DirectShow')):
list(
map(
comtypes.client.GetModule,
[
'DirectShow.tlb',
'DexterLib.tlb',
],
)
)
if not typelibs_generated():
generate_typelibs()
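# Once generated, the wrappers can be imported like any module (a hedged
# sketch; the available classes depend on the .tlb files listed above):
# from comtypes.gen import DexterLib
# grabber = comtypes.client.CreateObject(DexterLib.SampleGrabber)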
|
from __future__ import absolute_import
#
# Copyright (c) 2020 Juniper Networks, Inc. All rights reserved.
#
import gevent
import logging
import requests
from testtools.matchers import Equals
import unittest
from flexmock import flexmock
import sseclient
from . import test_case
from vnc_api.vnc_api import *
from cfgm_common.tests import test_utils
from cfgm_common import vnc_cgitb
from vnc_cfg_api_server.event_dispatcher import EventDispatcher
import keystoneclient.v2_0.client as keystone
from keystonemiddleware import auth_token
from .test_perms2 import (
User,
set_perms,
vnc_read_obj,
vnc_aal_create,
vnc_aal_add_rule,
ks_admin_authenticate,
vnc_aal_del_rule)
vnc_cgitb.enable(format='text')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class CustomError(Exception):
pass
class TestWatch(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestWatch, cls).setUpClass(*args, **kwargs)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestWatch, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def setUp(self):
super(TestWatch, self).setUp()
self.listen_ip = self._api_server_ip
self.listen_port = self._api_server._args.listen_port
self.url = 'http://%s:%s/watch' % (self.listen_ip, self.listen_port)
self.mock = None
self.stream_response = None
# end setUp
def tearDown(self):
if self.mock:
self.mock()
super(TestWatch, self).tearDown()
def test_subscribe_exception(self):
param = {"resource_type": "virtual_network"}
self.error = "value error occured"
m = flexmock(EventDispatcher)\
.should_receive('subscribe_client')\
.and_raise(CustomError, self.error)
def reset():
m.reset()
self.mock = reset
response = requests.get(self.url, params=param, stream=True)
self.response_error = "Client queue registration failed with exception %s" % (
self.error)
self.assertThat(response.status_code, Equals(500))
self.assertThat(
response.content.decode('utf-8'),
Equals(
self.response_error))
# end test_subscribe_exception
def test_valid_params(self):
param = {"resource_type": "virtual_network,virtual_machine_interface"}
self.error = "value error occured"
init_sample = {
"event": "init", "data": [{"type": "virtual_network"}], }
m = flexmock(EventDispatcher)
s = m.should_receive('subscribe_client')\
.and_return().once()
i = m.should_receive('initialize')\
.and_return(True, init_sample).twice()
def reset():
i.reset()
s.reset()
self.mock = reset
self.count = 0
self.data = "[{'type': 'virtual_network'}]"
def watch_client():
self.stream_response = requests.get(
self.url, params=param, stream=True)
client = sseclient.SSEClient(self.stream_response)
for event in client.events():
if (event.event == 'init'):
self.count += 1
self.assertThat(event.data, Equals(self.data))
gevent.spawn(watch_client)
gevent.sleep(0.1)
self.assertThat(self.stream_response.status_code, Equals(200))
self.assertEqual(self.count, 2)
# end test_valid_params
def test_invalid_request(self):
response = requests.get(self.url, stream=True)
self.assertEqual(response.status_code, 400)
self.assertThat(response.content.decode('utf-8'), Equals(
'resource_type required in request'))
# end test_invalid_request
def test_invalid_resource(self):
param = {
"resource_type":
"virtual_network,virtual_machine_i"}
response = requests.get(self.url, stream=True, params=param)
self.assertEqual(response.status_code, 404)
# end test_invalid_resource
# end TestWatch
class TestWatchIntegration(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestWatchIntegration, cls).setUpClass(*args, **kwargs)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestWatchIntegration, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def setUp(self):
super(TestWatchIntegration, self).setUp()
self.listen_ip = self._api_server_ip
self.listen_port = self._api_server._args.listen_port
self.url = 'http://%s:%s/watch' % (self.listen_ip, self.listen_port)
self.stream_response = None
# end setUp
@unittest.skip("Flaky test")
def test_watch(self):
param = {
"resource_type":
"virtual_network,virtual_machine_interface,routing_instance"}
expected_event_list = [
"init",
"init",
"init",
"create",
"create",
"create",
"create",
"update"]
self.event_index = 0
def watch_client():
self.stream_response = requests.get(
self.url, params=param, stream=True)
client = sseclient.SSEClient(self.stream_response)
for event in client.events():
logger.info('%s: %s' % (event.event, event.data))
if self.event_index < len(expected_event_list):
self.assertThat(event.event, Equals(
expected_event_list[self.event_index]))
self.event_index += 1
return
try:
greenlet = gevent.spawn(watch_client)
num_vn = 1
vn_objs, ri_objs, vmi_objs, x = self._create_vn_ri_vmi(num_vn)
gevent.sleep(0)
greenlet.get(timeout=5)
except gevent.timeout.Timeout as e:
logger.info("Request failed: %s" % e)
self.assertTrue(greenlet.successful())
# end test_watch
# end TestWatchIntegration
class TestWatchPermission(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
extra_mocks = [(keystone.Client,
'__new__', test_utils.FakeKeystoneClient),
(vnc_api.vnc_api.VncApi,
'_authenticate', ks_admin_authenticate),
(auth_token, 'AuthProtocol',
test_utils.FakeAuthProtocol)]
extra_config_knobs = [
('DEFAULTS', 'aaa_mode', 'rbac'),
('DEFAULTS', 'cloud_admin_role', 'cloud-admin'),
('DEFAULTS', 'global_read_only_role', 'read-only-role'),
('DEFAULTS', 'auth', 'keystone'),
]
super(
TestWatchPermission, cls).setUpClass(
extra_mocks=extra_mocks,
extra_config_knobs=extra_config_knobs)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestWatchPermission, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def setUp(self):
super(TestWatchPermission, self).setUp()
self.ip = self._api_server_ip
self.port = self._api_server._args.listen_port
self.url = 'http://%s:%s/watch' % (self.ip, self.port)
self.kc = keystone.Client(username='admin', password='contrail123',
tenant_name='admin',
auth_url=self.url)
# end setUp
def test_rbac_cloud_admin_role(self):
self.admin = User(self.ip, self.port, self.kc, 'admin', 'contrail123',
'cloud-admin', 'admin-%s' % self.id())
param = {"resource_type": "virtual_network"}
headers = {'X-Auth-Token': self.admin.vnc_lib.get_auth_token()}
response = requests.get(
self.url,
params=param,
stream=True,
headers=headers)
self.assertEqual(response.status_code, 200)
# end test_rbac_cloud_admin_role
def test_rbac_read_only_role(self):
self.adminr = User(self.ip, self.port, self.kc, 'adminr',
'contrail123', 'read-only-role', 'adminr-%s' %
self.id())
param = {"resource_type": "virtual_network"}
headers = {'X-Auth-Token': self.adminr.vnc_lib.get_auth_token()}
response = requests.get(
self.url,
params=param,
stream=True,
headers=headers)
self.assertEqual(response.status_code, 200)
# end test_rbac_read_only_role
def test_rbac_admin_role(self):
self.admin1 = User(self.ip, self.port, self.kc, 'admin1',
'contrail123', 'admin', 'admin1-%s' % self.id())
param = {"resource_type": "virtual_network"}
headers = {'X-Auth-Token': self.admin1.vnc_lib.get_auth_token()}
response = requests.get(
self.url,
params=param,
stream=True,
headers=headers)
self.assertEqual(response.status_code, 403)
# end test_rbac_admin_role
def test_rbac_member_role(self):
self.admin2 = User(self.ip, self.port, self.kc, 'admin2',
'contrail123', 'member', 'admin2-%s' % self.id())
param = {"resource_type": "virtual_network"}
headers = {'X-Auth-Token': self.admin2.vnc_lib.get_auth_token()}
response = requests.get(
self.url,
params=param,
stream=True,
headers=headers)
self.assertEqual(response.status_code, 403)
# end test_rbac_member_role
def test_rbac_user_role(self):
self.alice = User(self.ip, self.port, self.kc, 'alice', 'alice123',
'alice-role', 'alice-proj-%s' % self.id())
param = {"resource_type": "virtual_network"}
headers = {'X-Auth-Token': self.alice.vnc_lib.get_auth_token()}
response = requests.get(
self.url,
params=param,
stream=True,
headers=headers)
self.assertEqual(response.status_code, 403)
# end test_rbac_user_role
def test_rbac_user_role_with_resource_read_access(self):
self.admin = User(self.ip, self.port, self.kc, 'admin', 'contrail123',
'cloud-admin', 'admin-%s' % self.id())
self.alice = User(self.ip, self.port, self.kc, 'alice', 'alice123',
'alice-role', 'alice-proj-%s' % self.id())
user = self.alice
project_obj = Project(user.project)
project_obj.uuid = user.project_uuid
self.admin.vnc_lib.project_create(project_obj)
# read projects back
user.project_obj = vnc_read_obj(self.admin.vnc_lib,
'project', obj_uuid=user.project_uuid)
user.domain_id = user.project_obj.parent_uuid
user.vnc_lib.set_domain_id(user.project_obj.parent_uuid)
logger.info(
'Change owner of project %s to %s' %
(user.project, user.project_uuid))
set_perms(user.project_obj, owner=user.project_uuid, share=[])
self.admin.vnc_lib.project_update(user.project_obj)
user.proj_rg = vnc_aal_create(
self.admin.vnc_lib, self.alice.project_obj)
vnc_aal_add_rule(self.admin.vnc_lib, user.proj_rg,
rule_str='virtual-network %s:CR' % user.role)
logger.info('')
logger.info('alice: trying to create VN in her project')
self.vn_name = "alice-vn-%s" % self.id()
vn = VirtualNetwork(self.vn_name, self.alice.project_obj)
try:
self.alice.vnc_lib.virtual_network_create(vn)
logger.info('Created virtual network: %s' % vn.get_fq_name())
testfail = False
except PermissionDenied as e:
logger.info('Failed to create VN')
testfail = True
self.assertThat(testfail, Equals(False))
param = {"resource_type": "virtual_network"}
headers = {'X-Auth-Token': self.alice.vnc_lib.get_auth_token()}
logger.info("alice has been granted read permission for the resource")
response = requests.get(
self.url,
params=param,
stream=True,
headers=headers)
self.assertEqual(response.status_code, 200)
vnc_aal_del_rule(self.admin.vnc_lib, self.alice.proj_rg,
rule_str='virtual-network %s:R' % self.alice.role)
logger.info("alice's read permission for the resource revoked")
response = requests.get(
self.url,
params=param,
stream=True,
headers=headers)
self.assertEqual(response.status_code, 403)
# end test_rbac_user_role_with_resource_read_access
def test_rbac_user_role_with_multiple_resources(self):
self.admin = User(self.ip, self.port, self.kc, 'admin', 'contrail123',
'cloud-admin', 'admin-%s' % self.id())
self.alice = User(self.ip, self.port, self.kc, 'alice', 'alice123',
'alice-role', 'alice-proj-%s' % self.id())
user = self.alice
project_obj = Project(user.project)
project_obj.uuid = user.project_uuid
self.admin.vnc_lib.project_create(project_obj)
# read projects back
user.project_obj = vnc_read_obj(self.admin.vnc_lib,
'project', obj_uuid=user.project_uuid)
user.domain_id = user.project_obj.parent_uuid
user.vnc_lib.set_domain_id(user.project_obj.parent_uuid)
logger.info(
'Change owner of project %s to %s' %
(user.project, user.project_uuid))
set_perms(user.project_obj, owner=user.project_uuid, share=[])
self.admin.vnc_lib.project_update(user.project_obj)
user.proj_rg = vnc_aal_create(
self.admin.vnc_lib, self.alice.project_obj)
vnc_aal_add_rule(self.admin.vnc_lib, user.proj_rg,
rule_str='virtual-network %s:CR' % user.role)
self.vn_name = "alice-vn-%s" % self.id()
vn = VirtualNetwork(self.vn_name, self.alice.project_obj)
try:
self.alice.vnc_lib.virtual_network_create(vn)
logger.info('Created virtual network %s' % vn.get_fq_name())
testfail = False
except PermissionDenied as e:
logger.info('Failed to create VN')
testfail = True
self.assertThat(testfail, Equals(False))
param = {"resource_type": "virtual_network,virtual_machine"}
headers = {'X-Auth-Token': self.alice.vnc_lib.get_auth_token()}
logger.info("alice has read permission for only one resource")
response = requests.get(
self.url,
params=param,
stream=True,
headers=headers)
self.assertEqual(response.status_code, 403)
vnc_aal_add_rule(self.admin.vnc_lib, user.proj_rg,
rule_str='virtual-machine %s:R' % user.role)
logger.info("alice has read permission for both resources")
response = requests.get(
self.url,
params=param,
stream=True,
headers=headers)
self.assertEqual(response.status_code, 200)
# test_rbac_user_role_with_multiple_resources
# end TestWatchPermission
if __name__ == '__main__':
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
unittest.main()
|
from .base import IODescriptor
from .file import File
from .json import JSON
from .text import Text
from .image import Image
from .numpy import NumpyNdarray
from .pandas import PandasSeries
from .pandas import PandasDataFrame
from .multipart import Multipart
__all__ = [
"File",
"Image",
"IODescriptor",
"JSON",
"Multipart",
"NumpyNdarray",
"PandasDataFrame",
"PandasSeries",
"Text",
]
|
import os
import copy
import shutil
import datetime
import numpy as np
from functools import wraps
from time import time
from pyaspect.specfemio.headers import ForceSolutionHeader
from pyaspect.specfemio.headers import SolutionHeader
from pyaspect.specfemio.headers import StationHeader
from pyaspect.specfemio.headers import RecordHeader
################################################################################
#
# Misc. Helper functions
#
################################################################################
def timer(func):
@wraps(func)
def wrap(*args, **kwargs):
t_start = time()
result = func(*args, **kwargs)
t_end = time()
print(f'Function \'{func.__name__}({args},{kwargs})\' executed in {(t_end-t_start):4.3f}s\n')
return result
return wrap
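# Example usage of @timer (illustrative doctest-style sketch):
# >>> @timer
# ... def add(a, b):
# ...     return a + b
# >>> add(1, 2)  # prints: Function 'add((1, 2),{})' executed in 0.000s
# 3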
################################################################################
#
# Helper reading and writing functions:
# SPECFEM3D STATIONS and SOLUTIONS type files
#
################################################################################
def _join_path_fname(fqp,fname):
fqpname = os.path.join(fqp, fname)
return fqpname
def _join_relpath_fname(fqp,start_fqp,fname):
path = os.path.relpath(fqp, start=start_fqp)
fqpname = os.path.join(path, fname)
return fqpname
def _get_file_path(fqp,fname):
return _join_path_fname(fqp, fname)
def _get_header_path(fqp,fname):
return _join_path_fname(fqp, f'pyheader.{fname.lower()}')
def _get_file_header_paths(fqp,fname):
fqpname = _get_file_path(fqp, fname)
header_fqpname = _get_header_path(fqp, fname)
return fqpname, header_fqpname
def _mk_symlink(src,dst):
if not os.path.islink(dst):
os.symlink(src, dst)
def _mk_relative_symlink(src_fqp,start_fqp,dst_fqp):
if not os.path.islink(dst_fqp):
rel_src_path = os.path.relpath(src_fqp, start_fqp)
os.symlink(rel_src_path, dst_fqp)
def _copy_recursive_dir(src_fqp,dst_fqp):
try:
shutil.copytree(src_fqp,dst_fqp)
except Exception as e:
print(e)
################################################################################
#
# Functions for writing SPECFEM3D SOLUTION type files
#
################################################################################
def forcesolution_2_str(fs):
fslines_str = f'{fs.name}\n'
fslines_str += 'time shift: %s\n' %(str(fs.tshift))
fslines_str += 'f0: %s\n' %(str(fs.f0))
fslines_str += 'latorUTM: %s\n' %(str(fs.lat_yc))
fslines_str += 'longorUTM: %s\n' %(str(fs.lon_xc))
fslines_str += 'depth: %s\n' %(str(fs.depth_km)) #VERIFY
fslines_str += 'factor force source: %s\n' %(str(fs.factor_fs))
fslines_str += 'component dir vect source E: %s\n' %(str(fs.comp_src_EX))
fslines_str += 'component dir vect source N: %s\n' %(str(fs.comp_src_NY))
fslines_str += 'component dir vect source Z_UP: %s\n' %(str(fs.comp_src_Zup))
return fslines_str
def cmtsolution_2_str(cmts):
cmtlines_str = f'{cmts.name}\n'
cmtlines_str += 'event name: %s\n' %(str(cmts.ename))
cmtlines_str += 'time shift: %s\n' %(str(cmts.tshift))
cmtlines_str += 'half duration: %s\n' %(str(cmts.hdur))
cmtlines_str += 'latorUTM: %s\n' %(str(cmts.lat_yc))
cmtlines_str += 'longorUTM: %s\n' %(str(cmts.lon_xc))
cmtlines_str += 'depth: %s\n' %(str(cmts.depth_km))
cmtlines_str += 'Mrr: %s\n' %(str(1e7*cmts.mrr)) #Nm to dyne-cm
cmtlines_str += 'Mtt: %s\n' %(str(1e7*cmts.mtt)) #Nm to dyne-cm
cmtlines_str += 'Mpp: %s\n' %(str(1e7*cmts.mpp)) #Nm to dyne-cm
cmtlines_str += 'Mrt: %s\n' %(str(1e7*cmts.mrt)) #Nm to dyne-cm
cmtlines_str += 'Mrp: %s\n' %(str(1e7*cmts.mrp)) #Nm to dyne-cm
cmtlines_str += 'Mtp: %s' %(str(1e7*cmts.mtp)) #Nm to dyne-cm
return cmtlines_str
################################################################################
#
# Functions for writing SPECFEM3D STATION type files
#
################################################################################
def station_auto_name(s):
return f't{str(s.trid).zfill(6)}g{str(s.gid).zfill(2)}'
def network_auto_name(s):
return f's{str(s.sid).zfill(2)}'
def station_auto_data_fname_id(s):
net_code = network_auto_name(s)
stat_code = station_auto_name(s)
return f'{net_code}.{stat_code}'
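# For example, a station with sid=2, trid=7, gid=1 yields the data fname id
# 's02.t000007g01' (network code 's02', station code 't000007g01').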
def station_to_str(s,auto_name=False,auto_network=False):
sname = s.name
if auto_name:
sname = station_auto_name(s)
#sname = f't{str(s.trid).zfill(6)}g{str(s.gid).zfill(2)}'
if 10 < len(sname):
# the SPECFEM default is 32 characters, but in my testing I could
# only use 10 characters -> it might be a FORTRAN compiler optimization
# issue. The "read(IIN,'(a)',iostat) line" in read_stations is where
# things go haywire
raise Exception('trace name is too long -> 10 chars or fewer required')
snet = s.network
if auto_network:
snet = network_auto_name(s)
#snet = f's{str(s.sid).zfill(2)}'
slat = s.lat_yc # or Y coordinate
slon = s.lon_xc # or X coordinate
selev = s.elevation
sbur = s.depth
return '%s %s %.2f %.2f %.2f %.2f\n' %(sname,snet,slat,slon,selev,sbur)
def station_list_to_str(l_stations,auto_name=False,auto_network=False):
str_stations = ''
for i in range(len(l_stations)):
str_stations += station_to_str(l_stations[i],
auto_name=auto_name,
auto_network=auto_network)
return str_stations
################################################################################
#
# Functions for making different lists and groups of stations
#
################################################################################
def make_station_half_cross_members(station=None,delta=None):
if delta == 0:
raise ValueError('delta cannot equal zero')
station_members = []
l_gid = np.array([1,2,3])
if delta < 0:
l_gid += 3
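# gids 1-3 mark the +delta members; shifting by 3 gives gids 4-6 for -delta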
# add x + delta
station_xp = copy.deepcopy(station)
station_xp.lon_xc += delta
station_xp.gid = l_gid[0]
station_members.append(station_xp)
# add y + delta
station_yp = copy.deepcopy(station)
station_yp.lat_yc += delta
station_yp.gid = l_gid[1]
station_members.append(station_yp)
# add z + delta
station_zp = copy.deepcopy(station)
station_zp.depth += delta
station_zp.gid = l_gid[2]
station_members.append(station_zp)
return station_members
def make_station_cross_members(station=None,delta=None):
station_members = []
# positive incremented coordinate group
station_members += make_station_half_cross_members(station,delta)
# negative incremented coordinate group
station_members += make_station_half_cross_members(station,-delta)
return station_members
def make_station_cross_group(station=None,delta=None):
station_group = []
# add "central" member (no coordinate change)
cpy_station = copy.deepcopy(station)
cpy_station.gid = station.gid = 0
cpy_station['delta'] = delta
station_group.append(cpy_station)
# add pos/neg coordinate members
station_group += make_station_cross_members(cpy_station,delta)
return station_group
def make_station_half_cross_group(station=None,delta=None):
station_group = []
# add "central" member (no coordinate change)
cpy_station = copy.deepcopy(station)
cpy_station.gid = station.gid = 0
cpy_station['delta'] = delta
station_group.append(cpy_station)
# add pos/neg coordinate members
station_group += make_station_half_cross_members(cpy_station,delta)
return station_group
def make_grouped_station_headers(stations,delta,full_cross=True):
group_station_list = []
if full_cross:
for i in range(len(stations)):
group_station = make_station_cross_group(station=stations[i],delta=delta)
#group_station_list += group_station
group_station_list.append(group_station)
else:
for i in range(len(stations)):
group_station = make_station_half_cross_group(station=stations[i],delta=delta)
#group_station_list += group_station
group_station_list.append(group_station)
#return list(set(group_station_list))
return group_station_list
def make_grouped_cross_station_headers(stations,delta):
return make_grouped_station_headers(stations,delta,True)
def make_grouped_half_cross_station_headers(stations,delta):
return make_grouped_station_headers(stations,delta,False)
def flatten_grouped_headers(l_group):
if not isinstance(l_group[0], list):
raise Exception('group list must be at least 2 dimensional')
return list(sum(l_group, []))
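# Example: flatten_grouped_headers([[a, b], [c]]) -> [a, b, c] (flattens one level)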
def flatten_grouped_headers_unique(l_group):
return list(set(flatten_grouped_headers(l_group)))
def is_grouped_headers(headers):
is_grouped = False
if isinstance(headers[0], list):
is_grouped = True
return is_grouped
################################################################################
#
# Functions for getting coordinates
#
################################################################################
def prune_header_list(l_headers,key,val):
if key not in l_headers[0].keys():
raise KeyError(f'field: {key}, does not exist')
pruned = []
for h in l_headers:
if h[key] != val:
pruned.append(h)
return pruned
def get_xyz_coords_from_header(h):
return np.array([h.lon_xc,h.lat_yc,h.depth])
def get_xyz_coords_from_station(s):
return get_xyz_coords_from_header(s)
def get_xyz_coords_from_solution(s):
return get_xyz_coords_from_header(s)
def get_xyz_coords_from_header_list(l_headers,unique=False):
xyz = np.zeros((len(l_headers),3))
for i in range(len(l_headers)):
h = l_headers[i]
xyz[i,:] = get_xyz_coords_from_header(h)[:]
if unique:
xyz = np.unique(xyz, axis=0)
return xyz
def get_unique_xyz_coords_from_header_list(l_headers):
return get_xyz_coords_from_header_list(l_headers,unique=True)
def get_xyz_coords_from_station_list(l_stations):
return get_xyz_coords_from_header_list(l_stations)
def get_xyz_coords_from_solution_list(l_stations):
return get_xyz_coords_from_header_list(l_stations)
def get_unique_xyz_coords_from_station_list(l_stations):
return get_unique_xyz_coords_from_header_list(l_stations)
def get_unique_xyz_coords_from_solution_list(l_stations):
return get_unique_xyz_coords_from_header_list(l_stations)
def get_xyz_coords_from_headers_except(l_headers,key=None,val=None,unique=False):
p_headers = prune_header_list(l_headers,key,val)
return get_xyz_coords_from_header_list(p_headers,unique=unique)
def get_xyz_coords_from_station_list_except(l_stations,key=None,val=None):
return get_xyz_coords_from_headers_except(l_stations,key=key,val=val)
def get_unique_xyz_coords_from_headers_except(l_headers,key=None,val=None):
return get_xyz_coords_from_headers_except(l_headers,key=key,val=val,unique=True)
def get_unique_xyz_coords_from_station_list_except(l_stations,key=None,val=None):
return get_xyz_coords_from_headers_except(l_stations,key=key,val=val,unique=True)
################################################################################
#
# Functions for getting SOLUTION groups and lists
#
################################################################################
def make_triplet_force_solution_members(src=None):
#from pyaspect.specfemio.headers import ForceSolutionHeader
solution_members = []
# create Force 001 (North/Y axis)
force_solution_1 = src.copy()
force_solution_1.sid = 1
force_solution_1.comp_src_EX = 0
force_solution_1.comp_src_NY = src.comp_src_EX
force_solution_1.comp_src_Zup = 0
hstr_1 = ForceSolutionHeader.create_header_name(eid=force_solution_1.eid,
sid=force_solution_1.sid,
date=force_solution_1.date,
lat_yc=force_solution_1.lat_yc,
lon_xc=force_solution_1.lon_xc,
depth=force_solution_1.depth)
force_solution_1.name = hstr_1
solution_members.append(force_solution_1)
# create Force 002 (Z axis)
force_solution_2 = src.copy()
force_solution_2.sid = 2
force_solution_2.comp_src_EX = 0
force_solution_2.comp_src_NY = 0
force_solution_2.comp_src_Zup = src.comp_src_EX
hstr_2 = ForceSolutionHeader.create_header_name(eid=force_solution_2.eid,
sid=force_solution_2.sid,
date=force_solution_2.date,
lat_yc=force_solution_2.lat_yc,
lon_xc=force_solution_2.lon_xc,
depth=force_solution_2.depth)
force_solution_2.name = hstr_2
solution_members.append(force_solution_2)
return solution_members
def make_triplet_force_solution_group(src=None):
if src.sid != 0:
raise ValueError('arg: \'src\' had non-zero solution_id (src.sid)')
if src.comp_src_EX == 0:
raise ValueError('arg: \'src\' has comp_src_EX=0')
return [src] + make_triplet_force_solution_members(src)
def make_grouped_triplet_force_solution_headers(solutions=None):
triplet_solution_list = []
for i in range(len(solutions)):
triplet_solution = make_triplet_force_solution_group(solutions[i])
triplet_solution_list.append(triplet_solution)
return triplet_solution_list
################################################################################
#
# Functions for replicating Stations by Solution list
#
################################################################################
def make_replicated_station_headers_from_src_list(l_srcs,l_recs):
#from pyaspect.specfemio.headers import SolutionHeader
#from pyaspect.specfemio.headers import StationHeader
if not isinstance(l_srcs[0],SolutionHeader):
raise TypeError(f'l_srcs[:] elements must be of type: {SolutionHeader}')
if not isinstance(l_recs[0],StationHeader):
raise TypeError(f'l_recs[:] elements must be of type: {StationHeader}')
l_grp_recs_by_srcs = []
for i in range(len(l_srcs)):
src = l_srcs[i]
cpy_recs = copy.deepcopy(l_recs)
for rec in cpy_recs:
rec.sid = src.sid
rec.eid = src.eid
l_grp_recs_by_srcs.append(cpy_recs)
return l_grp_recs_by_srcs
################################################################################
#
# Functions for creating Reciprocal Stations and Solution lists
#
################################################################################
def make_grouped_reciprocal_station_headers_from_cmt_list(l_cmt,delta,full_cross=True):
# Make the main stations
l_vrecs = []
for cmt in l_cmt:
tr_bname = 'tr'
new_r = StationHeader(name=tr_bname,
network='NL', #FIXME
lat_yc=cmt.lat_yc,
lon_xc=cmt.lon_xc,
elevation=0.0,
depth=cmt.depth,
trid=cmt.eid)
l_vrecs.append(new_r)
# Make the group cross stations for the derivatives
return make_grouped_station_headers(stations=l_vrecs,delta=delta,full_cross=full_cross)
def make_grouped_cross_reciprocal_station_headers_from_cmt_list(l_cmt,delta):
return make_grouped_reciprocal_station_headers_from_cmt_list(l_cmt,delta,full_cross=True)
def make_grouped_half_cross_reciprocal_station_headers_from_cmt_list(l_cmt,delta):
return make_grouped_reciprocal_station_headers_from_cmt_list(l_cmt,delta,full_cross=False)
def make_grouped_reciprocal_force_solution_triplet_headers_from_rec_list(l_rec):
#import datetime
#from pyaspect.specfemio.headers import ForceSolutionHeader
# First we make a single virtual source per receiver
l_vsrcs = []
for rec in l_rec:
new_s = ForceSolutionHeader(ename=f'Event-{str(rec.trid).zfill(4)}',
lat_yc=rec.lat_yc,
lon_xc=rec.lon_xc,
depth=rec.depth,
tshift=0.0,
date=datetime.datetime.now(),
f0=0.0,
factor_fs=1,
comp_src_EX=1,
comp_src_NY=0,
comp_src_Zup=0,
proj_id=0,
eid=rec.trid,
sid=0)
l_vsrcs.append(new_s)
# Second: make the force "triplets" used for calculating moment-tensor derivatives
return make_grouped_triplet_force_solution_headers(solutions=l_vsrcs)
def make_grouped_reciprocal_force_solution_triplet_headers_from_xyz_coords(xyz):
# First we make a single virtual source per receiver
l_vsrcs = []
for i in range(len(xyz)):
lon_xc = xyz[i,0]
lat_yc = xyz[i,1]
depth = xyz[i,2]
new_s = ForceSolutionHeader(ename=f'Event-{str(i).zfill(4)}',
lat_yc=lat_yc,
lon_xc=lon_xc,
depth=depth,
tshift=0.0,
date=datetime.datetime.now(),
f0=0.0,
factor_fs=1,
comp_src_EX=1,
comp_src_NY=0,
comp_src_Zup=0,
proj_id=0,
eid=i,
sid=0)
l_vsrcs.append(new_s)
# Second: make the force "triplets" used for calculating moment-tensor derivatives
return make_grouped_triplet_force_solution_headers(solutions=l_vsrcs)
def make_replicated_reciprocal_station_headers_from_src_triplet_list(l_vsrc,l_vrec):
#from pyaspect.specfemio.headers import ForceSolutionHeader
#from pyaspect.specfemio.headers import StationHeader
if not isinstance(l_vsrc[0][0],ForceSolutionHeader):
raise TypeError(f'l_vsrc[:][:] elements must be of type: {ForceSolutionHeader}')
if not isinstance(l_vrec[0][0],StationHeader):
raise TypeError(f'l_vrec[:][:] elements must be of type: {StationHeader}')
l_grp_vrecs_by_vsrcs = []
for i in range(len(l_vsrc)):
vsgrp = l_vsrc[i]
vrecs_per_vsrc = []
for vsrc in vsgrp:
cpy_vrecs = copy.deepcopy(flatten_grouped_headers(l_vrec))
for vrec in cpy_vrecs:
vrec.sid = vsrc.sid
vrec.eid = vsrc.eid
vrecs_per_vsrc.append(cpy_vrecs)
l_grp_vrecs_by_vsrcs.append(vrecs_per_vsrc)
return l_grp_vrecs_by_vsrcs
################################################################################
#
# Functions for creating Records and lists of Records
#
################################################################################
def df_to_header_list(df,HeaderCls):
return [HeaderCls.from_series(row) for index, row in df.iterrows()]
def make_record_headers(l_src=None,l_rec=None):
if not isinstance(l_src,list):
raise Exception('l_src must be a list type')
if not isinstance(l_rec,list):
raise Exception('l_rec must be a list type')
if len(l_src) != len(l_rec):
raise Exception('Lengths of l_src and l_rec must be equal')
#from pyaspect.specfemio.headers import SolutionHeader
#from pyaspect.specfemio.headers import StationHeader
#from pyaspect.specfemio.headers import RecordHeader
l_records = []
is_src_list = isinstance(l_src[0],list)
if is_src_list:
if not isinstance(l_src[0][0],SolutionHeader):
raise TypeError(f'l_src[:][:] elements must be of type: {SolutionHeader}')
if not isinstance(l_rec[0][0],list):
raise Exception('Sources are grouped, but receivers appear not to be.')
if not isinstance(l_rec[0][0][0],StationHeader):
raise TypeError(f'l_rec[:][:][:] elements must be of type: {StationHeader}')
for i in range(len(l_src)):
sgrp = l_src[i]
for j in range(len(sgrp)):
s = sgrp[j]
for r in l_rec[i][j]:
r.eid = s.eid
r.sid = s.sid
record = RecordHeader(solutions_h=sgrp,stations_h=flatten_grouped_headers(l_rec[i]),rid=i)
l_records.append(record)
else:
if not isinstance(l_src[0],SolutionHeader):
raise TypeError(f'l_src[:] elements must be of type: {SolutionHeader}')
if not isinstance(l_rec[0],list):
raise Exception('l_rec is not compliant with l_src')
if not isinstance(l_rec[0][0],StationHeader):
raise TypeError(f'l_rec[:][:] elements must be of type: {StationHeader}')
for i in range(len(l_src)):
s = l_src[i]
for r in l_rec[i]:
r.eid = s.eid
r.sid = s.sid
record = RecordHeader(solutions_h=s,stations_h=l_rec[i],rid=i)
l_records.append(record)
return l_records
|
from django.urls import include, path
from rest_framework import routers
from .viewsets import (
DeviceViewSet,
NotificationSettingsViewSet,
ParameterViewSet,
TestHistoryViewSet,
TestViewSet,
)
router = routers.DefaultRouter()
router.register("devices", DeviceViewSet, basename="device")
router.register(
"notification_settings", NotificationSettingsViewSet, basename="notification_settings"
)
router.register("parameters", ParameterViewSet, basename="parameter")
router.register("tests", TestViewSet, basename="test")
router.register("test_history", TestHistoryViewSet, basename="test_history")
urlpatterns = [
path("", include(router.urls)),
path("accounts/", include("rest_registration.api.urls")),
]
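# With DefaultRouter, each registration above exposes list and detail routes,
# e.g. GET /devices/ and GET /devices/<pk>/ (URL names 'device-list' and
# 'device-detail', derived from the basename).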
|
def _generate_function(identifier):
class Function:
def __init__(self, field='*'):
self.field = field
def evaluate(self):
if hasattr(self.field, 'evaluate'):
field = self.field.evaluate()
else:
field = self.field
return '{}({})'.format(identifier, field)
return Function
def _generate_function_with_param(identifier):
class Function:
def __init__(self, n, *fields):
self.n = n
self.fields = fields
def evaluate(self):
fields = ', '.join(self.fields)
return '{}({}, {})'.format(identifier, fields, self.n)
return Function
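# Example usage (illustrative; 'COUNT' and 'NTILE' are assumed identifiers):
# Count = _generate_function('COUNT')
# Count('id').evaluate()          # -> 'COUNT(id)'
# Ntile = _generate_function_with_param('NTILE')
# Ntile(4, 'score').evaluate()    # -> 'NTILE(score, 4)'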
|
import numpy as np
import cv2
import tritonclient.grpc as grpc_client
import tritonclient.http as httpclient
MODEL_NAME = 'thymio_model'
URL = 'localhost:8000'  # host:port only; the client builds the /v2/models/... paths itself
TRITON_CLIENT = httpclient.InferenceServerClient(url=URL)
INPUT_NAME = "input_tensor:0"
def get_bytes_img_request(batch=1):
img = np.random.randint(low=0, high=255, size=(320, 320, 3), dtype=np.uint8)
#_, buffer = cv2.imencode('.jpg', img)
#input_bytes = np.array([buffer.tobytes()])
#input_bytes = np.tile(input_bytes, (batch, 1))
batch_arr = np.array([img], dtype="float32")
# InferInput takes the input name, its shape, and the datatype; note this
# builds a gRPC-style input while TRITON_CLIENT below is the HTTP client
bytes_input = grpc_client.InferInput(INPUT_NAME, batch_arr.shape, 'FP32')
bytes_input.set_data_from_numpy(batch_arr)
return [bytes_input]
def send_request(inputs):
return TRITON_CLIENT.infer(model_name=MODEL_NAME, inputs=inputs)
#inputs = get_bytes_img_request(batch=1)
img = np.random.randint(low=0, high=255, size=(320, 320, 3), dtype=np.uint8).astype(np.float32)
batch = np.array([img], dtype="float32")
print(batch.shape)
inputs=[httpclient.InferInput(INPUT_NAME, batch.shape, "FP32")]
inputs[0].set_data_from_numpy(batch)
outputs = [httpclient.InferRequestedOutput("detection_scores")]
response = TRITON_CLIENT.infer(MODEL_NAME,inputs,request_id=str(1),outputs=outputs)
print(response)
#output = send_request(inputs)
#print(output.get_response())
|
from flask import Flask, render_template, session, request, redirect
app = Flask(__name__)
app.secret_key = 'my_secret_key'
@app.route('/')
def index():
return render_template('index.html')
@app.route('/process/<id>', methods = ['POST'])
def process(id):
print ("I love flask! And {}".format(id))
return redirect('/')
if __name__ == '__main__':
app.run(debug = True)
"""
Will this work? What
What will this print???
no it will not work if you wanted it to show on the page, when it prints, it prints in the terminal. In order to print onto the page we would have to render the template index.html and use sessions, or other such methods. If the terminal was your goal then it does work, it prints I love flask! and peaches
"""
|