text stringlengths 8 6.05M |
|---|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests BaseNotificationPipeline."""
import mock
import tempfile
import unittest
from datetime import datetime
import MySQLdb
from google.cloud.security.notifier.pipelines import base_notification_pipeline as bnp
from tests.unittest_utils import ForsetiTestCase
class FakePipeline(bnp.BaseNotificationPipeline):
    """Minimal concrete pipeline used to exercise the abstract base class."""

    def run(self):
        # BUG FIX: the original `def run():` omitted `self`, so calling
        # instance.run() would raise TypeError. No-op implementation of the
        # abstract run() hook.
        pass
class BaseNotificationPipelineTest(ForsetiTestCase):
    """Tests for base_notification_pipeline."""

    @mock.patch(
        'google.cloud.security.common.data_access._db_connector.DbConnector',
        autospec=True)
    def setUp(self, mock_conn):
        """Create a FakePipeline with stub configs before each test."""
        fake_global_conf = {
            'db_host': 'x',
            'db_name': 'y',
            'db_user': 'z',
        }
        fake_pipeline_conf = {
            'gcs_path': 'gs://blah'
        }
        self.fake_pipeline = FakePipeline(
            'abc', '123', None, fake_global_conf, {}, fake_pipeline_conf)

    @mock.patch(
        'google.cloud.security.common.data_access.violation_dao.ViolationDao',
        autospec=True)
    def test_get_violation_dao(self, mock_violation_dao):
        """_get_violation_dao() builds a ViolationDao from global configs."""
        self.fake_pipeline._get_violation_dao()
        mock_violation_dao.assert_called_once_with(
            self.fake_pipeline.global_configs)

    @mock.patch.object(bnp.BaseNotificationPipeline, '_get_violation_dao')
    def test_get_violations(self, mock_violation_dao):
        """_get_violations() groups DAO results by violation category."""
        fake_timestamp = '1111'
        got_violations = ['a', 'b', 'c']
        got_bucket_acl_violations = ['x', 'y', 'z']
        mock_get_all_violations = mock.MagicMock(
            side_effect=[got_violations, got_bucket_acl_violations])
        mock_violation_dao.return_value.get_all_violations = (
            mock_get_all_violations)
        expected = {
            'violations': got_violations,
            'bucket_acl_violations': got_bucket_acl_violations
        }
        actual = self.fake_pipeline._get_violations(fake_timestamp)
        # FIX: assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(expected, actual)
# Allow running this test module directly (e.g. `python this_file.py`).
if __name__ == '__main__':
    unittest.main()
|
import ast
import datetime
# Returns a flat list, repeated once per game in the log:
# [who_is_playing ('R'/'C'), start_time, child selection,
#  end_time, game result (1 win / 0 lose), number of moves,
#  total game time in seconds, who_is_playing, ...]
def analyze_tangram_game(filename, pathname='./processed_data/txt/'):
    """Parse one tangram session log into a flat per-game result list.

    Each log line is "<6-char prefix>{dict literal}"; the dict is expected to
    carry 'comment' (a list of event tokens), 'time'
    ('%Y_%m_%d_%H_%M_%S_%f' timestamp) and 'obj'.
    """
    who_is_playing = 'R'  # alternates 'R' (robot) / 'C' (child) after each game
    child_play_flag = False  # unused here; leftover from the old algorithm
    number_of_moves = 0
    game_result = 0  # 0 for failure, 1 for success
    first_solved_flag = False
    result_list = []
    with open(pathname + filename, 'r') as fp:
        for line in fp:
            # Strip the fixed 6-character prefix and parse the dict literal.
            dic = ast.literal_eval(line[6:])
            if len(dic['comment']) > 0:
                if dic['comment'][0] == 'not_solved':
                    # Each failed placement attempt counts as one move.
                    number_of_moves = number_of_moves + 1
                if dic['comment'][0] == 'solved':
                    # One-shot: only the first 'solved' per game closes the record.
                    if first_solved_flag is False:
                        first_solved_flag = True
                        # NOTE(review): start_time/first_finish_flag are only set by
                        # a prior 'press_treasure' event; a log that begins with
                        # 'solved'/'finish' would raise NameError -- confirm logs
                        # always open with a treasure press.
                        end_time = datetime.datetime.strptime(dic['time'], '%Y_%m_%d_%H_%M_%S_%f')
                        total_time = end_time - start_time
                        game_result = 1  # win
                        result_list.append(end_time)
                        result_list.append(game_result)
                        result_list.append(number_of_moves)
                        result_list.append(total_time.total_seconds())
                        # Alternate the player for the next game.
                        if who_is_playing == 'R':
                            who_is_playing = 'C'
                        else:
                            who_is_playing = 'R'
                        number_of_moves = 0
                if dic['comment'][0] == 'finish':
                    # Timed-out/finished game: same bookkeeping, recorded as a loss.
                    if first_finish_flag is False:
                        first_finish_flag = True
                        end_time = datetime.datetime.strptime(dic['time'], '%Y_%m_%d_%H_%M_%S_%f')
                        total_time = end_time - start_time
                        game_result = 0  # lose
                        result_list.append(end_time)
                        result_list.append(game_result)
                        result_list.append(number_of_moves)
                        result_list.append(total_time.total_seconds())
                        if who_is_playing == 'R':
                            who_is_playing = 'C'
                        else:
                            who_is_playing = 'R'
                        number_of_moves = 0
                if dic['comment'][0] == 'press_treasure' and dic['comment'][2]=='game':
                    # A new game starts: record player, start time and the chosen
                    # tangram, then re-arm the one-shot flags.
                    start_time = datetime.datetime.strptime(dic['time'], '%Y_%m_%d_%H_%M_%S_%f')
                    result_list.append(who_is_playing)
                    result_list.append(start_time)
                    result_list.append(dic['comment'][1][0])
                    first_solved_flag = False
                    first_finish_flag = False
            if dic['obj']=='stop_button':
                # Manual stop: close the current game as a loss.
                if first_finish_flag is False:
                    first_finish_flag = True
                    end_time = datetime.datetime.strptime(dic['time'], '%Y_%m_%d_%H_%M_%S_%f')
                    total_time = end_time - start_time
                    game_result = 0  # lose
                    result_list.append(end_time)
                    result_list.append(game_result)
                    result_list.append(number_of_moves)
                    result_list.append(total_time.total_seconds())
                    if who_is_playing == 'R':
                        who_is_playing = 'C'
                    else:
                        who_is_playing = 'R'
                    number_of_moves = 0
    return result_list
# Analyze one session log and pad the row out to a fixed width of 70 columns
# so downstream CSV/spreadsheet tooling gets a uniform schema.
result = analyze_tangram_game('bag_tangram_test31.txt', pathname='./processed_data/txt/')
if len(result) < 70:
    for n in range(70-len(result)):
        result.append('NULL')
print result  # NOTE: Python 2 print statement
# old algorithm that output only the child results
#
# with open(pathname + filename, 'r') as fp:
# for line in fp:
# print line[6:]
# dic = ast.literal_eval(line[6:])
# if len(dic['comment']) > 0:
# # if dic['comment'][0] == 'select_treasure':
# # #print dic['comment']
# # #print dic['comment'][1][0]
# if dic['comment'][0] == 'not_solved':
# # print dic['comment']
# # print dic['comment'][0]
# if child_play_flag is True:
# number_of_moves = number_of_moves + 1
# if dic['comment'][0] == 'child_win':
# # print dic['comment'][0] + ' ' + dic['time']
# end_time = datetime.datetime.strptime(dic['time'], '%Y_%m_%d_%H_%M_%S_%f')
# total_time = end_time - start_time
# # print 'total time: '
# # print total_time
# game_result = 1 # child win
# result_list.append(game_result)
# result_list.append(number_of_moves)
# result_list.append(total_time.total_seconds())
# child_play_flag = False
# number_of_moves = 0
# if dic['comment'][0] == 'finish':
# # print dic['comment'][0] + ' ' + dic['time']
# if child_play_flag is True:
# end_time = datetime.datetime.strptime(dic['time'], '%Y_%m_%d_%H_%M_%S_%f')
# total_time = end_time - start_time
# game_result = 0 # child lose
# result_list.append(game_result)
# result_list.append(number_of_moves)
# result_list.append(total_time.total_seconds())
# child_play_flag = False
# number_of_moves = 0
# # if dic['comment'][0] == 'generate_selection':
# # print dic['comment'][0]
#
# if dic['comment'][0] == 'press_treasure':
# if dic['comment'][3][0] == 'child_selection' and dic['comment'][2] == 'robot':
# # print 'child_selected ' + str(dic['comment'][1][0]) + ' ' + dic['time']
# child_play_flag = True
# start_time = datetime.datetime.strptime(dic['time'], '%Y_%m_%d_%H_%M_%S_%f')
# result_list.append(dic['comment'][1][0])
# else:
# child_play_flag = False
# start_time = datetime.datetime.strptime(dic['time'], '%Y_%m_%d_%H_%M_%S_%f')
# result_list.append(dic['comment'][1][0]) |
import torch
import numpy as np
import time
import math
from random import randint
#Define constants.
TRAIN_DATA_SIZE = 60000
TEST_DATA_SIZE = 10000
DATA_SIZE = 28*28
BATCH_SIZE = 200
GENS_PER_DIGIT = 10
MIN_EPOCHS = 50
ABSOLUTE_EPOCHS = 150
LABEL_SMOOTHING = 0.9
#Read the MNIST dataset.
def read_mnist():
    """Load the raw MNIST IDX files from the working directory.

    Returns four lists (train images, train labels, test images, test labels)
    of floats in [0, 1), one entry per byte, with the IDX headers removed.

    FIX: the original opened four files it never closed and read them one
    byte at a time; this reads each file in bulk inside a `with` block.
    """
    file_names = ["MNIST_TRAIN_IMAGES", "MNIST_TRAIN_LABELS",
                  "MNIST_TEST_IMAGES", "MNIST_TEST_LABELS"]
    barrays = []
    for name in file_names:
        with open(name, "rb") as f:
            raw = f.read()
        # Same scaling as the original int.from_bytes(byte)/256 per byte.
        barrays.append([byte / 256 for byte in raw])
    train_images_barray, train_labels_barray, test_images_barray, test_labels_barray = barrays
    # Strip the IDX headers: 16 bytes for image files, 8 for label files.
    del train_images_barray[:16]
    del test_images_barray[:16]
    del train_labels_barray[:8]
    del test_labels_barray[:8]
    print("MNIST loaded.")
    return train_images_barray, train_labels_barray, test_images_barray, test_labels_barray
#Declare the structure of the Discriminator.
class Discriminator(torch.nn.Module):
    """MLP scoring a pair of (one-hot label, flattened image) samples.

    Input width is (784 + 10) * 2 because samples are fed two per row;
    output is a single sigmoid probability of "real".
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        hidden_sizes = [(784 + 10) * 2, 800, 200, 30]
        layers = []
        for n_in, n_out in zip(hidden_sizes, hidden_sizes[1:]):
            layers.append(torch.nn.Linear(n_in, n_out))
            layers.append(torch.nn.ReLU())
        layers.append(torch.nn.Linear(30, 1))
        layers.append(torch.nn.Sigmoid())
        self.linear = torch.nn.Sequential(*layers)

    def forward(self, labels, inp):
        """Concatenate labels and images along dim 1 and score the batch."""
        combined = torch.cat((labels.float(), inp), 1)
        return self.linear(combined)
#Declare the structure of the Generator.
class Generator(torch.nn.Module):
    """MLP mapping a 10-d one-hot label + 10-d noise vector to a 784-pixel image."""

    def __init__(self):
        super(Generator, self).__init__()
        hidden_sizes = [20, 80, 300]
        layers = []
        for n_in, n_out in zip(hidden_sizes, hidden_sizes[1:]):
            layers.append(torch.nn.Linear(n_in, n_out))
            layers.append(torch.nn.Tanh())
        layers.append(torch.nn.Linear(300, 784))
        layers.append(torch.nn.Sigmoid())
        self.linear = torch.nn.Sequential(*layers)

    def forward(self, labels, random):
        """Concatenate label and noise (1-D, dim 0) and synthesize an image."""
        combined = torch.cat((labels.float(), random.float()))
        return self.linear(combined)
'''
Arrange the MNIST data into 4 arrays of tensors, 2 for the training data and 2 for the test data.
For the pair of arrays for training and testing, 1 is for the images and 1 is for the labels.
In the image array, each tensor is 784 elements, and the array is 60000 elemenets.
In the label array, each tensor is a single element, and the array is 60000 elements.
'''
def arrange_data(train_images_barray, train_labels_barray, test_images_barray, test_labels_barray):
    """Arrange raw MNIST byte lists into 4 arrays of tensors.

    Returns [train images, train labels, test images, test labels]; image
    arrays hold 784-element tensors, label arrays hold scalar tensors.

    FIX: the original reused `i` for both the outer 4-way loop and the inner
    data loops (shadowing). It happened to work because `for` reassigns the
    variable each outer iteration, but it is fragile and misleading; this
    version is data-driven with distinct loop variables.
    """
    specs = [
        (train_images_barray, TRAIN_DATA_SIZE, True),
        (train_labels_barray, TRAIN_DATA_SIZE, False),
        (test_images_barray, TEST_DATA_SIZE, True),
        (test_labels_barray, TEST_DATA_SIZE, False),
    ]
    data = []
    for barray, size, is_image in specs:
        if is_image:
            # Slice 784 pixels per sample.
            t_array = [torch.from_numpy(np.asarray(barray[k * 784:(k + 1) * 784]))
                       for k in range(size)]
        else:
            # One scalar label per sample.
            t_array = [torch.from_numpy(np.asarray(barray[k])) for k in range(size)]
        data.append(t_array)
    print("Data arranged.")
    return data
#Declare one-hot transform function.
def one_hot(index):
    """Return a 10-element int tensor with a 1 at position `index`."""
    return torch.tensor([1 if pos == int(index) else 0 for pos in range(10)])
def train_model(data):
    """Adversarially train the conditional GAN on arranged MNIST data.

    data: [train images, train labels, test images, test labels] as produced
    by arrange_data(). Returns (discriminator, generator) -- note the order.
    """
    discriminator = Discriminator()
    generator = Generator()
    discriminator_opt = torch.optim.Adadelta(discriminator.parameters(), lr=4.0)
    generator_opt = torch.optim.Adadelta(generator.parameters())
    current_milli_time = lambda: int(round(time.time() * 1000))
    before_time = current_milli_time()
    discriminator_incorrect = 0
    epoch = 0
    # Train until the discriminator is sufficiently fooled (>= 40% wrong),
    # bounded below by MIN_EPOCHS and above by ABSOLUTE_EPOCHS.
    while (discriminator_incorrect < 0.4 or epoch < MIN_EPOCHS) and epoch < ABSOLUTE_EPOCHS:
        discriminator_batch_loss = 0
        generator_batch_loss = 0
        for batch in range(0, int(TRAIN_DATA_SIZE/BATCH_SIZE)):
            discriminator_opt.zero_grad()
            generator_opt.zero_grad()
            # Discriminator step: half real samples, half generated samples.
            real_images = data[0][batch*BATCH_SIZE:(batch+1)*BATCH_SIZE]
            real_labels = data[1][batch*BATCH_SIZE:(batch+1)*BATCH_SIZE]
            real_labels = [one_hot(real_labels[i].item()) for i in range(0, len(real_labels))]
            gen_labels = [one_hot(randint(0, 9)) for i in range(0, BATCH_SIZE)]
            fake_images = [generator.forward(gen_labels[i], torch.rand(10)).double() for i in range(0, BATCH_SIZE)]
            input_tensor = torch.stack(real_images+fake_images)
            # Samples are paired two-per-row ("doubled up") to match the
            # discriminator's (784+10)*2 input width.
            real_label_tensor = torch.stack(real_labels).view(-1).view(int(BATCH_SIZE/2), 10*2)
            fake_label_tensor = torch.stack(gen_labels).view(-1).view(int(BATCH_SIZE/2), 10*2)
            label_tensor = torch.cat((real_label_tensor, fake_label_tensor))
            doubled_up_input = input_tensor.view(BATCH_SIZE, DATA_SIZE*2)
            # Instance noise on the discriminator input.
            doubled_up_input += (torch.randn(doubled_up_input.size())/6).double()
            decision = discriminator(label_tensor, doubled_up_input.float())
            decision = decision.view(-1)
            # One-sided label smoothing on the real half.
            discriminator_label = torch.tensor([LABEL_SMOOTHING]*int(BATCH_SIZE/2)+[0]*int(BATCH_SIZE/2))
            discriminator_incorrect = 2*(torch.sum(torch.abs(decision-discriminator_label.float()))/(2*BATCH_SIZE)).item()
            discriminator_train_loss = torch.nn.functional.binary_cross_entropy(decision, discriminator_label.float(), reduction="sum")
            discriminator_train_loss.backward()
            discriminator_opt.step()
            discriminator_batch_loss += discriminator_train_loss.data.item()
            # Generator step.
            new_gen_labels = [one_hot(randint(0, 9)) for i in range(0, BATCH_SIZE)]
            # NOTE(review): this samples with the *old* gen_labels while the
            # discriminator below is conditioned on new_gen_labels -- looks
            # like a label mismatch; confirm whether gen_labels should be
            # new_gen_labels here.
            generated = torch.stack([generator.forward(gen_labels[i], torch.rand(10)).double() for i in range(0, BATCH_SIZE)]).float()
            new_fake_label_tensor = torch.stack(new_gen_labels).view(-1).view(int(BATCH_SIZE/2), 10*2)
            generator_pred = discriminator(new_fake_label_tensor, generated.view(int(BATCH_SIZE/2), DATA_SIZE*2))
            generator_label = torch.ones(int(BATCH_SIZE/2))
            generator_train_loss = torch.nn.functional.binary_cross_entropy(generator_pred.resize(generator_pred.size()[0]), generator_label.float(), reduction="sum")
            generator_train_loss.backward()
            generator_opt.step()
            generator_batch_loss += generator_train_loss.item()
            # Progress report roughly 10 times per epoch.
            if ((batch+1)%int((TRAIN_DATA_SIZE/BATCH_SIZE)/10) == 0):
                print("DBL : "+str(discriminator_batch_loss/(batch+1))+" GBL : "+str(generator_batch_loss/(batch+1)))
        print("")
        print("Discriminator Epoch "+str(epoch+1)+" Loss : "+str(discriminator_batch_loss/(TRAIN_DATA_SIZE/BATCH_SIZE)))
        print("Generator Epoch "+str(epoch+1)+" Loss : "+str(generator_batch_loss/(TRAIN_DATA_SIZE/BATCH_SIZE)))
        print("Portion Discriminator Incorrect : "+str(discriminator_incorrect))
        print("")
        epoch += 1
    after_time = current_milli_time()
    seconds = math.floor((after_time-before_time)/1000)
    minutes = math.floor(seconds/60)
    seconds = seconds % 60
    print(str(epoch)+" epochs took "+str(minutes)+" minute(s) "+str(seconds)+" second(s).")
    return discriminator, generator;
def generate_images(generator):
    """Write GENS_PER_DIGIT generated images per digit (0-9) to disk.

    Pixels are scaled to [0, 255] bytes in LGAN_GENERATED_IMAGES; the matching
    digit labels go to LGAN_GENERATED_LABELS, one byte per image.
    """
    # FIX: use `with` so both files are closed (the original never closed
    # label_file at all).
    with open("LGAN_GENERATED_IMAGES", "wb+") as image_file, \
            open("LGAN_GENERATED_LABELS", "wb+") as label_file:
        for digit in range(0, 10):
            gen_label = one_hot(digit)
            for i in range(GENS_PER_DIGIT):
                image_tensor = generator.forward(gen_label, torch.rand(10))
                # Scale to byte range and clamp to [0, 255].
                image_tensor = image_tensor * torch.tensor(256)
                image_tensor = torch.min(image_tensor, (torch.ones(image_tensor.size()) * 255).float())
                image_tensor = torch.max(image_tensor, torch.zeros(image_tensor.size()).float())
                image_file.write(bytearray(list(map(int, image_tensor.tolist()))))
                # BUG FIX: bytearray(int(digit)) allocated `digit` zero bytes;
                # we want a single byte holding the digit's value.
                label_file.write(bytearray([digit]))
if __name__ == "__main__":
    train_images_barray, train_labels_barray, test_images_barray, test_labels_barray = read_mnist()
    data = arrange_data(train_images_barray, train_labels_barray, test_images_barray, test_labels_barray)
    # BUG FIX: train_model returns (discriminator, generator); the original
    # unpacked them in the opposite order and passed the discriminator to
    # generate_images.
    discriminator, generator = train_model(data)
    generate_images(generator)
|
"""
Script maintain all machines
Usage: sysadmin.py [options] <machines>...
sysadmin.py [options]
Arguments:
machines specifies individual machines to run commands on
Options:
--update update all specified machines.
--upgrade upgrade all specified machines
--reboot reboot all specified machines
--check_mem prints free memory to screen across all machines
--timeout=<tme> a timeout option so that ssh will move on if machine is blocked [default: 600]
--file_name=<fle> which file to pull machines from use [default: sysadmin.yml]
--install=<pkg> installs a package across all machines
--command=<cmd> runs a generic command across all machines
Examples:
Check the memory on all machines with a timeout of 10 seconds:
python sysadmin.py --check_mem --timeout=10
When using things that might take longer it is probably worth using a timeout of around 5 minutes:
python sysadmin.py --check_mem --timeout=300
By default the commands will be run for the machines found in 'sysadmin.yml'. This command will reboot all machines listed in that file:
python sysadmin --reboot
A different admin file can be specified:
python sysadmin.py --reboot --file_name=other_admin_file.yml
Specific machines can be entered as arguments:
python sysadmin.py --reboot pg11 pg14
"""
from subprocess import call
from docopt import docopt
from signal import SIGALRM, signal, alarm
import getpass
import yaml
class Alarm(Exception):
    """Raised by the SIGALRM handler when a remote command exceeds its timeout."""
    pass
def alarm_handler(signum, frame):
    """
    signal() handler: convert SIGALRM into an Alarm exception so callers
    can abort a blocked ssh invocation with try/except.
    """
    raise Alarm
def run_command_with_timeout(cmd, timeout):
    """
    Run `cmd` over ssh on the current machine, aborting after `timeout` seconds.

    NOTE(review): this reads the module-global `machine` set by the caller's
    loop instead of taking it as a parameter -- confirm before reusing this
    function outside that loop. A timeout of 0 disables the alarm entirely.
    """
    signal(SIGALRM, alarm_handler)
    alarm(timeout)
    if timeout:
        try:
            call(['ssh','-t' ,machine, cmd])
            alarm(0)  # command finished in time: disarm the alarm
        except Alarm:
            print "Timeout for: %s" % cmd
    else:
        call(['ssh','-t' ,machine, cmd])
# --- Module-level CLI parsing: docopt reads the module docstring above ---
arguments = docopt(__doc__)
file_name = arguments['--file_name']
upgrade= arguments['--upgrade']
update = arguments['--update']
reboot = arguments['--reboot']
check_mem = arguments['--check_mem']
install = arguments['--install']
timeout = int(arguments['--timeout'])
machines = arguments['<machines>']
command = arguments['--command']
# Fall back to the machine list in the YAML admin file when none are given
# on the command line.
if not machines:
    fle = open(file_name,'r')
    # NOTE(review): yaml.load without an explicit Loader executes arbitrary
    # tags on untrusted files; consider yaml.safe_load.
    machines = yaml.load(fle)
    fle.close()
    machines = machines['machines']
if __name__ == '__main__':
    # Prompt for the sudo password only when a privileged action was requested.
    # NOTE(review): `update` appears twice in this condition while `reboot`
    # is absent, so --reboot alone never asks for a password -- confirm
    # whether the second `update` should be `reboot`.
    if upgrade or update or install or update:
        password = getpass.getpass()
    for machine in machines:
        print 'Attempting to access %s' %machine
        # NOTE(review): piping the password through `echo` exposes it in the
        # remote shell's process listing; consider ssh with sudo -A or NOPASSWD.
        if update:
            cmd = 'echo %s | sudo -S apt-get update' %(password)
            run_command_with_timeout(cmd, timeout)
        if upgrade:
            cmd = 'echo %s | sudo -S apt-get -y upgrade' %(password)
            run_command_with_timeout(cmd, timeout)
        if install:
            cmd = 'echo %s | sudo -S apt-get -y install %s' %(password,install)
            run_command_with_timeout(cmd, timeout)
        if reboot:
            cmd = 'echo %s | sudo -S reboot' %(password)
            run_command_with_timeout(cmd, timeout)
        if check_mem:
            cmd = ' cat /proc/meminfo | grep MemFree'
            run_command_with_timeout(cmd, timeout)
        if command:
            cmd = command
            run_command_with_timeout(cmd, timeout)
|
from zeroconf import ServiceBrowser, Zeroconf
from pprint import pprint
class Listener:
    """Zeroconf service listener that reports discovered Bond bridges.

    For each service that appears, invokes `callback` with a dict holding the
    bridge's ID (first DNS label) and dotted-quad IP address.
    """

    def __init__(self, callback):
        self.callback = callback

    def remove_service(self, zeroconf, type, name):
        # Departures are ignored; the scanner only reports discoveries.
        pass

    def add_service(self, zeroconf, type, name):
        info = zeroconf.get_service_info(type, name)
        self.callback({
            "Bond ID": info.name.split('.')[0],
            # addresses[0] is packed IPv4 bytes. FIX: the original formatted
            # each byte as str(ord(chr(byte))) -- a no-op round-trip on ints;
            # format the byte directly.
            "IP Address": '.'.join(str(byte) for byte in info.addresses[0]),
        })
class Scanner(object):
    """Owns a Zeroconf instance and browses for `_bond._tcp.local.` services,
    forwarding each discovery to `callback` via a Listener."""

    def __init__(self, callback):
        self.zeroconf = Zeroconf()
        self.listener = Listener(callback=callback)
        # The browser registers itself with self.zeroconf; only a local
        # reference is kept here, matching the library's usage pattern.
        browser = ServiceBrowser(self.zeroconf,
                                 "_bond._tcp.local.", self.listener)

    def __del__(self):
        # Drop the listener first, then shut down the zeroconf engine.
        del self.listener
        self.zeroconf.close()
|
#!/usr/bin/env python
# Converts transcription-factor identifiers to Entrez gene IDs via the
# Bioconductor wrapper.
# check if python module 'src.mp_bioclite_wrapper' is available
try:
    from src.mp_bioclite_wrapper import bioconductor
    bioc = bioconductor()
# NOTE(review): bare except also swallows errors raised *inside*
# bioconductor(); narrowing to ImportError would be safer.
except:
    print "could not import python module 'src.mp_bioclite_wrapper'"
    quit()
# NOTE(review): `list` shadows the builtin of the same name, and this literal
# is never passed to convert_geneids below (which reads "test.csv") --
# confirm which input is intended.
list = ['TFAP2A','Arnt','Arnt::Ahr','Ar','T','Pax5','NR2F1','Ddit3::Cebpa',
'E2F1','NFIL3','En1','ELK1','Evi1','FOXF2','FOXD1','FOXC1','FOXL1','GATA2',
'GATA3','Gfi','Foxq1','Foxd3','FOXI1','HLF','HNF1A','NHLH1','IRF1','IRF2',
'MEF2A','Myf','MZF1_1-4','MZF1_5-13','MAX','MYC::MAX','NFYA','NF-kappaB',
'Nkx2-5','PPARG','Pax2','Pax4','Pax6','PBX1','RORA_1','RORA_2','RREB1',
'RXRA::VDR','Prrx2','ELK4','SOX9','Sox17','SPIB','SRF','SRY','Sox5','znf143',
'NFE2L1::MafG','TEAD1','TAL1::TCF3','Hand1::Tcfe2a','USF1','YY1','ETS1','Myb',
'REL','ZEB1','NFKB1','TP53','RELA','TBP','Hltf','Spz1','NR3C1','HNF4A',
'NR1H2::RXRA','Zfp423','Mafb','TLX1::NFIC','Nkx3-2','NKX3-1','Nobox','ZNF354C',
'MIZF','Pdx1','BRCA1','Lhx3','ELF5','CTCF','Tal1::Gata1','Esrrb','Pou5f1','Sox2',
'Stat3','Tcfcp2l1 ','Zfx','Myc','FOXA1','EWSR1-FLI1','GABPA','Gata1','Klf4',
'REST','RUNX1','STAT1','Mycn','Foxa2','ESR1','PPARG::RXRA','NFE2L2','ARID3A',
'NFATC2','HNF1B','EBF1','INSM1','FEV','FOXO3','HOXA5','RXR::RAR_DR5',
'NR4A2','NFIC','Egr1','PLAG1','Nr2e3','SPI1','CREB1','AP1','SP1',
'CEBPA','ESR2','HIF1A::ARNT','SOX10']
# convert to EntrezIDs
list, black_list = bioc.convert_geneids(
    input_file = "test.csv", input_format = 'entrezid',
    output_file = "E Symbol.txt", output_format = 'symbol')
print list
print black_list
class Node:
    """A single element of a singly linked list."""

    def __init__(self, data):
        # Payload plus a pointer to the following node (None marks the tail).
        self.data = data
        self.next = None
class LinkedList:
    """Head-only singly linked list with a console dump helper."""

    def __init__(self):
        self.head = None

    def print_list(self):
        """Print each node's data, head to tail, one value per line."""
        node = self.head
        while node:
            print(node.data)
            node = node.next
# Build the list 1 -> 2 -> 3 -> 4 -> 5 and dump it to stdout.
List = LinkedList()
List.head = Node(1)
second, third, fourth, fifth = Node(2), Node(3), Node(4), Node(5)
# Chain the nodes together in order.
for left, right in ((List.head, second), (second, third),
                    (third, fourth), (fourth, fifth)):
    left.next = right
List.print_list()
|
import time
from timeit import default_timer as timer
from datetime import timedelta
def bubble_sort(elements):
    """Sort `elements` in place with a plain (unoptimized) bubble sort."""
    n = len(elements)
    for pass_num in range(n):
        # After each pass the largest unsorted value has bubbled to the end,
        # so the comparison window shrinks by one.
        for j in range(n - pass_num - 1):
            if elements[j] > elements[j + 1]:
                elements[j], elements[j + 1] = elements[j + 1], elements[j]
def bubble_sort_optimized(elements):
    """Sort `elements` in place with bubble sort, stopping early once a full
    pass makes no swaps (the list is then already sorted)."""
    elements_length = len(elements)
    for item in range(elements_length):
        swapped = False
        # The tail beyond elements_length-item-1 is already in final position.
        for i in range(0, elements_length - item - 1):
            if elements[i] > elements[i + 1]:
                elements[i], elements[i + 1] = elements[i + 1], elements[i]
                swapped = True
        # FIX: idiomatic truthiness test instead of `swapped == False`.
        if not swapped:
            break
a = [5, 2, 123, 6, 900, 23, 1, 6, 234, 123, 0, 4]
# Time each variant on its own copy so the second run is not handed the
# already-sorted list (the original reused the mutated `a`, skewing the
# comparison).
unoptimized_input = list(a)
timer_start = timer()
bubble_sort(unoptimized_input)
timer_end = timer()
# BUG FIX: timer() yields seconds; the original passed them as minutes=.
print(timedelta(seconds=timer_end - timer_start))
optimized_input = list(a)
timer_start = timer()
bubble_sort_optimized(optimized_input)
timer_end = timer()
print(timedelta(seconds=timer_end - timer_start))
|
from collections import Counter
from operator import le, lt, ge, gt, eq, ne
def string_evaluation(s, conditions):
    """Evaluate single-character count conditions against string `s`.

    Each condition is "<operand><op><operand>" where an operand is either a
    single letter (replaced by its count in `s`) or a single digit (taken
    literally), e.g. "a<b" or "a==2". Returns the list of boolean results.
    """
    counts = Counter(s)
    operators = {'<=': le, '<': lt, '>=': ge, '>': gt, '==': eq, '!=': ne}

    def operand_value(token):
        # Digits are literals; letters are looked up as counts.
        return int(token) if token.isdigit() else counts[token]

    return [operators[cond[1:-1]](operand_value(cond[0]), operand_value(cond[-1]))
            for cond in conditions]
|
import random
import dice
# `dice` is a sibling module expected to expose `dice_one` (the first roll);
# this script adds a second local roll and prints the total.
dice_two = random.randint(1,6 )
total = dice.dice_one + dice_two
print('Your total of two dices is: ' + str(total))
######################################################################
# HELPERS / UTILS #
######################################################################
from constants import valid_install_exit_codes, valid_uninstall_exit_codes
from subprocess import Popen, PIPE, CalledProcessError, check_call
from Classes.PathManager import PathManager
from timeit import default_timer as timer
from Classes.Metadata import Metadata
from Classes.Packet import Packet
from viruscheck import virus_check
from datetime import datetime
import pyperclip as clipboard
from signal import SIGTERM
from colorama import Back, Fore
from switch import Switch
from extension import *
import webbrowser
import subprocess
import keyboard
import requests
import tempfile
import registry
import difflib
import zipfile
import hashlib
import ctypes
import random
import click
import json
import sys
import os
import re
# Shared mutable state for the zip-install arrow-key menu (see install_package).
index = 0
final_value = None
path = ''
# Resolve install directories once at import time via the project PathManager.
manager = PathManager()
parent_dir = manager.get_parent_directory()
current_dir = manager.get_current_directory()
def is_admin():
    """Return True when running with elevated rights.

    POSIX: effective uid 0. Windows (no os.getuid): shell32 IsUserAnAdmin.
    """
    try:
        return os.getuid() == 0
    except AttributeError:
        # os.getuid is missing on Windows; fall back to the WinAPI check.
        return ctypes.windll.shell32.IsUserAnAdmin() != 0
class HiddenPrints:
    """Context manager that silences print() by routing stdout to os.devnull."""

    def __enter__(self):
        # Remember the live stream, then swap in a devnull writer.
        self._original_stdout, sys.stdout = sys.stdout, open(os.devnull, 'w')

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close the devnull handle and restore the saved stream.
        sys.stdout.close()
        sys.stdout = self._original_stdout
def get_download_url(packet):
    """Pick the download URL attribute of `packet` matching this platform.

    Returns None on an unrecognized platform (same as the original if-chain).
    """
    attr_by_platform = {'win32': 'win64', 'darwin': 'darwin', 'linux': 'linux'}
    attr = attr_by_platform.get(sys.platform)
    if attr is not None:
        return getattr(packet, attr)
def download(url, noprogress, silent, download_type):
    """Stream `url` to a unique file in the temp dir and return its path.

    Display: `noprogress` prints a plain MB counter; otherwise a 20-slot
    progress bar is drawn unless `silent` is set.
    """
    path = f'{tempfile.gettempdir()}\\Setup{download_type}'
    # Avoid clobbering an earlier download by appending a random suffix.
    while os.path.isfile(path):
        path = f'{tempfile.gettempdir()}\\Setup{random.randint(200, 10000)}'
    with open(path, "wb") as f:
        response = requests.get(url, stream=True)
        total_length = response.headers.get('content-length')
        if total_length is None:
            # No Content-Length header: write in one shot, no progress shown.
            f.write(response.content)
        else:
            dl = 0
            full_length = int(total_length)
            for data in response.iter_content(chunk_size=7096):
                dl += len(data)
                f.write(data)
                if noprogress:
                    sys.stdout.write(
                        f"\r{round(dl / 1000000, 2)} / {round(full_length / 1000000, 2)} MB")
                    sys.stdout.flush()
                elif not noprogress and not silent:
                    complete = int(20 * dl / full_length)
                    fill_c, unfill_c = '#' * complete, ' ' * (20 - complete)
                    sys.stdout.write(
                        f"\r[{fill_c}{unfill_c}] ⚡ {round(dl / full_length * 100, 1)} % ⚡ {round(dl / 1000000, 1)} / {round(full_length / 1000000, 1)} MB")
                    sys.stdout.flush()
    return path
def get_error_cause(error: str, method: str) -> str:
    """Map an installer/uninstaller error string to a user-facing cause.

    Returns ['no-error'] for whitelisted exit codes; otherwise echoes a short
    diagnosis and returns the detailed message from get_error_message().
    method: 'installation' or 'uninstallation' (selects the exit-code table).
    """
    if method == 'installation':
        for code in valid_install_exit_codes:
            if f'exit status {code}' in error:
                return ['no-error']
    if method == 'uninstallation':
        for code in valid_uninstall_exit_codes:
            if f'exit status {code}' in error:
                return ['no-error']
    if '[WinError 740]' in error and 'elevation' in error:
        # Process needs elevation to execute.
        click.echo(click.style(f'\nAdministrator Elevation Requied. Exit Code [0001]', fg='red'))
        return get_error_message('0001', 'installation')
    if 'exit status 2' in error or 'exit status 1' in error:
        # User declined the permission prompt.
        click.echo(click.style(f'\nAdministrative Privileges Declined. Exit Code [0101]', fg='red'))
        return get_error_message('0101', 'installation')
    if 'exit status 4' in error:
        # Fatal error during installation.
        click.echo(click.style(f'\nFatal Error. Exit Code [1111]', fg='red'))
        return get_error_message('1111', 'installation')
    # BUG FIX: the original tested `'exit status 3010' or '... 2359301' in error`,
    # which is always true (non-empty string literal), so every unmatched error
    # was reported as a reboot request and the unknown-error branch was dead.
    if 'exit status 3010' in error or 'exit status 2359301' in error:
        # Installer requesting a reboot.
        return get_error_message('1010', 'installation')
    else:
        click.echo(click.style(f'\nUnknown Error. Exited With Code [0000]', fg='red'))
        handle_unknown_error(error)
        return get_error_message('0000', 'installation')
def run_cmd(command: str, metadata: Metadata, method: str):
    """Run an install/uninstall command, mapping failures to friendly errors.

    method: 'installation' or 'uninstallation' (forwarded to get_error_cause
    to pick the right exit-code whitelist).
    """
    try:
        check_call(command, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    except (CalledProcessError, OSError, FileNotFoundError) as err:
        # Let Ctrl+C abort immediately while the error message is displayed.
        keyboard.add_hotkey(
            'ctrl+c', lambda: os._exit(0))
        disp_error_msg(get_error_cause(str(err), method), metadata)
def install_package(path, packet: Packet, metadata: Metadata) -> str:
    """Install the downloaded setup file at `path` per the packet's manifest.

    Windows-only in this version: dispatches on the installer type (.exe,
    .msi, .zip). Zip installs extract to the temp dir and present an
    arrow-key menu (via `keyboard` hotkeys) to pick an executable to run.
    """
    download_type = packet.win64_type
    custom_install_switch = packet.custom_location
    directory = packet.directory
    package_name = packet.json_name
    switches = packet.install_switches
    if sys.platform == 'win32':
        if download_type == '.exe':
            # Ensure the setup file carries a .exe extension before running it.
            if '.exe' not in path:
                if not os.path.isfile(path + '.exe'):
                    os.rename(path, f'{path}.exe')
                path = path + '.exe'
            command = path + ' '
            # Append the custom install-location switch when supported.
            if custom_install_switch:
                if directory and directory != '':
                    if '/D=' in custom_install_switch:
                        # NSIS-style /D= must not be quoted.
                        command += ' ' + custom_install_switch + f'{directory}'
                    else:
                        command += ' ' + custom_install_switch + \
                            f'"{directory}"'
                if directory == '':
                    click.echo(click.style(
                        f'Installing {package_name} To Default Location, Custom Installation Directory Not Supported By This Installer!', fg='yellow'))
            for switch in switches:
                command = command + ' ' + switch
            run_cmd(command, metadata, 'installation')
        elif download_type == '.msi':
            command = 'msiexec.exe /i ' + path + ' '
            for switch in switches:
                command = command + ' ' + switch
            # msiexec needs elevation up front; bail out early if we lack it.
            if not is_admin():
                click.echo(click.style(
                    '\nAdministrator Elevation Required. Exit Code [0001]', fg='red'))
                disp_error_msg(get_error_message('0001', 'installation'))
                handle_exit('ERROR', None, metadata)
            run_cmd(command, metadata, 'installation')
        elif download_type == '.zip':
            if not metadata.no_color:
                click.echo(click.style(
                    f'Unzipping File At {path}', fg='green'))
            if metadata.no_color:
                click.echo(click.style(
                    f'Unzipping File At {path}'))
            zip_directory = fR'{tempfile.gettempdir()}\\{package_name}'
            with zipfile.ZipFile(path, 'r') as zip_ref:
                zip_ref.extractall(zip_directory)
            # Collect the extracted executables plus an 'Exit' menu entry.
            executable_list = []
            for name in os.listdir(zip_directory):
                if name.endswith('.exe'):
                    executable_list.append(name)
            executable_list.append('Exit')
            file_path = fR'{tempfile.gettempdir()}\\{package_name}'

            def trigger():
                # Redraw the menu, highlighting the entry at module-global
                # `index` (shared with the up/down hotkey handlers below).
                click.clear()
                for executable in executable_list:
                    if executable == executable_list[index]:
                        print(Back.CYAN + executable + Back.RESET)
                    else:
                        print(executable)
            trigger()

            def up():
                # Move the highlight up one entry.
                # NOTE(review): decrementing never exceeds len(), so the
                # wrap-around check looks ineffective for upward moves and
                # index can go negative (Python indexing still works).
                global index
                if len(executable_list) != 1:
                    index -= 1
                    if index >= len(executable_list):
                        index = 0
                    trigger()
                    return
                trigger()

            def down():
                # Move the highlight down one entry, wrapping to the top.
                global index
                if len(executable_list) != 1:
                    index += 1
                    if index >= len(executable_list):
                        index = 0
                    trigger()
                    return
                trigger()

            def enter():
                # Run the highlighted executable, or exit on 'Exit'.
                if executable_list[index] == 'Exit':
                    os._exit(0)
                    return
                else:
                    path = file_path + "\\" + executable_list[index]
                    click.echo(click.style(
                        f'Running {executable_list[index]}. Hit Control + C to Quit', fg='magenta'))
                    subprocess.call(path, stdout=PIPE, stdin=PIPE, stderr=PIPE)
                    quit()
            keyboard.add_hotkey('up', up)
            keyboard.add_hotkey('down', down)
            keyboard.add_hotkey('enter', enter)
            # Block until one of the hotkey handlers exits the process.
            keyboard.wait()
    # # TODO: Implement the macOS side.
    # if sys.platform == 'darwin':
    #     mount_dmg = f'hdiutil attach -nobrowse {file_name}'
# # TODO: Implement the macOS side.
# if sys.platform == 'darwin':
# mount_dmg = f'hdiutil attach -nobrowse {file_name}'
def get_correct_package_names(res: str) -> list:
    """Return the package names (the iteration keys) of the registry response.

    FIX: the manual append loop was just a copy of the iterable's elements;
    list() expresses that directly.
    """
    return list(res)
def get_hash_algorithm(checksum: str):
    """Infer the hash algorithm from the hex checksum's length.

    Returns 'md5', 'sha1', 'sha256' or 'sha512', or None for an unknown
    length. FIX: dict.get replaces the membership-test-then-index dance.
    """
    hashes = {32: "md5", 40: "sha1", 64: "sha256", 128: "sha512"}
    return hashes.get(len(checksum))
def get_checksum(bytecode: bytes, hash_algorithm: str):
    """Hex digest of `bytecode` under `hash_algorithm`, or None if the
    algorithm name is not a hashlib constructor."""
    hasher_factory = getattr(hashlib, hash_algorithm, None)
    if not hasher_factory:
        return None
    return hasher_factory(bytecode).hexdigest()
def send_req_all() -> dict:
    """Fetch the full package index from the Electric registry.

    Returns (parsed_json, elapsed_seconds). 15-second request timeout.
    """
    REQA = 'https://electric-package-manager.herokuapp.com/packages/'
    time = 0.0  # NOTE(review): shadows the imported `time` module locally
    response = requests.get(REQA, timeout=15)
    res = json.loads(response.text.strip())
    time = response.elapsed.total_seconds()
    return res, time
def get_pid(exe_name):
    """Return the PID (second tasklist column) of the first process row that
    mentions `exe_name`, or None when no row matches (Windows tasklist)."""
    proc = subprocess.Popen('tasklist', stdin=PIPE, stdout=PIPE, stderr=PIPE)
    raw_output, _ = proc.communicate()
    for row in raw_output.decode('utf-8').splitlines():
        if exe_name in row:
            return row.split()[1]
def find_approx_pid(exe_name) -> str:
    """Fuzzy-match `exe_name` against running tasklist process names.

    Returns the PID string of the closest-named process, or the sentinel 1
    when nothing matches (callers test for 1).
    """
    proc = subprocess.Popen('tasklist', stdin=PIPE, stdout=PIPE, stderr=PIPE)
    output, err = proc.communicate()
    lines = output.decode('utf-8').splitlines()
    cleaned_up_names = []
    for line in lines:
        try:
            name = line.split()[0]
        except IndexError:
            # Blank/header lines have no columns; skip them.
            continue
        # BUG FIX: str.strip('.exe') removes any leading/trailing '.', 'e',
        # 'x' characters (e.g. 'explorer.exe' -> 'plorer'), not the suffix;
        # drop only a trailing '.exe'.
        if name.endswith('.exe'):
            name = name[:-len('.exe')]
        cleaned_up_names.append(name)
    matches = difflib.get_close_matches(exe_name, cleaned_up_names)
    if matches != []:
        for line in lines:
            if matches[0] in line:
                return line.split()[1]
    return 1
def handle_exit(status: str, setup_name: str, metadata: Metadata):
    """Terminate cleanly based on how far the install progressed.

    When a download/install was in flight, kills the spawned setup process
    (looked up by executable name) first; always exits the interpreter with
    code 0 afterwards.
    """
    if status == 'Downloaded' or status == 'Installing' or status == 'Installed':
        # setup_name is a full path; the process table shows just the exe name.
        exe_name = setup_name.split('\\')[-1]
        os.kill(int(get_pid(exe_name)), SIGTERM)
        write('SafetyHarness Successfully Created Clean Exit Gateway',
              'green', metadata)
        write('\nRapidExit Using Gateway From SafetyHarness Successfully Exited With Code 0',
              'light_blue', metadata)
        os._exit(0)
    if status == 'Got Download Path':
        write('\nRapidExit Successfully Exited With Code 0', 'green', metadata)
        os._exit(0)
    else:
        write('\nRapidExit Successfully Exited With Code 0', 'green', metadata)
        os._exit(0)
def kill_running_proc(package_name: str, metadata: Metadata):
    """If *package_name* appears to be running, terminate it according
    to the yes/silent/prompt policy, or abort the installation."""
    parts = package_name.split('-')
    # "sublime-text" -> "Sublime Text" for user-facing messages
    name = ' '.join([p.capitalize() for p in parts])
    pid = int(find_approx_pid(package_name))
    if pid == 1:
        # sentinel from find_approx_pid: no close process match
        return
    if pid and pid != 1:
        if metadata.yes:
            # --yes: terminate without asking
            write(f'Terminating {name}.', 'green', metadata)
            os.kill(pid, SIGTERM)
            return
        if metadata.silent:
            # --silent: terminate without any output
            os.kill(pid, SIGTERM)
            return
        terminate = click.prompt(
            f'Electric Detected {name} Running In The Background. Would You Like To Terminate It? [y/n]')
        if terminate == 'y':
            write(f'Terminating {name}.', 'green', metadata)
            os.kill(pid, SIGTERM)
        else:
            # user declined: abort with exit code 1
            write('Aborting Installation!', 'red', metadata)
            write_verbose(
                f'Aborting Installation Due To {name} Running In Background', metadata)
            write_debug(
                f'Aborting Installation Due To {name} Running In Background. Process Was Not Terminated.', metadata)
            os._exit(1)
def kill_proc(proc, metadata: Metadata):
    """Terminate *proc* (when given) and exit the interpreter with 0."""
    if proc is None:
        write('\nRapidExit Successfully Exited With Code 0',
              'green', metadata)
        os._exit(0)
    proc.terminate()
    write('SafetyHarness Successfully Created Clean Exit Gateway',
          'green', metadata)
    write('\nRapidExit Using Gateway From SafetyHarness Successfully Exited With Code 0',
          'light_blue', metadata)
    os._exit(0)
def assert_cpu_compatible() -> int:
    """Print and return the number of logical CPUs.

    Fixes: the original printed the count but returned None despite the
    ``int`` annotation; returning the value keeps the printing behaviour
    and honours the declared return type.
    """
    cpu_count = os.cpu_count()
    print(cpu_count)
    return cpu_count
def find_existing_installation(package_name: str, display_name: str):
    """Return True when an uninstall registry key exists for the package
    under any of its known spellings, False otherwise.

    Lookup order (first hit wins): package name, lower-cased display
    name, display name as given.
    """
    for candidate in (package_name, display_name.lower(), display_name):
        if registry.get_uninstall_key(candidate):
            return True
    return False
def refresh_environment_variables() -> bool:
    """Run the bundled refreshvars.cmd helper and report success.

    Returns True when the script's stdout contains 'Finished'.  On
    failure it prints the captured stderr and returns False — the
    original fell off the end and returned None despite the ``bool``
    annotation (None is still falsy, so callers are unaffected).
    """
    proc = Popen(Rf'{current_dir}\scripts\refreshvars.cmd',
                 stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
    output, err = proc.communicate()
    if 'Finished' in output.decode('utf-8'):
        return True
    print('An error occurred')
    print(err.decode('utf-8'))
    return False
def check_virus(path: str, metadata: Metadata):
    """Run virus_check() on *path*, print detections and (unless silent)
    ask whether to continue; aborts via handle_exit() on refusal."""
    detected = virus_check(path)
    if detected:
        # detected presumably maps engine -> verdict — TODO confirm shape
        for value in detected.items():
            if not metadata.silent and not metadata.no_color:
                click.echo(click.style(f'\n{value[0]} => {value[1]}', fg='yellow'))
            elif metadata.no_color and not metadata.silent:
                click.echo(click.style(f'\n{value[0]} => {value[1]}', fg='white'))
            else:
                # silent mode: skip straight to the continue decision
                continue_install = 'y'
                if not metadata.silent:
                    continue_install = click.prompt('Would You Like To Continue? [y/n]')
                if continue_install == 'y':
                    pass
                else:
                    handle_exit('Virus Check', '', metadata)
    else:
        click.echo(click.style('No Viruses Detected!', fg='green'))
def setup_supercache():
    """Download the package index and cache it to supercache.json.

    Returns (res, time) as produced by send_req_all(); `_id` is dropped
    before the cache is written.

    Fixes: send_req_all() already returns the decoded JSON object, so
    the original's second ``json.loads(res)`` raised TypeError.
    """
    res, time = send_req_all()
    with open(Rf'{parent_dir}supercache.json', 'w+') as file:
        del res['_id']
        file.write(json.dumps(res, indent=4))
    return res, time
def update_supercache(res):
    """Persist *res* to supercache.json and timestamp superlog.txt.

    Fixes: the log was written to Rf'{parent_dir}\superlog.txt' while
    check_supercache_valid() reads Rf'{parent_dir}superlog.txt', so the
    cache could never validate; the paths now agree.  File handles are
    closed via ``with`` even on write errors.
    """
    with open(Rf'{parent_dir}supercache.json', 'w+') as file:
        file.write(json.dumps(res, indent=4))
    with open(Rf'{parent_dir}superlog.txt', 'w+') as logfile:
        # datetime.strptime('%Y-%m-%d %H:%M:%S.%f') in the validator
        # round-trips this str() format
        logfile.write(str(datetime.now()))
def check_supercache_valid():
    """Return True when superlog.txt exists and its timestamp is less
    than one day old, False otherwise."""
    log_path = Rf'{parent_dir}superlog.txt'
    if not os.path.isfile(log_path):
        return False
    with open(log_path, 'r') as log_file:
        stamp = datetime.strptime(log_file.read(), '%Y-%m-%d %H:%M:%S.%f')
    return (datetime.now() - stamp).days < 1
def handle_cached_request():
    """Serve the package index from supercache.json when present and
    non-empty, otherwise (re)build the cache via setup_supercache().

    Returns (index, seconds_taken_to_load).
    """
    cache_path = Rf'{parent_dir}supercache.json'
    if os.path.isfile(cache_path):
        start = timer()
        with open(cache_path) as cache_file:
            res = json.load(cache_file)
        end = timer()
        if res:
            return res, (end - start)
    return setup_supercache()
def generate_metadata(no_progress, silent, verbose, debug, no_color, yes, logfile, virus_check, reduce):
    """Bundle CLI flags into a Metadata object.

    NOTE: Metadata's positional order differs from this function's
    parameter order (no_color / yes / silent are rearranged) — keep the
    mapping in sync with the Metadata constructor.
    """
    return Metadata(no_progress, no_color, yes, silent, verbose, debug, logfile, virus_check, reduce)
def disp_error_msg(messages: list, metadata: Metadata):
    """Pretty-print an error block and run its interactive follow-ups.

    *messages* comes from get_error_message(); index 0 is the headline.
    Link lines are collected for optional browsing, backticked
    ``electric install`` commands for optional execution, and a 'Reboot'
    line triggers a reboot prompt.  Always ends via handle_exit().
    """
    if 'no-error' in messages:
        return
    reboot = False
    websites = []
    commands = []
    idx = 0
    for msg in messages:
        if idx == 0:
            # headline is highlighted in yellow
            click.echo(click.style(msg, fg='yellow'))
            idx += 1
            continue
        if 'Reboot' in msg:
            # a reboot request short-circuits the rest of the message
            reboot = True
            break
        if 'http' in msg:
            # collect link lines so the user can open one afterwards
            websites.append(msg.strip())
            click.echo(click.style(msg, fg='blue'))
            idx += 1
            continue
        if 'electric install' in msg:
            # extract the backtick-quoted command for optional execution
            commands.append(re.findall(r'\`(.*?)`', msg))
        else:
            click.echo(msg)
        idx += 1
    if reboot:
        reboot = click.confirm('Would you like to reboot? [y/n]')
        if reboot:
            os.system('shutdown /R')
    if commands:
        run = click.prompt('Would You Like To Install Node? [y/n]')
        if run == 'y':
            print('\n')
            os.system(commands[0][0])
    if websites:
        website = click.prompt('Would You Like To Visit Any Of The Above Websites? [y/n]')
        if website == 'y':
            try:
                webpage = int(click.prompt('Which Webpage Would You Like To Visit? ')) - 1
            except:
                handle_exit('ERROR', None, metadata)
            try:
                # entries look like '[n] <=> https://...'; the [8:] slice
                # presumably skips the '[n] <=> ' prefix — TODO confirm
                webbrowser.open(websites[webpage][8:])
            except:
                pass
    handle_exit('ERROR', None, metadata)
def get_error_message(code: str, method: str):
    """Map an Electric error *code* to its user-facing message lines.

    *method* is the operation name (e.g. 'installation'); *attr* strips
    the 'ation' suffix to obtain the verb ('install').  Returns a list
    of lines consumed by disp_error_msg(), or None for unknown codes.
    """
    attr = method.replace('ation', '')
    with Switch(code) as code:
        # 0001 — operation needs administrator elevation
        if code('0001'):
            return [
                f'\n[0001] => {method.capitalize()} failed because the software you tried to {attr} requires administrator permissions.',
                f'\n\nHow To Fix:\n\nRun Your Command Prompt Or Powershell As Administrator And Retry {method.capitalize()}.\n\nHelp:',
                '\n[1] <=> https://www.howtogeek.com/194041/how-to-open-the-command-prompt-as-administrator-in-windows-8.1/',
                '\n[2] <=> https://www.top-password.com/blog/5-ways-to-run-powershell-as-administrator-in-windows-10/\n\n']
        # 0002 — bad command supplied by the package manifest
        if code('0002'):
            return [
                f'\n[0002] => {method.capitalize()} failed because the installer provided an incorrect command for {attr}.\nFile a support ticket at https://www.electric.sh/support\n\nHelp:\nhttps://www.electric.sh/troubleshoot'
            ]
        # 0000 — unknown failure
        # NOTE(review): this entry points at electric.com while the rest
        # use electric.sh — confirm which domain is canonical.
        if code('0000'):
            return [
                f'\n[0000] => {method.capitalize()} failed due to an unknown reason.',
                '\nFile a support ticket at https://www.electric.com/support',
                '\n\nHelp:',
                '\nhttps://www.electric.sh/troubleshoot'
            ]
        # 0011 — node missing; fix command pre-copied to the clipboard
        if code('0011'):
            clipboard.copy('electric install node')
            return [
                '\n[0011] => Node(npm) is not installed on your system.',
                '\n\nHow To Fix:\n',
                'Run `electric install node` [ Copied To Clipboard ] To Install Node(npm)'
            ]
        # 0010 — python missing; fix command pre-copied to the clipboard
        if code('0010'):
            clipboard.copy('electric install python3')
            return [
                '\n[0010] => Python(pip) is not installed on your system.',
                '\n\nHow To Fix:\n',
                'Run `electric install python3` [ Copied To Clipboard ] To install Python(pip).\n\nHelp:',
                '\n[1] <=> https://www.educative.io/edpresso/how-to-add-python-to-path-variable-in-windows',
                '\n[2] <=> https://stackoverflow.com/questions/23708898/pip-is-not-recognized-as-an-internal-or-external-command'
            ]
        # 1010 — installer requested a reboot
        if code('1010'):
            return [
                f'\n[1010] => Installer Has Requested A Reboot In Order To Complete {method.capitalize()}.\n'
            ]
        # 1111 — fatal installer error; embeds a generated report
        if code('1111'):
            return [
                f'\n[1111] => The {attr.capitalize()}er For This Package Failed Due To A Fatal Error. This is likely not an issue or error with electric.',
                '\n\nWe recommend you raise a support ticket with the data generated below:',
                generate_report(),
                '\n\nHelp:\n',
                '\n[1] <=> https://www.electric.sh/errors/1111',
                '\n[2] <=> https://www.electric.sh/support',
            ]
        # 0101 — UAC prompt denied
        if code('0101'):
            return [
                f'\n[0101] => The installer / uninstaller was denied of Administrator permissions And failed to initialize successfully.',
                '\n\nHow To Fix:\n',
                'Make sure you accept prompt asking for administrator privileges or alternatively: \n',
                f'Run Your Command Prompt Or Powershell As Administrator And Retry {method.capitalize()}.\n\n\nHelp:',
                '\n[1] <=> https://www.electric.sh/errors/0101',
                '\n[2] <=> https://www.howtogeek.com/194041/how-to-open-the-command-prompt-as-administrator-in-windows-8.1/',
                '\n[3] <=> https://www.top-password.com/blog/5-ways-to-run-powershell-as-administrator-in-windows-10/\n\n'
            ]
def handle_unknown_error(err: str):
    """Offer to display *err*, then report whether at least two shell
    hosts (powershell / cmd) appear in the tasklist output."""
    if click.prompt('Would You Like To See The Error Message? [y/n]') == 'y':
        print(err)
    task_proc = subprocess.Popen('tasklist', stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdout_data, _ = task_proc.communicate()
    shell_count = 0
    for row in stdout_data.decode('utf-8').splitlines():
        columns = row.split()
        if not columns:
            continue
        # keep the original strip('.exe') character-set behaviour so the
        # comparison set stays identical
        process_name = columns[0].strip('.exe')
        if process_name in ('powershell', 'cmd'):
            shell_count += 1
    return shell_count >= 2
|
import importlib
# Default CSP implementation module; backtracking_search() rebinds this
# global to whichever implementation the caller names.
csp_module = importlib.import_module('csp_bitstring')
def recursive_backtracking(csp, assignment):
    """Depth-first backtracking search over csp_module's CSP interface.

    Returns a complete assignment extending *assignment*, or None when
    no consistent extension exists.
    """
    if csp_module.is_complete(csp, assignment):
        return assignment
    var = select_unassigned_variable(csp, assignment)
    for value in csp_module.order_domain_values(csp, var):
        if csp_module.is_consistent(csp, assignment, value):
            # assign_value returns a new assignment; recursion explores it
            new_assignment = csp_module.assign_value(assignment, value, var)
            result = recursive_backtracking(csp, new_assignment)
            if result is not None:
                return result
    return None
def backtracking_search(csp, csp_impl):
    """Solve *csp* using the CSP implementation module named *csp_impl*.

    Rebinds the module-global csp_module so the recursive helpers use
    the requested implementation.
    """
    global csp_module
    csp_module = importlib.import_module(csp_impl)
    return recursive_backtracking(csp, csp_module.null_assignment())
def select_unassigned_variable(csp, assignment):
    """Pick the next variable: its index equals the count of variables
    already assigned (variables are filled in order)."""
    return len(assignment)
def solution_is_consistent(csp, solution):
    """Check a full grid *solution* against every row/column constraint.

    *csp* unpacks as (width, height, horizontal_constraints,
    vertical_constraints); consistency checks are delegated to the
    active csp_module implementation.
    """
    w, h, horiz_constr, vert_constr = csp
    # reject solutions with the wrong dimensions outright
    if len(solution) != h or len(solution[0]) != w:
        return False
    for col in range(len(vert_constr)):
        if not csp_module.col_is_consistent(csp, solution, vert_constr[col], col):
            return False
    for row in range(len(horiz_constr)):
        if not csp_module.row_is_consistent(csp, solution[row], horiz_constr[row]):
            return False
    return True
|
import re
# Pattern: a 3+ letter word wrapped in a repeated '@' or '#' marker,
# doubled between the two words (e.g. "@abc@@cba@"); group 1 is the
# marker, groups 2/3 the words.
pattern = r"(@|#)([A-Za-z]{3,})\1\1([A-Za-z]{3,})\1"
data = input()
mirror_words = []
match = re.findall(pattern, data)
for m in match:
    first_word = m[1]  # findall tuples are 0-indexed, one behind the regex group numbers
    second_word = m[2]
    # a mirror pair reads the same when the second word is reversed
    if first_word == second_word[::-1]:
        mirror_words.append(first_word + " <=> " + second_word)
if len(match) == 0:
    print(f"No word pairs found!")
else:
    print(f"{len(match)} word pairs found!")
if len(mirror_words) == 0:
    print("No mirror words!")
else:
    print(f"The mirror words are:")
    print(", ".join(mirror_words))
# Smoke-test line: prints a greeting.
print("Hello Poland!")
|
import unittest
import numpy as np
from core.available_turn_coordinates_finder import AvailableTurnCoordinatesFinder
class AvailableTurnsComputerTest(unittest.TestCase):
    """Unit tests for AvailableTurnCoordinatesFinder.find().

    Boards are 3x3 numpy matrices where 0 marks a free cell and any
    non-zero value an occupied one.
    """

    def test_for_empty_field_returns_array_of_field_size(self):
        # every cell of an all-zero board is an available turn
        board = np.matrix('0 0 0; 0 0 0; 0 0 0')
        self.assertEqual(len(self.find_turns(board)), 9)

    def test_for_full_field_return_empty_array(self):
        board = np.matrix('1 1 1; 1 1 1; 1 1 1')
        self.assertEqual(len(self.find_turns(board)), 0)

    def test_returns_correct_positions_when_one_turn_left(self):
        board = np.matrix('0 1 1; 1 1 1; 1 1 1')
        self.assertEqual(self.find_turns(board), [(0, 0)])

    def test_returns_correct_positions_when_multiple_turns_left(self):
        board = np.matrix('0 1 1; 1 0 1; 1 1 0')
        self.assertEqual(self.find_turns(board), [(0, 0), (1, 1), (2, 2)])

    def find_turns(self, board):
        # helper keeping finder construction in one place
        return AvailableTurnCoordinatesFinder().find(board)
|
import json
from os.path import dirname, realpath, join
from flask import Blueprint, request, render_template, jsonify
import shared_variables as var
routes_module = Blueprint('routes_module', __name__)
# Data files live under <project root>/data relative to this module.
parent_dir_path = dirname(dirname(realpath(__file__)))
user_file = join(parent_dir_path, "data", "topUsers.json")
tuuser_file = join(parent_dir_path, "data", "topUsersOld.json")
@routes_module.route('/', methods=["GET"])
def homePage():
    """Render home.html with the twitter users listed in the two
    top-user JSON files, resolved against the twitusers collection.

    Fixes: removed the leftover debug ``print(len(users))`` that
    polluted server logs.
    """
    if request.method == 'GET':
        # each JSON file holds rows whose first column is a user id
        with open(user_file) as f:
            users = [int(u[0]) for u in json.load(f)]
        with open(tuuser_file) as f:
            users += [int(u[0]) for u in json.load(f)]
        db = var.mongo.db
        users = db.twitusers.find({'user_id': {'$in': users}})
        return render_template('home.html', users=users)
@routes_module.route('/tag/', methods=["POST"])
def tagUser():
    """Set a manual tag on a twituser document and echo the pair back."""
    if request.method == 'POST':
        db = var.mongo.db
        user_handle = request.form["user_handle"]
        tag_value = request.form["tag_value"]
        # update the single matching user document in place
        res = db.twitusers.update_one(
            {"user_handle": user_handle},
            {"$set": {"manual_tag": tag_value}}
        )
        return jsonify({"user_handle": user_handle, "tag_value": tag_value})
|
from __future__ import division
#defines function
def get_at_content(dna):
    """Return the fraction of A/T bases in *dna*, rounded to 2 dp.

    Case-insensitive; ambiguous bases (e.g. N) count toward the length
    but not toward the A/T tally.
    """
    sequence = dna.upper()
    at_total = sequence.count('A') + sequence.count('T')
    return round(at_total / len(sequence), 2)
# Defines new variable to measure function content
my_at_content = get_at_content("ATTTGGGCCCCCTTTCCC")
# print code
print(str(my_at_content))
# print AT content
print(get_at_content("ATTTGGGCCCCCTTTAAAGG"))
# print at content lowercase
print(get_at_content("aatttttcccccgggggga"))
# bonus count
print(get_at_content("tnnacgnnat"))
# other bonus count
# BUG FIX: 'get_at_contentC' was a typo that raised NameError.
my_at_content = get_at_content("TTCGNNN")
|
import sys, os
sys.path.append(os.pardir)
import numpy as np
from dataset.mnist import load_mnist
from common.multi_layer_net_extend import MultiLayerNetExtend
# Gradient check: compare backprop gradients against numerical
# gradients for a batch-norm network on a tiny MNIST batch.
(a_train, b_train), (a_test, b_test) = load_mnist(normalize=True, one_hot_label=True)
network = MultiLayerNetExtend(input_size=784, hidden_size_list=[100, 100], output_size=10,
                              use_batchnorm=True)
a_batch = a_train[:2]   # two samples keep numerical gradients cheap
b_batch = b_train[:2]
grad_backprop = network.gradient(a_batch, b_batch)
grad_numerical = network.numerical_gradient(a_batch, b_batch)
for key in grad_numerical.keys():
    # mean absolute difference per parameter tensor; ~0 means backprop is correct
    diff = np.average( np.abs(grad_backprop[key] - grad_numerical[key]) )
    print(key + ":" + str(diff))
|
class Animal:
    """Base class of the demo inheritance chain."""

    def eat(self):
        """Print the eating action ("吃" = eat)."""
        print("吃")
class Dog(Animal):
    # Extends Animal with a bark behaviour.
    def drak(self):
        """Print the barking action ("叫" = bark); 'drak' name kept for callers."""
        print("叫")
class Xiaotq(Dog):
    # Third level of the chain: adds flying on top of Dog.
    def fly(self):
        """Print the flying action ("飞" = fly)."""
        print("飞")
# Demonstrates three-level inheritance: Xiaotq has its own fly() plus
# the inherited drak() and eat().
xiaotq = Xiaotq()
xiaotq.fly()
xiaotq.drak()
xiaotq.eat()
|
import TextAnalysis
def openfile(filename):
    # Read the whole file and return its contents as one string.
    # Fixes: the original leaked the file handle and computed unused
    # `words`/`limit` locals.
    with open(filename) as handle:
        return handle.read()
# Module-level side effect: reads scam2.txt at import time (the same
# read is repeated again at the bottom of the file).
text=openfile("scam2.txt")
# EMAIL ANALYSIS algorithm in TextAnalysis
def emailanalysis(text):
    # Look for an e-mail address in *text* and, when found, check it
    # against the scam databases in TextAnalysis.  (Python 2 code:
    # print statements.)
    if (TextAnalysis.getemail(text))is not None:
        mail = TextAnalysis.getemail(text)
        print mail
        print "Found Email Searching Databases"
        if TextAnalysis.emailsearch(mail)!="Not found email is clean":
            print "Scammer Found by Email Address"
            # NOTE(review): `terminate` is assigned but never read.
            terminate=1
    else:
        print "No email found"
# ADDRESS FINDER AND CHECKER
def addressfinder(text):
    # Extract a postal address from *text* and geocode it to confirm it
    # exists.  (Python 2 print statements; the "couldn" typo is in a
    # runtime string and is left untouched here.)
    if (TextAnalysis.getaddress(text)) is not None:
        address = TextAnalysis.getaddress(text)
        if TextAnalysis.AddressGeo(address) is not None:
            print "Address Exists"
        else:
            print "We couldn Lookup the address"
# Re-read the sample text (duplicate of the earlier module-level read)
# and run the address check as a script.
text=openfile("scam2.txt")
addressfinder(text)
# Demonstrates aliasing vs. equal-but-distinct list objects.
lst = [1,2,3]
lst_ = [1,2,3]  # same value, different object (different id)
_lst = lst      # alias: binds the very same object as lst
print(id(lst))
print(id(lst_))
print(_lst is lst)  # True: `is` compares identity, not equality
__author__ = 'Caro Strickland'
import sys
import random
print("\nWelcome to Python Hangman! \n")
# Creation of the 'correct_guess' and 'incorrect_guess' lists
correct_guess = []
incorrect_guess = []
# Adding words to the list of guessable words
word_list = []
f = open('words.txt')  # NOTE(review): handle is never closed
for word in f.read().split():
    word_list.append(word)
# Gets the size of the word the user wants to guess
def get_input_number():
    """Prompt for the desired word length until an integer is given.

    Fixes: on invalid input the recursive retry's result was discarded,
    so the function returned None; the retry value is now propagated.
    """
    try:
        var = int(input("Enter your word length: "))
        return var
    except Exception:
        # Exception (not bare except) so Ctrl-C still aborts the game.
        print("Input invalid. Please enter an integer value.")
        return get_input_number()
# Checks the guessed letter and records it as correct or incorrect
def get_letter_guess():
    # NOTE: raw_input makes this Python 2 code.
    var = raw_input("Enter a string value, or 'guess' to guess: ")
    if var == "guess":
        # full-word guess path
        guess_checker()
    elif len(var) !=1:
        print("Only one letter, please.")
        get_letter_guess()
    elif len(var) ==1:
        if var.isalpha():
            if var in random_word:
                correct_guess.append(var)
            elif var not in random_word:
                incorrect_guess.append(var)
        elif var.isalnum():
            # single character but a digit: re-prompt
            print("Only letters, please.")
            get_letter_guess()
def guess_checker():
    # Compare a full-word guess against the secret word.
    myGuess = raw_input("Enter your guess: ")
    if myGuess == random_word:
        win_game()
    else:
        print("Incorrect guess.")
def win_game():
    # Announce the win and stop the game immediately.
    print("Congratulations! You've won!")
    sys.exit()
def lose_game():
    # Reveal the secret word and exit (Python 2 print statement).
    s = "Sorry, you lost. The word was "
    print s, random_word
    sys.exit()
def print_progress():
    """Build the masked word display: guessed letters shown as ' X',
    unguessed positions as ' _ '.

    Fixes: replaced the non-idiomatic ``correct_guess.__contains__(y)``
    with the ``in`` operator (identical semantics).
    """
    string = ""
    for letter in random_word:
        if letter in correct_guess:
            # " X": leading space keeps spacing consistent with " _ "
            string += " " + letter
        else:
            string += " _ "
    return string
num = get_input_number()
# keep only the words matching the requested length
numbered_word_list = []
for word in word_list:
    if len(word) == num:
        numbered_word_list.append(word)
random_word = random.choice(numbered_word_list)
#print(random_word)
# the player gets word-length + 3 guesses (Python 2 print statements)
for x in range(0, num+3):
    s= "You have"
    s1= "guesses left."
    print s, num+3 - x, s1
    s2 = "previous incorrect guesses: "
    print s2, incorrect_guess
    get_letter_guess()
    print(print_progress())
    if x == num+2:
        # guesses exhausted
        lose_game()
|
# h is the separation distance
# r is the range or distance parameter (r>0) which measures how quickly the correlations decay with distance
import numpy
def Spherical(h, r):
    """Spherical variogram correlation for separations *h* and range *r*.

    Correlation is 1 at h == 0, 0 for h >= r, and follows the spherical
    model 1 - q*(1.5 - 0.5*q^2) with q = h/r in between.
    """
    n = numpy.size(h)
    correlation = numpy.zeros(n, dtype=numpy.double)
    for idx in range(n):
        separation = h[idx]
        if separation == 0.0:
            correlation[idx] = 1.0
        elif separation >= r:
            correlation[idx] = 0.0
        else:
            ratio = numpy.double(separation) / r
            correlation[idx] = 1.0 - ratio * (1.5 - 0.5 * ratio * ratio)
    return correlation
#print Spherical([28000], 30000)
|
class Field:
    """A scoring field on which players stack chips (max height 5)."""

    def __init__(self, name):
        self.name = name
        self.players = []

    @property
    def height(self):
        # number of chips currently stacked on this field
        return len(self.players)

    @property
    def top_player(self):
        # owner of the topmost chip, or None on an empty field
        return self.players[-1] if self.players else None

    def place(self, player, dice):
        """Try to put *player*'s chip on this field; True on success."""
        if not self.fits(dice):
            return False
        self.players.append(player)
        return True

    def fits_height(self, dice):
        # remaining space on the stack must cover the dice's throw count
        return 5 - self.height >= dice.throws

    def __repr__(self):
        return self.name
class CountField(Field):
    """Field requiring at least *count* dice showing the same face."""

    def __init__(self, name, count):
        Field.__init__(self, name)
        self.count = count

    def fits(self, dice):
        # most frequent face must reach the threshold, and the stack
        # must still have room for this throw
        if max(dice.counts.values()) < self.count:
            return False
        return self.fits_height(dice)
class ThreeOfAKind(CountField):
    # CountField preset: at least three dice with the same face.
    def __init__(self):
        CountField.__init__(self, "3-of-a-kind", 3)
class FourOfAKind(CountField):
    # CountField preset: at least four dice with the same face.
    def __init__(self):
        CountField.__init__(self, "4-of-a-kind", 4)
class Yahtzee(CountField):
    # CountField preset: all five dice with the same face.
    def __init__(self):
        CountField.__init__(self, "Yahtzee", 5)
class FullHouse(Field):
    """Full house: exactly one triple plus one pair."""

    def __init__(self):
        Field.__init__(self, "Full House")

    def fits(self, dice):
        multiplicities = dice.counts.values()
        # triple + pair <=> highest multiplicity 3 and lowest 2
        if max(multiplicities) == 3 and min(multiplicities) == 2:
            return self.fits_height(dice)
        return False
class Straight(Field):
    """Straight: the faces form exactly 1-5 or 2-6."""

    def __init__(self):
        Field.__init__(self, "Straight")

    def fits(self, dice):
        faces = set(dice.faces)
        # membership test uses == on sets, matching the original checks
        if faces in ({1, 2, 3, 4, 5}, {2, 3, 4, 5, 6}):
            return self.fits_height(dice)
        return False
class NumberField(Field):
    """Field for one specific face value; needs at least a pair."""

    def __init__(self, name, number):
        Field.__init__(self, name)
        self.number = number

    def fits(self, dice):
        # at least two dice must show this field's number
        if dice.counts[self.number] < 2:
            return False
        return self.fits_height(dice)
# Factories for the six number fields.  PEP 8 (E731) discourages
# binding lambdas to names; plain defs keep the call sites (e.g. One())
# unchanged.
def One(): return NumberField("Ones", 1)
def Two(): return NumberField("Twos", 2)
def Three(): return NumberField("Threes", 3)
def Four(): return NumberField("Fours", 4)
def Five(): return NumberField("Fives", 5)
def Six(): return NumberField("Sixes", 6)
|
from http.cookiejar import LWPCookieJar
import matplotlib.pyplot as plt
import requests
# Keep cookies across requests within a single session.
session = requests.Session()
# Use an LWP-format cookie jar persisted to the file 'cookie'.
session.cookies = LWPCookieJar('cookie')
# Captcha endpoint.
captcha = 'http://www.tipdm.org/captcha.svl'
# Directory where the captcha image is saved.
path = 'captcha/'
rq = session.get(captcha)
with open(path + 'captcha.jpg', 'wb') as f:
    f.write(rq.content)
# Show the captcha so the user can type it in.
pic = plt.imread(path + 'captcha.jpg')
plt.imshow(pic)
plt.show()
captcha_code = input("请输入验证码\n")
# Simulated login.  NOTE(review): credentials are hard-coded — move
# them to config/environment before sharing this script.
url = 'http://www.tipdm.org/login.jspx'
login = {'username': '18182737073', 'password': '1239877mq', 'captcha': captcha_code}
rq2 = session.post(url, data=login)
# Login HTTP status code.
print(rq2.status_code)
# URL after any post-login redirect.
print(rq2.url)
# Persist cookies to disk ...
session.cookies.save(ignore_discard=True, ignore_expires=True)
# ... and load them straight back (round-trip check).
session.cookies.load(ignore_discard=True, ignore_expires=True)
# Reuse the session's login state to fetch a member-only page.
newHtml = session.get('http://www.tipdm.org/member/index.jspx')
with open('newHtml.html', 'w',encoding='utf8') as f:
    f.write(newHtml.content.decode('utf8'))
    f.close()  # redundant: the with-block already closes the file
|
import uuid
from django.db import models
class Currency(models.Model):
    """Currency model"""
    # Human-readable currency name, e.g. "US Dollar"; must be unique.
    name = models.CharField(max_length=120, null=False,
                            blank=False, unique=True)
    # 3-letter code (ISO-4217 style, presumably), e.g. "USD"; unique.
    code = models.CharField(max_length=3, null=False, blank=False, unique=True)
    # Display symbol; defaults to '$'.
    symbol = models.CharField(max_length=5, null=False,
                              blank=False, default='$')
    def __str__(self) -> str:
        return self.code
class Transaction(models.Model):
    """A payment transaction tied to a currency."""
    # Public identifier usable instead of the integer PK.
    uid = models.UUIDField(default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=50, null=False, blank=False)
    email = models.EmailField(max_length=50, null=False, blank=False)
    creation_date = models.DateTimeField(auto_now_add=True)
    # PROTECT: a currency with transactions cannot be deleted.
    currency = models.ForeignKey(
        Currency, null=False, blank=False, on_delete=models.PROTECT)
    # Set once the payment provider creates an intent; None until then.
    payment_intent_id = models.CharField(
        max_length=100, null=True, blank=False, default=None)
    message = models.TextField(null=True, blank=True)
    def __str__(self) -> str:
        # NOTE(review): uses self.id (auto PK), not uid — confirm intended.
        return f"{self.name} - {self.id} : {self.currency}"
    @property
    def link(self):
        """
        Link to a payment form for the transaction
        """
        # NOTE(review): host is hard-coded to the dev server; derive
        # from settings/request in production.
        return f'http://127.0.0.1:8000/payment/{str(self.id)}'
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
""""
версия 1.0.1
классы для работы со входными данными.
для тренировки модели - набор маршрутав
"""
import logging
import numpy as np
import pandas as pd
from torch.utils import data
from utils.data_structures import GeoImage
log = logging.getLogger(__name__)
class QueryDataset(data.Dataset):
    """
    Dataset serving queries given as GPS point coordinates.
    (Docstring translated from Russian.)
    """
    def __init__(self, query_file_name, gps_to_pixel_transformer, pixel_to_tensor_transformer):
        # query_file_name: text file with one "[lat, lon]"-style line per query
        super(QueryDataset, self).__init__()
        self.gps = []
        self.gps_to_pixel_transformer = gps_to_pixel_transformer
        self.to_tensor_transformer = pixel_to_tensor_transformer
        log.info(f"Open query file {query_file_name}")
        with open(query_file_name, "rt") as fin:
            for line in fin:
                self.gps.append(line.strip())
        log.info(f" {len(self.gps)} lines read")
    def __len__(self):
        return len(self.gps)
    def __getitem__(self, idx):
        # strip brackets/whitespace and parse the two floats
        gps_coord = list(map(float, self.gps[idx].strip('[ ]\n').split(", ")))
        pix_coord = self.gps_to_pixel_transformer(gps_coord)
        # raw string, pixel coordinates, and the model-ready tensor
        return {"gps": self.gps[idx],
                "pixels": pix_coord,
                "tensor": self.to_tensor_transformer(pix_coord)
                }
class WalkLinesToDataset(data.Dataset):
    """
    Training dataset: converts walks over land parcels into a sequence
    of map-image crops with per-point class targets.
    (Docstring translated from Russian.)
    """
    def __init__(self, image_file_name, walk_file_name, crop_size=16, walk_step=5, transforms=None):
        super(WalkLinesToDataset, self).__init__()
        self.transforms = transforms
        self.rectangle_size = crop_size
        self.map_image = GeoImage(image_file_name, crop_size)
        self.targets = []
        x_coords = []
        y_coords = []
        walks_df = pd.read_csv(walk_file_name, sep="\t", header=0)
        # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin int is the documented replacement.
        walks_df = walks_df.fillna(0).astype(int)
        for _, row in walks_df.iterrows():
            class_num = row[0]
            # remaining columns are (x, y) pairs describing the walk
            walk_points = row[1:].to_numpy().reshape(-1, 2)
            from_x, from_y = walk_points[0, 0], walk_points[0, 1]
            for to_x, to_y in walk_points[1:]:
                if to_x == 0 or to_y == 0:
                    # zero coordinate == fillna padding: walk ends here
                    break
                d_x = to_x - from_x
                d_y = to_y - from_y
                distance = (d_x ** 2 + d_y ** 2) ** 0.5
                # sample a point every walk_step pixels along the segment
                steps = np.arange(0, distance, walk_step)
                size = steps.shape[0]
                x_steps = from_x + steps * d_x / distance
                y_steps = from_y + steps * d_y / distance
                self.targets.append(np.full((size,), class_num, dtype=np.int64))
                x_coords.append(x_steps.astype(int))
                y_coords.append(y_steps.astype(int))
                from_x, from_y = to_x, to_y
        self.targets = np.concatenate(self.targets)
        x_coords = np.concatenate(x_coords)
        y_coords = np.concatenate(y_coords)
        self.coords = np.stack([x_coords, y_coords], axis=1)
        assert len(self.targets) == self.coords.shape[0]
    def __getitem__(self, idx):
        sample = {"targets": self.targets[idx]}
        points = {"coord": self.coords[idx]}
        if self.transforms is not None:
            points = self.transforms(points)
        # NOTE(review): transforms are applied to the raw point above
        # and to the assembled sample below — confirm the double
        # application is intentional.
        sample["image"] = self.map_image.get_rectangle(points["coord"],
                                                       self.rectangle_size)
        sample["realcoord"] = points["coord"]
        if self.transforms is not None:
            sample = self.transforms(sample)
        return sample
    def __len__(self):
        return len(self.targets)
|
import string
from typing import Type
import pytest
from spectree import SecurityScheme
from spectree._pydantic import ValidationError
from spectree.config import Configuration, EmailFieldType
from .common import SECURITY_SCHEMAS, WRONG_SECURITY_SCHEMAS_DATA
def test_config_license():
    """License: bare name OK, name + valid URL OK, malformed URL rejected."""
    config = Configuration(license={"name": "MIT"})
    assert config.license.name == "MIT"
    config = Configuration(
        license={"name": "MIT", "url": "https://opensource.org/licenses/MIT"}
    )
    assert config.license.name == "MIT"
    assert config.license.url == "https://opensource.org/licenses/MIT"
    with pytest.raises(ValidationError):
        Configuration(license={"name": "MIT", "url": "url"})
def test_config_contact():
    """Contact: name alone, name+URL and name+email accepted; bad URL rejected."""
    config = Configuration(contact={"name": "John"})
    assert config.contact.name == "John"
    config = Configuration(contact={"name": "John", "url": "https://example.com"})
    assert config.contact.name == "John"
    assert config.contact.url == "https://example.com"
    config = Configuration(contact={"name": "John", "email": "hello@github.com"})
    assert config.contact.name == "John"
    assert config.contact.email == "hello@github.com"
    with pytest.raises(ValidationError):
        Configuration(contact={"name": "John", "url": "url"})
@pytest.mark.skipif(EmailFieldType == str, reason="email-validator is not installled")
def test_config_contact_invalid_email():
    """Invalid e-mail is only rejected when email-validator is available."""
    with pytest.raises(ValidationError):
        Configuration(contact={"name": "John", "email": "hello"})
def test_config_case():
    """Configuration keys are case-insensitive: lower, UPPER and
    Capitalized spellings all populate the same field."""
    for kwargs in ({"title": "Demo"}, {"TITLE": "Demo"}, {"Title": "Demo"}):
        assert Configuration(**kwargs).title == "Demo"
@pytest.mark.parametrize(("secure_item"), SECURITY_SCHEMAS)
def test_update_security_scheme(secure_item: Type[SecurityScheme]):
    """Each valid scheme round-trips through Configuration unchanged."""
    # update and validate each schema type
    config = Configuration(
        security_schemes=[SecurityScheme(name=secure_item.name, data=secure_item.data)]
    )
    assert config.security_schemes == [
        {"name": secure_item.name, "data": secure_item.data}
    ]
def test_update_security_schemes():
    """All schemas at once are accepted verbatim."""
    # update and validate ALL schemas types
    config = Configuration(security_schemes=SECURITY_SCHEMAS)
    assert config.security_schemes == SECURITY_SCHEMAS
@pytest.mark.parametrize(("secure_item"), SECURITY_SCHEMAS)
def test_update_security_scheme_wrong_type(secure_item: SecurityScheme):
    """Mutating a scheme's type to an unknown value fails validation."""
    # update and validate each schema type
    with pytest.raises(ValidationError):
        secure_item.data.type += "_wrong" # type: ignore
@pytest.mark.parametrize(
    "symbol", [symb for symb in string.punctuation if symb not in "-._"]
)
@pytest.mark.parametrize(("secure_item"), SECURITY_SCHEMAS)
def test_update_security_scheme_wrong_name(secure_item: SecurityScheme, symbol: str):
    """Scheme names allow only [A-Za-z0-9-._]; other punctuation fails."""
    # update and validate each schema name
    with pytest.raises(ValidationError):
        secure_item.name += symbol
    with pytest.raises(ValidationError):
        secure_item.name = symbol + secure_item.name
@pytest.mark.parametrize(("secure_item"), WRONG_SECURITY_SCHEMAS_DATA)
def test_update_security_scheme_wrong_data(secure_item: dict):
    """Malformed scheme payloads raise at construction time."""
    # update and validate each schema type
    with pytest.raises(ValidationError):
        SecurityScheme(**secure_item)
|
# Read "name: qty" lines until "statistics", then print a stock summary.
products = {}
count_products = 0  # number of distinct product names seen
while True:
    command = input()
    if command == "statistics":
        break
    product_name, quantity = command.split(": ")
    if product_name in products.keys():
        # existing product: accumulate quantity
        products[product_name] += int(quantity)
    else:
        count_products += 1
        products[product_name] = int(quantity)
print("Products in stock:")
for product, quantity in products.items():
    print(f"- {product}: {quantity}")
print(f"Total Products: {count_products}")
print(f"Total Quantity: {sum(products.values())}")
import urllib
def read_txt():
    # Python 2 code (print statements, urllib.urlopen).
    # NOTE(review): the Windows path below contains unescaped
    # backslashes ("\U...") which would be a SyntaxError in Python 3 —
    # use a raw string when porting.
    quests = open("C:\Users\Administrator\Desktop\movie_quotes\movie_quotes.txt")
    content = quests.read()
    print content
    quests.close()
    check(content)
def check(check_text):
    # Query the profanity-check web service with the file contents.
    # NOTE(review): check_text is not URL-encoded — spaces/special
    # characters may break the request.
    connection = urllib.urlopen("http://www.wdylike.appspot.com/?q="+check_text)
    value = connection.read()
    print value
    connection.close()
read_txt()
# Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from ethtx.models.decoded_model import DecodedCall, DecodedTransactionMetadata, Proxy
from ethtx.semantics.standards.erc20 import ERC20_TRANSFORMATIONS
from ethtx.semantics.standards.erc721 import ERC721_TRANSFORMATIONS
from ethtx.utils.measurable import RecursionLimit
from .abc import SemanticSubmoduleAbc
from .helpers.utils import (
create_transformation_context,
semantically_decode_parameter,
get_badge,
)
RECURSION_LIMIT = 2000
class SemanticCallsDecoder(SemanticSubmoduleAbc):
    """Semantic Calls Decoder.

    Applies semantic transformations (function names, argument/output
    decoding, ERC20/ERC721 fallbacks, address badges) to a decoded call
    and, recursively, to its sub-calls.

    Fixes: removed a duplicate ``get_standard`` repository query, and
    extracted the three identical argument/output decoding loops into
    one private helper.
    """

    def decode(
        self,
        call: DecodedCall,
        tx_metadata: DecodedTransactionMetadata,
        proxies: Dict[str, Proxy],
    ) -> DecodedCall:
        """Semantically decode *call* in place and return it."""
        standard = self.repository.get_standard(call.chain_id, call.to_address.address)
        function_transformations = self.repository.get_transformations(
            call.chain_id, call.to_address.address, call.function_signature
        )
        if function_transformations:
            call.function_name = (
                function_transformations.get("name") or call.function_name
            )
        else:
            function_transformations = {}

        # prepare context for transformations
        context = create_transformation_context(
            call.to_address.address,
            call.arguments,
            call.outputs,
            tx_metadata,
            self.repository,
        )

        # perform parameters transformations
        self._transform_parameters(call, function_transformations, proxies, context)

        if standard == "ERC20":
            # decode ERC20 calls if transformations for them are not defined
            if call.function_signature in ERC20_TRANSFORMATIONS and (
                not function_transformations
                or call.function_signature not in function_transformations
            ):
                function_transformations = ERC20_TRANSFORMATIONS.get(
                    call.function_signature
                )
                if function_transformations:
                    self._transform_parameters(
                        call, function_transformations, proxies, context
                    )
        elif standard == "ERC721":
            # decode ERC721 calls if transformations for them are not defined
            if call.function_signature in ERC721_TRANSFORMATIONS and (
                not function_transformations
                or call.function_signature not in function_transformations
            ):
                function_transformations = ERC721_TRANSFORMATIONS.get(
                    call.function_signature
                )
                if function_transformations:
                    self._transform_parameters(
                        call, function_transformations, proxies, context
                    )

        # badge the endpoints relative to the transaction's sender/receiver
        call.from_address.badge = get_badge(
            call.from_address.address, tx_metadata.sender, tx_metadata.receiver
        )
        call.to_address.badge = get_badge(
            call.to_address.address, tx_metadata.sender, tx_metadata.receiver
        )

        # remove ignored parameters
        call.arguments = [
            parameter for parameter in call.arguments if parameter.type != "ignore"
        ]
        call.outputs = [
            parameter for parameter in call.outputs if parameter.type != "ignore"
        ]

        # decode sub-calls recursively under a raised recursion limit
        with RecursionLimit(RECURSION_LIMIT):
            if call.subcalls:
                for sub_call in call.subcalls:
                    self.decode(sub_call, tx_metadata, proxies)

        return call

    def _transform_parameters(self, call, function_transformations, proxies, context):
        """Apply *function_transformations* to every input and output
        parameter of *call* (shared by the generic and ERC paths)."""
        for i, parameter in enumerate(call.arguments):
            semantically_decode_parameter(
                self.repository,
                parameter,
                f"__input{i}__",
                function_transformations,
                proxies,
                context,
            )
        for i, parameter in enumerate(call.outputs):
            semantically_decode_parameter(
                self.repository,
                parameter,
                f"__output{i}__",
                function_transformations,
                proxies,
                context,
            )
|
# Sources: https://www.techbeamers.com/create-python-irc-bot/
import socket
import time
errors = {
"ERR_NICKNAMEINUSE": "433"
}
# Define the IRC class
class IRC:
irc = socket.socket()
# Define the socket
def _init_(self):
self.irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Send message data to the server
def send(self, channel, msg):
self.irc.send(bytes("PRIVMSG " + channel + " :" + msg + "\n", "UTF-8"))
# Connect to a server
def connect(self, server, port, username, botnick, mode, realname, channel):
# Connect to the server
print("Connecting to " + server + ", on port " + str(port) + "...")
self.irc.connect((server, port))
# User authentication
print("Authenticating as '" + username + "'...")
self.irc.send(bytes("NICK " + botnick + "\n", "UTF-8"))
self.irc.send(bytes("USER " + username + " " + str(mode) + " * " + ":" + realname + "\n", "UTF-8"))
resp = self.get_response()
print(resp)
if errors["ERR_NICKNAMEINUSE"] in resp:
self.irc.send(bytes("NICK " + botnick + str(2) + "\n", "UTF-8"))
# Joins the specified channel
print("Joining channel: " + channel + "...")
self.irc.send(bytes("JOIN " + channel + "\n", "UTF-8"))
# Get response from server
def get_response(self):
time.sleep(1)
resp = self.irc.recv(4096).decode("UTF-8")
# Responds if the server pings the bot
if resp.find('PING') != -1:
self.irc.send(bytes('PONG ' + resp.split()[1] + '\r\n', "UTF-8"))
return resp
|
# Print a table of melting and boiling points for the first four alkanes.
# Fixes: header typo "Poing" -> "Point"; the value pairs were swapped
# relative to the column headers (boiling listed under melting and vice
# versa); propane's boiling point was mistyped as -4220 instead of -42.
print('Melting and Boiling Points of Alkanes\n{:<10}{:<25}{:<25}'.format('Name',\
'Melting Point(deg C)', "Boiling Point(deg C)"))
print('{:<9} {:<23d} {:}\n\
{:<9} {:<23d} {:}\n\
{:<9} {:<23d} {:}\n\
{:<9} {:<23d} {:}\n'.format("Methane",-183,-162, "Ethane",-172,-89,"Propane",-188,-42,"Butane",-135,-0.5))
|
import dash.testing.wait as wait
from dash import Dash, html
from dash_bootstrap_components import (
Popover,
PopoverBody,
PopoverHeader,
themes,
)
from selenium.webdriver.common.action_chains import ActionChains
def test_dbpo001_popover_click(dash_duo):
    """A click-triggered popover reveals its body once the target is clicked."""
    popover = Popover(
        [PopoverHeader("Test Header"), PopoverBody("Test content")],
        id="popover",
        target="popover-target",
        trigger="click",
    )
    app = Dash(external_stylesheets=[themes.BOOTSTRAP])
    app.layout = html.Div(
        [popover, html.Div("Target", id="popover-target")],
        className="container p-5",
    )
    dash_duo.start_server(app)
    target = dash_duo.wait_for_element_by_id("popover-target")
    target.click()
    dash_duo.wait_for_text_to_equal(".popover-body", "Test content", timeout=4)
def test_dbpo002_popover_hover(dash_duo):
    """A hover-triggered popover reveals its body when the pointer moves over the target."""
    popover = Popover(
        [PopoverHeader("Test Header"), PopoverBody("Test content")],
        id="popover",
        target="popover-target",
        trigger="hover",
    )
    app = Dash(external_stylesheets=[themes.BOOTSTRAP])
    app.layout = html.Div(
        [popover, html.Div("Target", id="popover-target")],
        className="container p-5",
    )
    dash_duo.start_server(app)
    target = dash_duo.wait_for_element_by_id("popover-target")
    ActionChains(dash_duo.driver).move_to_element(target).perform()
    dash_duo.wait_for_text_to_equal(".popover-body", "Test content", timeout=4)
def test_dbpo003_popover_legacy(dash_duo):
    """A legacy-trigger popover opens on target click, survives clicks on
    itself, and dismisses when anything else is clicked."""
    children = [
        html.Div("No Target Here", id="not-a-target"),
        html.Hr(),
        Popover(
            [PopoverHeader("Test Header"), PopoverBody("Test content")],
            id="popover",
            target="popover-target",
            trigger="legacy",
        ),
        html.Div("Target", id="popover-target"),
    ]
    app = Dash(external_stylesheets=[themes.BOOTSTRAP])
    app.layout = html.Div(children, className="container p-5 w-50")
    dash_duo.start_server(app)
    dash_duo.wait_for_element_by_id("popover-target").click()
    dash_duo.wait_for_text_to_equal(".popover-body", "Test content", timeout=4)
    # Clicking inside the popover must not dismiss it
    dash_duo.wait_for_element_by_id("popover").click()
    dash_duo.wait_for_text_to_equal(".popover-body", "Test content", timeout=4)
    # Clicking anywhere outside must dismiss it
    dash_duo.wait_for_element_by_id("not-a-target").click()
    wait.until(
        lambda: len(dash_duo.find_elements("#popover")) == 0,
        timeout=4,
    )
|
#Importing the libaries
import face_recognition
import cv2
import os
from google. colab. patches import cv2_imshow
#Image preprocessing
def img_resize(path):
    """Load the image at `path` and scale it to a fixed 500-pixel width,
    preserving the aspect ratio."""
    image = cv2.imread(path)
    orig_h, orig_w = image.shape[:2]
    target_w = 500
    scale = target_w / float(orig_w)
    target_h = int(orig_h * scale)
    # resizing the image with custom width and height
    return cv2.resize(image, (target_w, target_h))
#list to store the face encodings
train_enc = []
#list to store the names of person
train_names = []
#Training the model: encode the first face found in each image under train/
training_images = 'train'
for file in os. listdir(training_images):
    img = img_resize(training_images + '/' + file)
    # NOTE(review): face_encodings(...)[0] raises IndexError when no face
    # is detected in the image — confirm the inputs always contain a face.
    img_enc = face_recognition. face_encodings(img)[0]
    train_enc. append(img_enc)
    # The person's name is the file name without its extension.
    train_names. append(file.split('.')[0])
#Testing the model: compare each test image against every known encoding
testing_images = 'test'
for file in os. listdir(testing_images):
    img = img_resize(testing_images + '/' + file)
    img_enc = face_recognition. face_encodings(img)[0]
    outputs = face_recognition. compare_faces(train_enc, img_enc)
    #Displaying the results: draw a box and name label for every match
    for i in range(len(outputs)):
        if outputs[i]:
            name = train_names[i]
            (top, right, bottom, left) = face_recognition.face_locations(img)[0]
            cv2. rectangle(img, (left, top), (right, bottom), (0, 0, 255), 2)
            cv2. putText(img, name, (left+2, bottom+20), cv2. FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
    cv2_imshow(img)
import requests
from bs4 import BeautifulSoup
def weather(html):
    """Return the textual weather condition from a parsed Yandex weather page."""
    condition = html.find("div", {"class" : "link__condition day-anchor i-bem"})
    return condition.text
def temp(html):
    """Return the temperature value text from a parsed Yandex weather page."""
    value = html.find("span", {"class" : "temp__value"})
    return value.text
# Fetch and display current Moscow weather from Yandex.
url = "https://yandex.ru/pogoda/moscow?from=serp_title"
response = requests.get(url)
html = BeautifulSoup(response.content,"lxml")
print(weather(html))
print("Сегодня",temp(html),"Состояние", weather(html))
# Daylight check with hard-coded demo values (hour of day, sunrise, sunset).
hours = 3
sunset = 13
sunrise = 2
# Idiomatic chained comparison instead of `hours >= sunrise and hours <= sunset`.
if sunrise <= hours <= sunset:
    print("На улице солнце")
class Solution(object):
    def lengthOfLongestSubstring(self, s):
        """
        :type s: str
        :rtype: int

        Sliding-window scan. `seen` (renamed from `hash`, which shadowed
        the builtin) holds the characters of the current duplicate-free
        window. For each left edge i the window is extended as far right
        as possible and the best length recorded; each character enters
        and leaves `seen` at most once, so the scan is O(n). Looping over
        range(n) also makes the old `len(s) == 1` special case redundant.
        """
        seen = {}
        n = len(s)
        result = 0
        for i in range(n):
            if s[i] not in seen:
                seen[s[i]] = True
                # One past the right end of the current window.
                right = i + len(seen)
                while right < n and s[right] not in seen:
                    seen[s[right]] = True
                    right += 1
                result = max(len(seen), result)
            # Drop the left edge before advancing.
            del seen[s[i]]
        return result
def convertToDecimal(n):
    """Convert a binary string (or int whose digits are 0/1) to its decimal value.

    Ported from Python 2: the `print` statements were syntax errors on
    Python 3; the debug print of the digit list was removed.
    """
    digits = list(str(n))
    dec = 0
    # Weight each digit by its power of two; digits[0] is the most
    # significant bit, so bit i counts digits[len - i - 1].
    i = len(digits) - 1
    while i >= 0:
        dec = dec + 2**i*int(digits[(len(digits)-i-1)])
        i = i - 1
    return dec

print(convertToDecimal('0101011010'))
# -*-coding:utf-8 -*-
__author__ = '$'
import numpy as np
import re
import itertools
from collections import Counter
import os
import csv
import jieba
import random
import collections
import gensim
from gensim import *
def count_tf():
    """Tally token frequencies per rating class from data.csv.

    Rows are bucketed by the label in column 2 ('1'-'4'; anything else
    falls into the fifth bucket), both text columns are tokenised with
    clear_data, and one CSV of (token, count) rows is written per bucket
    under tf_count/.
    """
    buckets = [[], [], [], [], []]
    with open('data.csv','r') as fr:
        for row in csv.reader(fr):
            label = row[2].strip()
            if label in ('1', '2', '3', '4'):
                buckets[int(label) - 1].append([row[0], row[1]])
            else:
                buckets[4].append([row[0], row[1]])
    for index, bucket in enumerate(buckets):
        counts = {}
        for title, body in bucket:
            combined = clear_data(title)
            # Prepend the body tokens, as the original did with s1[0:0] = s2.
            combined[0:0] = clear_data(body)
            for token in combined:
                counts[token] = counts.get(token, 0) + 1
        out_path = 'tf_count/' + str(index) +'_tf.csv'
        print(index)
        with open(out_path,'w') as fw:
            writer = csv.writer(fw)
            for token, freq in counts.items():
                writer.writerow([token, freq])
def data_process():
    """Collect (title, abstract, label) records from files under data/ into data.csv.

    Each source file's first character is its class label. '%T' lines carry
    a title, '%X' lines an abstract, and a blank line flushes the current
    record — but only when it holds more than one field.
    """
    file_names = os.listdir('data')
    print(file_names)
    file_names.sort()
    print(file_names)
    with open('data.csv','w',newline='') as fw:
        writer = csv.writer(fw)
        for name in file_names:
            label = name[0]
            print(label)
            source = 'data/' + name
            with open(source,'r') as fr:
                record = []
                for line in fr:
                    if '%T' in line:
                        record.append(line.strip().split('%T')[1])
                    elif '%X' in line:
                        record.append(line.strip().split('%X')[1])
                    elif len(line.strip()) == 0:
                        if len(record) > 1:
                            record.append(label)
                            writer.writerow(record)
                            record = []
def DATA_train_test():
    """Split data.csv into train.csv / test.csv with an 80/20 per-class split.

    Rows are grouped by the label in column 2 ('1'..'5'); within each class
    the first 80% (in file order) go to train.csv and the rest to test.csv.
    Rows with any other label are dropped, matching the original behaviour.
    The five copy-pasted per-class branches and loops were collapsed into
    a single bucket dict, output order unchanged (class '1' first).
    """
    with open('train.csv','w',newline='') as fw1:
        writer1 = csv.writer(fw1)
        with open('test.csv','w',newline='') as fw2:
            writer2 = csv.writer(fw2)
            D_train = []
            D_test = []
            # One bucket per class label, filled in file order.
            buckets = {label: [] for label in ('1', '2', '3', '4', '5')}
            with open('data.csv','r') as fr:
                for row in csv.reader(fr):
                    if row[2] in buckets:
                        print(row)
                        buckets[row[2]].append(row)
            # Dict preserves insertion order, so classes are emitted 1..5
            # exactly as the original five sequential loops did.
            for rows in buckets.values():
                cutoff = 0.8 * len(rows)
                for index, row in enumerate(rows):
                    target = D_train if index < cutoff else D_test
                    target.append([row[0], row[1], row[2]])
            writer1.writerows(D_train)
            writer2.writerows(D_test)
def clear_data(text):
    """Tokenise `text` with jieba, then drop stop words, tokens containing
    ASCII letters/digits/punctuation, and whitespace-only tokens.

    The parameter was renamed from `str`, which shadowed the builtin.
    Note: the stop-word list is re-read from stop_words.txt on every call;
    kept as-is for behavioural compatibility, though callers invoke this
    once per row.
    """
    tokens = jieba.cut(text.strip())
    stop_words = []
    with open('stop_words.txt') as f:
        for line in f:
            stop_words.append(line.strip())
    kept = []
    for token in tokens:
        if token in stop_words:
            continue
        # Any surviving ASCII character marks the token for removal.
        if re.sub(r'[^A-Za-z0-9(),!?\'\`]', "", token):
            continue
        if len(token.strip()) == 0:
            continue
        kept.append(token)
    return kept
def load_data_and_labels(train_data_file , test_data_file):
    """Load training rows and binary 5-star labels.

    Returns [examples, labels]: examples are the tokenised review bodies
    (column 1, shuffled), labels are [1] for 5-star rows and [0] otherwise.
    The test file is read for parity with the original code, but its
    contents are not used; the original leaked its file handle, which is
    now closed via a context manager.
    """
    with open(train_data_file,'r') as fr:
        data = list(csv.reader(fr))
    with open(test_data_file,'r') as f_test:
        negative_data_examples = f_test.readlines()
    train_data_example = []
    train_data_labels = []
    # Shuffle the rows before building the example/label lists.
    data = np.random.permutation(data)
    print(type(data))
    count_5star = 0
    for row in data:
        train_data_example.append(clear_data(row[1]))
        if int(row[2].strip()) == 5:
            train_data_labels.append([1])
            count_5star += 1
        else:
            train_data_labels.append([0])
    print('总数:', len(data))
    print('5star : ', count_5star)
    return [train_data_example, train_data_labels]
def batch_iter(data, batch_size, num_epochs, shuffle=True):
    """Yield successive mini-batches over `data` for `num_epochs` epochs.

    When `shuffle` is true the order is re-permuted at the start of every
    epoch; otherwise batches follow the input order. The final batch of
    an epoch may be shorter than `batch_size`.
    """
    data = np.array(data)
    data_size = len(data)
    # Batches needed to cover the data once per epoch.
    batches_per_epoch = int((data_size - 1) / batch_size) + 1
    for epoch in range(num_epochs):
        print('epoch : ', epoch)
        if shuffle:
            order = np.random.permutation(np.arange(data_size))
            epoch_data = data[order]
        else:
            epoch_data = data
        for batch_index in range(batches_per_epoch):
            start = batch_index * batch_size
            end = min((batch_index + 1) * batch_size, data_size)
            yield epoch_data[start:end]
if __name__=='__main__':
    # Entry point: only the term-frequency count is active; the other
    # pipeline stages are kept (commented) for manual one-off runs.
    # data_process()
    # DATA_train_test()
    # load_data_and_labels('train.csv','test.csv')
    count_tf()
|
# Read two real numbers and print their sum, shown two ways:
# first converting to float at addition time, then converting at input time.
# (Korean prompts: "첫 번째 실수" = "first real number", "두 번째 실수" = "second real number".)
num1 = input("첫 번째 실수 : ")
num2 = input("두 번째 실수 : ")
print(float(num1) + float(num2))
num1 = float(input("첫 번째 실수 : "))
num2 = float(input("두 번째 실수 : "))
print(num1 + num2)
|
from django import forms
from django.forms import ModelForm
class UploadFileForm(forms.Form):
    """Single-field form for uploading one file."""
    file = forms.FileField()  # the uploaded file
from typing import Optional, List, Callable, Tuple
import torch
from torch import Tensor
from torch.utils.data import Dataset, DataLoader
from utils import make_batch_one_hot
import numpy as np
def icarl_accuracy_measure(test_dataset: Dataset, class_means: Tensor,
                           val_fn: Callable[[Tensor, Tensor], Tuple[Tensor, Tensor, Tensor]], top1_acc_list: Tensor,
                           iteration: int, iteration_total: int, type_data: str,
                           make_one_hot: bool = False, n_classes: int = -1, device: Optional[torch.device] = None,
                           **kwargs) -> Tensor:
    """Compute top-1 accuracy over `test_dataset` for three classifiers.

    `val_fn` maps (patterns, targets) to (_, network predictions, features).
    Features are L2-normalised and scored by negative Euclidean distance
    against `class_means[:, :, 0]` (iCaRL score) and `class_means[:, :, 1]`
    (NCM score); "hybrid 1" uses the raw network predictions. Results are
    printed and written into `top1_acc_list[iteration, 0..2, iteration_total]`
    (order: iCaRL, hybrid1, NCM), and the updated `top1_acc_list` is
    returned (the previous annotation `(float, Optional[float])` did not
    match the actual return value). `kwargs` are forwarded to DataLoader.
    """
    test_loader: DataLoader = DataLoader(test_dataset, **kwargs)
    stat_hb1: List[bool] = []
    stat_icarl: List[bool] = []
    stat_ncm: List[bool] = []
    if make_one_hot and n_classes <= 0:
        raise ValueError("n_class must be set when using one_hot_vectors")
    with torch.no_grad():
        patterns: Tensor
        labels: Tensor
        targets: Tensor
        output: Tensor
        for patterns, labels in test_loader:
            if make_one_hot:
                targets = make_batch_one_hot(labels, n_classes)
            else:
                targets = labels
            # Send data to device
            if device is not None:
                patterns = patterns.to(device)
                targets = targets.to(device)
            _, pred, pred_inter = val_fn(patterns, targets)
            pred = pred.detach().cpu()
            # Row-wise L2 normalisation of the feature vectors.
            pred_inter = (pred_inter.T / torch.norm(pred_inter.T, dim=0)).T
            # Lines 191-195: Compute score for iCaRL
            sqd = torch.cdist(class_means[:, :, 0].T, pred_inter)
            score_icarl = (-sqd).T
            # Compute score for NCM
            sqd = torch.cdist(class_means[:, :, 1].T, pred_inter)
            score_ncm = (-sqd).T
            # Compute the accuracy over the batch
            # (argsort[:, -1:] is the single highest-scoring class index).
            stat_hb1 += (
                [ll in best for ll, best in zip(labels, torch.argsort(pred, dim=1)[:, -1:])])
            stat_icarl += (
                [ll in best for ll, best in zip(labels, torch.argsort(score_icarl, dim=1)[:, -1:])])
            stat_ncm += (
                [ll in best for ll, best in zip(labels, torch.argsort(score_ncm, dim=1)[:, -1:])])
    # https://stackoverflow.com/a/20840816
    stat_hb1_numerical = torch.as_tensor([float(int(val_t)) for val_t in stat_hb1])
    stat_icarl_numerical = torch.as_tensor([float(int(val_t)) for val_t in stat_icarl])
    stat_ncm_numerical = torch.as_tensor([float(int(val_t)) for val_t in stat_ncm])
    print("Final results on " + type_data + " classes:")
    print("  top 1 accuracy iCaRL          :\t\t{:.2f} %".format(torch.mean(stat_icarl_numerical) * 100))
    print("  top 1 accuracy Hybrid 1       :\t\t{:.2f} %".format(torch.mean(stat_hb1_numerical) * 100))
    print("  top 1 accuracy NCM            :\t\t{:.2f} %".format(torch.mean(stat_ncm_numerical) * 100))
    top1_acc_list[iteration, 0, iteration_total] = torch.mean(stat_icarl_numerical) * 100
    top1_acc_list[iteration, 1, iteration_total] = torch.mean(stat_hb1_numerical) * 100
    top1_acc_list[iteration, 2, iteration_total] = torch.mean(stat_ncm_numerical) * 100
    return top1_acc_list
def icarl_cifar100_augment_data(img: Tensor) -> Tensor:
    """Random crop (after 4px padding) plus random horizontal flip, as in the iCaRL paper.

    The input image (assumed channel-first, 32x32 spatial — TODO confirm
    with callers) is zero-padded by 4 pixels on each spatial side, a random
    32x32 window is cut out, and with probability 1/2 the window is
    mirrored horizontally. Returns a float32 tensor of the input's shape.
    """
    # as in paper :
    # pad feature arrays with 4 pixels on each side
    # and do random cropping of 32x32
    img = img.numpy()
    padded = np.pad(img, ((0, 0), (4, 4), (4, 4)), mode='constant')
    random_cropped = np.zeros(img.shape, dtype=np.float32)
    # np.random.random_integers was deprecated and removed from NumPy;
    # randint's upper bound is exclusive, so (0, 9) matches the old
    # inclusive (0, high=8) range.
    crop = np.random.randint(0, 9, size=(2,))
    # Cropping and possible flipping
    if np.random.randint(2) > 0:
        random_cropped[:, :, :] = padded[:, crop[0]:(crop[0] + 32), crop[1]:(crop[1] + 32)]
    else:
        random_cropped[:, :, :] = padded[:, crop[0]:(crop[0] + 32), crop[1]:(crop[1] + 32)][:, :, ::-1]
    return torch.tensor(random_cropped)
|
""" Heat Relaxation
A horizontal plate at the top is heated and a sphere at the bottom is cooled.
Control the heat source using the sliders at the bottom.
"""
from phi.flow import *
DOMAIN = dict(x=64, y=64, extrapolation=0)  # 64x64 grid, zero-value boundary
DT = 1.0  # simulation time step
# UI sliders controlling the sphere source's position and size.
x = control(32, (14, 50))
y = control(20, (4, 40))
radius = control(4, (2, 10))
temperature = CenteredGrid(0, **DOMAIN)  # scalar temperature field, initially zero
for _ in view(temperature, framerate=30, namespace=globals()).range():
    # NOTE(review): the module docstring says the top plate heats and the
    # sphere cools, but the signs below subtract at the plate and add at
    # the sphere — confirm which description is intended.
    temperature -= DT * CenteredGrid(Box(x=None, y=(44, 46)), **DOMAIN)
    temperature += DT * CenteredGrid(Sphere(x=x, y=y, radius=radius), **DOMAIN)
    temperature = diffuse.explicit(temperature, 0.5, DT, substeps=4)
|
from flask_jsonpify import jsonify
def hello():
    """Return a JSON(P) response containing a greeting payload."""
    return jsonify({'text':'Hello World!'})
"""
Testes de internacionalização
"""
import gettext
# Bind the 'mensagens' text domain and show the directory it resolves to.
x = gettext.bindtextdomain('mensagens')
print(x)
# Conventional alias: _() looks up the translation of its argument.
_ = gettext.gettext
# Prints the translation of 'this' (or 'this' unchanged if none is installed).
print(_('this'))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 16/4/24 下午1:38
# @Author : ZHZ
import pandas as pd
if1 = pd.read_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/OutputData/1_if1.csv",index_col = 0)
# isf1 = pd.read_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/OutputData/1_isf1.csv",index_col = 0)
config = pd.read_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/OutputData/1_config1.csv",index_col = 0)
config_less_dic = {}
config_more_dic = {}
def getCost_dict():
    """Populate the module-level config_less_dic / config_more_dic.

    For each (item_id, store_code) group in the `config` frame, the max
    'a_b' string is split on '_': the first part is stored as the "less"
    value and the second as the "more" value, keyed by "item_store".
    """
    for i,j in config.groupby([config['item_id'],config['store_code']]):
        key = str(i[0])+'_'+str(i[1])
        value_less = j.a_b.max().split('_')[0]
        value_more = j.a_b.max().split('_')[1]
        config_less_dic[key] = value_less
        config_more_dic[key] = value_more
def addLessMoreTo_if():
    # TODO: unfinished stub — the name suggests it should merge the
    # less/more values into the `if1` frame; currently a no-op.
    i = 0
if __name__ == '__main__':
    getCost_dict()
    # Python 3 print call (the original Python 2 print statement is a
    # SyntaxError on Python 3; output is identical).
    print(len(config_less_dic), len(config_more_dic))
|
from app import app
from flask import jsonify
import os
@app.route('/')
def index():
    """Greeting page; echoes SECRET_KEY when it is set in the environment."""
    SECRET_KEY = os.environ.get("SECRET_KEY")
    if not SECRET_KEY:
        msg = "<h1>Hello from Flask</h1>"
    else:
        # Fixed: the closing tag was broken ('</h1') and the grammar was
        # off ("You secret").
        msg = f'<h1>Hello from Flask. Your secret is {SECRET_KEY}</h1>'
    return msg
@app.route('/api/products')
def get_products():
    """Return the hard-coded product catalogue as JSON."""
    catalogue = [
        {"id": 1, "make": "BMW", "model": "320", "year": 2010},
        {"id": 2, "make": "Merc", "model": "200", "year": 2011},
    ]
    return jsonify({"products": catalogue})
import numpy as np
import pandas as pd
#write function that takes in two strings and returns if strings are equal
def check_two_strings(string1,string2):
    """Case-insensitively compare two strings for equality.

    The original only compared the strings' lengths, so e.g.
    ('ABCD', 'WXYZ') was reported as equal; now the upper-cased contents
    are compared, matching the stated intent.
    """
    return string1.upper() == string2.upper()

#test the function
test_string1 = 'ABCD'
test_string2 = 'ABCD'
print(check_two_strings(test_string1,test_string2))
def check_two_strings_case_insenstive(s1,s2):
    """Case-insensitive equality check for two strings.

    The original was broken on Python 3: it referenced an undefined
    `false`, called the removed iterator method `.next()`, misspelled a
    variable (`st2_c`), and fell off the end returning None. This version
    keeps the cheap early length check and compares the full casefolded
    strings.
    """
    #check the lengths
    if len(s1) != len(s2):
        return False
    # casefold() handles case-insensitive comparison beyond simple ASCII.
    return s1.casefold() == s2.casefold()
print(check_two_strings_case_insenstive(test_string1,test_string2))
|
# Smoke test for the Employee class: create two employees, print their
# details, apply a salary increment to the first, and print again so the
# change is visible (emp_2 is unchanged and acts as a control).
from employee import Employee
emp_1 = Employee(1, "Sunny", "M.tech", 56000, "CS")
emp_2 = Employee(2, "Bunny", "M.tech", 46000, "IS")
emp_1.show_info()
emp_2.show_info()
emp_1.increment_salary(3000)
emp_1.show_info()
emp_2.show_info()
|
#!/usr/bin/env python3
# Thin launcher: forwards every CLI argument to `ttracer_invoker start`.
import os
import sys
import subprocess

print(sys.argv)
cmd = " ".join(["ttracer_invoker", "start"] + sys.argv[1:])
print(cmd)
# Security/robustness fix: run without a shell so arguments containing
# spaces or shell metacharacters are passed through verbatim instead of
# being re-interpreted by /bin/sh (os.system went through the shell).
subprocess.run(["ttracer_invoker", "start"] + sys.argv[1:])
|
def F(W: list, X: list) -> float:
    """Weighted sum (dot product) of X against the weight vector W."""
    total = 0.
    for idx in range(len(X)):
        total += W[idx] * X[idx]
    return total
def signum(val: float) -> int:
    """Sign of `val`: +1 for positive, -1 for negative, 0 for zero."""
    if val > 0:
        return 1
    if val < 0:
        return -1
    return 0
def recalcWeight(Wold, Xvec, Y, C, error):
    """Perceptron weight update: W_new = W + C * (target - output) * x.

    `Y` is the desired label and `error` the actual output for the sample
    `Xvec`. The original used C*(error - Y), i.e. the inverted sign, which
    pushes the weights *away* from the correct decision boundary and
    prevents convergence on separable data.
    """
    newW = []
    for i in range(len(Wold)):
        newW.append(Wold[i] + C*(Y - error) * Xvec[i])
    return newW
def main():
    # Four 2-D training samples with a constant bias component appended.
    X = [[5.7, 6.3, 1],
         [9.3, 8.7, 1],
         [4.6, 5.2, 1],
         [10.1, 7.3, 1 ]]
    # Alternate the target labels: even-indexed samples +1, odd -1.
    Y = []
    for i in range(len(X)):
        if i % 2 == 0:
            Y.append(+1)
        else:
            Y.append(-1)
        print ("Y[{0}]={1}".format(i,Y[i]))
    # Initial weights (last entry weighs the bias) and learning rate.
    W = [0.1, 0.4, 0.3]
    C = 0.2 #learning tempo
    recalcFlag = True
    while (recalcFlag):
        # Pauses for an Enter keypress between training sweeps.
        input()
        recalcFlag = False
        for i in range(len(X)):
            ans = signum(F(W,X[i]))
            print("F=sign({0}\t;{1}={2}".format(W,X[i],ans))
            if ans != Y[i]:
                print("Vector {0}, sign() = {1} doesn't match Y = {2} ".format(X[i], ans, Y[i]))
                W = recalcWeight(W, X[i], Y[i], C, ans)
                W = [round(element, 4) for element in W ]
                print("Recalculating weight ...\nW = {0}".format(W))
                #recalcFlag = True
            #else:
                #recalcFlag = False
        # Re-check the whole set: another sweep is needed if any sample
        # is still misclassified.
        for x in X:
            if signum(F(W,x)) != Y[X.index(x)]:
                recalcFlag = True
                break

if __name__ == "__main__":
    main()
# Generated by Django 2.2.6 on 2019-12-05 15:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop the 'review' field from progressqty and progressqtyextra and
    replace it with an optional free-text 'review_text' field on each."""

    dependencies = [
        ('work', '0054_auto_20191203_1652'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='progressqty',
            name='review',
        ),
        migrations.RemoveField(
            model_name='progressqtyextra',
            name='review',
        ),
        migrations.AddField(
            model_name='progressqty',
            name='review_text',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='progressqtyextra',
            name='review_text',
            field=models.TextField(blank=True, null=True),
        ),
    ]
|
# Generated by Django 3.2.3 on 2021-06-12 05:26
from django.db import migrations
class Migration(migrations.Migration):
    """Remove Pizza.size and delete the now-unreferenced PizzaSize model."""

    dependencies = [
        ('pizza_app', '0011_ingredientsize_orden'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='pizza',
            name='size',
        ),
        migrations.DeleteModel(
            name='PizzaSize',
        ),
    ]
|
from kafka import KafkaProducer
from kafka import KafkaConsumer
from kafka.errors import KafkaError
from elasticsearch5 import Elasticsearch
import json
import datetime
class KafkaC:
    """
    Consumer module: consume messages from a topic, differentiated by
    consumer group id. (Translated from the original Chinese docstring.)
    """
    def __init__(self, kafka_host, kafka_port, kafka_topic, group_id):
        # Broker address, topic and consumer-group settings.
        self.kafka_host = kafka_host
        self.kafka_port = kafka_port
        self.kafka_topic = kafka_topic
        self.group_id = group_id
        # Start at the latest offset and auto-commit consumed offsets.
        self.consumer = KafkaConsumer(self.kafka_topic, group_id=self.group_id,
                                      bootstrap_servers='{kafka_host}:{kafka_port}'.format(
                                          kafka_host=self.kafka_host,
                                          kafka_port=self.kafka_port),
                                      auto_offset_reset='latest',
                                      enable_auto_commit=True)
        # ps = [TopicPartition(self.kafka_topic, p) for p in self.consumer.partitions_for_topic(self.kafka_topic)]
        # self.consumer.assign(ps)
        #for partition in ps:
        #    self.consumer.seek(partition, 0)
    def consume_data(self):
        """Yield messages from the topic until interrupted with Ctrl-C."""
        try:
            for message in self.consumer:
                yield message
        except KeyboardInterrupt as e:
            print(e)
class KafkaP:
    """
    Producer module: messages are distinguished by key.
    (Translated from the original Chinese docstring.)
    """
    def __init__(self, kafka_host, kafka_port, kafka_topic, key=None):
        # Broker address, target topic and optional message key.
        self.kafka_host = kafka_host
        self.kafka_port = kafka_port
        self.kafka_topic = kafka_topic
        self.key = key
        self.producer = KafkaProducer(bootstrap_servers=['{kafka_host}:{kafka_port}'.format(
            kafka_host=self.kafka_host,
            kafka_port=self.kafka_port), ]
        )
    def send_json_data(self, params):
        """Send `params` to the topic under this producer's key; flushes
        immediately so the message is not left in the local buffer."""
        try:
            #parmas_message = json.dumps(params)
            producer = self.producer
            producer.send(self.kafka_topic, key=self.key, value=params)
            producer.flush()
        except KafkaError as e:
            print(e)
def read_log(topic):
    """Relay JSON log messages from Kafka `topic` into Elasticsearch.

    Each message is parsed as JSON, stamped with the current time
    ('timestrap', +08:00 offset), and indexed into
    java-log-<profile>-<YYYY-MM-DD>. Messages without a 'profile' key are
    now skipped: the original fell through to es.index() where `index` was
    either undefined (NameError on the first such message) or stale from a
    previous message, writing into the wrong index.
    """
    consumer_inner = KafkaC("172.16.10.214", 9092, topic, 'log-test')
    message = consumer_inner.consume_data()
    es = Elasticsearch(hosts='elasticsearch-logging.logging.svc.cluster.local')
    for msg in message:
        offset = msg.offset
        print(offset)
        value = msg.value
        value_dic = json.loads(value)
        date_today = datetime.datetime.now().strftime('%Y-%m-%d')
        timestrap = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f+08:00')
        value_dic['timestrap'] = timestrap
        if 'profile' not in value_dic:
            continue  # no profile -> no target index; skip this message
        print(value_dic)
        index = "java-log-{env}-{date}".format(env=value_dic['profile'].lower(), date=date_today)
        try:
            es.index(index=index, doc_type='javalog', body=value_dic)
        except Exception as e:
            # Best-effort relay: report the failing document and keep consuming.
            print(value_dic)
if __name__ == '__main__':
    # Entry point. Per the original (Chinese) note below: read messages
    # from the remote telecom-DC news-data Kafka queue and write them into
    # the local Kafka queue.
    """
    从远程电信机房服务器news-data 读取kafka消息对列中的内容
    写入到本地kafka队列
    """
    read_log('sample')
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 17 14:28:53 2019
@author: Vall
"""
import iv_plot_module as ivp
import iv_utilities_module as ivu
import matplotlib.pyplot as plt
from math import sqrt
from numpy import pi
import numpy as np
from scipy.optimize import curve_fit
#%%
def getValueError(values, errors=None):
    """Given a np.array -and its error bars if known-, gives mean and error

    Parameters
    ----------
    values : np.array
        Array of values.
    errors : np.array
        Array of known error bars.

    Returns
    -------
    (value, error) : tuple
        Mean value and associated error.
    """
    mean_value = np.mean(values)
    if errors is None:
        spread = np.std(values)
    else:
        # The error is the larger of the sample spread and the mean error bar.
        spread = max(np.std(values), np.mean(errors))
    return (mean_value, spread)
#%%
def roundMatlab(x):
    """Returns round value in Matlab 2014's style (half away from zero).

    Python 3 uses banker's rounding, so round(80.5) == 80 while Matlab
    gives 81. The original implementation added 1 to *any* value whose
    integer part was even — e.g. roundMatlab(80.6) returned 82 and
    roundMatlab(80.4) returned 81. Truncating x +/- 0.5 toward zero
    reproduces Matlab's behaviour for every input, including negatives
    (roundMatlab(-80.5) == -81).

    Parameters
    ----------
    x : float
        Number to be rounded.

    Returns
    -------
    y : int
        Rounded number.
    """
    # int() truncates toward zero, so shifting by +/-0.5 first rounds
    # halves away from zero — exactly Matlab's round().
    return int(x + 0.5) if x >= 0 else int(x - 0.5)
#%%
def cropData(t0, t, *args, **kwargs):
    """Crops data according to a certain logic specifier.

    By default, the logic specifier is '>=', so data 't' is cropped from
    certain value 't0'. Passing e.g. logic='<=' crops data 't' up to 't0'
    instead. Any extra array-like arguments are cropped with the same mask.

    Parameters
    ----------
    t0 : int, float
        Value to apply logic specifier to.
    t : np.array
        Data to apply logic specifier to.
    logic='>=' : str
        Logic specifier ('>=', '>', '<=', '<', '==' or '!=').

    Returns
    -------
    new_args : list of np.array
        Cropped t followed by the cropped extra arrays.

    Raises
    ------
    ValueError
        If t0 is not in t, an unknown logic specifier is given, or an
        extra argument is not 1D/2D.
    TypeError
        If an extra argument is not array-like.
    """
    logic = kwargs.get('logic', '>=')
    if t0 not in t:
        raise ValueError("Hey! t0 must be in t")
    # Explicit comparison table instead of the original eval() on a
    # format string, which executed arbitrary code from the kwarg.
    comparisons = {
        '>=': lambda a, b: a >= b,
        '>': lambda a, b: a > b,
        '<=': lambda a, b: a <= b,
        '<': lambda a, b: a < b,
        '==': lambda a, b: a == b,
        '!=': lambda a, b: a != b,
    }
    try:
        index = comparisons[logic](t, t0)
    except KeyError:
        raise ValueError("Unknown logic specifier: {}".format(logic))
    new_args = []
    for a in args:
        try:
            a = np.array(a)
        except Exception:
            raise TypeError("Extra arguments must be array-like")
        if a.ndim == 1:
            new_args.append(a[index])
        else:
            try:
                new_args.append(a[index, :])
            except Exception:
                raise ValueError("This function takes only 1D or 2D arrays")
    t = t[index]
    new_args = [t, *new_args]
    return new_args
#%%
def chiSquared(data, curve):
    """Mean squared residual between `curve` and `data`.

    The original computed sum(curve - data)**2 — the *square of the sum*
    of residuals, which lets opposite-signed errors cancel to zero — 
    instead of the sum of squared residuals.
    """
    return sum((curve - data)**2) / len(data)
#%%
def linearFit(X, Y, dY=None, showplot=True,
              plot_some_errors=(False, 20), **kwargs):
    """Applies linear fit and returns m, b and Rsq. Can also plot it.

    By default, it applies minimum-square linear fit 'y = m*x + b'. If
    dY is specified, it applies weighted minimum-square linear fit.

    Parameters
    ----------
    X : np.array, list
        Independent X data to fit.
    Y : np-array, list
        Dependent Y data to fit.
    dY : np-array, list
        Dependent Y data's associated error.
    showplot : bool
        Says whether to plot or not.
    plot_some_errors : tuple (bool, int)
        Says whether to plot only some error bars (bool) and specifies
        the number of error bars to plot.

    Returns
    -------
    rsq : float
        Linear fit's R Square Coefficient.
    (m, dm): tuple (float, float)
        Linear fit's slope: value and associated error, both as float.
    (b, db): tuple (float, float)
        Linear fit's origin: value and associated error, both as float.

    Other Parameters
    ----------------
    text_position : tuple (horizontal, vertical), optional
        Indicates the parameters' text position. Each of its values
        should be a number (distance in points measured on figure).
        But vertical value can also be 'up' or 'down'.
    mb_units : tuple (m_units, b_units), optional
        Indicates the parameter's units. Each of its values should be a
        string.
    mb_error_digits : tuple (m_error_digits, b_error_digits), optional
        Indicates the number of digits to print in the parameters'
        associated error. Default is 3 for slope 'm' and 2 for intercept
        'b'.
    mb_string_scale : tuple (m_string_scale, b_string_scale), optional
        Indicates whether to apply string prefix's scale to printed
        parameters. Each of its values should be a bool; i.e.: 'True'
        means 'm=1230.03 V' with 'dm = 10.32 V' would be printed as
        'm = (1.230 + 0.010) V'. Default is '(False, False)'.
    rsq_decimal_digits : int, optional.
        Indicates the number of digits to print in the Rsq. Default: 3.

    Warnings
    --------
    The returned Rsq doesn't take dY weights into account.
    """
    # How should Rsq be computed when weights are involved? (original note)
    if dY is None:
        W = None
    else:
        W = 1/dY**2
    # NOTE(review): np.polyfit documents `w` as weights proportional to
    # 1/sigma, not 1/sigma**2 — confirm this weighting is intended.
    fit_data = np.polyfit(X, Y, 1, cov=True, w=W)
    m = fit_data[0][0]
    dm = sqrt(fit_data[1][0,0])
    b = fit_data[0][1]
    db = sqrt(fit_data[1][1,1])
    # Unweighted R^2 (see the Warnings section above).
    rsq = 1 - sum( (Y - m*X - b)**2 )/sum( (Y - np.mean(Y))**2 )
    # Pick a default annotation position: above when the slope is steep.
    try:
        kwargs['text_position']
    except KeyError:
        if m > 1:
            aux = 'up'
        else:
            aux = 'down'
        kwargs['text_position'] = (.02, aux)
    if showplot:
        plt.figure()
        if dY is None:
            plt.plot(X, Y, 'b.', zorder=0)
        else:
            if plot_some_errors[0] == False:
                plt.errorbar(X, Y, yerr=dY, linestyle='', marker='.',
                             ecolor='b', elinewidth=1.5, zorder=0)
            else:
                # NOTE(review): len(Y)/plot_some_errors[1] is a float under
                # Python 3 but matplotlib's `errorevery` expects an int —
                # confirm and wrap in int() if this branch is exercised.
                plt.errorbar(X, Y, yerr=dY, linestyle='', marker='.',
                             color='b', ecolor='b', elinewidth=1.5,
                             errorevery=len(Y)/plot_some_errors[1],
                             zorder=0)
        plt.plot(X, m*X+b, 'r-', zorder=100)
        plt.legend(["Ajuste lineal ponderado","Datos"])
        # Fill in defaults for any formatting kwargs not supplied.
        kwargs_list = ['mb_units', 'mb_string_scale',
                       'mb_error_digits', 'rsq_decimal_digits']
        kwargs_default = [('', ''), (False, False), (3, 2), 3]
        for key, value in zip(kwargs_list, kwargs_default):
            try:
                kwargs[key]
            except KeyError:
                kwargs[key] = value
        # Vertical positions (axes-fraction) for the three annotations.
        if kwargs['text_position'][1] == 'up':
            vertical = [.9, .82, .74]
        elif kwargs['text_position'][1] == 'down':
            vertical = [.05, .13, .21]
        else:
            if kwargs['text_position'][1] <= .08:
                fact = .08
            else:
                fact = -.08
            vertical = [kwargs['text_position'][1]+fact*i for i in range(3)]
        plt.annotate('m = {}'.format(ivu.errorValueLatex(
                        m,
                        dm,
                        error_digits=kwargs['mb_error_digits'][0],
                        units=kwargs['mb_units'][0],
                        string_scale=kwargs['mb_string_scale'][0],
                        one_point_scale=True)),
                    (kwargs['text_position'][0], vertical[0]),
                    xycoords='axes fraction')
        plt.annotate('b = {}'.format(ivu.errorValueLatex(
                        b,
                        db,
                        error_digits=kwargs['mb_error_digits'][1],
                        units=kwargs['mb_units'][1],
                        string_scale=kwargs['mb_string_scale'][1],
                        one_point_scale=True)),
                    (kwargs['text_position'][0], vertical[1]),
                    xycoords='axes fraction')
        rsqft = r'$R^2$ = {:.' + str(kwargs['rsq_decimal_digits']) + 'f}'
        plt.annotate(rsqft.format(rsq),
                    (kwargs['text_position'][0], vertical[2]),
                    xycoords='axes fraction')
        plt.show()
    return rsq, (m, dm), (b, db)
#%%
def nonLinearFit(X, Y, fitfunction, initial_guess=None,
                 bounds=(-np.infty, np.infty), dY=None,
                 showplot=True, plot_some_errors=(False, 20),
                 **kwargs):
    """Applies nonlinear fit and returns parameters and Rsq. Plots it.

    By default, it applies minimum-square fit. If dY is specified, it
    applies weighted minimum-square fit.

    Parameters
    ----------
    X : np.array, list
        Independent X data to fit.
    Y : np-array, list
        Dependent Y data to fit.
    fitfunction : function
        The function you want to apply. Its arguments must be 'X' as
        np.array followed by the other parameters 'a0', 'a1', etc as
        float. Must return only 'Y' as np.array.
    initial_guess=None : list, optional
        A list containing a initial guess for each parameter.
    bounds=(-np.infty, np.infty) : tuple, optional
        A tuple containing bounds for each parameter;
        i.e. ([-np.inf,0],[np.inf,2]) sets the 1st free and the 2nd between 0
        and 2.
    dY : np-array, list, optional
        Dependent Y data's associated error.
    showplot : bool
        Says whether to plot or not.
    plot_some_errors : tuple (bool, int)
        Says whether to plot only some error bars (bool) and specifies
        the number of error bars to plot.

    Returns
    -------
    rsq : float
        Fit's R Square Coefficient.
    parameters : list of tuples
        Fit's parameters, each as a tuple containing value and error,
        both as tuples.

    Other Parameters
    -----------------
    text_position : tuple (horizontal, vertical), optional
        Indicates the parameters' text position. Each of its values
        should be a number (distance in points measured on figure).
        But vertical value can also be 'up' or 'down'.
    par_units : list, optional
        Indicates the parameters' units. Each of its values should be a
        string.
    par_error_digits : list, optional
        Indicates the number of digits to print in the parameters'
        associated error. Default is 3 for all of them.
    par_string_scale : list, optional
        Indicates whether to apply string prefix's scale to printed
        parameters. Each of its values should be a bool. Default is
        False for all of them.
    rsq_decimal_digits : int, optional.
        Indicates the number of digits to print in the Rsq. Default: 3.

    Warnings
    --------
    The returned Rsq doesn't take dY weights into account.
    """
    # Input validation: arrays only, with matching lengths.
    if not isinstance(X, np.ndarray):
        raise TypeError("X should be a np.array")
    if not isinstance(Y, np.ndarray):
        raise TypeError("Y should be a np.array")
    if not isinstance(dY, np.ndarray) and dY is not None:
        raise TypeError("dY shouuld be a np.array")
    if len(X) != len(Y):
        raise IndexError("X and Y must have same lenght")
    if dY is not None and len(dY) != len(Y):
        raise IndexError("dY and Y must have same lenght")
    if dY is None:
        W = None
    else:
        W = 1/dY**2
    # NOTE(review): scipy's curve_fit documents `sigma` as the standard
    # deviation of the data, yet it is given 1/dY**2 here — confirm the
    # intended weighting.
    parameters, covariance = curve_fit(fitfunction, X, Y,
                                       p0=initial_guess, bounds=bounds,
                                       sigma=W)
    # Unweighted R^2 (see the Warnings section above).
    rsq = sum( (Y - fitfunction(X, *parameters))**2 )
    rsq = rsq/sum( (Y - np.mean(Y))**2 )
    rsq = 1 - rsq
    n = len(parameters)
    if showplot:
        plt.figure()
        if dY is None:
            plt.plot(X, Y, 'b.', zorder=0)
        else:
            if plot_some_errors[0] == False:
                # NOTE(review): linestyle='b' is not a valid matplotlib line
                # style ('b' is a colour); probably meant linestyle=''.
                plt.errorbar(X, Y, yerr=dY, linestyle='b', marker='.',
                             ecolor='b', elinewidth=1.5, zorder=0)
            else:
                # NOTE(review): errorevery receives a float under Python 3;
                # matplotlib expects an int here.
                plt.errorbar(X, Y, yerr=dY, linestyle='-', marker='.',
                             color='b', ecolor='b', elinewidth=1.5,
                             errorevery=len(Y)/plot_some_errors[1],
                             zorder=0)
        plt.plot(X, fitfunction(X, *parameters), 'r-', zorder=100)
        plt.legend(["Ajuste lineal ponderado","Datos"])
        # Fill in defaults for formatting kwargs; list-typed ones must have
        # one entry per fitted parameter.
        kwargs_list = ['text_position', 'par_units', 'par_string_scale',
                       'par_error_digits', 'rsq_decimal_digits']
        kwargs_default = [(.02,'up'), ['' for i in range(n)],
                          [False for i in range(n)],
                          [3 for i in range(n)], 3]
        for key, value in zip(kwargs_list, kwargs_default):
            try:
                kwargs[key]
                if key != 'text_position':
                    try:
                        if len(kwargs[key]) != n:
                            print("Wrong number of parameters",
                                  "on '{}'".format(key))
                            kwargs[key] = value
                    except TypeError:
                        # NOTE(review): `len(n)` on an int raises TypeError —
                        # `range(n)` was probably intended, so a scalar kwarg
                        # crashes here instead of being broadcast.
                        kwargs[key] = [kwargs[key] for i in len(n)]
            except KeyError:
                kwargs[key] = value
        # Vertical positions (axes-fraction) for the n+1 annotations.
        if kwargs['text_position'][1] == 'up':
            vertical = [.9-i*.08 for i in range(n+1)]
        elif kwargs['text_position'][1] == 'down':
            vertical = [.05+i*.08 for i in range(n+1)]
        else:
            if kwargs['text_position'][1] <= .08:
                fact = .08
            else:
                fact = -.08
            vertical = [
                kwargs['text_position'][1]+fact*i for i in range(n+1)]
        for i in range(n):
            plt.annotate(
                    'a{} = {}'.format(
                        i,
                        ivu.errorValueLatex(
                            parameters[i],
                            sqrt(covariance[i,i]),
                            error_digits=kwargs['par_error_digits'][i],
                            units=kwargs['par_units'][i],
                            string_scale=kwargs['par_string_scale'][i],
                            one_point_scale=True)),
                    (kwargs['text_position'][0], vertical[i]),
                    xycoords='axes fraction')
        rsqft = r'$R^2$ = {:.' + str(kwargs['rsq_decimal_digits'])+'f}'
        # NOTE(review): vertical[-i] reuses the loop variable left over from
        # the annotate loop (i == n-1), i.e. vertical[-(n-1)] — vertical[n]
        # or vertical[-1] was probably intended.
        plt.annotate(rsqft.format(rsq),
                     (kwargs['text_position'][0], vertical[-i]),
                     xycoords='axes fraction')
        plt.show()
    parameters_error = np.array(
            [sqrt(covariance[i,i]) for i in range(n)])
    parameters = list(zip(parameters, parameters_error))
    return rsq, parameters
#%% PMUSIC -----------------------------------------------------------------
## Start by defining general parameters
#PMN = nsize//4 # WHY /4? "cantidad de mediciones"
#PMT = 1200 # size of the window in ps
#PMdt = 20 # time step in ps
#
## Now get PMUSIC's parameters3
#PMdata = detrend(meanV) # BEWARE! THE BEST FIT HORIZONTAL LINE IS FILTERED!
#Mp = [PMN, 200] # This is PMUSIC's most important parameter
## Mp = [components' dimension, harmonics' limit]
## Second variable marks how many harmonics to throw away.
## It can't be greater than the measurement's dimension.
#
## Then define several variables to be filled
#MSn = []
#Mfn = []
#iPMindex=0
#for i in range(PMT+1, 1350, PMdt): # WHY? SHOULD I BE INCLUDING 1350? I'M NOT.
#
# iPMindex = iPMindex + 1
#
# # Take a segment of data and apply PMUSIC
# iPMdata = PMdata[((i-PMT) < t) & (t < i)]
# [MSn1, Mfn1] = pmusic(iPMdata, Mp, 6000, samplerate, [], 0)
# # WHY 6000?
#
# iPMselection = ((Mfn1 >= 0) & (Mfn1 <= 0.06));
# MSn[:, iPMindex] = MSn1[iPMselection]
# Mfn[:, iPMindex] = Mfn1[iPMselection]
#
## Finally... Plot! :)
#plt.figure(1)
#plt.subplot(3,2,1)
#plt.imagesc(np.arange(1,T), Mfn[:,1], MSn)
"""
Problems so far:
Don't have a pmusic Python equivalent
Haven't searched an imagesc equivalent
"""
#%%
def linearPrediction(t, x, dt, svalues=None, max_svalues=8,
                     autoclose=True, printing=True):
    r"""Applies linear prediction fit to data.

    Given a set of data :math:`t, x` with independent step :math:`dt`, it looks
    for the best fit according to the model...

    .. math:: f(t) = \sum_i A.cos(\omega_i t + \phi).e^{-\frac{t}{\tau}}

    This method does not need initial values for the parameters to fit.
    In order for it to work, it is necesary though to have a uniform
    independent variable whose elements are multiples :math:`t_i=i.dt` of a
    constant step :math:`dt`

    Parameters
    ----------
    t : np.array
        Independent variable :math:`t` in ps.
    x : np.array
        Dependent variable :math:`x` in any unit.
    dt : float
        Independent variable's step :math:`dt` in ps.
    svalues : None, int
        Number of significant values. If set to None, it's chosen in an
        interactive way.
    max_svalues : int
        Maximum number of significant values that can be chosen in the
        interactive mode.
    autoclose=True : bool
        Says whether to close the intermediate eigenvalues' plot or not.
    printing=True : bool
        Says whether to print some results or not.

    Returns
    -------
    results : np.array
        Parameters that best fit the data. On its columns it holds...
        ...frequency :math:`f=2\pi\omega` in Hz.
        ...characteristic time :math:`\tau_i` in ps.
        ...quality factors :math:`Q_i=\frac{\omega}{2\gamma}=\pi f \tau`
        ...amplitudes :math:`A_i` in the same units as :math:`x`
        ...phases :math:`\phi_i` written in multiples of :math:`\pi`
    other_results : dict
        Other fit parameters...
        ...chi squared :math:`\chi^2`
        ...number of significant values :math:`N`
    plot_results : ivu.InstancesDict
        Fit parameters that allow plotting. In particular, it holds...
        ...'fit' which includes time, data, fit and fit terms.
        ...'raman' which includes frequency, fit spectrum and fit terms spectra.

    See also
    --------
    ivp.linearPredictionPlot
    """
    # NOTE: the docstring is now a raw string; previously '\f' and '\t' inside
    # the LaTeX were interpreted as formfeed/tab escape characters.
    # -------------------------------------------------------------------------
    # FREQUENCIES AND DAMPING FACTORS
    # -------------------------------------------------------------------------
    # Create data matrix
    N = len(x)
    M = roundMatlab(0.75 * N)
    X = np.array([x[i+j+1] for j in range(N-M) for i in range(M)]).reshape((N-M,M))
    # Diagonalize square data matrix
    [eigenvalues, eigenvectors] = np.linalg.eigh( np.matmul(X, X.T) )
    ordered_index = eigenvalues.argsort()  # From smallest to largest value
    eigenvalues = eigenvalues[ordered_index]  # Eigenvalues
    eigenvectors = eigenvectors[:, ordered_index]  # Eigenvectors on columns
    rank = np.linalg.matrix_rank(np.diag(eigenvalues))  # Size measure
    # Choose number of significant values
    if svalues is None:
        fig = plt.figure()
        ax = plt.subplot()
        plt.semilogy(eigenvalues, linestyle='none', marker='o',
                     fillstyle='none', markersize=10)
        plt.title('¿Número de valores singulares?')
        plt.ylabel("Autovalores")
        Nsignificant = ivp.interactiveIntegerSelector(ax,
                                                      min_value=0,
                                                      max_value=max_svalues)
        if autoclose:
            plt.close(fig)
    else:
        Nsignificant = svalues
    # Crop data according to it
    F = np.zeros((N-M, N-M))
    F[-Nsignificant:,-Nsignificant:] = np.diag(1/np.sqrt(eigenvalues[-Nsignificant:]))
    auxiliar = np.matmul(eigenvectors, F)
    U = np.matmul(X.T, auxiliar)  # Xmatrix.T * eigenvectors * F
    # Define polinomyal
    auxiliar = np.matmul(eigenvectors.T, x[:N-M])
    auxiliar = np.matmul(F.T, auxiliar)
    A = np.matmul(U, auxiliar)  # U * F.T * eigenvectors.T * xvector
    # |--> Least-Squares?
    coeficients = np.array([1, *list(-A)])
    # Solve and find its roots
    roots = np.roots(coeficients)
    ordered_index = abs(roots).argsort()
    roots = roots[ordered_index][::-1]  # From largest to smallest absolute value
    # Calculate damping constants 'b' and frequencies 'omega'
    damping_constants = (np.log(abs(roots)) / dt)[:rank]  # Crop them accordingly
    angular_frequencies = (np.angle(roots) / dt)[:rank]
    # Sort them
    ordered_index = angular_frequencies.argsort()  # From smallest to largest freq
    angular_frequencies = angular_frequencies[ordered_index]
    damping_constants = damping_constants[ordered_index]
    # Crop them according to number of real roots and rank of diagonalized matrix
    Nzeros = len(angular_frequencies) - np.count_nonzero(angular_frequencies)
    angular_frequencies = abs(angular_frequencies)[:roundMatlab(
        (rank-Nzeros)/2+Nzeros)]
    damping_constants = damping_constants[:roundMatlab(
        (rank-Nzeros)/2+Nzeros)]
    # Then crop them according to the number of positive or zero damping constants
    Npositives = len(damping_constants[damping_constants>=0])
    ordered_index = damping_constants.argsort()[::-1]  # From largest to smallest
    damping_constants = damping_constants[ordered_index][:Npositives]
    angular_frequencies = angular_frequencies[ordered_index][:Npositives]
    # Now I have the smallest frequencies and largest damping constants
    # Then I calculate the characteristic time tau and the quality factor Q
    quality_factors = angular_frequencies / (2*damping_constants)
    characteristic_times = 1 / damping_constants  # in ps
    # -------------------------------------------------------------------------
    # AMPLITUDES AND PHASES
    # -------------------------------------------------------------------------
    # Create modelled data matrix
    Nfit_terms = len(angular_frequencies)
    t2 = np.arange(0, N*dt, dt)  # Time starting on zero
    X2 = np.zeros((N, 2*Nfit_terms))
    for i, b, omega in zip(range(Nfit_terms),
                           damping_constants,
                           angular_frequencies):
        X2[:, 2*i] = np.exp(-b*t2) * np.cos(omega*t2)
        X2[:, 2*i+1] = -np.exp(-b*t2) * np.sin(omega*t2)
    # Diagonalize square Hermitian modelled data matrix
    [eigenvalues2, eigenvectors2] = np.linalg.eigh( np.matmul(X2, X2.T) )
    ordered_index = eigenvalues2.argsort()  # From smallest to largest absolute
    eigenvalues2 = eigenvalues2[ordered_index]  # Eigenvalues
    eigenvectors2 = eigenvectors2[:, ordered_index]  # Eigenvectors on columns
    # Choose number of significant values
    Nsignificant2 = np.linalg.matrix_rank( np.matmul(X2, X2.T) )
    # Crop data according to it
    F2 = np.zeros((N, N))
    F2[-Nsignificant2:,-Nsignificant2:] = np.diag(
        1/np.sqrt(eigenvalues2[-Nsignificant2:]))
    auxiliar = np.matmul(eigenvectors2, F2)
    U2 = np.matmul(X2.T, auxiliar)  # Xmatrix.T * eigenvectors * F
    # Get defining vector
    auxiliar = np.matmul(eigenvectors2.T, x)
    auxiliar = np.matmul(F2.T, auxiliar)
    A2 = np.matmul(U2, auxiliar)  # U * F.T * eigenvectors.T * xvector
    # |--> Least-Squares?
    # Calculate phases 'phi' and amplitudes 'C'
    amplitudes = []
    phases = []
    for i in range(Nfit_terms):
        if A2[2*i]==0 and A2[2*i+1]==0:
            amplitudes.append( 0 )
            phases.append( 0 )
        elif A2[2*i]==0:
            amplitudes.append( abs(A2[2*i+1]) )
            phases.append( np.sign(A2[2*i+1]) * pi/2 )
        elif A2[2*i+1]==0:
            amplitudes.append( abs(A2[2*i]) )
            phases.append( (1-np.sign(A2[2*i])) * pi/2 )
        else:
            amplitudes.append( np.sqrt(A2[2*i+1]**2 + A2[2*i]**2) )
            phases.append( np.arctan2(A2[2*i+1], A2[2*i]) )
    frequencies = 1000 * angular_frequencies / (2*pi)  # in GHz
    amplitudes = np.array(amplitudes)
    phases = np.array(phases)
    pi_phases = phases / pi  # in radians written as multiples of pi
    # Print some results, if specified
    if Nfit_terms==0:
        raise ValueError("¡Error! No se encontraron términos de ajuste")
    elif printing:
        if Nfit_terms>1:
            print("¡Listo! Encontramos {} términos".format(Nfit_terms))
        else:
            print("¡Listo! Encontramos {} término".format(Nfit_terms))
    if printing:
        print("Chi cuadrado \u03C7\u00B2: {:.2e}".format(chi_squared)) if False else None
    # -------------------------------------------------------------------------
    # FIT, PLOTS AND STATISTICS
    # -------------------------------------------------------------------------
    if printing:
        print("Frecuencias: {} GHz".format(frequencies))
    # Calculate terms for plotting
    fit_terms = np.array([a * np.exp(-b*(t-t[0])) * np.cos(omega*(t-t[0]) + phi)
                          for a, b, omega, phi in zip(amplitudes,
                                                      damping_constants,
                                                      angular_frequencies,
                                                      phases)]).T
    fit = sum(fit_terms.T)
    # Make statistics and print them, if specified
    chi_squared = sum( (fit - x)**2 ) / N  # Best if absolute is smaller
    if printing:
        print("Chi cuadrado \u03C7\u00B2: {:.2e}".format(chi_squared))
    # Raman-like Spectrum parameters
    max_frequency = max(frequencies)
    frequencies_damping = 1000 * damping_constants / (2*pi)  # in GHz
    if max_frequency != 0:
        raman_frequencies = np.arange(0, 1.5*max_frequency, max_frequency/1000)
    else:
        raman_frequencies = np.array([0, 12])
    # Raman-like Spectrum per se
    raman_spectrum_terms = np.zeros((len(raman_frequencies), Nfit_terms))
    for i in range(Nfit_terms):
        if angular_frequencies[i]==0:
            raman_spectrum_terms[:,i] = 0
        else:
            raman_spectrum_terms[:,i] = amplitudes[i] * np.imag(
                frequencies[i] /
                (frequencies[i]**2 - raman_frequencies**2 -
                 2j * raman_frequencies * frequencies_damping[i]))
    raman_spectrum = np.sum(raman_spectrum_terms, axis=1)
    # What I would like this function to return
    results = np.array([frequencies,
                        characteristic_times,
                        quality_factors,
                        amplitudes,
                        pi_phases]).T
    # Some other results I need to plot
    other_results = dict(chi_squared = chi_squared,
                         Nsingular_values = Nsignificant)
    # Create nice data to plot: each nonzero-frequency term is rescaled to the
    # overall fit's range and shifted to its mean so all terms are visible.
    fit_plot_results = np.array([t, x, fit, *list(fit_terms.T)]).T
    new_plot_fit_terms = []
    for f, ft in zip(results[:,0], fit_plot_results[:,3:].T):
        if f != 0:
            factor = (fit.max() - fit.min())/(ft.max() - ft.min())
            shift = np.mean(fit)
            new_plot_fit_terms.append(list(ft*factor+shift))
        else:
            new_plot_fit_terms.append(list(ft))
    new_plot_fit_terms = np.array(new_plot_fit_terms).T
    # (leftover debug prints of new_plot_fit_terms/fit shapes were removed)
    # And the data to plot
    plot_results = ivu.InstancesDict(dict(
        fit = np.array([t, x, fit, *new_plot_fit_terms.T]).T,
        raman = np.array([raman_frequencies, raman_spectrum,
                          *list(raman_spectrum_terms.T)]).T))
    return results, other_results, plot_results
#%%
def linearPredictionTables(parameters, results, other_results):
    """Build two tab-separated report tables for a linear-prediction fit.

    Parameters
    ----------
    parameters :
        Object exposing the fit configuration attributes ``use_full_mean``,
        ``use_experiments``, ``send_tail_to_zero``, ``use_fraction``,
        ``tail_method``, ``voltage_zero`` and ``time_range``.
    results : iterable of rows
        Fitted terms; each row holds (frequency, tau, Q, amplitude, phase).
    other_results : dict
        Must hold keys 'Nsingular_values' and 'chi_squared'.

    Returns
    -------
    terms_table : str
        Heading line plus one tab-separated line per fitted term.
    fit_table : str
        One "heading<TAB>value" line per configuration entry.
    """
    terms_heading = ["F (GHz)", "\u03C4 (ps)", "Q", "A (u.a.)", "Fase (\u03C0rad)"]
    terms_heading = '\t'.join(terms_heading)
    terms_table = ['\t'.join([str(element) for element in row]) for row in results]
    terms_table = '\n'.join(terms_table)
    terms_table = '\n'.join([terms_heading, terms_table])
    fit_heading = ["Experimentos utilizados",
                   "Número de valores singulares",
                   "Porcentaje enviado a cero (%)",
                   "Método de corrimiento",
                   "Corrimiento V\u2080 (\u03BCV)",
                   r"Rango temporal → Inicio (ps)",
                   r"Rango temporal → Final (ps)",
                   "Chi cuadrado \u03C7\u00B2"]
    if parameters.use_full_mean:
        used_experiments = 'Todos'
    else:
        # The original special-cased len(use_experiments)==1 with an identical
        # branch; the dead conditional is collapsed here.
        used_experiments = 'Sólo ' + ', '.join(
            '{:.0f}'.format(i + 1) for i in parameters.use_experiments)
    if parameters.send_tail_to_zero:
        tail_percent = parameters.use_fraction*100
    else:
        tail_percent = 0
    # Human-readable name of the tail-shift method.
    method = {'mean': 'Promedio',
              'min': 'Mínimo',
              'max': 'Máximo'}.get(parameters.tail_method, 'Desconocido')
    fit = [used_experiments,
           str(other_results['Nsingular_values']),
           '{:.0f}'.format(tail_percent),
           method,
           str(parameters.voltage_zero),
           str(parameters.time_range[0]),
           str(parameters.time_range[1]),
           '{:.2e}'.format(other_results['chi_squared'])]
    fit_table = ['\t'.join([h, f]) for h, f in zip(fit_heading, fit)]
    fit_table = '\n'.join(fit_table)
    return terms_table, fit_table
#%%
def arrayTable(array, heading_list=None, axis=0):
    """Render a 2D array as a tab-separated table.

    Rows of the returned string follow axis 0 of *array* (or axis 1 when
    ``axis=1``, which transposes first).  When *heading_list* is given it
    becomes the first, tab-joined line.  Raises ValueError for any other axis.
    """
    if axis == 1:
        array = array.T
    elif axis != 0:
        raise ValueError("Axis must be 0 or 1!")
    body = '\n'.join('\t'.join(str(cell) for cell in row) for row in array)
    if heading_list is None:
        return body
    return '\n'.join(['\t'.join(heading_list), body])
def fibb(n):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1).

    Iterates with a rolling pair instead of materialising the whole sequence,
    so it runs in O(1) memory.  Raises ValueError for negative n (the original
    silently returned 1 via a negative list index).
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
# Script entry: read an index N from stdin and print the N-th Fibonacci number.
N = int(input())
print(fibb(N))
|
"""
Box-and-Whisker Plot
"""
import scripts.plot._tools as tools
import argparse as ap
import os
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from typing import Dict, Any, Optional
def load_dataframe(ifname: str) -> pd.DataFrame:
    """Load a :class:`pd.DataFrame` from *ifname*.

    Only ``.csv`` files are supported; any other extension raises ``IOError``.
    """
    extension = os.path.splitext(ifname)[1].lower().strip()
    if extension != ".csv":
        raise IOError(f"Unsupported file extension {extension}")
    return pd.read_csv(ifname)
def plot(
    df: pd.DataFrame,
    output: Optional[str] = None,
    x_name: Optional[str] = None,
    y_name: Optional[str] = None,
    hue_name: Optional[str] = None,
    swarm: bool = False,
    notch: bool = False,
    title: Optional[str] = None,
    xlabel: Optional[str] = None,
    ylabel: Optional[str] = None,
    legend_name: Optional[str] = None,
    hide_fliers: bool = False,
) -> None:
    """Draw a box-and-whisker plot of *df*, optionally overlaid with a swarm.

    Saves the figure to *output* when given, otherwise shows it interactively.
    """
    # TODO: Compute and show the average as well?
    # if remove_outliers:
    #     df = tools.remove_outliers(df)
    if hide_fliers:
        # Do not show outliers
        showfliers = False
    else:
        # Do not show outliers if the swarm is plotted
        # (they would otherwise appear twice: as fliers and as swarm points)
        showfliers = not swarm
    sns.boxplot(
        data=df, x=x_name, y=y_name, hue=hue_name, notch=notch, showfliers=showfliers
    )
    if swarm:
        ax = sns.swarmplot(
            data=df,
            x=x_name,
            y=y_name,
            hue=hue_name,
            dodge=True,
            edgecolor="gray",  # Color lines around each point
            linewidth=1,
        )
        # Fix legend for duplicates
        # The legend contains both the hues from sns.boxplot than the ones from
        # sns.swarmplot, so only the first half of the handles is kept.
        h, l = ax.get_legend_handles_labels()
        n = len(h) // 2
        plt.legend(h[:n], l[:n], title=legend_name)
    else:
        # A legend is produced only when hue is given
        if hue_name is not None:
            plt.legend(title=legend_name)
    # Title and labels
    if title is not None:
        plt.title(title)
    if xlabel is not None:
        plt.xlabel(xlabel)
    if ylabel is not None:
        plt.ylabel(ylabel)
    # Plot or save
    if output is not None:
        plt.savefig(output)
    else:
        plt.show()
def args_to_dict(args: ap.Namespace) -> Dict[str, Any]:
    """Translate parsed CLI options into keyword arguments for ``plot``.

    .. note
        This function loads a `pd.DataFrame` from file, since the input is a
        file name but `plot` requires a `pd.DataFrame`.
    """
    # Map each plot() keyword onto the corresponding argparse attribute.
    attr_for = {
        "output": "output",
        "x_name": "xname",
        "y_name": "yname",
        "hue_name": "group",
        "swarm": "swarm",
        "notch": "notch",
        "title": "title",
        "xlabel": "xlabel",
        "ylabel": "ylabel",
        "legend_name": "legend",
        "hide_fliers": "hide_fliers",
    }
    options: Dict[str, Any] = {key: getattr(args, attr)
                               for key, attr in attr_for.items()}
    options["df"] = load_dataframe(args.input)
    return options
def parse(args: Optional[str] = None) -> ap.Namespace:
    """
    Parse command-line arguments.
    Args:
        args (str, optional): String to parse
    Returns:
        An `ap.Namespace` containing the parsed options
    .. note::
        If ``args is None`` the string to parse is read from ``sys.argv``
    """
    # Parser.  Description fixed: this module draws box-and-whisker plots,
    # not histograms (the original string was copy-pasted from the histogram
    # script).
    parser = ap.ArgumentParser(description="Box-and-whisker plot.")
    # Add arguments
    parser.add_argument("-i", "--input", type=str, required=True, help="Input file")
    parser.add_argument("-o", "--output", type=str, default=None, help="Output file")
    parser.add_argument("-x", "--xname", type=str, default=None, help="x name")
    parser.add_argument("-y", "--yname", type=str, default=None, help="y name")
    parser.add_argument("-g", "--group", type=str, default=None, help="Hue name")
    parser.add_argument(
        "-s", "--swarm", default=False, action="store_true", help="Swarmplot"
    )
    parser.add_argument(
        "-n", "--notch", default=False, action="store_true", help="Notch"
    )
    parser.add_argument("-t", "--title", type=str, default=None, help="Plot title")
    parser.add_argument("-lx", "--xlabel", type=str, default=None, help="x label")
    parser.add_argument("-ly", "--ylabel", type=str, default=None, help="y label")
    parser.add_argument("-ln", "--legend", type=str, default=None, help="Legend name")
    parser.add_argument(
        "--hide-fliers", default=False, action="store_true", help="Do not show outliers"
    )
    # Parse arguments
    return parser.parse_args(args)
if __name__ == "__main__":
    # Script entry point: parse CLI options, turn them into plot() keyword
    # arguments (loading the dataframe from disk), and draw/save the plot.
    args = parse()
    args_dict = args_to_dict(args)
    plot(**args_dict)
|
def first_n_primes(count):
    """Return a list of the first *count* prime numbers via trial division.

    The for/else idiom appends m only when no divisor in [2, m) was found.
    """
    primes = []
    m = 2
    while len(primes) < count:
        for i in range(2, m):
            if m % i == 0:
                break
        else:
            primes.append(m)
        m += 1
    return primes


if __name__ == "__main__":
    # Ported from Python 2: input() now returns a string (so it is converted
    # explicitly) and print is a function.
    n = int(input('enter a no'))
    print(*first_n_primes(n))
|
# Write a Python program to count the number of elements in a list within a specified range.
def countElement(n, m, listprovided):
    """Return how many items of *listprovided* lie in the closed range [n, m]."""
    # Chained comparison replaces the index loop and the no-op else branch.
    return sum(1 for value in listprovided if n <= value <= m)

listprovided = [10,20,30,40,40,40,70,80,99]
output = countElement(40,100,listprovided)
print(output)
|
import math
import matplotlib
import random
class co:
    # Mutable "coefficient" state shared by calc/cochange/close below.
    one = 0
    two = 0

test = range(1,102)

def calc(one=None):
    """Return (price, one) where price = co.one + co.two * one.

    When *one* is omitted a fresh random value in [1, 401] is drawn per call.
    The original used ``one=random.randint(1,401)`` as the default, which is
    evaluated once at definition time and therefore froze the "random" input
    for every no-argument call.
    """
    if one is None:
        one = random.randint(1,401)
    price = co.one + (co.two * one)
    return price,one
def cochange(change="add"):
    # Shift both coefficients of `co` by the same integer step, down when
    # change == "take" and up for any other value.
    # NOTE(review): relies on the module-level global `price_check` being set
    # beforehand by close(); calling this first raises NameError — confirm
    # intended call order.
    if change == "take":
        co.one -= int((100*price_check))+1
        co.two -= int((100*price_check))+1
    else:
        co.one += int((100*price_check))+1
        co.two += int((100*price_check))+1
def close(price,real):
    # Compare a computed price against the expected one and nudge co's
    # coefficients towards agreement; returns a status string
    # ("add", "took" or "done").  Sets the global `price_check` used by
    # cochange().
    # NOTE(review): the exact nesting of the inner if/else ladder is hard to
    # verify from this copy (indentation was partially lost in the source);
    # confirm against the original file before refactoring.
    global price_check
    price_check = price/real
    if price_check < 1:
        cochange("a")  # any string other than "take" means "add"
        a = "add"
    elif price_check == 1:
        # Probe every value in `test`; t stays "c" only if all probes agree.
        t = "c"
        for i in test:
            a,o = calc(i)
            pc = a/(i*1000)
            print(pc)
            if pc == 1:
                None  # no-op; presumably a placeholder for "keep t as is"
            else:
                t = "f"
                if t == "f":
                    t = "f"  # NOTE(review): redundant self-assignment
                if pc < 1:
                    cochange("a")
                    a = "add"
                else:
                    cochange("take")
                    a = "took"
        print(t)
        if t == "c":
            a = "done"
    else:
        cochange("take")
        a = "took"
    return a
# Driver loop: keep adjusting co's coefficients until close() reports "done",
# then print the final coefficients.
# NOTE(review): termination depends on the coefficients converging; this can
# run for a very long time (or not terminate) — confirm expected behaviour.
while True:
    price,ba = calc()
    out = close(price,(ba*1000))
    print(out,": ("+str(co.one)+","+str(co.two)+")")
    if out == "done":
        print("co one: "+str(co.one))
        print("co two: "+str(co.two))
        break
|
'''Rotating an image means turning it through an angle, normally about its
center. So we first compute the center of the image and then rotate it through
the given angle. Any point of the image can serve as the pivot, but the
center is usually preferred.'''
from __future__ import print_function
import cv2
import argparse
def show_img(img):
    # Display *img* in a window titled "canvas" and block until any key is
    # pressed (cv2.waitKey(0) waits indefinitely).
    cv2.imshow("canvas",img)
    cv2.waitKey(0)
    return
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True, help = "path to image")
args = vars(ap.parse_args())
print(args)
load_image = cv2.imread(args["image"])
#show_img(load_image)
h,w = load_image.shape[:2]
print(h,w)
# cv2 uses (x, y) coordinates, so the rotation center is (width/2, height/2).
# The original (h//2, w//2) swapped the axes, rotating non-square images
# about the wrong point.
Center = (w//2,h//2)
print(Center)
M = cv2.getRotationMatrix2D(Center, 45, 1.0)
rotated = cv2.warpAffine(load_image, M, (w,h))
show_img(rotated)
'''45 degrees is the angle by which we rotate the image, and 1.0 is the scale.
1.0 keeps the original size, 2.0 doubles the size, and 0.5 halves the size of
the image.'''
from django.db import models
from django.contrib.auth import get_user_model
# from accounts.models import CustomUser
class Blog(models.Model):
    """A blog post authored by a site user.

    Timestamps are managed automatically: created_on is set once on insert,
    updated_on on every save.
    """

    title = models.CharField(max_length=200)
    author = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
    text = models.TextField()
    created_on = models.DateTimeField(auto_now_add=True)
    updated_on = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.title
|
from chainer import Chain, serializers, optimizers, cuda, config
import chainer.links as L
import chainer.functions as F
from chainer import iterators
from chainer import Variable
import numpy as np
import const
import os.path
import pandas as pd
from time import time
import util
cp = cuda.cupy
class UNet(Chain):
    """Convolutional U-Net that predicts a multiplicative mask for its input.

    Six strided 4x4 convolutions halve the spatial size at each step
    (1 -> 16 -> 32 -> ... -> 512 channels); six deconvolutions mirror them,
    concatenating the matching encoder activation (skip connections).  The
    final sigmoid output is multiplied element-wise with the input X, i.e.
    the network estimates a [0, 1] mask rather than the output directly.
    """

    def __init__(self):
        super(UNet, self).__init__()
        with self.init_scope():
            # Encoder: every conv is kernel 4, stride 2, pad 1 (halves H, W).
            self.conv1 = L.Convolution2D(1, 16, 4, 2, 1)
            self.norm1 = L.BatchNormalization(16)
            self.conv2 = L.Convolution2D(16, 32, 4, 2, 1)
            self.norm2 = L.BatchNormalization(32)
            self.conv3 = L.Convolution2D(32, 64, 4, 2, 1)
            self.norm3 = L.BatchNormalization(64)
            self.conv4 = L.Convolution2D(64, 128, 4, 2, 1)
            self.norm4 = L.BatchNormalization(128)
            self.conv5 = L.Convolution2D(128, 256, 4, 2, 1)
            self.norm5 = L.BatchNormalization(256)
            self.conv6 = L.Convolution2D(256, 512, 4, 2, 1)
            self.norm6 = L.BatchNormalization(512)
            # Decoder: input channels are doubled (except deconv1) because
            # the skip connection concatenates the encoder activation.
            self.deconv1 = L.Deconvolution2D(512, 256, 4, 2, 1)
            self.denorm1 = L.BatchNormalization(256)
            self.deconv2 = L.Deconvolution2D(512, 128, 4, 2, 1)
            self.denorm2 = L.BatchNormalization(128)
            self.deconv3 = L.Deconvolution2D(256, 64, 4, 2, 1)
            self.denorm3 = L.BatchNormalization(64)
            self.deconv4 = L.Deconvolution2D(128, 32, 4, 2, 1)
            self.denorm4 = L.BatchNormalization(32)
            self.deconv5 = L.Deconvolution2D(64, 16, 4, 2, 1)
            self.denorm5 = L.BatchNormalization(16)
            self.deconv6 = L.Deconvolution2D(32, 1, 4, 2, 1)

    def __call__(self, X):
        # Encoder path (leaky ReLU after batch norm).
        h1 = F.leaky_relu(self.norm1(self.conv1(X)))
        h2 = F.leaky_relu(self.norm2(self.conv2(h1)))
        h3 = F.leaky_relu(self.norm3(self.conv3(h2)))
        h4 = F.leaky_relu(self.norm4(self.conv4(h3)))
        h5 = F.leaky_relu(self.norm5(self.conv5(h4)))
        h6 = F.leaky_relu(self.norm6(self.conv6(h5)))
        # Decoder path; dropout only on the three deepest deconv stages.
        dh = F.relu(F.dropout(self.denorm1(self.deconv1(h6))))
        dh = F.relu(F.dropout(self.denorm2(self.deconv2(F.concat((dh, h5))))))
        dh = F.relu(F.dropout(self.denorm3(self.deconv3(F.concat((dh, h4))))))
        dh = F.relu(self.denorm4(self.deconv4(F.concat((dh, h3)))))
        dh = F.relu(self.denorm5(self.deconv5(F.concat((dh, h2)))))
        dh = F.sigmoid(self.deconv6(F.concat((dh, h1))))
        # Apply the predicted mask to the input.
        dh = dh * X
        return dh

    def load(self, fname="unet.model"):
        # Restore weights previously written by save().
        serializers.load_npz(fname, self)

    def save(self, fname="unet.model"):
        serializers.save_npz(fname, self)
class UNetTrainmodel(Chain):
    """Training wrapper that turns a UNet forward pass into an L1 loss."""

    def __init__(self, unet):
        super(UNetTrainmodel, self).__init__()
        with self.init_scope():
            self.unet = unet

    def __call__(self, X, Y):
        # Mean absolute error between the masked input and the target Y.
        # The loss is kept on self.loss so callers (e.g. the optimizer's
        # update step) can read it afterwards.
        O = self.unet(X)
        self.loss = F.mean_absolute_error(O, Y)
        return self.loss
def TrainUNet(Xlist, Ylist, Plist, Qlist, epoch=40, savefile="unet.model",checkpoint = ''):
    """Train a UNet on random fixed-width crops of 2D arrays.

    Xlist/Ylist are training input/target pairs and Plist/Qlist the matching
    validation pairs; each element is a 2D array whose second axis is time
    (row 0 is dropped everywhere via ``[1:, ...]``).  Batches of
    PATCH_LENGTH-wide random crops (zero-padded when an item is shorter) are
    trained with Adam on L1 loss on GPU 0; a checkpoint is saved every epoch
    and the loss history is dumped to an Excel file at the end.

    NOTE(review): `savefile` is accepted but never used — checkpoints go to
    const.PATH_CHECKPOINTS instead; confirm whether a final save was intended.
    NOTE(review): range(1, epoch) runs epoch-1 epochs; confirm off-by-one.
    """
    assert(len(Xlist) == len(Ylist))
    unet = UNet()
    # Optionally resume from an earlier checkpoint.
    if checkpoint != '':
        unet.load(checkpoint)
    model = UNetTrainmodel(unet)
    model.to_gpu(0)
    opt = optimizers.Adam()
    opt.setup(model)
    config.train = True
    config.enable_backprop = True
    itemcnt = len(Xlist)
    itemcnt_val = len(Plist)
    itemlength = [x.shape[1] for x in Xlist]
    print('batch_size:{}'.format(const.BATCH_SIZE))
    # Number of mini-batches per epoch, sized so each epoch sees roughly 4x
    # the dataset in patches.
    subepoch = sum(itemlength) // const.PATCH_LENGTH // const.BATCH_SIZE * 4
    #subepoch = itemcnt // const.BATCH_SIZE
    print("subepoch:{}".format(subepoch))
    print("ready to train")
    # Hyper-parameters plus per-epoch loss history, later written to Excel.
    loss_dataframe = {
        'FFT_size':const.FFT_SIZE,
        'Hop_size':const.H,
        'Window_length':const.WIN_LENGTH,
        'Batch_size':const.BATCH_SIZE,
        'Patch_length':const.PATCH_LENGTH,
        'train_loss':[],
        'val_loss':[],
        'epoch':[]
    }
    for ep in range(1,epoch):
        start = time()
        print("*****************************************************************")
        sum_loss = 0.0
        loss_val = 0.0
        for subep in range(subepoch):
            # One mini-batch each of training (X, Y) and validation (P, Q)
            # patches; shape (batch, 1 channel, FFT_SIZE//2 bins, PATCH_LENGTH).
            X = np.zeros((const.BATCH_SIZE, 1, const.FFT_SIZE//2, const.PATCH_LENGTH),
                         dtype="float32")
            Y = np.zeros((const.BATCH_SIZE, 1, const.FFT_SIZE//2, const.PATCH_LENGTH),
                         dtype="float32")
            P = np.zeros((const.BATCH_SIZE, 1, const.FFT_SIZE//2, const.PATCH_LENGTH),
                         dtype="float32")
            Q = np.zeros((const.BATCH_SIZE, 1, const.FFT_SIZE//2, const.PATCH_LENGTH),
                         dtype="float32")
            idx_item = np.random.randint(0, itemcnt, const.BATCH_SIZE)
            idx_item_val = np.random.randint(0, itemcnt_val, const.BATCH_SIZE)
            for i in range(const.BATCH_SIZE):#To generate input X and Y in training set, and P and Q in validation set, both in mini-batch.
                if itemlength[idx_item[i]] > const.PATCH_LENGTH:
                    # Long enough: take a random PATCH_LENGTH-wide crop.
                    randidx = np.random.randint(
                        itemlength[idx_item[i]]-const.PATCH_LENGTH)
                    X[i, 0, :, :] = \
                        Xlist[idx_item[i]][1:, randidx:randidx+const.PATCH_LENGTH]
                    Y[i, 0, :, :] = \
                        Ylist[idx_item[i]][1:, randidx:randidx+const.PATCH_LENGTH]
                else:
                    # Too short: zero-pad on the right up to PATCH_LENGTH.
                    dff = const.PATCH_LENGTH - itemlength[idx_item[i]]
                    x_spec = Xlist[idx_item[i]][1:, :]
                    y_spec = Ylist[idx_item[i]][1:, :]
                    x_spec = np.pad(x_spec,((0,0),(0,dff)),'constant')
                    y_spec = np.pad(y_spec,((0,0),(0,dff)),'constant')
                    X[i, 0, :, :] = \
                        x_spec
                    Y[i, 0, :, :] = \
                        y_spec
                if Plist[idx_item_val[i]].shape[1] >const.PATCH_LENGTH:
                    # Same crop/pad logic for the validation pair.
                    randidx = np.random.randint(
                        Plist[idx_item_val[i]].shape[1]-const.PATCH_LENGTH)
                    P[i, 0, :, :] = \
                        Plist[idx_item_val[i]][1:, randidx:randidx+const.PATCH_LENGTH]
                    Q[i, 0, :, :] = \
                        Qlist[idx_item_val[i]][1:, randidx:randidx+const.PATCH_LENGTH]
                else:
                    dff = const.PATCH_LENGTH - Plist[idx_item_val[i]].shape[1]
                    x_spec = Plist[idx_item_val[i]][1:, :]
                    y_spec = Qlist[idx_item_val[i]][1:, :]
                    x_spec = np.pad(x_spec,((0,0),(0,dff)),'constant')
                    y_spec = np.pad(y_spec,((0,0),(0,dff)),'constant')
                    P[i, 0, :, :] = \
                        x_spec
                    Q[i, 0, :, :] = \
                        y_spec
            opt.use_cleargrads(use = True)
            opt.update(model, cp.asarray(X), cp.asarray(Y))#update parameters and compute loss for each batch
            sum_loss += model.loss.data * const.BATCH_SIZE #model.loss returns a chainer varible, model.loss.data returns the data array of the varible
            # Validation forward pass (no parameter update).
            P = cp.asarray(P)
            O = unet(P)
            loss_val_batch = F.mean_absolute_error(O ,cp.asarray(Q))
            loss_val = loss_val + loss_val_batch.data * const.BATCH_SIZE
        # NOTE(review): losses were multiplied by BATCH_SIZE above but are
        # divided only by subepoch here, so these are BATCH_SIZE * mean loss —
        # confirm the intended normalization.
        sum_loss = sum_loss / subepoch
        loss_val = loss_val / subepoch
        loss_dataframe['train_loss'].append(sum_loss)
        loss_dataframe['val_loss'].append(loss_val)
        loss_dataframe['epoch'].append(ep)
        # Checkpoint every epoch.
        sf = os.path.join(const.PATH_CHECKPOINTS,"checkpoint_" + str(ep) + ".model")
        unet.save(sf)
        end = time()
        print("duration:{:.2f}s".format(end - start))
        print("epoch: %d/%d loss=%.3f" % (ep, epoch, sum_loss))
        print("loss_val:{},epoch:{}".format(loss_val, ep))
    # NOTE(review): hard-coded absolute Windows path; consider moving it to
    # const alongside the other paths.
    frame = pd.DataFrame(loss_dataframe)
    frame.to_excel(excel_writer = 'C:\\Singing_voice\\UUNet\\loss.xlsx',engine = 'xlsxwriter')
|
class BankAccount:
    """Simple bank account with a password-protected balance query."""

    def __init__(self, name, surname):
        self.name = name
        self.surname = surname
        self._balance = 0
        self._password = ''

    def set_balance(self, amount):
        # Note: adds *amount* to the balance rather than replacing it.
        self._balance += amount

    def set_password(self, parole):
        self._password = parole

    def get_balance(self):
        # Guard clause: refuse the query until a password has been set.
        if self._password == '':
            print('Установите пароль.')
            return
        sign = input('Для получения баланса введите пароль: ')
        if sign == self._password:
            print(f'Баланс Вашего счета {self._balance} P.')
        else:
            print('Пароль неверный. В доступе отказано.')
# Demo: the balance query is refused until a password has been set.
masha = BankAccount('Masha', 'Petrova')
masha.set_balance(250)
masha.set_balance(43974)
masha.get_balance()  # prints the "set a password first" warning
masha.set_password('mammamia')
masha.get_balance()  # now prompts for the password interactively
|
from spack import *
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class TensorflowToolfile(Package):
    """Spack package that writes SCRAM toolfiles for a TensorFlow build.

    The fetched payload is a dummy XML file; the real work happens in
    install(), which renders one SCRAM <tool> description per TensorFlow
    library/interface.
    """
    url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'

    version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)

    depends_on('tensorflow-cms')

    def install(self, spec, prefix):
        """Render and write every SCRAM toolfile for this TensorFlow build."""
        # Template values shared by all toolfiles.
        # NOTE(review): the dependency is declared as 'tensorflow-cms' but the
        # spec is queried as 'tensorflow' — confirm the provided/virtual name.
        values = {}
        values['PFX'] = spec['tensorflow'].prefix
        values['VER'] = spec['tensorflow'].version
        # (filename, template) pairs written in the original order; the loop
        # replaces five copy-pasted write_scram_toolfile stanzas.
        toolfiles = [
            ('tensorflow.xml', """
<tool name="tensorflow" version="${VER}">
  <client>
    <environment name="TENSORFLOW_BASE" default="${PFX}"/>
    <environment name="LIBDIR" default="$$TENSORFLOW_BASE/lib"/>
    <environment name="INCLUDE" default="$$TENSORFLOW_BASE/include"/>
    <environment name="TFCOMPILE" default="$$TENSORFLOW_BASE/bin/tfcompile"/>
  </client>
  <runtime name="PATH" value="$$TENSORFLOW_BASE/bin" type="path"/>
</tool>
"""),
            ('tensorflow-framework.xml', """
<tool name="tensorflow-framework" version="${VER}">
  <lib name="tensorflow_framework"/>
  <use name="tensorflow"/>
</tool>
"""),
            ('tensorflow-cc.xml', """
<tool name="tensorflow-cc" version="${VER}">
  <lib name="tensorflow_cc"/>
  <use name="tensorflow-framework"/>
  <use name="eigen"/>
  <use name="protobuf"/>
</tool>
"""),
            ('tensorflow-c.xml', """
<tool name="tensorflow-c" version="${VER}">
  <lib name="tensorflow"/>
  <use name="tensorflow-framework"/>
</tool>
"""),
            ('tensorflow-runtime.xml', """
<tool name="tensorflow-runtime" version="${VER}">
  <lib name="tf_aot_runtime"/>
  <use name="tensorflow"/>
</tool>
"""),
            ('tensorflow-xla_compiled_cpu_function.xml', """
<tool name="tensorflow-xla_compiled_cpu_function" version="${VER}">
  <lib name="xla_compiled_cpu_function"/>
  <use name="tensorflow"/>
</tool>
"""),
        ]
        for fname, content in toolfiles:
            write_scram_toolfile(content, values, fname, prefix)
|
#!/usr/bin/env python3
"""
QAMatching.py used for this QAMatchingServer.py
Created by Sriram Sitharaman, Damir Cavar
Version: 0.1
Given a input Natural Language query,
Identifies the matching regex and hits the neo4j graph DB with the corresponding Cypher query
11/30/2017 : Created by Sriram Sitharaman
12/3/2017 : Updated By Shridivya Sharma - Updated date functions
To run the code in test mode one server instance at the configured port has to be present. The test mode uses a client
to communicate via XML-RPC to the server.
Run a server first in a terminal:
python3 QAMatchingServer.py -c config.ini
Then run the test client in another terminal window, and observe the logs and outputs.
python3 QAMatchingServer.py -t -c config.ini
"""
import re, sys, argparse, configparser, logging, time
from neo4j.v1 import GraphDatabase, basic_auth
# from py2neo import authenticate, Graph
from bs4 import BeautifulSoup
import wikipedia
import xmlrpc.client
from xmlrpc.server import SimpleXMLRPCServer
from SemDiscUtils import encodeReturn, decodeReturn
from defaults import MODULES, CONFFILE
MODULENAME = "QAMatchingServer"
def loadModel():
    """Loads the XML model of patterns and queries and returns the two lists.

    Returns
    -------
    patternsList : list of str
        Regex patterns, in document order.
    cypherList : list of str
        Cypher query templates paired with the patterns at the same index.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original open(...).read() leaked it).
    with open("QAPatterns.xml") as xml_file:
        parsedXML = BeautifulSoup(xml_file.read(), "lxml")
    # Getting the List of regex patterns from the XML file
    patternsList = [patternTag.text for patternTag in parsedXML.findAll("pattern")]
    # Getting the List of corresponding Cypher query matches
    cypherList = [cypherTag.text for cypherTag in parsedXML.findAll("cypher")]
    return patternsList, cypherList
def parse(text):
    """Identifies the matching regex and hits the neo4j graph DB with the corresponding Cypher query

    Falls back to a two-sentence Wikipedia summary when the graph returns no
    rows, and to a fixed apology string when no pattern matches at all.
    """
    # Strip question marks so the regexes need not account for them.
    text = text.replace("?", "")
    hit_yes_no = False
    #if log:
    logging.debug("Question:" + text)
    #else:
    print("Question:", text)
    # NOTE(review): credentials are hard-coded; consider moving them into the
    # config file this server already reads.
    uri = "bolt://linguistic.technology:7687"
    driver = GraphDatabase.driver(uri, auth=("neo4j", "DtwAMjrk6zt1bHifYOJ6"))
    session = driver.session()
    # match (n) optional match (n)-[r]-() return n,r:
    # graph = Neo4J.createCypherQueries(self._edgeList)
    # authenticate("linguistic.technology:7474", "neo4j", "DtwAMjrk6zt1bHifYOJ6")
    # graph = Graph("http://linguistic.technology:7474/db/data/")
    result = ""
    matchedX = ""
    pos = 0
    # First pattern that matches wins; its last capture group is taken as the
    # entity being asked about.
    for pos in range(len(patternsList)):
        match = re.match(patternsList[pos], text, re.IGNORECASE)
        if match:
            matchedX = list(match.groups())[-1]
            print("matchedX:", matchedX)
            hit_yes_no = True
            #logging.debug("Match:" + matchedX)
            print("Match:", matchedX)
            break
    if hit_yes_no:
        # Substitute the captured entity into the paired Cypher template.
        cypher = cypherList[pos].replace("(.*)", matchedX)
        logging.debug("Cypher:\n" + cypher)
        print("Cypher:", cypher)
        #result = graph.cypher.execute(cypher)
        result = session.run(cypher)
        logging.debug("Result:")
        logging.debug(result)
        print("Result:", result)
        res=[record for record in result]
        if len(res)>0:
            # Build an answer sentence from the returned nodes/relationships:
            # "<a> is a <label>" once, then "<a> <rel> <b>" per row.
            Count=0
            resultStr=[]
            for row in res:
                StartName=row['a'].properties['Name']
                StartLabel=row['a'].properties['label']
                try:
                    relationshipName=row['r'].properties['Name']
                    EndName=row['b'].properties['Name']
                    EndLabel=row['b'].properties['label']
                except:
                    # Row has no relationship/end node; treat as node-only.
                    # NOTE(review): bare except also hides unrelated errors.
                    relationshipName=EndName=EndLabel=""
                if Count==0:
                    resultStr.append(" ".join([StartName,"is a",StartLabel]))
                if len(relationshipName)!=0:
                    resultStr.append(" ".join([StartName,relationshipName,EndName]))
                Count+=1
            session.close()
            return ". ".join(resultStr)
        # Hits Wikipedia only for factual questions
        else: # and pos <= 2:
            # NOTE(review): the session is closed only on the graph-hit path
            # above; this branch and the no-match fall-through appear to leak
            # it — confirm and close in a finally block if so.
            resultStr=[]
            logging.debug("Result from Wikipedia:")
            print("Here is what we found on Wikipedia:")
            resultStr.append("Here is what we found on Wikipedia:")
            # changed code:
            #
            try:
                result = wikipedia.page(matchedX)
            except:
                # Any Wikipedia failure (disambiguation, missing page,
                # network) degrades to the generic apology.
                return "Sorry, I cannot help you with that."
            #print("Wikipedia suggests for:", matchedX)
            #print(result)
            #if result:
            #    result = str(wikipedia.page(result).summary.encode(sys.stdout.encoding, 'ignore')) # .split(".")[:2]
            #    print("Wikipedia:")
            #    print(result)
            #else:
            #    result = wikipedia.search(matchedX)
            #    if result:
            #        result = result[0]
            #        print("Wikipedia search result:")
            #        print(result)
            if result:
                # First two sentences of the page summary.
                resultStr.append(". ".join(result.summary.split(".")[:2])) # .encode("utf-8").split(".")[:2]
                logging.debug(resultStr)
                print(".".join(resultStr))
                # session.close()
                return ". ".join(resultStr)
    logging.debug("No Matches found!")
    print ("No Matches found!")
    return "Sorry, I cannot help you with that."
# close Neo4J session
#print("Type of result:", type(result))
#print("Test", list(result))
##for record in result:
## print("BoltStatementResult:")
## print(record["a"], record["r"], record["b"])
#logging.debug("-----------------------------------------------------------------------")
#print("-----------------------------------------------------------------------")
#print("")
#return ".".join(resultStr)
def test():
    """Send a handful of sample questions to the running XML-RPC server
    and print each answer together with its round-trip time."""
    sample_questions = (
        "Who is Obama?",
        "Who likes cherries?",
        "Who bought Apple?",
        "Tell me about Peter",
        "Who is the president of the United States of America?",
    )
    for question in sample_questions:
        started = time.time()
        proxy = xmlrpc.client.ServerProxy(
            "http://{}:{}".format(MODULES[MODULENAME].host, MODULES[MODULENAME].port),
            allow_none=True)
        answer = proxy.parse(question)
        if answer:
            print("Result:")
            print(answer)
        print("Processing time: ", time.time() - started)
        print("Result:", answer)
def parseConfiguration(conff=CONFFILE):
    """Read the given config.ini and update host/port/logfile in MODULES."""
    global MODULES
    config = configparser.ConfigParser()
    config.read(conff)
    # Walk the known module names and copy any configured option over.
    for moduleName in ("QAMatchingServer", "Neo4J", "StarDog"):
        if moduleName not in MODULES:
            continue
        section = MODULES[moduleName].inilabel
        if section not in config.sections():
            continue
        options = config[section]
        if "host" in options:
            MODULES[moduleName].host = options["host"]
        if "port" in options:
            MODULES[moduleName].port = int(options["port"])
        if "logfile" in options:
            MODULES[moduleName].logfile = options["logfile"]
def mainServer(port, host):
    """Start the Dispatcher in Server mode: expose parse() over XML-RPC
    and serve until interrupted."""
    print("Host:", host, "Port:", port)
    rpc_server = SimpleXMLRPCServer((host, int(port)), allow_none=True)
    rpc_server.register_introspection_functions()
    rpc_server.register_function(parse)
    logging.info('Serving over XML-RPC on {} port {}'.format(host, port))
    try:
        rpc_server.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the server.
        logging.info("Interrupt received, exiting.")
        sys.exit(0)
if __name__ == "__main__":
    # command line arguments overwrite config-file parameters
    parser = argparse.ArgumentParser(prog=MODULENAME, description='Command line arguments.', epilog='')
    parser.add_argument('-c', '--config', dest="conffile", default=CONFFILE,
                        help="Alternative " + CONFFILE + " file name")
    parser.add_argument('-t', '--test', dest='test', action='store_true', help="Run in test mode") # just a flag
    args = parser.parse_args()
    # Parse either the user-supplied config file or the default one.
    if args.conffile != CONFFILE:
        parseConfiguration(args.conffile)
    else:
        parseConfiguration()
    # start logging (logfile destination may have been set by the config)
    logging.basicConfig(filename=MODULES[MODULENAME].logfile,
                        filemode='w',
                        format='%(asctime)s - %(levelname)s - %(message)s',
                        level=logging.DEBUG)
    # loads the model; these module-level lists are read by parse()
    patternsList, cypherList = loadModel()
    # Either run the self-test client or start serving.
    if args.test:
        test()
    else:
        mainServer(MODULES[MODULENAME].port, MODULES[MODULENAME].host)
|
#!/usr/bin/env python3
"""Move raynor.obj into ceph_storage/ and interactively rename kerrigan.obj."""
import shutil
import os
# change current directory to "/home/student/mycode"
os.chdir('/home/student/mycode/')
'''
Calling shutil.move(source, destination) will move the file or folder at the path source to the path destination and will return a string of the absolute path of the new location. If destination points to a folder, the source file gets moved into destination and keeps its current filename.
'''
# Destination is a folder, so raynor.obj keeps its name inside ceph_storage/.
shutil.move('raynor.obj', 'ceph_storage/')
# Prompt the user for a new name for the kerrigan.obj file.
xname = input('What is the new name for kerrigan.obj? ')
# Rename the current kerrigan.obj file.
shutil.move('ceph_storage/kerrigan.obj', 'ceph_storage/' + xname)
|
import os, sys
from .Faresystem import Faresystem
from .Linki import Linki
from .Network import Network
from .NetworkException import NetworkException
from .PTSystem import PTSystem
from .PNRLink import PNRLink
from .Supplink import Supplink
# add ..\_static for dataTable import
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..","_static"))) # for dataTable
from .TransitAssignmentData import TransitAssignmentData ##
from .TransitCapacity import TransitCapacity
from .TransitLine import TransitLine
from .TransitLink import TransitLink
from .TransitNetwork import TransitNetwork
from .TransitParser import TransitParser
from .HighwayNetwork import HighwayNetwork
from .Logger import setupLogging, WranglerLogger
from .Node import Node
from .HwySpecsRTP import HwySpecsRTP
__all__ = ['NetworkException', 'setupLogging', 'WranglerLogger',
'Network', 'TransitAssignmentData', 'TransitNetwork', 'TransitLine', 'TransitParser',
'Node', 'TransitLink', 'Linki', 'PNRLink', 'Supplink', 'HighwayNetwork', 'HwySpecsRTP',
'TransitCapacity', 'Faresystem', 'PTSystem'
]
if __name__ == '__main__':
    # `time` is only needed by this ad-hoc driver, so import it locally;
    # the original referenced time.strftime without any import (NameError).
    import time

    # Timestamped log file pair: one at info level, one at debug level.
    LOG_FILENAME = "Wrangler_main_%s.info.LOG" % time.strftime("%Y%b%d.%H%M%S")
    setupLogging(LOG_FILENAME, LOG_FILENAME.replace("info", "debug"))
    # Smoke test: apply a few projects to a network and write it out.
    net = Network()
    net.cloneAndApplyProject(projectname="Muni_TEP")
    net.cloneAndApplyProject(projectname="Muni_CentralSubway", tag="1-latest", modelyear=2030)
    net.cloneAndApplyProject(projectname="BART_eBART")
    net.write(name="muni", writeEmptyFiles=False)
|
from flask import Flask, jsonify, url_for, make_response, request, abort
from rele import inicializaPlaca, definePinoComoSaida, escreveParaPorta, obterEstadoPorta
reles = []
def init():
    """Initialise the relay board: configure pins 7 and 11 as outputs,
    switch both off, and record their state in the global `reles` list."""
    pinos = (7, 11)
    inicializaPlaca()
    # Keep the original hardware call order: configure both pins first,
    # then drive both low, then read back their states.
    for pino in pinos:
        definePinoComoSaida(pino)
    for pino in pinos:
        escreveParaPorta(pino, 0)
    for pino in pinos:
        reles.append({'id': pino, 'ligado': bool(obterEstadoPorta(pino))})
# Configure the relay hardware once at import time, then create the Flask app.
init()
app = Flask(__name__)
@app.route('/api/v1/reles', methods=['GET'])
def getReles():
    """Return the full list of relays as JSON."""
    resposta = {'reles': reles}
    return jsonify(resposta)
@app.route('/api/v1/reles/<int:id>', methods=['GET'])
def getRele(id):
    """Return a single relay by id as JSON, or 404 if it does not exist."""
    encontrados = [r for r in reles if r['id'] == id]
    if not encontrados:
        abort(404)
    return jsonify({'rele': encontrados[0]})
#@app.route('/api/v1/reles/<int:id>', methods=['PUT'])
#def ascenderRele(id):
@app.route('/api/v1/reles', methods=['PUT'])
def alterarRele():
    """Update a relay's on/off state.

    Expects a JSON body with an integer 'id' and an optional boolean
    'ligado'.  Responds 400 on a malformed request, 404 for an unknown
    relay, otherwise writes the new state to the hardware port and
    returns the updated relay as JSON.
    """
    print('alterarRele() - ', request.json)
    # Validate the body *before* indexing into it: request.json is None
    # when the client did not send JSON, and the original code crashed
    # with a TypeError (HTTP 500) instead of answering 400 in that case.
    if not request.json:
        print('nao possui request.json - abortar')
        abort(400)
    if 'id' not in request.json:
        abort(400)
    rele = [rele for rele in reles if rele['id'] == request.json['id']]
    if len(rele) == 0:
        abort(404)
    if 'ligado' in request.json and type(request.json['ligado']) is not bool:
        print('abortar ', type(request.json['ligado']))
        abort(400)
    item = rele[0]
    # Default to the *target* relay's current state; the original used
    # reles[0] (always the first relay) when 'ligado' was omitted.
    item['ligado'] = request.json.get('ligado', item['ligado'])
    escreveParaPorta(item['id'], item['ligado'])
    # print('estado da porta', obterEstadoPorta(item['id']))
    return jsonify({'rele': item})
@app.route('/api/v1/reles/<int:id>', methods=['DELETE'])
def deleteRele(id):
    """Placeholder endpoint: DELETE is not implemented yet."""
    print(request.json)
    resultado = {'result': 'Funcao DELETE nao implementada'}
    return jsonify(resultado)
@app.route('/api/v1/reles/<int:id>', methods=['POST'])
def criarRele(id):
    """Placeholder endpoint: POST is not implemented yet."""
    print(request.json)
    resultado = {'result': 'Funcao POST nao implementada'}
    return jsonify(resultado)
@app.errorhandler(404)
def not_found(error):
    """Render 404 errors as JSON instead of Flask's default HTML page."""
    corpo = jsonify({'error': 'Not found'})
    return make_response(corpo, 404)
def makePublicRele(rele):
    """Return a copy of `rele` where the internal 'id' field is replaced
    by a public, absolute URI for the resource."""
    publico = {}
    for campo, valor in rele.items():
        if campo == 'id':
            publico['uri'] = url_for('getRele', id=rele['id'], _external=True)
        else:
            publico[campo] = valor
    return publico
if __name__ == '__main__':
    # Listen on all interfaces so the relay API is reachable over the LAN.
    # NOTE(review): debug=True should be disabled outside development.
    app.run(debug=True, host='0.0.0.0')
|
import datetime as dt
from django.test import TestCase, Client
from django.utils import timezone
from .models import *
from .scheduler import *
from .statistics import *
class TaskModelTests(TestCase):
    """Tests for the state-changing helper methods on the Task model."""

    def test_mark_done_on_todo_task(self):
        """Marking a todo task done sets the flag and a completion time."""
        # Create a task that is todo
        todo_task = Task(done=False)
        # Bracket the call with two timestamps
        before = timezone.now()
        todo_task.mark_done()
        after = timezone.now()
        # Check that the task is indeed marked as done
        self.assertIs(todo_task.done, True)
        # Check that the task was completed between the two timestamps
        not_too_early = bool(todo_task.completion_time >= before)
        not_too_late = bool(todo_task.completion_time <= after)
        self.assertIs(not_too_early, True)
        self.assertIs(not_too_late, True)

    def test_mark_todo_on_done_task(self):
        """Marking a done task todo clears the flag and completion time."""
        done_task = Task(done=True)
        done_task.mark_todo()
        self.assertIs(done_task.done, False)
        # assertEqual, not the deprecated assertEquals alias
        # (removed from unittest in Python 3.12).
        self.assertEqual(done_task.completion_time, None)

    def test_is_overdue_on_not_overdue_task(self):
        """A task due tomorrow is not overdue."""
        non_overdue_task = Task(
            due_date=dt.date.today() + dt.timedelta(days=1),
            due_time=dt.time(hour=0, minute=0),
        )
        self.assertIs(non_overdue_task.is_overdue(), False)

    def test_is_overdue_on_overdue_task(self):
        """A task due yesterday is overdue."""
        overdue_task = Task(
            due_date=dt.date.today() - dt.timedelta(days=1),
            due_time=dt.time(hour=0, minute=0),
        )
        self.assertIs(overdue_task.is_overdue(), True)

    def test_alter_time_spent(self):
        """Basic check that alter_time_spent accumulates time."""
        no_time_task = Task()
        no_time_task.alter_time_spent(dt.timedelta(minutes=10))
        self.assertEqual(no_time_task.time_spent, dt.timedelta(minutes=10))

    def test_alter_time_spent_negative(self):
        """Reducing time spent below zero clamps it at zero."""
        ten_minute_task = Task(time_spent=dt.timedelta(minutes=10))
        # Use the explicit dt. prefix: the bare `timedelta` in the original
        # only resolved through a wildcard import.
        ten_minute_task.alter_time_spent(dt.timedelta(minutes=-20))
        # Time spent should now be 0 minutes, rather than -10
        self.assertEqual(ten_minute_task.time_spent, dt.timedelta(minutes=0))
class EventModelTests(TestCase):
    """Exercise Event.does_clash with disjoint and overlapping events."""

    def test_same_day_no_time_overlap(self):
        """Consecutive same-day events that merely touch do not clash."""
        evt_first = Event(
            date=timezone.now().date(),
            start_time=(timezone.now() - dt.timedelta(minutes=10)).time(),
            end_time=timezone.now().time(),
        )
        evt_second = Event(
            date=timezone.now().date(),
            start_time=timezone.now().time(),
            end_time=(timezone.now() + dt.timedelta(minutes=10)).time(),
        )
        # Clashing must be symmetric: check both directions.
        self.assertIs(evt_first.does_clash(evt_second), False)
        self.assertIs(evt_second.does_clash(evt_first), False)

    def test_different_day_overlapping_time(self):
        """Time-overlapping events on different days do not clash."""
        evt_today = Event(
            date=timezone.now().date(),
            start_time=timezone.now().time(),
            end_time=(timezone.now() + dt.timedelta(minutes=10)).time(),
        )
        evt_tomorrow = Event(
            date=(timezone.now() + dt.timedelta(days=1)).date(),
            start_time=(timezone.now() - dt.timedelta(minutes=5)).time(),
            end_time=(timezone.now() + dt.timedelta(minutes=5)).time(),
        )
        # Clashing must be symmetric: check both directions.
        self.assertIs(evt_today.does_clash(evt_tomorrow), False)
        self.assertIs(evt_tomorrow.does_clash(evt_today), False)

    def test_overlap_one_end(self):
        """Events overlapping at one end clash, in both directions."""
        evt_later = Event(
            date=timezone.now().date(),
            start_time=timezone.now().time(),
            end_time=(timezone.now() + dt.timedelta(minutes=10)).time(),
        )
        evt_earlier = Event(
            date=timezone.now().date(),
            start_time=(timezone.now() - dt.timedelta(minutes=5)).time(),
            end_time=(timezone.now() + dt.timedelta(minutes=5)).time(),
        )
        self.assertIs(evt_later.does_clash(evt_earlier), True)
        self.assertIs(evt_earlier.does_clash(evt_later), True)

    def test_overlap_both_ends(self):
        """An event contained within another clashes, in both directions."""
        evt_inner = Event(
            date=timezone.now().date(),
            start_time=timezone.now().time(),
            end_time=(timezone.now() + dt.timedelta(minutes=10)).time(),
        )
        evt_outer = Event(
            date=timezone.now().date(),
            start_time=(timezone.now() - dt.timedelta(minutes=5)).time(),
            end_time=(timezone.now() + dt.timedelta(minutes=15)).time(),
        )
        self.assertIs(evt_inner.does_clash(evt_outer), True)
        self.assertIs(evt_outer.does_clash(evt_inner), True)
class SchedulerTests(TestCase):
    """Tests for the update_schedule() scheduler.

    Uses Model.objects.count() instead of len(Model.objects.all()) so the
    count happens in the database, and assertEqual instead of the
    deprecated assertEquals alias (removed in Python 3.12).
    """

    def test_num_timeslots(self):
        """The scheduler creates one timeslot per event and per routine."""
        # These numbers should not be too large so that they can all fit in one day.
        num_events = 10
        num_routines = 2
        # Create a bunch of events
        for i in range(num_events):
            event = Event(
                date=timezone.now().date(),
                start_time=dt.time(hour=i),
                end_time=dt.time(hour=i, minute=30),
            )
            # They need to be saved to the database so the scheduler can see them
            event.save()
        # Ditto for routines
        for i in range(num_routines):
            routine = Routine(
                day=timezone.now().date().weekday(),
                start_time=dt.time(hour=i + num_events),
                end_time=dt.time(hour=i + num_events, minute=30),
            )
            routine.save()
        # Run the scheduler
        update_schedule(timezone.now().date().weekday())
        # Check that the number of timeslots created is the total no. events and routines
        self.assertEqual(TimeSlot.objects.count(), num_events + num_routines)

    def test_num_timeslots_with_tasks(self):
        """As above, but tasks also get a timeslot each."""
        # These numbers should not be too large so that they can all fit in one day.
        num_events = 10
        num_routines = 2
        num_tasks = 10
        # Create a bunch of events
        for i in range(num_events):
            event = Event(
                date=timezone.now().date(),
                start_time=dt.time(hour=i),
                end_time=dt.time(hour=i, minute=30),
            )
            # They need to be saved to the database so the scheduler can see them
            event.save()
        # Ditto for routines
        for i in range(num_routines):
            routine = Routine(
                day=timezone.now().date().weekday(),
                start_time=dt.time(hour=i + num_events),
                end_time=dt.time(hour=i + num_events, minute=30),
            )
            routine.save()
        # Ditto for tasks; zero time estimates so they can always be scheduled.
        for i in range(num_tasks):
            task = Task(time_estimate=dt.timedelta(minutes=0))
            task.save()
        # Run the scheduler
        update_schedule(timezone.now().date().weekday())
        # Check the total number of timeslots
        self.assertEqual(
            TimeSlot.objects.count(), num_events + num_routines + num_tasks
        )

    def test_unschedulable_task(self):
        """A task too long for any free gap is not scheduled."""
        # Have the first event of the day from 09:30 to 10:00
        first_event = Event(
            date=timezone.now().date(),
            start_time=dt.time(hour=9, minute=30),
            end_time=dt.time(hour=10),
        )
        first_event.save()
        # Have the last event of the day from 10:30 to 11:00
        last_event = Event(
            date=timezone.now().date(),
            start_time=dt.time(hour=10, minute=30),
            end_time=dt.time(hour=11),
        )
        last_event.save()
        # If the task is longer than 30 minutes, there won't be room to schedule it
        task = Task(time_estimate=dt.timedelta(hours=1))
        task.save()
        # Run the scheduler
        update_schedule(timezone.now().date().weekday())
        # There should only be two timeslots, as the task has not been scheduled
        self.assertEqual(TimeSlot.objects.count(), 2)

    def test_schedulable_task(self):
        """A task short enough for the free gap is scheduled."""
        # Have the first event of the day from 09:30 to 10:00
        first_event = Event(
            date=timezone.now().date(),
            start_time=dt.time(hour=9, minute=30),
            end_time=dt.time(hour=10),
        )
        first_event.save()
        # Have the last event of the day from 10:30 to 11:00
        last_event = Event(
            date=timezone.now().date(),
            start_time=dt.time(hour=10, minute=30),
            end_time=dt.time(hour=11),
        )
        last_event.save()
        # If the task is shorter than 30 minutes, there is room to schedule it
        task = Task(time_estimate=dt.timedelta(minutes=10))
        task.save()
        # Run the scheduler
        update_schedule(timezone.now().date().weekday())
        # Three timeslots: the two events and the task
        self.assertEqual(TimeSlot.objects.count(), 3)

    def test_end_before_start_event(self):
        """An event that ends before it starts gets no timeslot."""
        # This event starts at 10:00 and finishes at 09:00
        invalid_event = Event(
            date=timezone.now().date(),
            start_time=dt.time(hour=10),
            end_time=dt.time(hour=9),
        )
        invalid_event.save()
        # Also make a valid event
        valid_event = Event(
            date=timezone.now().date(),
            start_time=dt.time(hour=11),
            end_time=dt.time(hour=12),
        )
        valid_event.save()
        # Run the scheduler
        update_schedule(timezone.now().date().weekday())
        # Only the valid event should have produced a timeslot
        self.assertEqual(TimeSlot.objects.count(), 1)
class SpecificStatisticsTests(TestCase):
    """Tests for generate_specific_stats() on individual tasks.

    Uses assertEqual instead of the deprecated assertEquals alias
    (removed in Python 3.12).
    """

    def test_completion_delta_stats(self):
        """completion_delta is the absolute gap between due and completion."""
        # Single reference time to avoid small discrepancies between calls
        time = timezone.now()
        # Test large and small differences between due and completion
        late_one_day = Task(
            done=True,
            due_date=(time - dt.timedelta(days=1)).date(),
            due_time=time.time(),
            completion_time=time,
        )
        early_one_day = Task(
            done=True,
            due_date=time.date(),
            due_time=time.time(),
            completion_time=time + dt.timedelta(days=1),
        )
        late_one_minute = Task(
            done=True,
            due_date=time.date(),
            due_time=(time - dt.timedelta(minutes=1)).time(),
            completion_time=time,
        )
        early_one_minute = Task(
            done=True,
            due_date=time.date(),
            due_time=time.time(),
            completion_time=time + dt.timedelta(minutes=1),
        )
        exactly_due = Task(
            done=True, due_date=time.date(), due_time=time.time(), completion_time=time,
        )
        tasks = [
            late_one_day,
            early_one_day,
            late_one_minute,
            early_one_minute,
            exactly_due,
        ]
        # Run the statistics generator on them
        generate_specific_stats(tasks)
        # Check that the values are what we would expect
        self.assertEqual(late_one_day.completion_delta, dt.timedelta(days=1))
        self.assertEqual(early_one_day.completion_delta, dt.timedelta(days=1))
        self.assertEqual(late_one_minute.completion_delta, dt.timedelta(minutes=1))
        self.assertEqual(early_one_minute.completion_delta, dt.timedelta(minutes=1))
        self.assertEqual(exactly_due.completion_delta, dt.timedelta(minutes=0))

    def test_estimate_accuracy_stats(self):
        """estimate_accuracy is the percentage error of the estimate."""
        # Test large and small differences between time spent and estimated
        second_too_long = Task(
            done=True,
            time_estimate=dt.timedelta(seconds=11),
            time_spent=dt.timedelta(seconds=10),
        )
        second_too_short = Task(
            done=True,
            time_estimate=dt.timedelta(seconds=9),
            time_spent=dt.timedelta(seconds=10),
        )
        hour_too_long = Task(
            done=True,
            time_estimate=dt.timedelta(hours=11),
            time_spent=dt.timedelta(hours=10),
        )
        hour_too_short = Task(
            done=True,
            time_estimate=dt.timedelta(hours=9),
            time_spent=dt.timedelta(hours=10),
        )
        exactly_estimate = Task(
            done=True,
            time_estimate=dt.timedelta(minutes=10),
            time_spent=dt.timedelta(minutes=10),
        )
        tasks = [
            second_too_long,
            second_too_short,
            hour_too_long,
            hour_too_short,
            exactly_estimate,
        ]
        # Run the stats generator
        generate_specific_stats(tasks)
        # Check the values
        self.assertEqual(second_too_long.estimate_accuracy, 10)
        self.assertEqual(second_too_short.estimate_accuracy, 10)
        self.assertEqual(hour_too_long.estimate_accuracy, 10)
        self.assertEqual(hour_too_short.estimate_accuracy, 10)
        self.assertEqual(exactly_estimate.estimate_accuracy, 0)

    def test_specific_stats_null(self):
        """Null / zero attributes produce well-defined statistics."""
        time = timezone.now()
        # Create tasks with unusual attributes
        null_complete = Task(done=True, due_date=time.date(), due_time=time.time(),)
        zero_spent_zero_estimate = Task(done=True)
        zero_spent_nonzero_estimate = Task(
            done=True, time_estimate=dt.timedelta(minutes=10)
        )
        # Run the generator
        generate_specific_stats(
            [null_complete, zero_spent_zero_estimate, zero_spent_nonzero_estimate]
        )
        # Check the stats
        self.assertEqual(null_complete.completion_delta, dt.timedelta(minutes=0))
        self.assertEqual(zero_spent_zero_estimate.estimate_accuracy, 0)
        self.assertEqual(zero_spent_nonzero_estimate.estimate_accuracy, 100)
class GeneralStatisticsTests(TestCase):
    """Tests for generate_overall_stats() over a fixed task population.

    Uses assertEqual instead of the deprecated assertEquals alias
    (removed in Python 3.12) and the explicit dt.timedelta spelling
    instead of a name pulled in by a wildcard import.
    """

    # NOTE: deliberately named `setup` (not unittest's setUp hook) and
    # called explicitly from each test with the desired task count.
    def setup(self, n):
        """Create n tasks completed today and n completed yesterday."""
        # What's the time?
        time = timezone.now()
        yesterday = time - dt.timedelta(days=1)
        # Take precautionary measures
        Task.objects.all().delete()
        # Tasks done today, in less than estimated time and before their due date
        for i in range(n):
            Task(
                done=True,
                completed_on_time=True,
                completed_in_time=True,
                completion_time=time,
                time_spent=dt.timedelta(minutes=1),
            ).save()
        # Tasks done yesterday, over the estimated time and overdue
        for i in range(n):
            Task(
                done=True,
                completed_on_time=False,
                completed_in_time=False,
                completion_time=yesterday,
                time_spent=dt.timedelta(minutes=1),
            ).save()

    def test_num_day(self):
        """num_day counts only today's completed tasks."""
        n = 10
        self.setup(n)
        stats = generate_overall_stats()
        self.assertEqual(stats["num_day"], n)

    def test_num_week(self):
        """num_week counts both today's and yesterday's tasks."""
        n = 10
        self.setup(n)
        stats = generate_overall_stats()
        self.assertEqual(stats["num_week"], 2 * n)

    def test_time_day(self):
        """time_day sums time spent on today's tasks."""
        n = 10
        self.setup(n)
        stats = generate_overall_stats()
        self.assertEqual(stats["time_day"], dt.timedelta(minutes=n))

    def test_time_week(self):
        """time_week sums time spent over the whole week."""
        n = 10
        self.setup(n)
        stats = generate_overall_stats()
        self.assertEqual(stats["time_week"], dt.timedelta(minutes=2 * n))

    def test_on_time(self):
        """Exactly half the tasks were completed on time."""
        n = 10
        self.setup(n)
        stats = generate_overall_stats()
        self.assertEqual(stats["on_time"], 50)

    def test_in_time(self):
        """Exactly half the tasks were completed within their estimate."""
        n = 10
        self.setup(n)
        stats = generate_overall_stats()
        self.assertEqual(stats["in_time"], 50)
|
# JTSK-350112
# regexw.py
# Taiyr Begeyev
# t.begeyev@jacobs-university.de
import csv
import re
import datetime
def extract_time(row):
    """Build a datetime from a CSV row: year, month, day in columns 1-3
    and an "H:M" clock string in column 4."""
    year = int(row[1])
    month = int(row[2])
    day = int(row[3])
    hour_str, minute_str = row[4].split(':')
    return datetime.datetime(year, month, day, int(hour_str), int(minute_str))
def extract_temp(row):
    """Return the temperature (column 5) as a float, or None if the cell
    does not hold a number (e.g. empty or a quality flag)."""
    t = row[5]
    try:
        return float(t)
    # Only conversion failures mean "missing value"; the original bare
    # except also swallowed KeyboardInterrupt/SystemExit.
    except (TypeError, ValueError):
        return None
def extract_rel_humidity(row):
    """Return the relative humidity (column 9) as a float, or None if the
    cell does not hold a number."""
    rh = row[9]
    try:
        return float(rh)
    # Only conversion failures mean "missing value"; the original bare
    # except also swallowed KeyboardInterrupt/SystemExit.
    except (TypeError, ValueError):
        return None
def extract_wind_dir(row):
    """Return the wind direction (column 11, stored in tens of degrees)
    converted to degrees, or None if the cell does not hold a number."""
    wd = row[11]
    try:
        return float(wd) * 10.0
    # Only conversion failures mean "missing value"; the original bare
    # except also swallowed KeyboardInterrupt/SystemExit.
    except (TypeError, ValueError):
        return None
def extract_wind_speed(row):
    """Return the wind speed (column 13) as a float, or None if the cell
    does not hold a number."""
    sp = row[13]
    try:
        return float(sp)
    # Only conversion failures mean "missing value"; the original bare
    # except also swallowed KeyboardInterrupt/SystemExit.
    except (TypeError, ValueError):
        return None
def extract_pressure(row):
    """Return the station pressure (column 17) as a float, or None if the
    cell does not hold a number."""
    p = row[17]
    try:
        return float(p)
    # Only conversion failures mean "missing value"; the original bare
    # except also swallowed KeyboardInterrupt/SystemExit.
    except (TypeError, ValueError):
        return None
def extract_description(row):
    """Return the free-text weather description (column 23) unchanged."""
    description = row[23]
    return description
# --- Station metadata: the first five header rows are key/value pairs ---
f = open("oct.csv", "r")
reader = csv.reader(f)
name = next(reader)[1]
print('name=', name)
prov = next(reader)[1]
print('prov=', prov)
latitude = next(reader)[1]
print('latitude=', latitude)
longitude = next(reader)[1]
print('longitude=', longitude)
elevation = next(reader)[1]
print('elevation=', elevation)
# skip to line 16
# the line number of the input file is maintained in line_num
while reader.line_num < 16 :
    next(reader)
headers = next(reader)
print(headers)
# --- Write the selected columns out to a new CSV ---
fout = open("wdata3.csv", "w")
writer = csv.writer(fout)
writer.writerow( ('date and time', ' temp', ' humidity', ' wind', ' direction'))
for row in reader:
    for x in row:
        # NOTE(review): re.sub returns a *string*, but every extract_*
        # helper below indexes its argument like a list of columns, so
        # extract_time(found) reads single characters of the cell text.
        # This looks like a bug: presumably the substitution was meant to
        # fix the date cell of `row` and the extract_* calls should take
        # `row` itself -- TODO confirm intent before fixing.
        found = re.sub(".-7-15.", "2019-7-15", x)
        print(found)
        if found:
            t = extract_time(found)
            temp = extract_temp(found)
            h = extract_rel_humidity(found)
            ws = extract_wind_speed(found)
            wd = extract_wind_dir(found)
            writer.writerow((t, temp, h, ws, wd))
fout.close()
f.close()
#!/usr/bin/python3
# Print 00 through 99 as zero-padded two-digit numbers, comma-separated,
# on a single line (identical output to the original loop + final print).
print(', '.join('{:02d}'.format(n) for n in range(100)))
|
import sbol3
import tyto
import labop
#############################################
# Helper functions
# set up the document
doc = sbol3.Document()
sbol3.set_namespace("https://sd2e.org/LabOP/")
#############################################
# Import the primitive libraries
print("Importing libraries")
labop.import_library("liquid_handling")
labop.import_library("plate_handling")
labop.import_library("spectrophotometry")
# this should really get pulled into a common library somewhere
rpm = sbol3.UnitDivision(
"rpm",
name="rpm",
symbol="rpm",
label="revolutions per minute",
numerator=tyto.OM.revolution,
denominator=tyto.OM.minute,
)
doc.add(rpm)
#############################################
# Create the protocols
print("Constructing measurement sub-protocols")
# This will be used 10 times generating "OD_Plate_1" .. "OD_Plate_9"
split_and_measure = labop.Protocol(
"SplitAndMeasure", name="Split samples, dilute, and measure"
)
split_and_measure.description = """
Subprotocol to split a portion of each sample in a plate into another plate, diluting
with PBS, then measure OD and fluorescence from that other plate
"""
doc.add(split_and_measure)
# plate for split-and-measure subroutine
od_plate = labop.Container(
name="OD Plate", type=tyto.NCIT.Microplate, max_coordinate="H12"
)
split_and_measure.locations = {od_plate}
# Inputs: collection of samples, pbs_source
samples = split_and_measure.add_input(
name="samples",
description="Samples to measure",
type="http://bioprotocols.org/labop#LocatedSamples",
)
pbs_source = split_and_measure.add_input(
name="pbs",
description="Source for PBS",
type="http://bioprotocols.org/labop#LocatedSamples",
)
# subprotocol steps
s_p = split_and_measure.execute_primitive(
"Dispense",
source=pbs_source,
destination=od_plate,
amount=sbol3.Measure(90, tyto.OM.microliter),
)
split_and_measure.add_flow(
split_and_measure.initial(), s_p
) # dispensing OD can be a first action
s_u = split_and_measure.execute_primitive("Unseal", location=samples)
split_and_measure.add_flow(
split_and_measure.initial(), s_u
) # unsealing the growth plate can be a first action
s_t = split_and_measure.execute_primitive(
"TransferInto",
source=samples,
destination=s_p.output_pin("samples"),
amount=sbol3.Measure(10, tyto.OM.microliter),
mixCycles=sbol3.Measure(10, tyto.OM.number),
)
split_and_measure.add_flow(
s_u, s_t
) # transfer can't happen until growth plate is unsealed
# add the measurements, in parallel
ready_to_measure = labop.Fork()
split_and_measure.activities.append(ready_to_measure)
split_and_measure.add_flow(s_t.output_pin("samples"), ready_to_measure)
measurement_complete = labop.Join()
split_and_measure.activities.append(measurement_complete)
s_a = split_and_measure.execute_primitive(
"MeasureAbsorbance",
samples=ready_to_measure,
wavelength=sbol3.Measure(600, tyto.OM.nanometer),
numFlashes=sbol3.Measure(25, tyto.OM.number),
)
v_a = split_and_measure.add_output("absorbance", s_a.output_pin("measurements"))
split_and_measure.add_flow(v_a, measurement_complete)
gains = {0.1, 0.2, 0.16}
for g in gains:
s_f = split_and_measure.execute_primitive(
"MeasureFluorescence",
samples=ready_to_measure,
excitationWavelength=sbol3.Measure(488, tyto.OM.nanometer),
emissionBandpassWavelength=sbol3.Measure(530, tyto.OM.nanometer),
numFlashes=sbol3.Measure(25, tyto.OM.number),
gain=sbol3.Measure(g, tyto.OM.number),
)
v_f = split_and_measure.add_output(
"fluorescence_" + str(g), s_f.output_pin("measurements")
)
split_and_measure.add_flow(v_f, measurement_complete)
s_c = split_and_measure.execute_primitive("Cover", location=od_plate)
split_and_measure.add_flow(measurement_complete, s_c)
split_and_measure.add_flow(s_c, split_and_measure.final())
s_s = split_and_measure.execute_primitive(
"Seal", location=samples, type="http://autoprotocol.org/lids/breathable"
) # need to turn this into a proper ontology
split_and_measure.add_flow(measurement_complete, s_s)
split_and_measure.add_flow(s_s, split_and_measure.final())
print("Measurement sub-protocol construction complete")
overnight_od_measure = labop.Protocol(
"OvernightODMeasure", name="Split samples and measure, without dilution"
)
overnight_od_measure.description = """
Subprotocol to split a portion of each sample in an unsealed plate into another plate, then measure OD and fluorescence from that other plate
"""
doc.add(overnight_od_measure)
# plate for split-and-measure subroutine
od_plate = labop.Container(
name="OD Plate", type=tyto.NCIT.Microplate, max_coordinate="H12"
)
overnight_od_measure.locations = {od_plate}
# Input: collection of samples
samples = overnight_od_measure.add_input(
name="samples",
description="Samples to measure",
type="http://bioprotocols.org/labop#LocatedSamples",
)
# subprotocol steps
s_t = overnight_od_measure.execute_primitive(
"Transfer",
source=samples,
destination=od_plate,
amount=sbol3.Measure(200, tyto.OM.microliter),
)
overnight_od_measure.add_flow(overnight_od_measure.initial(), s_t) # first action
# add the measurements, in parallel
ready_to_measure = labop.Fork()
overnight_od_measure.activities.append(ready_to_measure)
overnight_od_measure.add_flow(s_t.output_pin("samples"), ready_to_measure)
measurement_complete = labop.Join()
overnight_od_measure.activities.append(measurement_complete)
s_a = overnight_od_measure.execute_primitive(
"MeasureAbsorbance",
samples=ready_to_measure,
wavelength=sbol3.Measure(600, tyto.OM.nanometer),
numFlashes=sbol3.Measure(25, tyto.OM.number),
)
v_a = overnight_od_measure.add_output("absorbance", s_a.output_pin("measurements"))
overnight_od_measure.add_flow(v_a, measurement_complete)
gains = {0.1, 0.2, 0.16}
for g in gains:
s_f = overnight_od_measure.execute_primitive(
"MeasureFluorescence",
samples=ready_to_measure,
excitationWavelength=sbol3.Measure(488, tyto.OM.nanometer),
emissionBandpassWavelength=sbol3.Measure(530, tyto.OM.nanometer),
numFlashes=sbol3.Measure(25, tyto.OM.number),
gain=sbol3.Measure(g, tyto.OM.number),
)
v_f = overnight_od_measure.add_output(
"fluorescence_" + str(g), s_f.output_pin("measurements")
)
overnight_od_measure.add_flow(v_f, measurement_complete)
s_c = overnight_od_measure.execute_primitive("Cover", location=od_plate)
overnight_od_measure.add_flow(measurement_complete, s_c)
overnight_od_measure.add_flow(s_c, overnight_od_measure.final())
overnight_od_measure.add_flow(measurement_complete, overnight_od_measure.final())
print("Overnight measurement sub-protocol construction complete")
#############################################
# Now the full protocol
print("Making protocol")
protocol = labop.Protocol("GrowthCurve", name="SD2 Yeast growth curve protocol")
protocol.description = """
Protocol from SD2 Yeast States working group for studying growth curves:
Grow up cells and read with plate reader at n-hour intervals
"""
doc.add(protocol)
# Create the materials to be provisioned
PBS = sbol3.Component("PBS", "https://identifiers.org/pubchem.compound:24978514")
PBS.name = (
    "Phosphate-Buffered Saline" # I'd like to get this name from PubChem with tyto
)
doc.add(PBS)
# need to retrieve and convert this one
SC_media = sbol3.Component("SC_Media", "TBD", name="Synthetic Complete Media")
doc.add(SC_media)
SC_plus_dox = sbol3.Component(
    "SC_Media_plus_dox", "TBD", name="Synthetic Complete Media plus 40nM Doxycycline"
)
doc.add(SC_plus_dox)
protocol.material += {PBS, SC_media, SC_plus_dox}
## create the containers
# provisioning sources
pbs_source = labop.Container(name="PBS Source", type=tyto.NCIT.Bottle)
sc_source = labop.Container(
    name="SC Media + 40nM Doxycycline Source", type=tyto.NCIT.Bottle
)
om_source = labop.Container(name="Overnight SC Media Source", type=tyto.NCIT.Bottle)
# plates for the general protocol
overnight_plate = labop.Container(
    name="Overnight Growth Plate", type=tyto.NCIT.Microplate, max_coordinate="H12"
)
overnight_od_plate = labop.Container(
    name="Overnight Growth Plate", type=tyto.NCIT.Microplate, max_coordinate="H12"
)
growth_plate = labop.Container(
    name="Growth Curve Plate", type=tyto.NCIT.Microplate, max_coordinate="H12"
)
# NOTE(review): overnight_od_plate is created above but not added to
# protocol.locations — confirm whether that omission is intentional.
protocol.locations = {pbs_source, sc_source, om_source, overnight_plate, growth_plate}
# One input: a microplate full of strains
# TODO: change this to allow alternative places
strain_plate = protocol.add_input(
    name="strain_plate",
    description="Plate of strains to grow",
    type="http://bioprotocols.org/labop#LocatedSamples",
)
# input_plate = labop.Container(name='497943_4_UWBF_to_stratoes', type=tyto.NCIT.Microplate, max_coordinate='H12')
print("Constructing protocol steps")
# set up the sources
p_pbs = protocol.execute_primitive(
    "Provision",
    resource=PBS,
    destination=pbs_source,
    amount=sbol3.Measure(117760, tyto.OM.microliter),
)
protocol.add_flow(protocol.initial(), p_pbs) # start with provisioning
p_om = protocol.execute_primitive(
    "Provision",
    resource=SC_media,
    destination=om_source,
    amount=sbol3.Measure(98, tyto.OM.milliliter),
)
protocol.add_flow(protocol.initial(), p_om) # start with provisioning
p_scm = protocol.execute_primitive(
    "Provision",
    resource=SC_plus_dox,
    destination=sc_source,
    amount=sbol3.Measure(117200, tyto.OM.microliter),
)
protocol.add_flow(protocol.initial(), p_scm) # start with provisioning
# prep the overnight culture, then seal away the source plate again
s_d = protocol.execute_primitive(
    "Dispense",
    source=p_om.output_pin("samples"),
    destination=overnight_plate,
    amount=sbol3.Measure(500, tyto.OM.microliter),
)
s_u = protocol.execute_primitive("Unseal", location=strain_plate)
s_t = protocol.execute_primitive(
    "TransferInto",
    source=strain_plate,
    destination=s_d.output_pin("samples"),
    amount=sbol3.Measure(5, tyto.OM.microliter),
    mixCycles=sbol3.Measure(10, tyto.OM.number),
)
s_s = protocol.execute_primitive(
    "Seal", location=strain_plate, type="http://autoprotocol.org/lids/breathable"
) # need to turn this into a proper ontology
protocol.add_flow(s_u, s_t) # transfer can't happen until strain plate is unsealed ...
protocol.add_flow(s_t, s_s) # ... and must complete before we re-seal it
# run the overnight culture
overnight_samples = s_t.output_pin("samples")
# NOTE(review): this rebinds s_s (the strain-plate Seal above already has its
# flows attached, so no information is lost, but the reuse is easy to misread).
s_s = protocol.execute_primitive(
    "Seal", location=overnight_samples, type="http://autoprotocol.org/lids/breathable"
) # need to turn this into a proper ontology
s_i = protocol.execute_primitive(
    "Incubate",
    location=overnight_samples,
    temperature=sbol3.Measure(30, tyto.OM.get_uri_by_term("degree Celsius")),
    duration=sbol3.Measure(16, tyto.OM.hour),
    shakingFrequency=sbol3.Measure(350, rpm.identity),
)
protocol.add_flow(s_t, s_s) # sealing after transfer
protocol.add_flow(s_s, s_i) # incubation after sealing
# Check the OD after running overnight; note that this is NOT the same measurement process as for the during-growth measurements
s_u = protocol.execute_primitive(
    "Unseal", location=overnight_samples
) # added because using the subprotocol leaves a sealed plate
protocol.add_flow(s_i, s_u) # growth plate after measurement
s_m = protocol.execute_subprotocol(overnight_od_measure, samples=overnight_samples)
protocol.add_flow(s_u, s_m) # measurement after incubation and unsealing
# Set up the growth plate
s_d = protocol.execute_primitive(
    "Dispense",
    source=p_scm.output_pin("samples"),
    destination=growth_plate,
    amount=sbol3.Measure(700, tyto.OM.microliter),
)
# CONSISTENCY FIX: every other call in this file names the primitive with a
# string; the original used doc.find("TransferInto") here only.
s_t = protocol.execute_primitive(
    "TransferInto",
    source=overnight_samples,
    destination=s_d.output_pin("samples"),
    amount=sbol3.Measure(2, tyto.OM.microliter),
    mixCycles=sbol3.Measure(10, tyto.OM.number),
)
s_s = protocol.execute_primitive(
    "Seal", location=overnight_samples, type="http://autoprotocol.org/lids/breathable"
) # need to turn this into a proper ontology
protocol.add_flow(
    s_u, s_t
) # transfer can't happen until overnight plate is unsealed ...
protocol.add_flow(s_t, s_s) # ... and must complete before we re-seal it
protocol.add_flow(s_m, s_s) # ... as must its measurement
# run the step-by-step culture
growth_samples = s_t.output_pin("samples")
last_round = None
# sample_hours = [1, 3, 6, 9, 12, 15, 18, 21, 24] # Original: modified to be friendly to human execution
sample_hours = [1, 3, 6, 9, 18, 21, 24]
# Incubate between consecutive sampling points, measuring after each stretch.
previous_hour = 0
for hour in sample_hours:
    incubation_hours = hour - previous_hour  # time since the last sample
    previous_hour = hour
    s_i = protocol.execute_primitive(
        "Incubate",
        location=growth_samples,
        temperature=sbol3.Measure(30, tyto.OM.get_uri_by_term("degree Celsius")),
        duration=sbol3.Measure(incubation_hours, tyto.OM.hour),
        shakingFrequency=sbol3.Measure(350, rpm.identity),
    )
    s_m = protocol.execute_subprotocol(
        split_and_measure, samples=growth_samples, pbs=p_pbs.output_pin("samples")
    )
    if last_round:
        protocol.add_flow(last_round, s_i)  # chain each round after the previous one
    protocol.add_flow(s_i, s_m)  # measurement after incubation
    last_round = s_m
protocol.add_flow(last_round, protocol.final())
print("Protocol construction complete")
######################
# Invocation of protocol on a plate:;
# plate for invoking the protocol
# input_plate = labop.Container(name='497943_4_UWBF_to_stratoes', type=tyto.NCIT.Microplate, max_coordinate='H12')
print("Validating document")
# Validate once and reuse the report: the original called doc.validate()
# twice, re-running the whole validation pass just to read the warnings.
report = doc.validate()
for e in report.errors:
    print(e)
for w in report.warnings:
    print(w)
print("Writing document")
doc.write("test/testfiles/growth_curve.json", "json-ld")
doc.write("test/testfiles/growth_curve.ttl", "turtle")
print("Complete")
|
# Generated by Django 2.2.12 on 2021-04-25 14:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds an image upload field to the Task model.
    dependencies = [
        ('task', '0009_auto_20210425_1311'),
    ]
    operations = [
        migrations.AddField(
            model_name='task',
            name='image',
            # default='' backfills existing rows; uploads land under MEDIA_ROOT/images/
            field=models.ImageField(default='', upload_to='images/'),
        ),
    ]
|
import requests

# Trading date in the Jalali-calendar YYYYMMDD format TSETMC expects.
date = '13990507'
url = 'http://members.tsetmc.com/tsev2/excel/MarketWatchPlus.aspx?d=' + date
r = requests.get(url, allow_redirects=True)
r.raise_for_status()  # fail loudly instead of saving an HTML error page as .xlsx
# Context manager guarantees the file handle is flushed and closed
# (the original open(...).write(...) leaked the handle).
with open(date + '.xlsx', 'wb') as f:
    f.write(r.content)
|
segundos = int(input("Ingrese una cantidad en segundos: "))
# Accept 1..99999 (at most 5 digits, strictly positive) — same range the
# original len(str(...)) <= 5 check intended.
if 0 < segundos <= 99999:
    # divmod fixes the original subtract-loops' off-by-one: `while s > 3600`
    # left exactly 3600 s reported as 0 h 59 min 60 s.
    horas, resto = divmod(segundos, 3600)
    minutos, segundos = divmod(resto, 60)
    print("Horas:", horas, ". Minutos: ", minutos, ". Segundos: ", segundos, ".")
else:
    print("Numero fuera de rango: Mayor de 5 digitos o es negativo")
|
import numpy as np
import scipy.spatial.distance as dist
from permaviss.simplicial_complexes.vietoris_rips import vietoris_rips
def test_vietoris_rips():
    """Vietoris-Rips complex of the unit square's four corners."""
    points = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
    distances = dist.squareform(dist.pdist(points))
    sq2 = np.sqrt(2)
    # Expected values: 4 vertices, 6 edges, 4 triangles, 1 tetrahedron.
    expected_complex = [
        4,
        np.array([[0, 1], [0, 2], [1, 3], [2, 3], [1, 2], [0, 3]]),
        np.array([[0, 1, 3], [0, 2, 3], [1, 2, 3], [0, 1, 2]]),
        np.array([[0, 1, 2, 3]]),
        np.array([]),
    ]
    # Birth radii: edges of length 1 appear first, diagonals at sqrt(2).
    expected_R = [
        np.zeros(4),
        np.array([1, 1, 1, 1, sq2, sq2]),
        np.array([sq2, sq2, sq2, sq2]),
        np.array([sq2]),
        np.array([]),
    ]
    viRip, R = vietoris_rips(distances, 4, 4)
    print(viRip)
    assert viRip[0] == expected_complex[0]
    for expected, actual in zip(expected_complex[1:], viRip[1:]):
        assert np.array_equal(actual, expected)
    for expected, actual in zip(expected_R, R):
        assert np.array_equal(actual, expected)
|
# найти ивывести строки, содержащие двоичную запись числа, кратного 3.
import re
import sys
# DFA-derived regex: matches binary strings whose value is divisible by 3
# (the empty string also matches and is treated as 0).
pattern = r"^((1(01*0)*1|0)*)$"


def is_binary_multiple_of_3(line):
    """Return True if *line* is a binary string divisible by 3."""
    # BUG FIX: the original tested `f is not []`, which is always True
    # (identity against a fresh list); re.fullmatch states the intent directly.
    return re.fullmatch(pattern, line) is not None


if __name__ == "__main__":
    for line in sys.stdin:
        candidate = line.rstrip()
        if is_binary_multiple_of_3(candidate):
            print(candidate)
import torch
import numpy as np
class Resize_preprocess(object):
    """Callable transform that rescales a PIL.Image to a fixed (width, height)."""

    def __init__(self, size_w, size_h):
        # Stored as the (width, height) tuple PIL's resize() expects.
        self.size = (size_w, size_h)

    def __call__(self, img):
        """Return *img* resized to the configured size."""
        return img.resize(self.size)
class AverageMeter(object):
    """Track the latest value and running average of a scalar metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
def calculate_mean_std(loader):
    """Compute per-channel mean and std over all images in *loader*.

    :param loader: iterable of (batch, labels) pairs where each batch is a
        tensor of shape (N, 3, H, W) with values already scaled to [0, 1]
        by torchvision's ToTensor().
    :returns: (mean, std) as length-3 numpy arrays.
    """
    # BUG FIX: both accumulators were initialised with np.ones(3), biasing
    # the result; they must start at zero.
    channel_sum = np.zeros(3)
    cnt = 0
    for datas, _ in loader:
        cnt += len(datas)
        for data in datas:
            data = data.numpy()
            # per-channel mean of this image
            channel_sum += data.sum(1).sum(1) / np.prod(data.shape[1:])
    mean = channel_sum / cnt
    sq_err = np.zeros(3)
    _mean = mean.reshape([3, 1, 1])
    # BUG FIX: the second pass previously kept incrementing cnt, so the
    # variance was divided by 2*cnt; the image count does not change here.
    for datas, _ in loader:
        for data in datas:
            data = data.numpy()
            sq_err += ((data - _mean) ** 2).sum(1).sum(1) / np.prod(data.shape[1:])
    std = np.sqrt(sq_err / cnt)
    return mean, std
def no_strict_load_state_dict(net, state_dict):
    r"""Copies parameters and buffers from :attr:`state_dict` into
    this module and its descendants. If :attr:`strict` is ``True``, then
    the keys of :attr:`state_dict` must exactly match the keys returned
    by this module's :meth:`~torch.nn.Module.state_dict` function.
    Arguments:
        state_dict (dict): a dict containing parameters and
            persistent buffers.
        strict (bool, optional): whether to strictly enforce that the keys
            in :attr:`state_dict` match the keys returned by this module's
            :meth:`~torch.nn.Module.state_dict` function. Default: ``True``
    """
    # NOTE(review): these three lists are filled by _load_from_state_dict but
    # never inspected or returned — mismatched keys are silently ignored,
    # which is the point of the "no strict" behaviour.
    missing_keys = []
    unexpected_keys = []
    error_msgs = []
    # copy state_dict so _load_from_state_dict can modify it
    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata
    def load(module, prefix=''):
        # Recursively apply torch's internal loader with strict=False
        # (the 4th positional argument) for this module and all children.
        local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
        module._load_from_state_dict(
            state_dict, prefix, local_metadata, False, missing_keys, unexpected_keys, error_msgs)
        for name, child in module._modules.items():
            if child is not None:
                load(child, prefix + name + '.')
    load(net)
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import urllib
import re
import time
pre_url = 'http://movie.douban.com/top250?start='  # NOTE(review): unused below — leftover from a Top250 scraper?
top_urls = []  # NOTE(review): never populated
top_tag = re.compile(r'<span class="title">(.+?)</span>')  # NOTE(review): unused below
top_content = []  # NOTE(review): never populated
top_num = 1  # NOTE(review): never read
def getHtml2(url2):
    """Fetch *url2* and return its body decoded as UTF-8 (Python 2 urllib)."""
    response = urllib.urlopen(url2)
    return response.read().decode('utf-8')
def gettopic(html2):
    """Return the URL of the last group-topic link found in *html2*.

    :param html2: HTML source of a discussion listing page.
    :returns: the last matching topic URL, or None when the page contains
        no topic links (the original raised UnboundLocalError in that case
        and also kept a dead counter variable).
    """
    reg2 = r'http://www.douban.com/group/topic/\d+'
    topiclist = re.findall(reg2, html2)
    if not topiclist:
        return None
    return topiclist[-1]
def download(topic_page):
    """Download every large group-topic JPEG found in *topic_page*.

    Uses Python 2's urllib.urlretrieve and saves each image to a hard-coded
    desktop folder, named by the p-number embedded in its URL.
    Returns the urlretrieve result of the last image downloaded (or None when
    no image matched).
    """
    reg3=r'http://img3.douban.com/view/group_topic/large/public/.+\.jpg'
    imglist = re.findall(reg3,topic_page)
    i=1  # NOTE(review): incremented but never used
    download_img=None
    for imgurl in imglist:
        # The p-number (e.g. p1234567) uniquely names the image on disk.
        img_numlist = re.findall(r'p\d{7}',imgurl)
        for img_num in img_numlist:
            download_img = urllib.urlretrieve(imgurl,'/Users/wutaotao/Desktop/书shu/%s.jpg'%img_num)
            time.sleep(1)  # be polite to the server between downloads
            i+=1
            print (imgurl)
    return download_img
# Crawl discussion pages up to the requested page number, downloading the
# images of the last topic linked on each page.
page_end = int(input('请输入结束时的页码'))
num_end = page_end*25  # each listing page holds 25 topics; start= is an offset
num=0
page_num=1
while num<=num_end:
    html2 = getHtml2('http://www.douban.com/group/kaopulove/discussion?start=%d'%num)
    topicurl = gettopic(html2)
    topic_page = getHtml2(topicurl)
    download_img = download(topic_page)
    num = page_num * 25
    page_num += 1
# NOTE(review): a while/else with no break inside the loop — the else branch
# always runs after the loop ends, so this is just a completion message.
else:
    print ('采集成功!')
|
#!/usr/bin/env python
# pylint: disable=I0011,C0103,C0326
import os.path
import shutil
import dill
# Languages are a tuple with a full name and a short name.
# LANGUAGES = [("Haskell", "hs"), ("Javascript", "js"), ("MATLAB", "m")]
LANGUAGES = [("Cpp", "cpp"), ("R", "r"), ("Rcpp", "rcpp")]
# The ligatures dict associated to each language full name a list of substitution tuples.
# A substitution tuple consists of a tuple of glyph names to match, a function from the
# glyph names to the substituted glyph, and a string of additional ignores. Most glyphs are
# named according to the characters they replace, hence we use `"_".join`, but others are
# given by a constant function of the form `lambda xs: "glyph_name"`.
# NOTE: within each language, longer sequences are listed before shorter ones so
# that e.g. "===" is matched before "==".
LIGATURES = {
    "Haskell": [
        (("asterisk", "asterisk", "asterisk"), "_".join,
         " ignore sub slash asterisk' asterisk asterisk;\n"
         " ignore sub asterisk' asterisk asterisk slash;\n"),
        (("colon", "colon", "colon"), "_".join, ""),
        (("equal", "equal", "equal"), lambda xs: "equivalence_3", ""),
        (("equal", "equal", "greater"), "_".join, ""),
        (("equal", "less", "less"), "_".join, ""),
        (("greater", "equal", "greater"), "_".join, ""),
        (("greater", "greater", "equal"), "_".join, ""),
        (("greater", "greater", "greater"), "_".join, ""),
        (("greater", "greater", "hyphen"), "_".join, ""),
        (("hyphen", "greater", "greater"), "_".join, ""),
        (("hyphen", "less", "less"), "_".join, ""),
        (("less", "asterisk", "greater"), "_".join, ""),
        (("less", "bar", "greater"), "_".join, ""),
        (("less", "dollar", "greater"), "_".join, ""),
        (("less", "equal", "greater"), "_".join, ""),
        (("less", "equal", "less"), "_".join, ""),
        (("less", "hyphen", "greater"), "_".join, ""),
        (("less", "plus", "greater"), "_".join, ""),
        (("less", "less", "less"), "_".join, ""),
        (("period", "period", "period"), "_".join, ""),
        (("plus", "plus", "plus"), "_".join, ""),
        (("asterisk", "greater"), "_".join, ""),
        (("backslash", "backslash"), "_".join, ""),
        (("bar", "bar"), "_".join, ""),
        (("bar", "greater"), "_".join, ""),
        (("colon", "colon"), "_".join, ""),
        (("equal", "equal"), lambda xs: "equal_2", ""),
        (("equal", "greater"), "_".join, ""),
        (("exclam", "exclam"), "_".join, ""),
        (("greater", "greater"), "_".join, ""),
        (("greater", "hyphen"), "_".join, ""),
        (("hyphen", "greater"), "_".join, ""),
        (("hyphen", "less"), "_".join, ""),
        (("less", "asterisk"), "_".join, ""),
        (("less", "greater"), "_".join, ""),
        (("less", "bar"), "_".join, ""),
        (("less", "hyphen"), "_".join, ""),
        (("less", "less"), "_".join, ""),
        (("period", "period"), "_".join, ""),
        (("plus", "plus"), "_".join, ""),
        (("slash", "equal"), lambda xs: "not_equal_2", "")],
    "Javascript": [
        (("equal", "equal", "equal"), lambda xs: "equivalence_3", ""),
        (("exclam", "equal", "equal"), lambda xs: "not_equivalence_3", ""),
        (("greater", "greater", "greater"), "_".join, ""),
        (("less", "less", "less"), "_".join, ""),
        (("bar", "bar"), "_".join, ""),
        (("equal", "equal"), lambda xs: "equal_2", ""),
        (("equal", "greater"), "_".join, ""),
        (("exclam", "equal"), lambda xs: "not_equal_2", ""),
        (("greater", "greater"), "_".join, ""),
        (("less", "less"), "_".join, ""),
        (("plus", "plus"), "_".join, "")],
    "MATLAB": [
        (("equal", "equal"), lambda xs: "equal_2", ""),
        (("asciitilde", "equal"), lambda xs: "not_equal_2", "")],
    "Cpp": [
        (("less", "equal"), lambda xs: "less_inequality", ""),
        (("greater", "equal"), lambda xs: "greater_inequality", ""),
        (("ampersand", "ampersand"), "_".join, ""),
        (("hyphen", "greater"), "_".join, ""),
        (("less", "hyphen"), "_".join, ""),
        (("colon", "colon"), "_".join, ""),
        (("bar", "bar"), "_".join, ""),
        (("slash", "slash"), "_".join, ""),
        (("slash", "asterisk"), "_".join, ""),
        (("asterisk", "slash"), "_".join, ""),
        (("equal", "equal"), lambda xs: "equivalence_2", ""),
        (("exclam", "equal"), lambda xs: "not_equivalence_2", ""),
        (("greater", "greater"), "_".join, ""),
        (("less", "less"), "_".join, ""),
        (("plus", "plus"), "_".join, "")],
    "R": [
        (("period", "period", "period"), "_".join, ""),
        (("colon", "colon", "colon"), "_".join, ""),
        (("less", "less", "hyphen"), "_".join, ""),
        (("less", "equal"), lambda xs: "less_inequality", ""),
        (("greater", "equal"), lambda xs: "greater_inequality", ""),
        (("ampersand", "ampersand"), "_".join, ""),
        (("hyphen", "greater"), "_".join, ""),
        (("less", "hyphen"), "_".join, ""),
        (("colon", "colon"), "_".join, ""),
        (("bar", "bar"), "_".join, ""),
        (("numbersign", "numbersign"), "_".join, ""),
        (("equal", "equal"), lambda xs: "equivalence_2", ""),
        (("exclam", "equal"), lambda xs: "not_equivalence_2", "")],
    "Rcpp": [
        (("equal", "equal", "equal"), lambda xs: "equal_3", ""),
        (("exclam", "equal", "equal"), lambda xs: "not_equal_3", ""),
        (("period", "period", "period"), "_".join, ""),
        (("colon", "colon", "colon"), "_".join, ""),
        (("less", "equal", "greater"), "_".join, ""),
        (("less", "less", "hyphen"), "_".join, ""),
        (("asciitilde", "equal"), lambda xs: "approx_2", ""),
        (("less", "equal"), lambda xs: "less_inequality", ""),
        (("greater", "equal"), lambda xs: "greater_inequality", ""),
        (("ampersand", "ampersand"), "_".join, ""),
        (("hyphen", "greater"), "_".join, ""),
        (("less", "hyphen"), "_".join, ""),
        (("colon", "colon"), "_".join, ""),
        (("bar", "bar"), "_".join, ""),
        (("numbersign", "numbersign"), "_".join, ""),
        (("slash", "slash"), "_".join, ""),
        (("slash", "asterisk"), "_".join, ""),
        (("asterisk", "slash"), "_".join, ""),
        (("equal", "equal"), lambda xs: "equivalence_2", ""),
        (("equal", "greater"), "_".join, ""),
        (("exclam", "equal"), lambda xs: "not_equivalence_2", ""),
        (("greater", "greater"), "_".join, ""),
        (("less", "less"), "_".join, ""),
        (("plus", "plus"), "_".join, "")],
}
# Unused ignores from Hasklig:
# ("slash", "asterisk") :
# " ignore sub slash' asterisk slash;\n"
# " ignore sub asterisk slash' asterisk;\n"
# ("asterisk", "slash") :
# " ignore sub slash asterisk' slash;\n"
# " ignore sub asterisk' slash asterisk;\n"
# ("asterisk", "asterisk") :
# " ignore sub slash asterisk' asterisk;\n"
# " ignore sub asterisk' asterisk slash;\n")
# Weight folder names under Roman/ and Italic/ (one UFO per weight).
ROMAN_WEIGHTS = ["Black", "Bold", "ExtraLight", "Light", "Medium", "Regular", "Semibold"]
ITALIC_WEIGHTS = ["BlackIt", "BoldIt", "ExtraLightIt", "LightIt", "MediumIt", "It", "SemiboldIt"]
def to_rule(substitution_tuple):
    """Render one substitution tuple as an OpenType feature-file lookup."""
    glyphs, replace_function, ignores = substitution_tuple
    arity = len(glyphs)
    # Templates use placeholder digits 1..n for the matched glyphs and 0 for
    # the combined ligature glyph; unsupported arities yield an empty rule.
    if arity == 2:
        template = (" lookup 1_2 {\n"
                    " ignore sub 1 1' 2;\n"
                    " ignore sub 1' 2 2;\n"
                    + ignores +
                    " sub LIG 2' by 0;\n"
                    " sub 1' 2 by LIG;\n"
                    " } 1_2;\n\n")
    elif arity == 3:
        template = (" lookup 1_2_3 {\n"
                    " ignore sub 1 1' 2 3;\n"
                    " ignore sub 1' 2 3 3;\n"
                    + ignores +
                    " sub LIG LIG 3' by 0;\n"
                    " sub LIG 2' 3 by LIG;\n"
                    " sub 1' 2 3 by LIG;\n"
                    " } 1_2_3;\n\n")
    elif arity == 4:
        template = (" lookup 1_2_3_4 {\n"
                    " ignore sub 1 1' 2 3 4;\n"
                    " ignore sub 1' 2 3 4 4;\n"
                    + ignores +
                    " sub LIG LIG LIG 4' by 0;\n"
                    " sub LIG LIG 3' 4 by LIG;\n"
                    " sub 1' 2 3 4 by LIG;\n"
                    " } 1_2_3_4;\n\n")
    else:
        template = ""
    # Substitute the numbered placeholders first: the combined glyph's name
    # may itself contain digits, so 0 must be replaced last.
    for index, glyph in enumerate(glyphs, start=1):
        template = template.replace(str(index), glyph)
    return template.replace("0", replace_function(glyphs))
def generate_config_file(languages):
    """Serialize the build configuration to config.dill.

    NOTE(review): the file is opened in text mode ('w'); dill emits bytes on
    Python 3, so this only works under Python 2 (consistent with the print
    statements in __main__) — confirm before porting.
    """
    with open('config.dill', 'w') as f:
        dill.dump({"LANGUAGES": languages,
                   "LIGATURES": LIGATURES,
                   "ITALIC_WEIGHTS": ITALIC_WEIGHTS,
                   "ROMAN_WEIGHTS": ROMAN_WEIGHTS,
                  }, f)
def generate_ligature_files(languages):
    """Write a ligatures.<short>.fea calt-feature file for every language."""
    for full_name, short_name in languages:
        rules = [to_rule(ligature) for ligature in LIGATURES[full_name]]
        with open("ligatures.%s.fea" % short_name, 'w') as f:
            f.write("feature calt {\n")
            f.writelines(rules)
            f.write("} calt;\n")
def _generate_fontinfo_for(style_dir, weights, languages):
    # Write per-language fontinfo plists for every weight under style_dir.
    fontinfo_file = "fontinfo.plist"
    generic_file = "fontinfo.generic.plist"
    for weight in weights:
        generic_info_file = "%s/%s/font.ufo/%s" % (style_dir, weight, generic_file)
        # Seed the generic template from the base fontinfo on first run.
        if not os.path.exists(generic_info_file):
            basic_info_file = "%s/%s/font.ufo/%s" % (style_dir, weight, fontinfo_file)
            shutil.copy(basic_info_file, generic_info_file)
        with open(generic_info_file, 'r') as f:
            generic_info = f.read()
        for full_name, short_name in languages:
            language_info_file = "%s/%s/font.ufo/fontinfo.%s.plist" % (style_dir, weight, short_name)
            with open(language_info_file, 'w') as f:
                f.write(generic_info.replace("SemanticCode", "Semantic%s" % full_name))


def generate_fontinfo_files(languages):
    """Write per-language fontinfo plists for all Roman and Italic weights.

    The original duplicated the Roman and Italic loops verbatim; both now
    share _generate_fontinfo_for.
    """
    _generate_fontinfo_for("Roman", ROMAN_WEIGHTS, languages)
    _generate_fontinfo_for("Italic", ITALIC_WEIGHTS, languages)
def generate_fontmenunamedbs(languages):
    """Write a FontMenuNameDB.<short> file per language from the generic template."""
    with open('FontMenuNameDB.generic', 'r') as template_file:
        template_text = template_file.read()
    for full_name, short_name in languages:
        contents = template_text.replace("SemanticCode", "Semantic%s" % full_name)
        with open('FontMenuNameDB.%s' % short_name, 'w') as out:
            out.write(contents)
if __name__ == "__main__":
    # Generate all build artifacts for the enabled languages.
    # NOTE: print statements — this script is Python 2.
    enabled = LANGUAGES
    print "Configuring Semantic Fonts..."
    print "  Languages enabled: %s" % ", ".join([fn for fn, sn in enabled])
    generate_config_file(enabled)
    generate_fontinfo_files(enabled)
    generate_fontmenunamedbs(enabled)
    generate_ligature_files(enabled)
    print "Done"
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
创建时间:Sun Aug 5 09:42:55 2018
作者: 星空飘飘
平台:Anaconda 3-5.1.0
语言版本:Python 3.6.4
编辑器:Spyder
分析器:Pandas: 0.22.0
解析器:lxml: 4.1.1
数据库:MongoDB 2.6.12
程序名:autologinzdiao.py
登陆中调网 http://www.zdiao.com/
模拟浏览器登录输入验证码10后自动登录获取cookie
通过id定位元素:find_element_by_id(“id_vaule”)
通过name定位元素:find_element_by_name(“name_vaule”)
通过tag_name定位元素:find_element_by_tag_name(“tag_name_vaule”)
通过class_name定位元素:find_element_by_class_name(“class_name”)
通过css定位元素:find_element_by_css_selector();用css定位是比较灵活的
通过xpath定位元素:find_element_by_xpath(“xpath”)
通过link定位:find_element_by_link_text(“text_vaule”)或find_element_by_partial_link_text()*
"""
from selenium import webdriver
import time
import requests
from lxml import etree
browser_opt = webdriver.ChromeOptions()  # options controlling browser visibility
browser_opt.set_headless()
# NOTE(review): browser_opt is configured but never passed to Chrome() —
# pass chrome_options=browser_opt to actually run headless.
browser = webdriver.Chrome()  # launches with a visible window (see note above)
browser.set_page_load_timeout(10)  # cap page loads at 10 s so get() cannot hang forever
def get_cookie():
    """Log in to zdiao.com via Selenium and return the session cookie string."""
    login_url = 'http://www.zdiao.com/login.asp'
    browser.get(login_url)  # open the login page
    user = browser.find_element_by_id('username')  # username input, located by id
    user.clear()  # clear any prefilled text
    user.send_keys("xxx")  # type the account name
    passwd = browser.find_element_by_id('password')  # password input, located by id
    passwd.clear()
    passwd.send_keys("xxx")  # type the password
    yan = browser.find_element_by_id("yan")  # captcha input, located by id
    yan.clear()
    time.sleep(10)  # pause 10 s so the captcha can be typed manually in the window
    # yan_code = input('enter the captcha: ')
    # yan.send_keys(yan_code)  # type the captcha programmatically instead
    bt = browser.find_element_by_name('bt')  # submit button, located by name
    bt.click()  # click to log in
    browser.get('http://www.zdiao.com/')  # open the home page so session cookies are set
    # browser.execute_script("return navigator.userAgent")  # inspect User-Agent
    cookie = "; ".join([item["name"] + "=" + item["value"] for item in browser.get_cookies()])  # build a "k=v; k=v" Cookie header value for requests
    # browser.get_cookies()  # inspect raw cookies
    browser.page_source  # NOTE(review): statement has no effect — debugging leftover?
    return cookie
def check_cookie():
    """Verify the captured cookie by fetching a members-only page and printing its link text."""
    # NOTE(review): reads the module-level `cookie` assigned after this def —
    # must be called only after get_cookie() has run.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.4467.400 QQBrowser/10.0.424.400',
               'Cookie': cookie}
    url = 'http://www.zdiao.com/u/member.asp'
    response = requests.get(url, headers=headers)
    response.encoding = response.apparent_encoding  # auto-detect the page encoding
    html = response.text
    page = etree.HTML(html)
    # Member-page link text; a non-empty result indicates a logged-in session.
    title = page.xpath('/html/body/div[1]/div[11]/a/text()')
    print(title)
cookie = get_cookie()
browser.quit()  # close the browser once the cookie has been captured
check_cookie()
|
from kafka import KafkaProducer
import sys
import time
import os
# Load the sample taxi rows once; the context manager closes the handle
# (the original list(open(...)) leaked it).
with open('/home/student1/streamingGc/dataset/foil13/TAXI_sample_new_3.csv') as dataset:
    f = list(dataset)
producer = KafkaProducer(bootstrap_servers=['localhost:9092'])
prev = None
counter = 0
i = 0
modCounter = 0
limit = int(sys.argv[1])       # number of rows to replay
batch_size = int(sys.argv[3])  # timestamp changes per simulated batch
while i < limit:
    s = f[i]
    # Each row is "<timestamp>,<payload...>".
    bt = s.split(',', 1)[0]
    s = s.split(',', 1)[1]
    if not prev:
        prev = bt
    if prev == bt:
        producer.send('sample' + str(modCounter), value=s.encode('utf-8'))
    else:
        # Timestamp changed: count a batch boundary.
        counter += 1
        # BUG FIX: the original compared `counter == sys.argv[3]` — an int
        # against a string — so the reset branch could never fire.
        if counter == batch_size:
            # time.sleep((int(bt) - int(prev)) / int(sys.argv[2]))
            counter = 0
        producer.send('sample' + str(modCounter), value=s.encode('utf-8'))
    i += 1
    prev = bt
print(i)
producer.close()
|
"""Custom template tags."""
from datetime import datetime
from django import template
from django.template import Context, Template
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _
from modoboa.core import signals as core_signals
register = template.Library()
@register.simple_tag
def join(items, sep=","):
    """Join a dict's entries as ``key : 'value'`` pairs separated by *sep*.

    The tag keeps its registered name ``join`` for template compatibility.
    Replaces the original quadratic ``+=`` accumulation with str.join.
    """
    return sep.join("%s : '%s'" % (k, v) for k, v in items.items())
@register.simple_tag
def tolist(values):
    """Render *values* as a JS array literal of double-quoted strings."""
    quoted = ['"%s"' % value for value in values]
    return mark_safe("[%s]" % ",".join(quoted))
@register.simple_tag
def alert(msg, typ):
    """Render a dismissible Bootstrap alert box of type *typ* containing *msg*."""
    t = Template("""<div class="alert alert-{{ type }}" role="alert">
<button type="button" class="close" data-dismiss="alert"><span aria-hidden="true">×</span><span class="sr-only">Close</span></button>
{{ msg }}
</div>""")  # NOQA:E501
    return t.render(Context({"type": typ, "msg": msg}))
@register.simple_tag
def render_link(linkdef, mdclass=""):
    """Render a link definition dict as an <a> element.

    :param linkdef: dict describing the link (url, name, title, optional
        modal/confirm/extra_attributes keys — see the template below).
    :param mdclass: extra CSS class(es) prepended to the link's own class.
    """
    t = Template("""<a href="{{ link.url }}" name="{{ link.name }}" title="{{ link.title }}"
{% if link.modal %}data-toggle="ajaxmodal{% if link.autowidth %}-autowidth{% endif %}"{% endif %}
{% if link.modalcb %}modalcb="{{ link.modalcb }}"{% endif %}
{% if link.closecb %}closecb="{{ link.closecb }}"{% endif %}
class="{{ mdclass }}{% if link.class %} {{ link.class }}{% endif %}"
{% if link.confirm %} onclick="return confirm('{{ link.confirm }}')"{% endif %}
{% for attr, value in link.extra_attributes.items %} {{ attr }}="{{ value }}"{% endfor %}
>
{% if link.img %}<i class="{{ link.img }}"></i>{% endif %}
{{ link.label }}</a>""")  # NOQA:E501
    return t.render(Context({"link": linkdef, "mdclass": mdclass}))
@register.simple_tag
def progress_color(value):
    """Map a usage percentage to a Bootstrap progress-bar CSS class."""
    value = int(value)
    if value >= 80:
        return "progress-bar progress-bar-danger"
    if value >= 50:
        return "progress-bar progress-bar-warning"
    return "progress-bar progress-bar-info"
@register.filter
def fromunix(value):
    """Convert a unix timestamp (int or numeric string) to a datetime.

    NOTE(review): returns a naive datetime in server-local time — confirm
    callers expect local time rather than UTC.
    """
    return datetime.fromtimestamp(int(value))
@register.simple_tag
def render_tags(tags):
    """Render a list of tag dicts (name, label, type, optional color) as filter labels."""
    t = Template("""{% for tag in tags %}
<span class="label label-{% if tag.color %}{{ tag.color }}{% else %}default{% endif %}">
  <a href="#" class="filter {{ tag.type }}" name="{{ tag.name }}">{{ tag.label }}</a>
</span>
{% endfor %}
""")  # NOQA:E501
    return t.render(Context({"tags": tags}))
@register.simple_tag
def extra_static_content(caller, st_type, user):
    """Get extra static content from extensions.

    :param str caller: the application (location) responsible for the call
    :param str st_type: content type (css or js)
    :param ``User`` user: connected user
    """
    tpl = template.Template(
        "{% for sc in static_content %}{{ sc|safe }}{% endfor %}"
    )
    # Each signal receiver returns (receiver, content); keep only the content.
    static_content = core_signals.extra_static_content.send(
        sender="extra_static_content",
        caller=caller, st_type=st_type, user=user)
    static_content = [result[1] for result in static_content]
    return tpl.render(
        template.Context({"static_content": static_content})
    )
@register.filter(name="localize_header_name")
def localize_header_name(headername):
    """Return the translated display name for a known mail header name.

    Unknown headers are returned unchanged.
    """
    translations = {
        "From": _("From"),
        "To": _("To"),
        "Date": _("Date"),
        "Subject": _("Subject"),
    }
    return translations.get(headername, headername)
|
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.request import CommonRequest
import random
from common import cache_
def send_code(phone):
    """Generate a 4-digit verification code for *phone*, cache it, then SMS it."""
    # BUG FIX: random.randint(0.9) raised TypeError (one float argument), and
    # accumulating into a set both forbade repeated digits and joined them in
    # arbitrary order. Draw four independent digits instead.
    code = ''.join(str(random.randint(0, 9)) for _ in range(4))
    # Persist the code so valid_code() can verify it later (redis-backed).
    # NOTE(review): the original called cache_.save_code() with no arguments;
    # assumed signature save_code(phone, code) — confirm against common.cache_.
    cache_.save_code(phone, code)
    # TODO: actually send the SMS via the Aliyun SDK (AcsClient/CommonRequest).
    pass
def valid_code(phone, code):
    """Return True when *code* matches the code previously cached for *phone*."""
    # Look up the code that was sent to this phone number.
    cached = cache_.get_code(phone)
    return cached == code
#!/usr/bin/env python3
import RPi.GPIO as GPIO
import socket
import sys
import time
port = 3  # BCM pin number of the monitored digital input
GPIO.setmode(GPIO.BCM)
GPIO.setup(port, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)  # pulled low when idle
HOST = '10.42.0.1' # later: identify automatically the ip (fast implementation)
PORT = 80
bytes_encoding = 'utf-8'
print(GPIO.input(port))
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # if the socket is already open, reutilizes.
    s.bind((HOST,PORT))
    s.listen()
    print("PyServer listening {0}:{1}".format(HOST,PORT))
    conn, addr = s.accept()  # handle a single client connection
    with conn:
        print("Connected by: ", addr)
        once = False  # edge latch: send only one message per falling edge
        while True:
            state = GPIO.input(port)
            if not state and not once:
                # Input went low: debounce, then notify the client once.
                time.sleep(0.1)
                try:
                    data = str(1)
                    datab = bytes(data, bytes_encoding)
                    conn.sendall(datab)
                    print("Sent: {0} - {1} bytes ({2})".format(data, sys.getsizeof(data), datab))
                except (BrokenPipeError, ConnectionResetError):
                    print("Client disconnected")
                    break
                once = True
            elif state and once:
                # Input returned high: re-arm the latch for the next edge.
                once = False
                time.sleep(0.1)
|
from rest_framework import serializers
from .models import *
from django.contrib.auth.models import User
class KlientS(serializers.ModelSerializer):
    # Serializer exposing a client's contact and address details.
    class Meta:
        model = Klient
        fields = ['Imie', 'Nazwisko', 'Telefon','Kod_pocztowy', 'Adres', 'Miasto']
class Dane_firmyS(serializers.ModelSerializer):
    # Serializer for company data; owner is exposed read-only by username.
    owner = serializers.ReadOnlyField(source='owner.username')
    class Meta:
        model = Dane_firmy
        fields = ['Nip','Nazwa_firmy', 'Kod_pocztowy', 'Adres', 'Miasto', 'klient','owner']
class PersonelS(serializers.ModelSerializer):
    # Serializer exposing a staff member's identity and group.
    class Meta:
        model = Personel
        fields = ['Imie', 'Nazwisko', 'Pesel','Grupa']
class ZleceniaS(serializers.ModelSerializer):
    # Serializer for orders, including their payment state and client link.
    class Meta:
        model = Zlecenia
        fields = [ 'Zlecenie', 'Zaplacone', 'klient']
class ObecnoscS(serializers.ModelSerializer):
    # Serializer for daily attendance records per staff member.
    class Meta:
        model = Obecnosc
        fields = ['Dzien', 'Obecnosc', 'osoba']
class UserSerializer(serializers.ModelSerializer):
    # Reverse relation: primary keys of all Klient records linked to the user.
    Klient = serializers.PrimaryKeyRelatedField(many=True, queryset=Klient.objects.all())

    class Meta:
        model = User
        # BUG FIX: the declared field is named 'Klient', but the original
        # listed 'Zlecenie' here — DRF raises an assertion error when a
        # declared field is missing from Meta.fields.
        fields = ['id', 'username', 'Klient']
import torch as tc
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
tc.manual_seed(1)

# Toy dataset: y = 2x exactly, so the optimum is W = 2, b = 0.
x_train = tc.FloatTensor([[1], [2], [3]])
y_train = tc.FloatTensor([[2], [4], [6]])

# Learnable parameters, initialised to zero with gradients enabled.
W = tc.zeros(1, requires_grad=True)
print(W)
b = tc.zeros(1, requires_grad=True)
print(b)

# Show the untrained prediction and its mean-squared-error cost once.
hypothesis = x_train * W + b
print(hypothesis)
cost = tc.mean((hypothesis - y_train) ** 2)
print(cost)

optimizer = optim.SGD([W, b], lr=0.01)

nb_epochs = 2000
for epoch in range(nb_epochs + 1):
    # Forward pass, then one SGD step on the MSE cost.
    hypothesis = x_train * W + b
    cost = tc.mean((hypothesis - y_train) ** 2)
    optimizer.zero_grad()
    cost.backward()
    optimizer.step()
    if epoch % 100 == 0:
        print('Epoch{:4d}/{}W:{:.3f},b:{:.3f}Cost:{:.6f}'.format(
            epoch, nb_epochs, W.item(), b.item(), cost.item()))
|
from datetime import datetime
from http import HTTPStatus
from modules.db import get_db_data, set_db_data, bulk_client_update, bulk_service_update
import MySQLdb
import configparser
import itertools
import json
import logging
import numpy as np
import os
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
import re
import requests
import socket
import time
from flask import Flask, render_template, request, send_from_directory, make_response, request
# Append INFO-level logs (auth failures, client rows) to a dedicated UTF-8 file.
logging.basicConfig(handlers=[logging.FileHandler('/var/www/pow/pow.log', 'a', 'utf-8')],
                    level=logging.INFO)
# Read config and parse constants
config = configparser.ConfigParser()
config.read('/var/www/pow/config.ini')
app = Flask(__name__, template_folder='/var/www/pow/templates')
# Shared secret the webhook endpoints check via auth_check().
POW_KEY = config.get('webhooks', 'POW_KEY')
# Comma-separated whitelist of caller IPs for the webhook endpoints.
AUTHORIZED_IPS = config.get('webhooks', 'ips').split(',')
# --- Headline counters over a rolling 24-hour window ---
pow_count_call = "SELECT count(request_id) FROM pow_requests WHERE time_requested >= NOW() - INTERVAL 24 HOUR"
# Per-type ('P' precache / 'O' on-demand) POW counts, 24-hour window.
pow_ratio_call = ("SELECT pow_type, count(pow_type) FROM pow_requests "
                  "WHERE time_requested >= NOW() - INTERVAL 24 HOUR "
                  "GROUP BY pow_type order by pow_type ASC")
# --- Service / client inventory counters ---
service_count_call = "SELECT count(service_id) FROM service_list"
unlisted_service_call = "SELECT count(service_id) FROM service_list where service_name is null"
client_count_call = "SELECT count(client_id) FROM client_list"
client_ratio_call = ("SELECT client_type, count(client_type) FROM client_list "
                     "GROUP BY client_type order by client_type ASC")
new_account_call = ("SELECT COUNT(*) FROM distributed_pow.pow_requests "
                    "WHERE new_account = 1 AND time_requested >= CURRENT_TIMESTAMP() - INTERVAL 24 HOUR")
# --- 24-hour deltas: last day's average/count minus the previous day's ---
services_24hr_call = ("SELECT "
                      "(SELECT round(avg(service_count),0) FROM service_connection_log "
                      "WHERE inserted_ts >= NOW() - INTERVAL 1 DAY) - "
                      "(SELECT round(avg(service_count),0) FROM service_connection_log "
                      "WHERE inserted_ts < NOW() - interval 1 DAY and inserted_ts >= NOW() - interval 2 day)")
clients_24hr_call = ("SELECT "
                     "(SELECT round(avg(client_count),0) FROM client_connection_log "
                     "WHERE inserted_ts >= NOW() - INTERVAL 1 DAY) - "
                     "(SELECT round(avg(client_count),0) FROM client_connection_log "
                     "WHERE inserted_ts < NOW() - interval 1 DAY and inserted_ts >= NOW() - interval 2 day)")
work_24hr_call = ("SELECT "
                  "(SELECT count(pow_type) FROM pow_requests WHERE time_requested >= NOW() - INTERVAL 1 DAY) - "
                  "(SELECT count(pow_type) FROM pow_requests WHERE time_requested < NOW() - interval 1 DAY "
                  "and time_requested >= NOW() - interval 2 day)")
# --- Tables backing the Services and Clients dashboard sections ---
# Note: there is no space between the 'null' literal and ORDER BY; MySQL's
# lexer still separates the tokens because the quote ends the literal.
services_call = ("SELECT t1.service_name, t1.service_web, t2.pow FROM "
                 "(SELECT service_id, service_name, service_web FROM service_list) AS t1 "
                 "LEFT JOIN (SELECT service_id, count(service_id) AS pow FROM "
                 "pow_requests group by service_id) AS t2 "
                 "ON t1.service_id = t2.service_id "
                 "WHERE t1.service_name != 'null'"
                 "ORDER BY pow desc")
clients_call = ("SELECT distinct client_address, client_precache_count, client_demand_count, sum(client_precache_count + client_demand_count) "
                "FROM distributed_pow.client_list "
                "GROUP BY client_address, client_precache_count, client_demand_count "
                "ORDER BY sum(client_precache_count + client_demand_count) DESC;")
client_type_call = ("SELECT DISTINCT client_address, client_type "
                    "FROM distributed_pow.client_list ORDER BY client_address DESC;")
# --- Chart series: per-type counts LEFT JOINed onto the full set of time
# buckets (hourly / daily / per-minute) so buckets without that type still
# appear with NULL totals. One query per (bucket, pow_type) pair. ---
hour_p_call = ("SELECT t1.pow_date, t2.pow_type, t2.total FROM "
               "(SELECT date_format(time_requested, '%Y-%m-%d %H') as pow_date, count(*) as total "
               "FROM pow_requests "
               "WHERE date_format(time_requested, '%Y-%m-%d %H') >= CURRENT_TIMESTAMP() - INTERVAL 24 HOUR "
               "GROUP BY pow_date order by pow_date asc) as t1 "
               "LEFT JOIN "
               "(SELECT date_format( time_requested, '%Y-%m-%d %H') as pow_date, pow_type, count(*) as total "
               "FROM pow_requests "
               "WHERE pow_type = 'P' "
               "AND date_format(time_requested, '%Y-%m-%d %H') >= CURRENT_TIMESTAMP() - INTERVAL 24 HOUR "
               "GROUP BY pow_date, pow_type order by pow_date asc) as t2 "
               "on t1.pow_date = t2.pow_date "
               "ORDER BY t1.pow_date ASC")
hour_o_call = ("SELECT t1.pow_date, t2.pow_type, t2.total FROM "
               "(SELECT date_format( time_requested, '%Y-%m-%d %H') as pow_date, count(*) as total "
               "FROM pow_requests "
               "WHERE date_format(time_requested, '%Y-%m-%d %H') >= CURRENT_TIMESTAMP() - INTERVAL 24 HOUR "
               "GROUP BY pow_date order by pow_date asc) as t1 "
               "LEFT JOIN "
               "(SELECT date_format( time_requested, '%Y-%m-%d %H') as pow_date, pow_type, count(*) as total "
               "FROM pow_requests WHERE pow_type = 'O' "
               "AND date_format(time_requested, '%Y-%m-%d %H') >= CURRENT_TIMESTAMP() - INTERVAL 24 HOUR "
               "GROUP BY pow_date, pow_type order by pow_date asc) as t2 "
               "on t1.pow_date = t2.pow_date "
               "ORDER BY t1.pow_date ASC")
day_p_call = ("SELECT t1.pow_date, t2.pow_type, t2.total FROM "
              "(SELECT date_format( time_requested, '%Y-%m-%d' ) as pow_date, count(*) as total "
              "FROM pow_requests "
              "WHERE date_format(time_requested, '%Y-%m-%d') >= CURRENT_TIMESTAMP() - INTERVAL 1 MONTH "
              "GROUP BY pow_date order by pow_date asc) as t1 "
              "LEFT JOIN "
              "(SELECT date_format( time_requested, '%Y-%m-%d' ) as pow_date, pow_type, count(*) as total "
              "FROM pow_requests WHERE pow_type = 'P' "
              "AND date_format(time_requested, '%Y-%m-%d') >= CURRENT_TIMESTAMP() - INTERVAL 1 MONTH "
              "GROUP BY pow_date, pow_type order by pow_date asc) as t2 "
              "on t1.pow_date = t2.pow_date "
              "ORDER BY t1.pow_date ASC")
day_o_call = ("SELECT t1.pow_date, t2.pow_type, t2.total FROM "
              "(SELECT date_format( time_requested, '%Y-%m-%d' ) as pow_date, count(*) as total "
              "FROM pow_requests "
              "WHERE date_format(time_requested, '%Y-%m-%d') >= CURRENT_TIMESTAMP() - INTERVAL 1 MONTH "
              "GROUP BY pow_date order by pow_date asc) as t1 "
              "LEFT JOIN "
              "(SELECT date_format( time_requested, '%Y-%m-%d' ) as pow_date, pow_type, count(*) as total "
              "FROM pow_requests WHERE pow_type = 'O' "
              "AND date_format(time_requested, '%Y-%m-%d') >= CURRENT_TIMESTAMP() - INTERVAL 1 MONTH "
              "GROUP BY pow_date, pow_type order by pow_date asc) as t2 "
              "on t1.pow_date = t2.pow_date "
              "ORDER BY t1.pow_date ASC")
minute_p_call = ("SELECT t1.pow_date, t2.pow_type, t2.total FROM "
                 "(SELECT date_format( time_requested, '%Y-%m-%d %H:%i' ) as pow_date, count(*) as total "
                 "FROM pow_requests "
                 "WHERE date_format(time_requested, '%Y-%m-%d %H:%i') >= CURRENT_TIMESTAMP() - INTERVAL 60 MINUTE "
                 "GROUP BY pow_date order by pow_date asc) as t1 "
                 "LEFT JOIN "
                 "(SELECT date_format( time_requested, '%Y-%m-%d %H:%i' ) as pow_date, pow_type, count(*) as total"
                 " FROM pow_requests WHERE pow_type = 'P' "
                 "AND date_format(time_requested, '%Y-%m-%d %H:%i') >= CURRENT_TIMESTAMP() - INTERVAL 60 MINUTE "
                 "GROUP BY pow_date, pow_type order by pow_date asc) as t2 "
                 "on t1.pow_date = t2.pow_date "
                 "ORDER BY t1.pow_date ASC")
minute_o_call = ("SELECT t1.pow_date, t2.pow_type, t2.total FROM "
                 "(SELECT date_format( time_requested, '%Y-%m-%d %H:%i' ) as pow_date, count(*) as total "
                 "FROM pow_requests "
                 "WHERE date_format(time_requested, '%Y-%m-%d %H:%i') >= CURRENT_TIMESTAMP() - INTERVAL 60 MINUTE "
                 "GROUP BY pow_date order by pow_date asc) as t1 "
                 "LEFT JOIN "
                 "(SELECT date_format( time_requested, '%Y-%m-%d %H:%i' ) as pow_date, pow_type, count(*) as total"
                 " FROM pow_requests WHERE pow_type = 'O' "
                 "AND date_format(time_requested, '%Y-%m-%d %H:%i') >= CURRENT_TIMESTAMP() - INTERVAL 60 MINUTE "
                 "GROUP BY pow_date, pow_type order by pow_date asc) as t2 "
                 "on t1.pow_date = t2.pow_date "
                 "ORDER BY t1.pow_date ASC")
# --- Total POW volume per day / hour / minute (no type breakdown) ---
pow_day_total_call = ("SELECT date_format( time_requested, '%Y-%m-%d' ), count(*) "
                      "FROM pow_requests "
                      "WHERE date_format(time_requested, '%Y-%m-%d') >= CURRENT_TIMESTAMP() - INTERVAL 1 MONTH "
                      "GROUP BY date_format( time_requested, '%Y-%m-%d' ) "
                      "ORDER BY date_format( time_requested, '%Y-%m-%d' ) ASC")
pow_hour_total_call = ("SELECT date_format( time_requested, '%Y-%m-%d %H' ), count(*) "
                       "FROM pow_requests "
                       "WHERE date_format(time_requested, '%Y-%m-%d %H') >= CURRENT_TIMESTAMP() - INTERVAL 24 HOUR "
                       "GROUP BY date_format( time_requested, '%Y-%m-%d %H' ) "
                       "ORDER BY date_format( time_requested, '%Y-%m-%d %H' ) ASC")
pow_minute_total_call = ("SELECT date_format( time_requested, '%Y-%m-%d %H:%i' ), count(*) "
                         "FROM pow_requests "
                         "WHERE date_format(time_requested, '%Y-%m-%d %H:%i') >= "
                         "CURRENT_TIMESTAMP() - INTERVAL 60 MINUTE "
                         "GROUP BY date_format( time_requested, '%Y-%m-%d %H:%i' ) "
                         "ORDER BY date_format( time_requested, '%Y-%m-%d %H:%i' ) ASC")
# --- Response-time averages per hour (per type and combined), plus overall
# averages of difficulty and daily request volume ---
avg_p_time_call = ("SELECT t1.time_req, t2.pow_type, t2.avg_time "
                   "FROM "
                   "(SELECT date_format(time_requested, '%Y-%m-%d %H') as time_req "
                   "FROM pow_requests "
                   "WHERE date_format(time_requested, '%Y-%m-%d %H') >= CURRENT_TIMESTAMP() - INTERVAL 24 HOUR "
                   "GROUP BY time_req) t1 "
                   "LEFT JOIN "
                   "(SELECT date_format(time_requested, '%Y-%m-%d %H') as time_req, pow_type, "
                   "avg(timediff(time_responded, time_requested)) as avg_time "
                   "FROM pow_requests "
                   "WHERE pow_type = 'P' "
                   "AND date_format(time_requested, '%Y-%m-%d %H') >= CURRENT_TIMESTAMP() - INTERVAL 24 HOUR "
                   "GROUP BY date_format(time_requested, '%Y-%m-%d %H'), pow_type) t2 "
                   "ON t1.time_req = t2.time_req "
                   "ORDER BY t1.time_req ASC;")
avg_o_time_call = ("SELECT t1.time_req, t2.pow_type, t2.avg_time "
                   "FROM "
                   "(SELECT date_format(time_requested, '%Y-%m-%d %H') as time_req "
                   "FROM pow_requests "
                   "WHERE date_format(time_requested, '%Y-%m-%d %H') >= CURRENT_TIMESTAMP() - INTERVAL 24 HOUR "
                   "GROUP BY time_req) t1 "
                   "LEFT JOIN "
                   "(SELECT date_format(time_requested, '%Y-%m-%d %H') as time_req, pow_type, "
                   "avg(timediff(time_responded, time_requested)) as avg_time "
                   "FROM pow_requests "
                   "WHERE pow_type = 'O' "
                   "AND date_format(time_requested, '%Y-%m-%d %H') >= CURRENT_TIMESTAMP() - INTERVAL 24 HOUR "
                   "GROUP BY date_format(time_requested, '%Y-%m-%d %H'), pow_type) t2 "
                   "ON t1.time_req = t2.time_req "
                   "ORDER BY t1.time_req ASC;")
avg_combined_call = ("SELECT date_format( time_requested, '%Y-%m-%d %H' ), "
                     "avg(timediff(time_responded, time_requested)) "
                     "FROM pow_requests "
                     "WHERE date_format(time_requested, '%Y-%m-%d %H') >= CURRENT_TIMESTAMP() - INTERVAL 24 HOUR "
                     "GROUP BY date_format( time_requested, '%Y-%m-%d %H' )")
avg_overall_call = ("SELECT avg(timediff(time_responded, time_requested)) FROM pow_requests "
                    "WHERE date_format(time_requested, '%Y-%m-%d %H') >= CURRENT_TIMESTAMP() - INTERVAL 24 HOUR")
avg_difficulty_call = ("SELECT avg(pow_difficulty) FROM pow_requests "
                       "WHERE time_requested >= NOW() - INTERVAL 30 MINUTE")
avg_requests_call = ("SELECT date_format( time_requested, '%Y-%m-%d' ), count(request_id) FROM pow_requests "
                     "WHERE time_requested >= NOW() - INTERVAL 1 MONTH "
                     "GROUP BY date_format( time_requested, '%Y-%m-%d' )")
def auth_check(ip, request_json, authorized_ips=None, api_key=None):
    """Validate an incoming webhook request by source IP and API key.

    Args:
        ip: remote address of the caller.
        request_json: parsed JSON body (dict) of the request.
        authorized_ips: iterable of allowed IPs; defaults to the module-level
            AUTHORIZED_IPS loaded from config (backward compatible).
        api_key: expected secret; defaults to the module-level POW_KEY.

    Returns:
        Tuple (error_message, ok) — error_message is '' when ok is True.
    """
    if authorized_ips is None:
        authorized_ips = AUTHORIZED_IPS
    if api_key is None:
        api_key = POW_KEY
    if ip not in authorized_ips:
        return 'IP Authorization Error', False
    if 'api_key' not in request_json:
        return 'API Key not provided', False
    if request_json['api_key'] != api_key:
        return 'API Key invalid', False
    return '', True
@app.route("/")
@app.route("/index")
def index():
# Get current POW count
pow_count_data = get_db_data(pow_count_call)
pow_count = int(pow_count_data[0][0])
# Get POW type ratio
on_demand_count = 0
precache_count = 0
pow_ratio_data = get_db_data(pow_ratio_call)
for pow in pow_ratio_data:
if pow[0] == 'O':
on_demand_count = pow[1]
elif pow[0] == 'P':
precache_count = pow[1]
if pow_count > 0:
on_demand_ratio = round((on_demand_count / pow_count) * 100, 1)
precache_ratio = round((precache_count / pow_count) * 100, 1)
else:
on_demand_ratio = 0
precache_ratio = 0
# Get service count
service_count_data = get_db_data(service_count_call)
service_count = int(service_count_data[0][0])
# Get unlisted / listed services
unlisted_service_data = get_db_data(unlisted_service_call)
unlisted_services = int(unlisted_service_data[0][0])
listed_services = service_count - unlisted_services
# Get client count
client_count_data = get_db_data(client_count_call)
client_count = int(client_count_data[0][0])
# Client Ratio
client_both = 0
client_urgent = 0
client_precache = 0
client_ratio_data = get_db_data(client_ratio_call)
if client_count > 0:
for clients in client_ratio_data:
if clients[0] == 'P':
client_precache = int(clients[1])
elif clients[0] == 'B':
client_both = int(clients[1])
elif clients[0] == 'O':
client_urgent = int(clients[1])
client_both_ratio = round((client_both / client_count) * 100, 1)
client_precache_ratio = round((client_precache / client_count) * 100, 1)
client_urgent_ratio = round((client_urgent / client_count) * 100, 1)
else:
client_both_ratio = 0
client_precache_ratio = 0
client_urgent_ratio = 0
new_account_data = get_db_data(new_account_call)
new_account_ratio = round(int(new_account_data[0][0]) / pow_count * 100, 1)
# Get 24hr differences
services_24hr_data = get_db_data(services_24hr_call)
services_24hr = services_24hr_data[0][0]
if services_24hr is None:
services_24hr = 0
clients_24hr_data = get_db_data(clients_24hr_call)
clients_24hr = clients_24hr_data[0][0]
if clients_24hr is None:
clients_24hr = 0
work_24hr_data = get_db_data(work_24hr_call)
work_24hr = work_24hr_data[0][0]
# Get info for Services section
services_table = get_db_data(services_call)
unlisted_services_call = "SELECT count(service_id) FROM service_list where service_name is null"
unlisted_services_data = get_db_data(unlisted_services_call)
unlisted_count = unlisted_services_data[0][0]
unlisted_pow_call = ("SELECT count(request_id) FROM pow_requests WHERE service_id in "
"(SELECT service_id FROM service_list WHERE service_name is null)")
unlisted_pow_data = get_db_data(unlisted_pow_call)
unlisted_pow = unlisted_pow_data[0][0]
# Get info for Clients section
clients_temp_table = get_db_data(clients_call)
clients_table = []
client_type_table = get_db_data(client_type_call)
client_type_dict = {}
for client in client_type_table:
if client[0] in client_type_dict:
client_type_dict[client[0]] += client[1]
else:
client_type_dict[client[0]] = client[1]
for row in clients_temp_table:
newrow = list(row)
newrow.append(client_type_dict[newrow[0]])
clients_table.append(newrow)
logging.info("row: {}".format(newrow))
logging.info(clients_table)
# Get info for POW charts
day_total = get_db_data(pow_day_total_call)
hour_total = get_db_data(pow_hour_total_call)
minute_total = get_db_data(pow_minute_total_call)
day_precache = get_db_data(day_p_call)
day_ondemand = get_db_data(day_o_call)
hour_precache = get_db_data(hour_p_call)
hour_ondemand = get_db_data(hour_o_call)
minute_precache = get_db_data(minute_p_call)
minute_ondemand = get_db_data(minute_o_call)
avg_p_time = get_db_data(avg_p_time_call)
avg_o_time = get_db_data(avg_o_time_call)
avg_combined_time = get_db_data(avg_combined_call)
avg_overall_data = get_db_data(avg_overall_call)
avg_requests_data = get_db_data(avg_requests_call)
total_requests = 0
count_requests = 0
for row in avg_requests_data:
total_requests += row[1]
count_requests += 1
requests_avg = int(total_requests / count_requests)
if avg_overall_data[0][0] is not None:
avg_overall = round(float(avg_overall_data[0][0]), 1)
else:
avg_overall = 0
avg_difficulty_data = get_db_data(avg_difficulty_call)
if avg_difficulty_data[0][0] is not None:
avg_difficulty = round(avg_difficulty_data[0][0], 1)
else:
avg_difficulty = 1.0
return render_template('index.html', pow_count=pow_count, on_demand_ratio=on_demand_ratio,
precache_ratio=precache_ratio, service_count=service_count, client_count=client_count,
client_both_ratio=client_both_ratio, client_precache_ratio=client_precache_ratio,
client_urgent_ratio=client_urgent_ratio, listed_services=listed_services,
unlisted_services=unlisted_services, services_24hr=services_24hr, clients_24hr=clients_24hr,
work_24hr=work_24hr, services_table=services_table, unlisted_count=unlisted_count,
unlisted_pow=unlisted_pow, clients_table=clients_table, day_total=day_total,
hour_total=hour_total, minute_total=minute_total, day_ondemand=day_ondemand,
day_precache=day_precache, hour_ondemand=hour_ondemand, hour_precache=hour_precache,
minute_ondemand=minute_ondemand, minute_precache=minute_precache, avg_p_time=avg_p_time,
avg_overall=avg_overall, avg_combined_time=avg_combined_time, avg_difficulty=avg_difficulty,
requests_avg=requests_avg, avg_o_time=avg_o_time, new_account_ratio=new_account_ratio)
@app.route('/get_updates', methods=["GET"])
def return_data():
    """Stub endpoint for incremental dashboard updates.

    The live-update logic (re-querying POW counts and type ratios since the
    page's load time and returning them as JSON) is currently disabled; the
    endpoint deliberately returns an empty body.
    """
    return ''
@app.route('/pow_update', methods=["POST"])
def pow_update():
    """Webhook: record one completed POW request in the pow_requests table."""
    request_json = request.get_json()
    ip = request.remote_addr
    error_msg, auth = auth_check(ip, request_json)
    if auth is False:
        logging.info("{}: {} made a bad POST request to POW Update: {}".format(datetime.now(), ip, error_msg))
        return error_msg, HTTPStatus.BAD_REQUEST
    if auth is True:
        # Timestamps arrive as microsecond-precision strings.
        time_requested = datetime.strptime(request_json['time_requested'], "%Y-%m-%d %H:%M:%S.%f")
        time_responded = datetime.strptime(request_json['time_responded'], "%Y-%m-%d %H:%M:%S.%f")
        # Normalize the JSON boolean to the 0/1 the column stores.
        if request_json['is_new_account'] is True:
            new_account = 1
        else:
            new_account = 0
        # NOTE(review): this INSERT is built with str.format, so a quote in any
        # payload field breaks the statement (SQL injection risk). Callers are
        # IP- and key-authenticated, but a parameterized query would be safer —
        # confirm whether set_db_data supports query parameters.
        pow_call = ("INSERT INTO pow_requests (request_id, service_id, client_id, pow_type, pow_difficulty, "
                    "new_account, time_requested, time_responded) "
                    "VALUES ('{}', '{}', '{}', '{}', {}, {}, '{}', '{}')".format(request_json['request_id'],
                                                                                request_json['service_id'],
                                                                                request_json['client_id'],
                                                                                request_json['pow_type'],
                                                                                request_json['pow_difficulty'],
                                                                                new_account,
                                                                                time_requested,
                                                                                time_responded))
        set_db_data(pow_call)
    return 'POW Inserted', HTTPStatus.OK
@app.route('/client_update', methods=["POST"])
def update_client():
    """Webhook: refresh the client list and log the connection count."""
    payload = request.get_json()
    caller_ip = request.remote_addr
    error_msg, authorized = auth_check(caller_ip, payload)
    if authorized is False:
        logging.info("{}: {} made a bad POST request to Client Update: {}".format(datetime.now(), caller_ip, error_msg))
        return error_msg, HTTPStatus.BAD_REQUEST
    if authorized is not True:
        return 'No action taken', HTTPStatus.OK
    client_list = payload['clients']
    if not client_list:
        # An empty payload means no clients are connected: clear the table.
        set_db_data("DELETE FROM client_list")
        return 'Clients Updated', HTTPStatus.OK
    # Reject the whole batch if any entry carries an unknown client_type.
    for client in client_list:
        if client['client_type'].upper() not in ['B', 'P', 'O']:
            return 'Invalid client_type for client_id: {}'.format(client['client_id']), HTTPStatus.BAD_REQUEST
    bulk_client_update(client_list)
    set_db_data("INSERT INTO client_connection_log (client_count) VALUES ({})".format(len(client_list)))
    return 'Clients Updated', HTTPStatus.OK
@app.route('/service_update', methods=["POST"])
def update_services():
    """Webhook: bulk-refresh the service list and log the connection count."""
    payload = request.get_json()
    caller_ip = request.remote_addr
    error_msg, authorized = auth_check(caller_ip, payload)
    if authorized is False:
        logging.info("{}: {} made a bad POST request to Service Update: {}".format(datetime.now(), caller_ip, error_msg))
        return error_msg, HTTPStatus.BAD_REQUEST
    if authorized is not True:
        return 'No action taken', HTTPStatus.OK
    services_list = payload['services']
    bulk_service_update(services_list)
    set_db_data("INSERT INTO service_connection_log (service_count) VALUES ({})".format(len(services_list)))
    return 'Services Updated', HTTPStatus.OK
if __name__ == "__main__":
app.run()
|
#######Prepare QGIS for working#############################################################################################################################
############################################################################################################################################################
from qgis.core import QgsProcessing
from qgis.core import QgsProcessingAlgorithm
from qgis.core import QgsProcessingMultiStepFeedback
from qgis.core import QgsProcessingParameterFeatureSink
import processing
class Modelo4a(QgsProcessingAlgorithm):
    """QGIS processing model: fix the geometries of two layers, intersect
    them, and aggregate the intersection by country name."""

    def initAlgorithm(self, config=None):
        # Output sinks for the two geometry-fixed layers and their intersection.
        self.addParameter(QgsProcessingParameterFeatureSink('Fixgeo_wlds', 'fixgeo_wlds', type=QgsProcessing.TypeVectorAnyGeometry, createByDefault=True, supportsAppend=True, defaultValue=None))
        self.addParameter(QgsProcessingParameterFeatureSink('Fixgeo_countries', 'fixgeo_countries', type=QgsProcessing.TypeVectorAnyGeometry, createByDefault=True, supportsAppend=True, defaultValue=None))
        self.addParameter(QgsProcessingParameterFeatureSink('Intersection', 'intersection', type=QgsProcessing.TypeVectorAnyGeometry, createByDefault=True, defaultValue=None))

    # Bug fix: the original `def processAlgorithm(...)` line was missing the
    # trailing colon, which made the whole module a SyntaxError.
    def processAlgorithm(self, parameters, context, model_feedback):
        feedback = QgsProcessingMultiStepFeedback(4, model_feedback)
        results = {}
        outputs = {}
        # Step 1: fix geometries of the shapefile produced by model 1
        # (repairs invalid/overlapping polygon geometries).
        alg_params = {
            'INPUT': '/Users/rochipodesta/Desktop/maestría/Herramientas/semana 5/output/clean.shp',
            'OUTPUT': parameters['Fixgeo_wlds']
        }
        outputs['CorregirGeometrasWlds'] = processing.run('native:fixgeometries', alg_params, context=context, feedback=feedback, is_child_algorithm=True)
        results['Fixgeo_wlds'] = outputs['CorregirGeometrasWlds']['OUTPUT']

        feedback.setCurrentStep(1)
        if feedback.isCanceled():
            return {}

        # Step 2: fix geometries of the Natural Earth countries layer.
        alg_params = {
            'INPUT': '/Users/rochipodesta/Desktop/maestría/Herramientas/semana 5/input/ne_10m_admin_0_countries/ne_10m_admin_0_countries.dbf',
            'OUTPUT': parameters['Fixgeo_countries']
        }
        outputs['CorregirGeometrasCountries'] = processing.run('native:fixgeometries', alg_params, context=context, feedback=feedback, is_child_algorithm=True)
        results['Fixgeo_countries'] = outputs['CorregirGeometrasCountries']['OUTPUT']

        feedback.setCurrentStep(2)
        if feedback.isCanceled():
            return {}

        # Step 3: intersect the two fixed layers, keeping only the selected
        # fields from each side ('GID' from wlds, 'ADMIN' from countries).
        alg_params = {
            'INPUT': outputs['CorregirGeometrasWlds']['OUTPUT'],
            'INPUT_FIELDS': ['GID'],
            'OVERLAY': outputs['CorregirGeometrasCountries']['OUTPUT'],
            'OVERLAY_FIELDS': ['ADMIN'],
            'OVERLAY_FIELDS_PREFIX': '',
            'OUTPUT': parameters['Intersection']
        }
        outputs['Interseccin'] = processing.run('native:intersection', alg_params, context=context, feedback=feedback, is_child_algorithm=True)
        results['Intersection'] = outputs['Interseccin']['OUTPUT']

        feedback.setCurrentStep(3)
        if feedback.isCanceled():
            return {}

        # Step 4: count how many times each country appears in the intersection.
        alg_params = {
            'CATEGORIES_FIELD_NAME': ['ADMIN'],
            'INPUT': outputs['Interseccin']['OUTPUT'],
            'VALUES_FIELD_NAME': '',
            # Bug fix: 'OUTPUT' appeared twice in this dict (a .gpkg path and
            # TEMPORARY_OUTPUT); the later key silently overrode the path, so
            # only the effective value is kept here.
            'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT
        }
        outputs['EstadsticasPorCategoras'] = processing.run('qgis:statisticsbycategories', alg_params, context=context, feedback=feedback, is_child_algorithm=True)
        return results

    def name(self):
        return 'modelo4a'

    def displayName(self):
        return 'modelo4a'

    def group(self):
        return ''

    def groupId(self):
        return ''

    def createInstance(self):
        return Modelo4a()
|
# 150. Evaluate Reverse Polish Notation
class Solution:
    def evalRPN(self, tokens):
        """
        :type tokens: List[str]
        :rtype: int

        Evaluate an expression given in Reverse Polish (postfix) notation
        with a value stack. Division truncates toward zero, as the problem
        requires, using integer arithmetic only.
        """
        stack = []
        for token in tokens:
            if token in ('+', '-', '*', '/'):
                b = stack.pop()
                a = stack.pop()
                if token == '+':
                    value = a + b
                elif token == '-':
                    value = a - b
                elif token == '*':
                    value = a * b
                else:
                    # Truncate toward zero: divide magnitudes, then apply sign.
                    value = abs(a) // abs(b)
                    if (a < 0) != (b < 0):
                        value = -value
                stack.append(value)
            else:
                stack.append(int(token))
        return stack[0]
|
"""\
Usage: python ChouFas_predictor.py <fasta_file>
Options:
<fasta_file> protein FASTA file
--help print help message
"""
import sys
from time import sleep
...
# Chou-Fasman Amino Acids Propensities Value For Helix (P(a) per residue)
helix = {"A":1.45, "C":0.77,"D":0.98,"E":1.53,"F":1.12,"G":0.53,"H":1.24,"I":1.00,"K":1.07,
        "L":1.34,"M":1.20,"N":0.73,"P":0.59,"Q":1.17,"R":0.79,"S":0.79,"T":0.82,"V":1.14,"W":1.14,"Y":0.61}
# Chou-Fasman Amino Acids Propensities Value For B-Sheet (P(b) per residue)
beta = {"A":0.97, "C":1.30,"D":0.80,"E":0.26,"F":1.28,"G":0.81,"H":0.71,"I":1.60,"K":0.74,
       "L":1.22,"M":1.67,"N":0.65,"P":0.62,"Q":1.23,"R":0.90,"S":0.72,"T":1.20,"V":1.65,"W":1.19,"Y":1.29}
# Chou-Fasman Amino Acids Propensities Value For B-Turn (P(t) per residue)
turn = {"A":0.66, "C":1.19,"D":1.46,"E":0.74,"F":0.60,"G":1.56,"H":0.95,"I":0.47,"K":1.01,
       "L":0.59,"M":0.60,"N":1.56,"P":1.52,"Q":0.98,"R":0.95,"S":1.43,"T":0.96,"V":0.50,"W":0.96,"Y":1.14}
# Bend frequencies: probability of each residue occupying position i, i+1,
# i+2, i+3 of a beta-turn; multiplied together by turn_predictor as p(t).
fi = {"A":0.060, "C":0.149,"D":0.147,"E":0.056,"F":0.059,"G":0.102,"H":0.140,"I":0.043,"K":0.055,
     "L":0.061,"M":0.068,"N":0.161,"P":0.102,"Q":0.074,"R":0.070,"S":0.120,"T":0.086,"V":0.062,"W":0.077,"Y":0.082}
fi1 = {"A":0.076, "C":0.053,"D":0.110,"E":0.060,"F":0.041,"G":0.085,"H":0.047,"I":0.034,"K":0.115,
      "L":0.025,"M":0.082,"N":0.083,"P":0.301,"Q":0.098,"R":0.106,"S":0.139,"T":0.108,"V":0.048,"W":0.013,"Y":0.065}
fi2 = {"A":0.035, "C":0.117,"D":0.179,"E":0.077,"F":0.065,"G":0.190,"H":0.093,"I":0.013,"K":0.072,
      "L":0.036,"M":0.014,"N":0.191,"P":0.034,"Q":0.037,"R":0.099,"S":0.125,"T":0.065,"V":0.028,"W":0.064,"Y":0.114}
fi3 = {"A":0.058, "C":0.128,"D":0.081,"E":0.064,"F":0.065,"G":0.152,"H":0.054,"I":0.056,"K":0.095,
      "L":0.070,"M":0.055,"N":0.091,"P":0.068,"Q":0.098,"R":0.085,"S":0.106,"T":0.079,"V":0.053,"W":0.167,"Y":0.125}
# Function Progress Bar
def progress(message):
    """Animate a 50-step ASCII progress bar on stdout, prefixed by *message*.

    Redraws the same line via '\\r'; takes ~1.5 s (51 frames, 30 ms apart).
    """
    for step in range(51):
        sys.stdout.write('\r')
        sys.stdout.write(message + "[%-50s] %d%%" % ('#' * step, 2 * step))
        sys.stdout.flush()
        sleep(0.03)
# Function for Beta-Turn Prediction
def turn_predictor(seq):
    """Predict beta-turn regions in *seq* (one-letter amino-acid string).

    Applies the Chou-Fasman turn rule to every 4-residue window: the window
    is a turn when p(t) > 0.000075, the mean turn propensity pi exceeds 1.0,
    and pi exceeds both the mean helix and mean sheet propensities.
    Returns a string of the same length: 'T' at turn positions, '-' elsewhere.
    """
    new_seq = seq
    for i in range(len(seq)):
        if (i+4) <= len(seq):
            # Mean turn propensity of the 4-residue window.
            pi = ((turn.get(seq[i])+turn.get(seq[i+1])+turn.get(seq[i+2])+turn.get(seq[i+3]))/4)
            # Positional turn probability p(t) = f(i)*f(i+1)*f(i+2)*f(i+3).
            pt = (fi.get(seq[i])*fi1.get(seq[i+1])*fi2.get(seq[i+2])*fi3.get(seq[i+3]))
            p_alpha = ((helix.get(seq[i])+helix.get(seq[i+1])+helix.get(seq[i+2])+helix.get(seq[i+3]))/4)
            p_beta = ((beta.get(seq[i])+beta.get(seq[i+1])+beta.get(seq[i+2])+beta.get(seq[i+3]))/4)
            if pt > 0.000075 and pi > 1.00 and p_alpha < pi > p_beta:
                # NOTE(review): str.replace rewrites the FIRST occurrence of
                # this 4-mer anywhere in the string, not necessarily the one
                # at index i — confirm this is intended for repeated motifs.
                new_seq = new_seq.replace(seq[i:i+4], "~~~~")
    # Mask every non-turn character, then mark the turn placeholders as 'T'.
    for x in new_seq:
        if x != "~":
            new_seq = new_seq.replace(x, "-")
    new_seq = new_seq.replace("~", "T")
    return(new_seq)
# Function for Helix Prediction
def helix_predictor(seq):
    """Predict alpha-helix regions in *seq* (one-letter amino-acid string).

    Finds 4-residue nucleation sites of helix formers ('HHHH' in the
    per-residue former/breaker string), extends each site away from breaker
    runs, and accepts a candidate region when its mean helix propensity
    exceeds 1.03 and its mean sheet propensity.
    Returns a string of the same length: 'H' in helices, '-' elsewhere.
    """
    probable_helix_index=[]
    temp_seq1 = ""
    prot_helix_prob = []
    prot_beta_prob = []
    # Per-residue propensities plus a former('H')/breaker('-') string.
    for i in range(0,len(seq)):
        prot_helix_prob.append(helix.get(seq[i]))
        prot_beta_prob.append(beta.get(seq[i]))
        # NOTE(review): a residue whose helix propensity is exactly 1.00
        # (e.g. 'I') matches neither branch, so temp_seq1 becomes shorter
        # than seq and later indices shift — verify this is intended.
        if helix.get(seq[i]) > 1:
            temp_seq1 += "H"
        elif helix.get(seq[i]) < 1:
            temp_seq1 += "-"
    # Nucleation sites (4 consecutive formers) and breaker runs (4 breakers).
    nucl_index = [i for i in range(len(temp_seq1)) if temp_seq1.startswith("HHHH", i)]
    break_index = [i for i in range(len(temp_seq1)) if temp_seq1.startswith("----", i)]
    # Extend each nucleation window [n1, n6) symmetrically toward breakers.
    for nucl in nucl_index:
        n1 = nucl
        n6 = nucl+6
        for brk in break_index:
            if brk > nucl and brk > n6:
                sum_in_index = brk - n6
                if n1-sum_in_index < 0:
                    probable_helix_index.append([0,n6+sum_in_index])
                else:
                    probable_helix_index.append([n1-sum_in_index,n6+sum_in_index])
            elif brk < nucl:
                sum_in_index = n1-brk-1
                if n6+sum_in_index > len(seq):
                    probable_helix_index.append([n1-sum_in_index,len(seq)])
                else:
                    probable_helix_index.append([n1-sum_in_index,n6+sum_in_index])
    # With no breaker runs at all, the whole sequence is one candidate.
    if break_index == []:
        probable_helix_index.append([0,len(seq)])
    # Deduplicate candidate regions, preserving order.
    probable_helix_index_drm = []
    for x in probable_helix_index:
        if x not in probable_helix_index_drm:
            probable_helix_index_drm.append(x)
    # Drop candidates that still contain a breaker run.
    probable_helix_index_f1 = []
    for data in probable_helix_index_drm:
        if "----" not in temp_seq1[data[0]:data[1]]:
            probable_helix_index_f1.append(data)
    # Accept regions whose mean helix propensity beats the threshold and beta.
    for data in probable_helix_index_f1:
        p_alpha = sum(prot_helix_prob[data[0]:data[1]]) / len(prot_helix_prob[data[0]:data[1]])
        p_beta = sum(prot_beta_prob[data[0]:data[1]]) / len(prot_beta_prob[data[0]:data[1]])
        if p_alpha > 1.03 and p_alpha > p_beta:
            # NOTE(review): replace acts on the FIRST matching substring,
            # which may not be the span at data[0]:data[1] — confirm.
            seq = seq.replace(seq[data[0]:data[1]],len(seq[data[0]:data[1]])*"~")
    # Mask non-helix characters, then mark helix placeholders as 'H'.
    for x in seq:
        if x != "~":
            seq = seq.replace(x, "-")
    seq = seq.replace("~", "H")
    return(seq)
# Function for B-sheet Prediction
def sheet_predictor(seq):
    """Predict beta-sheet regions in *seq* (one-letter amino-acid string).

    Mirrors helix_predictor with 3-residue nucleation sites ('BBB') and a
    1.05 acceptance threshold on the mean sheet propensity.
    Returns a string of the same length: 'B' in sheets, '-' elsewhere.
    """
    probable_beta_index=[]
    temp_seq1 = ""
    prot_helix_prob = []
    prot_beta_prob = []
    # Per-residue propensities plus a former('B')/breaker('-') string.
    for i in range(0,len(seq)):
        prot_helix_prob.append(helix.get(seq[i]))
        prot_beta_prob.append(beta.get(seq[i]))
        if beta.get(seq[i]) > 1:
            temp_seq1 += "B"
        elif beta.get(seq[i]) < 1:
            temp_seq1 += "-"
    # Nucleation sites (3 consecutive formers) and breaker runs (4 breakers).
    nucl_index = [i for i in range(len(temp_seq1)) if temp_seq1.startswith("BBB", i)]
    break_index = [i for i in range(len(temp_seq1)) if temp_seq1.startswith("----", i)]
    # Extend each nucleation window toward breaker runs.
    for nucl in nucl_index:
        n1 = nucl
        n3 = nucl+3
        n5 = nucl+5
        for brk in break_index:
            if brk > nucl and brk > n3:
                sum_in_index = brk - n3
                if n1-sum_in_index < 0:
                    probable_beta_index.append([0,n3+sum_in_index])
                else:
                    probable_beta_index.append([n1-sum_in_index,n3+sum_in_index])
            elif brk < nucl:
                sum_in_index = n1-brk-1
                if n3+sum_in_index > len(seq):
                    probable_beta_index.append([n1-sum_in_index,len(seq)])
                else:
                    probable_beta_index.append([n1-sum_in_index,n3+sum_in_index])
            # NOTE(review): the two branches below appear unreachable — any
            # brk satisfying (brk > nucl and brk > n5) already satisfies the
            # first condition (n5 > n3), and the final elif duplicates the
            # second. Verify before removing.
            elif brk > nucl and brk > n5:
                sum_in_index = brk - n5
                if n1-sum_in_index < 0:
                    probable_beta_index.append([0,n5+sum_in_index])
                else:
                    probable_beta_index.append([n1-sum_in_index,n5+sum_in_index])
            elif brk < nucl:
                sum_in_index = n1-brk-1
                if n5+sum_in_index > len(seq):
                    probable_beta_index.append([n1-sum_in_index,len(seq)])
                else:
                    probable_beta_index.append([n1-sum_in_index,n5+sum_in_index])
    # With no breaker runs at all, the whole sequence is one candidate.
    if break_index == []:
        probable_beta_index.append([0,len(seq)])
    # Deduplicate candidate regions, preserving order.
    probable_beta_index_drm = []
    for x in probable_beta_index:
        if x not in probable_beta_index_drm:
            probable_beta_index_drm.append(x)
    # Drop candidates that still contain a breaker run.
    probable_beta_index_f1 = []
    for data in probable_beta_index_drm:
        if "----" not in temp_seq1[data[0]:data[1]]:
            probable_beta_index_f1.append(data)
    # Accept regions whose mean sheet propensity beats the threshold and alpha.
    for data in probable_beta_index_f1:
        p_alpha = sum(prot_helix_prob[data[0]:data[1]]) / len(prot_helix_prob[data[0]:data[1]])
        p_beta = sum(prot_beta_prob[data[0]:data[1]]) / len(prot_beta_prob[data[0]:data[1]])
        if p_beta > 1.05 and p_beta > p_alpha:
            # NOTE(review): replace acts on the FIRST matching substring,
            # which may not be the span at data[0]:data[1] — confirm.
            seq = seq.replace(seq[data[0]:data[1]],len(seq[data[0]:data[1]])*"~")
    # Mask non-sheet characters, then mark sheet placeholders as 'B'.
    for x in seq:
        if x != "~":
            seq = seq.replace(x, "-")
    seq = seq.replace("~", "B")
    return(seq)
# Function to remove Overlaps
def grouper(olist):
    """Yield runs of nearby values from *olist* (assumed ascending).

    Successive items whose gap is <= 15 are grouped together; a larger gap
    starts a new group. Yields each group as a list; yields nothing for an
    empty input.
    """
    prev = None
    group = []
    for item in olist:
        # Bug fix: compare against None, not truthiness — the original
        # `if not prev` treated a previous item of 0 as "no previous item",
        # wrongly merging a group that starts at index 0 with what follows.
        if prev is None or item - prev <= 15:
            group.append(item)
        else:
            yield group
            group = [item]
        prev = item
    if group:
        yield group
def overlap(seq1, seq2, seq3):
    """Merge helix (seq1, 'H'/'-'), sheet (seq2, 'B'/'-') and turn
    (seq3, 'T'/'-') predictions into one per-residue string of H/B/T/-.

    Residues predicted as both helix and sheet are first marked '-', then
    resolved per run by comparing average Chou-Fasman propensities.
    NOTE(review): relies on the module-level globals ``seq`` (the protein
    sequence) and the propensity tables ``helix``/``beta`` — confirm they
    are populated before calling.
    """
    seq1 = list(seq1)
    seq2 = list(seq2)
    seq3 = list(seq3)
    nseq = ""
    overlist = []   # indices where seq1 says helix AND seq2 says sheet
    ilist = []      # NOTE(review): never used afterwards
    for x in range(0,len(seq1)):
        if seq1[x] == "H" and seq2[x] == "B":
            overlist.append(x)
            nseq += "-"   # conflict: placeholder, resolved below
        elif seq1[x] == "H" and seq2[x] =="-":
            nseq += "H"
        elif seq1[x] == "-" and seq2[x] =="B":
            nseq += "B"
        elif seq1[x] == "-" and seq2[x] =="-":
            nseq +="-"
    # overlay the turn prediction: 'T' takes precedence over H/B/-
    nseq = list(nseq)
    nseq1 = []
    for i in range(0,len(seq3)):
        if seq3[i] == "T":
            nseq1.append("T")
        else:
            nseq1.append(nseq[i])
    nseq2 = "".join(nseq1)
    # group neighbouring conflict indices (gap <= 15) into runs
    a = (list(grouper(overlist)))
    overlap_index = []
    for data in a:
        if len(data) > 1:
            overlap_index.append([data[0],data[-1]])
        else:
            overlap_index.append([data[0]])
    # per-residue helix/sheet propensity values for the whole protein
    prot_helix_prob = []
    prot_beta_prob = []
    for i in range(0,len(seq)):
        prot_helix_prob.append(helix.get(seq[i]))
        prot_beta_prob.append(beta.get(seq[i]))
    for data in overlap_index:
        if len(data) > 1:
            # resolve each conflicting run by the larger mean propensity
            p_alpha = sum(prot_helix_prob[data[0]:data[1]]) / len(prot_helix_prob[data[0]:data[1]])
            p_beta = sum(prot_beta_prob[data[0]:data[1]]) / len(prot_beta_prob[data[0]:data[1]])
            if p_alpha > p_beta:
                # NOTE(review): str.replace substitutes the FIRST matching
                # substring, which may not be located at data[0] — verify
                # this is the intended behavior for repeated patterns.
                nseq2 = nseq2.replace(nseq2[data[0]:data[1]],len(nseq2[data[0]:data[1]])*"H")
            elif p_beta > p_alpha:
                nseq2 = nseq2.replace(nseq2[data[0]:data[1]],len(nseq2[data[0]:data[1]])*"B")
            else:
                pass   # equal propensities: leave the run as '-'
    # re-apply the turn overlay after the replacements above
    nseq3 = []
    for i in range(0,len(seq3)):
        if seq3[i] == "T":
            nseq3.append("T")
        else:
            nseq3.append(nseq2[i])
    nseq4 = "".join(nseq3)
    return(nseq4)
# argv[1] is not defined -> show usage
if len(sys.argv) == 1:
    print (__doc__)
# argv[1] is --help
elif sys.argv[1] == "--help":
    print (__doc__)
# argv[1] is a fasta file
else:
    # reading the fasta file
    print("\nParsing Protein Sequence...\n")
    file_f = sys.argv[1]
    # NOTE(review): the handle is never closed; a `with` block would be safer
    fasta_file = open(file_f)
    fasta_rec = fasta_file.readlines()
    # separating header from sequence (multi-line bodies are concatenated)
    header =""
    seq =""
    for line in fasta_rec:
        if line[0:1] == ">":
            header = line
        else:
            seq += line.strip()
    print("Sequence Information")
    print("Sequence Origin: "+header.strip()[1:])
    print("Sequence Length:",len(seq),"\n")
    print("Job: Predicting Protein Secondary Structure Using Chou-Fasman Algorithm\n")
    # Running Everything: helix, sheet and turn predictions are computed
    # independently, then merged by overlap().
    print("|-------------------{ RUNNING CHOU-FASMAN ALGORITHM }-------------------|\n")
    print("Predicting Helical Segments...")
    print("Condition: { P_alpha > 1.03 and P_alpha > P_beta}\n")
    progress("Helix Prediction | ")
    h = helix_predictor(seq)
    print("\nHelix Prediction | Done\n")
    print("Predicting B-sheet Segments...")
    print("Condition: { P_beta > 1.05 and P_beta > P_alpha}\n")
    progress("B-sheet Prediction | ")
    s = sheet_predictor(seq)
    print("\nB-sheet Prediction | Done\n")
    print("Predicting β-Turn Segments...")
    print("Conditions: Pt > 0.000075 and pi > 1.00 and P_Helix < Pi > P_Beta-Sheet")
    print("Where: Pt = f(i)*f(i+1)*f(i+2)*f(i+3) & Pi = the average value of tetrapeptide for P(Turn)\n")
    progress("B-turn Prediction | ")
    t = turn_predictor(seq)
    print("\nB-turn Prediction | Done\n")
    print("|-------------------{ EXITING CHOU-FASMAN ALGORITHM }-------------------|\n")
    print("Output: Protein Sequence With Predicted Alpha-Helical Segments, Beta-Sheet Segments and Beta-Turns...\n")
    # Print with ANSI colors; fall back to plain text if anything goes wrong.
    try:
        fseq = overlap(h,s,t)
        fseq = fseq.replace("H",('\033[31m'+"H"+'\033[0m'))   # helix: red
        fseq = fseq.replace("B",('\033[34m'+"B"+'\033[0m'))   # sheet: blue
        fseq = fseq.replace("T",('\033[33m'+"T"+'\033[0m'))   # turn: yellow
        fseq = fseq.replace("-",('\033[32m'+"-"+'\033[0m'))   # coil: green
        print(fseq+"\n")
    except:  # NOTE(review): bare except kept to preserve the best-effort fallback
        fseq = overlap(h,s,t)
        print(fseq+"\n")
    ...
# python ChouFas_predictor.py <fasta_file>
### Reference:
### Prevelige, P. Jr. and Fasman, G.D., "Chou-Fasman Prediction of the
### Secondary Structure of Proteins," in Prediction of Protein Structure
### and The Priniciples of Protein Conformation (Fasman, G.D., ed.)
### Plenum Press, New York, pp. 391-416 (1989).
|
import cv2
import numpy as np
import argparse
import imutils
# Demo: rotate an image by 45, -90 and 180 degrees and display each result.
# Command-line interface: a single required --image path.
ap=argparse.ArgumentParser()
ap.add_argument('-i','--image',required=True,help='Path to the image')
args=vars(ap.parse_args())
# Load and show the original image.
# NOTE(review): cv2.imread returns None for an unreadable path, which would
# make image.shape fail below — the input path is assumed valid.
image=cv2.imread(args['image'])
cv2.imshow('Original',image)
# Rotate about the image centre with an affine warp.
(h,w)=image.shape[:2]
center=(w/2,h/2)
# 45 degrees counter-clockwise, no scaling
M=cv2.getRotationMatrix2D(center,45,1.0)
rotated=cv2.warpAffine(image,M,(w,h))
cv2.imshow('Rotated by 45 degrees',rotated)
# negative angle rotates clockwise
M=cv2.getRotationMatrix2D(center,-90,1.0)
rotated=cv2.warpAffine(image,M,(w,h))
cv2.imshow('Rotated by -90 degrees',rotated)
# imutils.rotate wraps the same matrix + warpAffine steps in one call
rotated=imutils.rotate(image,180)
cv2.imshow('Rotated by 180 degrees',rotated)
# Block until a key is pressed so the windows stay visible.
cv2.waitKey(0)
|
from rest_framework import permissions
class IsAdmin(permissions.BasePermission):
    """DRF permission: allow access only when ``request.user.is_admin`` is truthy."""
    def has_permission(self, request, view):
        return request.user.is_admin
class IsApothecary(permissions.BasePermission):
    """DRF permission: allow access only to users with the 'apothecary' role."""
    def has_permission(self, request, view):
        return request.user.role == 'apothecary'
class IsDoctor(permissions.BasePermission):
    """DRF permission: allow access only to users with the 'doctor' role."""
    def has_permission(self, request, view):
        # Fix: removed leftover debug print of request.user.role, which
        # leaked user data to stdout on every permission check.
        return request.user.role == 'doctor'
class IsApothecaryOrIsDoctor(permissions.BasePermission):
    """DRF permission: allow access to users with the 'apothecary' OR 'doctor' role."""
    def has_permission(self, request, view):
        # Bug fix: the original body was a bare ``return`` (returns None),
        # which DRF treats as falsy — every request was denied regardless
        # of role. Implement the check the class name promises.
        return request.user.role in ('apothecary', 'doctor')
|
import matplotlib.pyplot as plt
import math
import numpy as np
# Plot the Prelec-I probability weighting function
#     w(p) = exp(-(-ln p)^gamma)
# over p in (0, 1) for a range of gamma values.
y = []
p_range = np.arange(0.00001,0.99999,0.00001)
gamma_range = np.arange(0.1,3.0,0.3)
for gamma in gamma_range:
    y.append([])
    for p in p_range:
        # Fix: dropped the unused local ``q = 1.0 - p``.
        # log(1/p) == -log(p), so this is exp(-(-ln p)^gamma).
        weighted_p = math.exp(-1*(math.pow(math.log(1.0/p),gamma)))
        y[-1].append(weighted_p)
plt.figure()
plt.title("Prelec-I",fontsize=24)
plt.xlabel("p",fontsize=20)
plt.ylabel("Prelec(p)",fontsize=20)
for i,gamma in enumerate(gamma_range):
    # Fix: raw string — "\g" is an invalid escape sequence in a normal
    # string literal (DeprecationWarning, becoming an error); the rendered
    # mathtext label is unchanged.
    plt.plot(p_range,y[i],label=r"$\gamma$=%.2f"%(gamma))
plt.legend(fontsize=18)
plt.show()
|
#-*- coding=utf-8 -*-
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired, Length, Email, Regexp, EqualTo, DataRequired
from wtforms import ValidationError
from ..models import User
from flask import session
class LoginForm(FlaskForm):
    """Sign-in form: email + password with a stay-logged-in flag and captcha."""
    email = StringField(u'邮箱', validators=[DataRequired(), Length(1, 64),
                                           Email()])
    password = PasswordField(u'密码', validators=[DataRequired()])
    remember_me = BooleanField(u'保持登录', default=True)
    # Plain-text captcha field; the server-side check is currently disabled
    # (see the commented-out validator below).
    recaptcha = StringField(
        u'验证码', validators=[DataRequired(message=u'验证码不能为空')])
    submit = SubmitField(u'登陆')

    # def validate_recaptcha(self, field):
    #     if session.get('S_RECAPTCHA') != field.data.upper():
    #         raise ValidationError(u'验证码错误')
class RegistrationForm(FlaskForm):
    """Account sign-up form: optional invite code, unique email and username."""
    invitecode = StringField(u'邀请码-可有可无')
    email = StringField(u'邮箱', validators=[DataRequired(), Length(1, 64),
                                           Email()])
    # Username must start with a letter, then letters/digits/underscore/dot.
    username = StringField(u'用户名', validators=[
        DataRequired(), Length(1, 64), Regexp(u'^[A-Za-z][A-Za-z0-9_.]*$', 0,
                                              '用户名由字母、数字、下划线组成')])
    password = PasswordField(u'密码', validators=[
        DataRequired(), EqualTo('password2', message=u'重复密码必须相同')])
    password2 = PasswordField(u'确认密码', validators=[DataRequired()])
    recaptcha = StringField(
        u'验证码', validators=[DataRequired(message=u'验证码不能为空')])
    submit = SubmitField(u'注册')

    def validate_email(self, field):
        """WTForms inline validator: reject an email that is already registered."""
        if User.query.filter_by(email=field.data).first():
            raise ValidationError(u'邮箱已经被注册')

    def validate_username(self, field):
        """WTForms inline validator: reject a username that is already taken."""
        if User.query.filter_by(username=field.data).first():
            raise ValidationError(u'用户名已经被注册')

    # def validate_recaptcha(self, field):
    #     if session.get('S_RECAPTCHA') != field.data.upper():
    #         raise ValidationError(u'验证码错误')
class ChangePasswordForm(FlaskForm):
    """Password change form: old password plus new password entered twice."""
    old_password = PasswordField(u'旧密码', validators=[DataRequired()])
    password = PasswordField(u'新密码', validators=[
        DataRequired(), EqualTo('password2', message=u'重复密码必须相同')])
    password2 = PasswordField(u'确认新密码', validators=[DataRequired()])
    submit = SubmitField(u'更新密码')
class PasswordResetRequestForm(FlaskForm):
    """Request-a-password-reset form (step 1): email + captcha."""
    # NOTE(review): label is English here while sibling forms use Chinese —
    # possibly unintentional, left unchanged.
    email = StringField('Email', validators=[DataRequired(), Length(1, 64),
                                             Email()])
    recaptcha = StringField(
        u'验证码', validators=[DataRequired(message=u'验证码不能为空')])
    submit = SubmitField(u'重设密码')

    # def validate_recaptcha(self, field):
    #     if session.get('S_RECAPTCHA') != field.data.upper():
    #         raise ValidationError(u'验证码错误')
class PasswordResetForm(FlaskForm):
    """Password reset form (step 2): email, new password twice, captcha."""
    email = StringField(u'邮箱', validators=[DataRequired(), Length(1, 64),
                                           Email()])
    password = PasswordField(u'新密码', validators=[
        DataRequired(), EqualTo('password2', message=u'密码必须相同')])
    password2 = PasswordField(u'确认新密码', validators=[DataRequired()])
    recaptcha = StringField(
        u'验证码', validators=[DataRequired(message=u'验证码不能为空')])
    submit = SubmitField(u'重设密码')

    def validate_email(self, field):
        """WTForms inline validator: the email must belong to a known user."""
        if User.query.filter_by(email=field.data).first() is None:
            raise ValidationError(u'未知邮箱')

    # def validate_recaptcha(self, field):
    #     if session.get('S_RECAPTCHA') != field.data.upper():
    #         raise ValidationError(u'验证码错误')
class ConfirmForm(FlaskForm):
    """Resend-confirmation form: email (must exist) + captcha."""
    email = StringField(u'邮箱', validators=[DataRequired(), Length(1, 64),
                                           Email()])
    recaptcha = StringField(
        u'验证码', validators=[DataRequired(message=u'验证码不能为空')])
    submit = SubmitField(u'提交')

    def validate_email(self, field):
        """WTForms inline validator: the email must belong to a known user."""
        if User.query.filter_by(email=field.data).first() is None:
            raise ValidationError(u'未知邮箱')

    # def validate_recaptcha(self, field):
    #     if session.get('S_RECAPTCHA') != field.data.upper():
    #         raise ValidationError(u'验证码错误')
|
from django.urls import path, include
from .views import FavouriteProducts, add_favourite_product
# URL routes for the favourite-products API.
urlpatterns = [
    # List endpoint (class-based view).
    path('favourite-products', FavouriteProducts.as_view(),name='favourite-products'),
    # Add endpoint (function-based view).
    path('add_favourite_product', add_favourite_product,name='add_favourite_product'),
]
|
#
# @lc app=leetcode.cn id=515 lang=python3
#
# [515] 在每个树行中找最大值
#
# @lc code=start
# Definition for a binary tree node.
class TreeNode:
    """A binary-tree node: a value plus optional left/right child links."""

    def __init__(self, val=0, left=None, right=None):
        # Defaults mirror the standard LeetCode-provided definition.
        self.val, self.left, self.right = val, left, right
class Solution:
    def largestValues(self, root: "Optional[TreeNode]") -> "List[int]":
        """Return the largest value in each row (level) of a binary tree.

        Breadth-first search: one pass per level, tracking that level's
        maximum while collecting the next level's nodes.

        Fix: the annotations are now string literals — ``Optional``/``List``
        (from ``typing``) were never imported, so the unquoted annotations
        raised ``NameError`` at class-definition time when this file was run
        outside the LeetCode harness. (The commented-out DFS variant was
        removed as dead code.)
        """
        if not root:
            return []
        res = []
        # start with the first level
        queue = [root]
        while queue:
            max_tmp = float("-inf")
            q_tmp = queue
            queue = []
            # scan the current level: update its maximum and gather children
            for node in q_tmp:
                max_tmp = max(max_tmp, node.val)
                if node.left:
                    queue.append(node.left)
                if node.right:
                    queue.append(node.right)
            res.append(max_tmp)
        return res
# @lc code=end
|
# Script to create RBAC roles-to-rights map for IEEE 1547-2018 points. RBAC roles and rights are created based on the
# recommendations in J. Johnson, “Recommendations for Distributed Energy Resource Access Control,”
# Sandia Technical Report SAND2021-0977, 2021.
#
# Comments to jjohns2@sandia.gov
import json
import os
read_path = os.getcwd() + os.path.sep + 'json'
write_path = os.getcwd() + os.path.sep + 'rbac_json'
# print(read_path)

# RBAC roles per the SAND2021-0977 recommendations.
rbac_roles = [
    'der_owner',
    'installer',
    'der_vendor_or_service_provider',
    '3rd_party_or_aggregator',
    'utility_or_dso',
    'iso_rto_tso',
    'security_administrator',
    'security_auditor',
    'rbac_administrator',
]

# Rights maps: role name -> '' (no access), 'R' (read) or 'RW' (read/write).
rbac_no_write = {'der_owner': 'R',
                 'installer': 'R',
                 'der_vendor_or_service_provider': 'R',
                 '3rd_party_or_aggregator': 'R',
                 'utility_or_dso': 'R',
                 'iso_rto_tso': 'R',
                 'security_administrator': 'R',
                 'security_auditor': 'R',
                 'rbac_administrator': 'R'}

rbac_read = {'der_owner': 'R',
             'installer': 'R',
             'der_vendor_or_service_provider': 'R',
             '3rd_party_or_aggregator': 'R',
             'utility_or_dso': 'R',
             'iso_rto_tso': 'R',
             'security_administrator': '',
             'security_auditor': '',
             'rbac_administrator': ''}

rbac_install_and_control = {'der_owner': 'R',
                            'installer': 'RW',
                            'der_vendor_or_service_provider': 'R',
                            '3rd_party_or_aggregator': 'RW',
                            'utility_or_dso': 'RW',
                            'iso_rto_tso': 'RW',
                            'security_administrator': '',
                            'security_auditor': '',
                            'rbac_administrator': ''}

rbac_grid_control = {'der_owner': '',
                     'installer': 'R',
                     'der_vendor_or_service_provider': 'R',
                     '3rd_party_or_aggregator': 'RW',
                     'utility_or_dso': 'RW',
                     'iso_rto_tso': 'R',
                     'security_administrator': '',
                     'security_auditor': '',
                     'rbac_administrator': ''}

rbac_ride_through = {'der_owner': '',
                     'installer': 'RW',
                     'der_vendor_or_service_provider': 'R',
                     '3rd_party_or_aggregator': 'RW',
                     'utility_or_dso': 'RW',
                     'iso_rto_tso': 'R',
                     'security_administrator': '',
                     'security_auditor': '',
                     'rbac_administrator': ''}

def add_rbac(model, pt):
    """Attach an RBAC rights map to SunSpec point *pt* based on its model.

    Parameters
    ----------
    model : int
        SunSpec model number (1 or 701-713).
    pt : dict
        A SunSpec point definition; mutated in place by adding a 'rbac' key.
    """
    name = pt.get('name') or ''  # guard: a missing name no longer crashes
    # SunSpec framing registers (model ID/length, pad) and scale factors are
    # never writable, regardless of model.
    # Bug fix: in the original these checks were plain ``if`` statements, so
    # the ``rbac_no_write`` assignment for 'ID'/'L'/'Pad' was immediately
    # overwritten by the model-specific branches below.
    if name in ('ID', 'L') or pt.get('label') == 'Pad' or name.endswith('_SF'):
        pt['rbac'] = rbac_no_write
    else:
        if model == 1:  # Common
            pt['rbac'] = rbac_read
        elif model == 701:  # DERMeasureAC
            pt['rbac'] = rbac_read
        elif model == 702:  # DERCapacity
            if name[-3:] == 'Rtg' or name == 'CtrlModes' or name == 'IntIslandCat':
                pt['rbac'] = rbac_read
            else:  # Configuration/Settings
                pt['rbac'] = rbac_install_and_control
        elif model == 703:  # DEREnterService
            if name == 'ES':
                pt['rbac'] = {'der_owner': 'R',
                              'installer': 'RW',
                              'der_vendor_or_service_provider': 'R',
                              '3rd_party_or_aggregator': 'R',
                              'utility_or_dso': 'RW',
                              'iso_rto_tso': 'RW',
                              'security_administrator': '',
                              'security_auditor': '',
                              'rbac_administrator': ''}
            else:
                pt['rbac'] = rbac_grid_control
        elif model == 704:  # DERCtlAC
            pt['rbac'] = rbac_grid_control
        elif model == 705:  # DERVoltVar
            pt['rbac'] = rbac_grid_control
        elif model == 706:  # DERVoltWatt
            pt['rbac'] = rbac_grid_control
        elif model in (707, 708, 709, 710):  # DERTripLV/HV/LF/HF
            pt['rbac'] = rbac_ride_through
        elif model == 711:  # DERFreqDroop
            pt['rbac'] = rbac_grid_control
        elif model == 712:  # DERWattVar
            pt['rbac'] = rbac_grid_control
        elif model == 713:  # DERMeasureDC
            pt['rbac'] = rbac_read
    print(pt)
def main():
    """Read each SunSpec model JSON, attach an RBAC map to every point (at
    any nesting depth), and write the result into the rbac_json directory."""
    # Only for the IEEE 1547 models right now
    for model in [1] + list(range(701, 714, 1)):
        with open(read_path + os.path.sep + 'model_%s.json' % model) as f:
            data = json.load(f)
            # print(json.dumps(data, indent=4, sort_keys=True))
            print('-' * 40)
            print('Model: %s [%s]' % (model, data.get('group').get('label')))
            print('-' * 40)
            # top-level points of the model
            for pt in data.get('group').get('points'):
                add_rbac(model, pt)
            # address groups separately (nested up to three levels deep)
            if data.get('group').get('groups') is not None:
                for group in data.get('group').get('groups'):
                    for pt1 in group.get('points'):
                        add_rbac(model, pt1)
                    if group.get('groups') is not None:
                        for group2 in group.get('groups'):  # groups of groups, e.g., VV curves
                            # print("Group2 = %s" % group2['name'])
                            for pt2 in group2.get('points'):
                                add_rbac(model, pt2)
                            if group2.get('groups') is not None:
                                # print('LAYER 3 Group2.get(groups): %s' % group2.get('groups'))
                                for group3 in group2.get('groups'):  # layer 3 groups, e.g., LVRT curve points
                                    # print("Group3 = %s" % group3['name'])
                                    for pt3 in group3.get('points'):
                                        # print('Level 3 %s' % pt3['name'])
                                        add_rbac(model, pt3)
            # Write the python dict as new json file
            with open(write_path + os.path.sep + 'model_%s.json' % model, 'w') as json_file:
                json.dump(data, json_file, indent=4)

if __name__ == '__main__':
    main()
|
import os
if __name__ == "__main__":
# For direct call only
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import random
import pytest
import math
import glob
import time
import numpy as np
import pylo
# python <3.6 does not define a ModuleNotFoundError, use this fallback
from pylo import FallbackModuleNotFoundError
from pylotestlib import DummyView
from pylotestlib import DummyViewShowsError
def remove_dirs(directories=None):
    """Remove all given directories recursively with files inside.

    Parameters
    ----------
    directories : list or tuple of str, optional
        The directories to delete; when omitted (or not a list/tuple), all
        ``tmp-test-controller-*`` directories in the test root are removed.
    """
    if not isinstance(directories, (list, tuple)):
        directories = glob.glob(os.path.join(test_root, "tmp-test-controller-*"))

    for directory in directories:
        if os.path.exists(directory):
            directory = str(directory)

            for f in os.listdir(directory):
                path = os.path.join(directory, f)

                if os.path.isfile(path):
                    os.remove(path)
                elif os.path.isdir(path):
                    # Bug fix: the original called ``remove_dirs((path), )``,
                    # which passes a plain string (``(path)`` is not a tuple).
                    # The recursion then fell into the glob branch instead of
                    # deleting ``path``, and the ``os.removedirs`` below
                    # failed on the still-non-empty directory.
                    remove_dirs([path])

            # removes the (now empty) directory and any empty parents
            os.removedirs(directory)
# Resolve repository paths relative to this test file.
root = os.path.dirname(os.path.dirname(__file__))
pylo_root = os.path.join(root, "pylo")
test_root = os.path.join(os.path.dirname(__file__))

# clear all test directories left over from previous runs
remove_dirs()

# fresh, randomly named tmp directory for this test session
controller_tmp_path = os.path.join(test_root, "tmp-test-controller-{}".format(random.randint(0, 30)))
os.makedirs(controller_tmp_path, exist_ok=True)

# Redirect pylo's default output files into the tmp directory so tests do
# not touch the real configuration/log locations.
pylo.config.DEFAULT_SAVE_FILE_NAME = "{counter}-test-measurement.tif"
pylo.config.DEFAULT_LOG_PATH = os.path.join(controller_tmp_path, "measurement.log")
pylo.config.DEFAULT_INI_PATH = os.path.join(controller_tmp_path, "configuration.ini")

from pylotestlib import DummyConfiguration
class DummyImage(pylo.Image):
    """Image stub whose saveTo() is a no-op so tests never write image files."""
    def saveTo(self, *args, **kwargs):
        pass
# Module-level flag: when True, DummyCamera returns DummyImage objects
# (no files written); toggled by init_start_program_test().
use_dummy_images = False
class DummyCamera(pylo.CameraInterface):
    """Camera stub: records call timestamps and returns random 5x5 images."""

    def __init__(self, controller, *args, **kwargs):
        super().__init__(controller, *args, **kwargs)
        self.reset()

    def reset(self):
        # init_time: creation/reset timestamp;
        # recorded_images: one timestamp per recordImage() call
        self.init_time = time.time()
        self.recorded_images = []

    def recordImage(self, *args, **kwargs):
        """Return a random 5x5 uint8 image (a DummyImage if use_dummy_images)."""
        self.recorded_images.append(time.time())
        img_data = (np.random.rand(5, 5) * 255).astype(dtype=np.uint8)
        args = (img_data, {"dummy-tag": True})

        if use_dummy_images:
            # DummyImage.saveTo() is a no-op, so nothing is written to disk
            return DummyImage(*args)
        else:
            return pylo.Image(*args)

    def resetToSafeState(self):
        # nothing to do for the dummy camera
        pass
# Register DummyCamera with pylo's device loader so tests can select it by name.
pylo.loader.addDeviceFromFile("camera", "DummyCamera", __file__, "DummyCamera")
class DummyMicroscope(pylo.MicroscopeInterface):
    """Microscope stub: records every variable change with a timestamp."""

    def __init__(self, controller, *args, **kwargs):
        super().__init__(controller, *args, **kwargs)
        self.reset()

    def reset(self):
        # init_time: creation/reset timestamp;
        # performed_steps: (variable id, value, timestamp) per set call
        self.init_time = time.time()
        self.performed_steps = []
        # one dummy variable in [-1, 1] is enough for the controller tests
        self.supported_measurement_variables = [
            pylo.MeasurementVariable(
                "measurement-var", "Dummy Measurement Variable", -1, 1, "unit"
            )
        ]

    def setInLorentzMode(self, lorentz_mode):
        # the dummy has no real lorentz mode
        pass

    def setMeasurementVariableValue(self, id_, value):
        """Record the step; optionally sleep to simulate a slow microscope."""
        self.performed_steps.append((id_, value, time.time()))

        # tests can set controller.measurement_duration_time to make each
        # step take a deterministic amount of time
        if (hasattr(self.controller, "measurement_duration_time") and
            self.controller.measurement_duration_time >= 0):
            time.sleep(self.controller.measurement_duration_time)

    def getMeasurementVariableValue(self, id_):
        """Return the most recently set value for *id_*, or None if unset."""
        for k, v, t in reversed(self.performed_steps):
            if id_ == k:
                return v

        return None

    def resetToSafeState(self):
        # nothing to do for the dummy microscope
        pass
# Register DummyMicroscope with pylo's device loader.
pylo.loader.addDeviceFromFile("microscope", "DummyMicroscope", __file__,
                              "DummyMicroscope")

# add some invalid devices: a valid class in a missing file and a missing
# class in this file, for both device kinds (exercises loader error paths)
pylo.loader.addDeviceFromFile("microscope", "NoFileDummyMicroscope",
                              "filedoesnotexist.py", "DummyMicroscope")
pylo.loader.addDeviceFromFile("microscope", "NoClassDummyMicroscope", __file__,
                              "ClassDoesNotExist")
pylo.loader.addDeviceFromFile("camera", "NoFileDummyCamera",
                              "filedoesnotexist.py", "DummyCamera")
pylo.loader.addDeviceFromFile("camera", "NoClassDummyCamera", __file__,
                              "ClassDoesNotExist")
# Parametrization data for the getConfigurationValue(s)OrAsk tests: each
# entry is a tuple of lookup dicts with group/key/value and, optionally,
# a datatype and/or description.
configuration_test_setup = [
    ({"group": "test-group", "key": "test-key", "value": "test"}, ),
    ({"group": "test-group2", "key": "test-key", "value": False}, ),
    ({"group": "test-group3", "key": "test-key3", "value": 1},
     {"group": "test-group3", "key": "test-key14", "value": 2},
     {"group": "test-group4", "key": "test-key13", "value": 3},
     {"group": "test-group3", "key": "test-key16", "value": 4},
     {"group": "test-group2", "key": "test-key14", "value": 5}),
    ({"group": "test-group2", "key": "test-key2", "description": "descr",
      "datatype": bool, "value": False}, ),
    ({"group": "test-group2", "key": "test-key2",
      "datatype": pylo.Datatype.options((1, 1.1, 1.2, 1.3)), "value": 1.2}, ),
    ({"group": "test-group2", "key": "test-key2", "description": "descr2",
      "value": "test2"}, ),
    ({"group": "test-group3", "key": "test-key3", "description": "d1",
      "datatype": pylo.Datatype.options(("a", "b", "c")), "value": "a"},
     {"group": "test-group3", "key": "test-key4", "description": "d2",
      "datatype": pylo.Datatype.options((1, 2, 3, 4, 5, 6, 7)), "value": 1},
     {"group": "test-group4", "key": "test-key3", "description": "d3",
      "datatype": pylo.Datatype.options((0.1, 0.2, 0.3, 0.5)), "value": 0.3},
     {"group": "test-group3", "key": "test-key5",
      "datatype": bool, "value": True})
]
@pytest.fixture()
def controller():
    """Yield a fresh pylo.Controller using dummy view/configuration; clear
    all pylo events and reset the dummies on teardown."""
    configuration = DummyConfiguration()
    configuration.reset()
    controller = pylo.Controller(DummyView(), configuration)

    yield controller

    # clear events so handlers do not leak into the next test
    pylo.before_start.clear()
    pylo.before_init.clear()
    pylo.init_ready.clear()
    pylo.user_ready.clear()
    pylo.series_ready.clear()
    pylo.microscope_ready.clear()
    pylo.before_record.clear()
    pylo.after_record.clear()
    pylo.measurement_ready.clear()

    controller.view.reset()
    controller.configuration.reset()
class TestController:
    @classmethod
    def teardown_class(cls):
        """Remove all temporary test directories after the class finishes."""
        remove_dirs()
    @pytest.mark.usefixtures("controller")
    @pytest.mark.parametrize("lookup", configuration_test_setup)
    def test_get_configuration_value_or_ask_value_exists(self, controller, lookup):
        """Test if the getConfigurationValueOrAsk() function returns the correct
        values in the correct order if the values are given."""
        # every lookup entry exists in the configuration -> nothing is asked
        self.check_get_configuration_value_or_ask(controller, lookup,
                                                  [True] * len(lookup))
    @pytest.mark.usefixtures("controller")
    @pytest.mark.parametrize("lookup", configuration_test_setup)
    def test_get_configuration_value_or_ask_value_not_exists(self, controller, lookup):
        """Test if the getConfigurationValueOrAsk() function asks for the
        values if they do not exist."""
        # no lookup entry exists in the configuration -> every one is asked
        self.check_get_configuration_value_or_ask(controller, lookup,
                                                  [False] * len(lookup))
    @pytest.mark.usefixtures("controller")
    @pytest.mark.parametrize("lookup", configuration_test_setup)
    def test_get_configuration_value_or_ask_value_partly_exists(self, controller, lookup):
        """Test if the getConfigurationValueOrAsk() function asks for the
        missing values if some exist and others do not."""
        if len(lookup) == 1:
            # a single entry: choose randomly whether it exists
            exist = [random.random() >= 0.5]
        else:
            # make sure there is at least one time True and one time False
            exist = [True, False]
            # add more random values
            exist += [random.random() >= 0.5 for i in range(len(lookup) - 2)]
            # randomize order
            random.shuffle(exist)

        self.check_get_configuration_value_or_ask(controller, lookup, exist)
    def check_get_configuration_value_or_ask(self, controller, lookup, exist_in_config):
        """Perform the test for the getConfigurationValuesOrAsk() function.

        Seeds the configuration (or the view's ask responses) per
        ``exist_in_config``, calls the function, and verifies that exactly
        the missing entries were asked for — with the right datatype,
        description and options — and that all returned values are correct.

        Parameters
        ----------
        controller : Controller
            The controller
        lookup : list of dicts
            The lookup dict
        exist_in_config : list of bool
            A list whether the lookup should exist in the configuration or not
        """
        # create the parameter for the getConfigurationValuesOrAsk() function
        config_lookup = []

        # prepare the configuration and the config_lookup
        for lookup_dir, exists in zip(lookup, exist_in_config):
            if exists:
                # set the value
                controller.configuration.setValue(lookup_dir["group"],
                                                  lookup_dir["key"],
                                                  lookup_dir["value"])
            else:
                # define the configuration options so the datatype etc. are
                # known
                args = {}
                if "datatype" in lookup_dir:
                    args["datatype"] = lookup_dir["datatype"]
                if "description" in lookup_dir:
                    args["description"] = lookup_dir["description"]

                controller.configuration.addConfigurationOption(
                    lookup_dir["group"],
                    lookup_dir["key"],
                    **args
                )

                # set the responses for the view ask
                controller.view.ask_for_response.append(((lookup_dir["key"],
                                                          lookup_dir["group"]),
                                                         lookup_dir["value"]))

            l = [lookup_dir["group"], lookup_dir["key"]]
            if "options" in lookup_dir:
                l.append(lookup_dir["options"])

            config_lookup.append(l)

        # get the values
        values = controller.getConfigurationValuesOrAsk(*config_lookup)

        # ask_counter tracks the position in view.inputs: only missing
        # entries trigger an ask, so it advances independently of i
        ask_counter = 0
        for i, (l, e) in enumerate(zip(lookup, exist_in_config)):
            if not e:
                # check if the key and the group were asked for
                assert isinstance(controller.view.inputs[ask_counter]["name"], str)
                assert l["key"] in controller.view.inputs[ask_counter]["name"]
                assert l["group"] in controller.view.inputs[ask_counter]["name"]

                # check if the datatype was passed if there is one
                if "datatype" in l:
                    assert "datatype" in controller.view.inputs[ask_counter]
                    assert l["datatype"] == controller.view.inputs[ask_counter]["datatype"]

                # check if the description was passed if there is one
                if "description" in l:
                    assert "description" in controller.view.inputs[ask_counter]
                    assert l["description"] == controller.view.inputs[ask_counter]["description"]

                # check if the options was passed if there are some
                if "options" in l:
                    assert "options" in controller.view.inputs[ask_counter]
                    assert (tuple(l["options"]) ==
                            tuple(controller.view.inputs[ask_counter]["options"]))

                ask_counter += 1

            # check if the value exists (now) in the configuration
            assert values[i] == controller.configuration.getValue(l["group"],
                                                                  l["key"])
            # check if the returned value is correct
            assert values[i] == l["value"]
    def before_init_handler(self, *args):
        """The event handler for the before_init event; records the fire time."""
        self.before_init_times.append(time.time())
    def init_ready_handler(self, *args):
        """The event handler for the init_ready event; records the fire time."""
        self.init_ready_times.append(time.time())
    def user_ready_handler(self, *args):
        """The event handler for the user_ready event; records the fire time."""
        self.user_ready_times.append(time.time())
    def series_ready_handler(self, *args):
        """The event handler for the series_ready event; records the fire time."""
        self.series_ready_times.append(time.time())
def init_start_program_test(self, controller, save_path, save_files=True,
change_save_path=True, change_microscope=True,
change_camera=True, before_start=None,
wait_for_finish=True):
"""Initialize for testing the startProgramLoop() function and execute
startProgramLoop() function.
Parameters
----------
controller : Controller
The controller to start the program loop of
save_files : bool
Whether to save the files, this changes the `DummyCamera` to use
`DummyImage`s or normal `pylo.Image`s, the `DummyImage`s do not
have a (valid) `Image::saveTo()` function.
change_save_path : bool
Whether to change the path of the images to save to the test tmp
dir or not
change_microscope : bool
Whether to change the 'microscope-module' and 'microscope-class'
configurations so the `DummyMicroscope` in this file will be used
change_camera : bool
Whether to change the 'camera-module' and 'camera-class'
configurations so the `DummyCamera` in this file will be used
before_start : callable
Executed right before the program loop is started
wait_for_finish : bool
Whether to wait until the program has finished
"""
global use_dummy_images
# prepare event time storage
self.before_init_times = []
self.init_ready_times = []
self.user_ready_times = []
self.series_ready_times = []
self.series_ready_times = []
# clear events
pylo.before_init.clear()
pylo.init_ready.clear()
pylo.user_ready.clear()
pylo.series_ready.clear()
# add event handlers
pylo.before_init["test_controller_handler"] = self.before_init_handler
pylo.init_ready["test_controller_handler"] = self.init_ready_handler
pylo.user_ready["test_controller_handler"] = self.user_ready_handler
pylo.series_ready["test_controller_handler"] = self.series_ready_handler
if change_microscope:
# define the microscope to use
controller.configuration.setValue(
pylo.controller.CONFIG_DEVICE_GROUP, "microscope",
"DummyMicroscope")
if change_camera:
# define the camera to use
controller.configuration.setValue(
pylo.controller.CONFIG_DEVICE_GROUP, "camera",
"DummyCamera")
if change_save_path:
controller.configuration.setValue("measurement", "save-directory", save_path)
controller.configuration.setValue("measurement", "save-file-format", "{counter}-dummy-img.tif")
use_dummy_images = not save_files
if callable(before_start):
before_start()
self.start_time = time.time()
controller.startProgramLoop()
if wait_for_finish:
controller.waitForProgram()
    def raise_stop_program(self, *args, **kwargs):
        """Raise a StopProgram Exception (used as an event handler to abort)."""
        raise pylo.StopProgram
    @pytest.mark.usefixtures("controller")
    def test_measurement_save_paths_default(self, tmp_path, controller):
        """Test if the save directory and file name fall back to pylo's
        configured defaults when not set explicitly."""
        # prevent caring about camera or microscope
        controller.camera = DummyCamera(controller)
        controller.microscope = DummyMicroscope(controller)

        # change_save_path=False: do not write the save settings, so the
        # measurement must use the defaults
        self.init_start_program_test(controller, tmp_path, save_files=False,
                                     change_save_path=False)

        assert (controller.measurement.save_dir ==
                pylo.config.DEFAULT_SAVE_DIRECTORY)
        assert (controller.measurement.name_format ==
                pylo.config.DEFAULT_SAVE_FILE_NAME)
    @pytest.mark.usefixtures("controller")
    def test_measurement_save_paths_custom(self, tmp_path, controller):
        """Test if the save directory and file name can be modified correctly
        by changing the settings."""
        # prevent caring about camera or microscope
        controller.camera = DummyCamera(controller)
        controller.microscope = DummyMicroscope(controller)

        name_format = "{counter}-dummy-file.tif"
        tmp_path = str(tmp_path)

        # set the save settings directly (not via init_start_program_test)
        controller.configuration.setValue(
            "measurement", "save-directory", tmp_path
        )
        controller.configuration.setValue(
            "measurement", "save-file-format", name_format
        )

        self.init_start_program_test(controller, tmp_path, save_files=False,
                                     change_save_path=False)

        # realpath: compare resolved paths to be symlink-insensitive
        assert (os.path.realpath(controller.measurement.save_dir) ==
                os.path.realpath(tmp_path))
        assert controller.measurement.name_format == name_format
    @pytest.mark.usefixtures("controller")
    def test_event_times(self, tmp_path, controller):
        """Test if all events are fired, test if the events are fired in the
        correct order."""
        # prevent caring about camera or microscope
        controller.camera = DummyCamera(controller)
        controller.microscope = DummyMicroscope(controller)

        self.init_start_program_test(controller, tmp_path)

        # check if all events are executed exactly one time
        assert len(self.before_init_times) == 1
        assert len(self.init_ready_times) == 1
        assert len(self.user_ready_times) == 1
        assert len(self.series_ready_times) == 1

        # check the time order of the events:
        # start <= before_init <= init_ready <= user_ready <= series_ready
        assert self.start_time <= min(self.before_init_times)
        assert max(self.before_init_times) <= min(self.init_ready_times)
        assert max(self.init_ready_times) <= min(self.user_ready_times)
        assert max(self.user_ready_times) <= min(self.series_ready_times)

        # test if the init event is fired before the microscope and camera are
        # created
        assert controller.microscope.init_time <= self.init_ready_times[0]
        assert controller.camera.init_time <= self.init_ready_times[0]
    @pytest.mark.usefixtures("controller")
    def test_microscope_from_configuration(self, tmp_path, controller):
        """Test if the microscope is asked from the configuration."""
        try:
            self.init_start_program_test(controller, tmp_path)
        except Exception:
            # exception is thrown since no camera is found; the microscope
            # lookup has already happened by then, which is all we verify
            pass

        # contains the request with group at index 0 and key at index 1
        requests = [r[:2] for r in controller.configuration.request_log]

        # check if microscope is asked from the configuration
        assert (pylo.controller.CONFIG_DEVICE_GROUP, "microscope") in requests
    @pytest.mark.usefixtures("controller")
    def test_camera_from_configuration(self, tmp_path, controller):
        """Test if the camera is asked from the configuration."""
        try:
            self.init_start_program_test(controller, tmp_path)
        except Exception:
            # exception is thrown since no microscope is found; the camera
            # lookup has already happened by then, which is all we verify
            pass

        # contains the request with group at index 0 and key at index 1
        requests = [r[:2] for r in controller.configuration.request_log]

        # check if camera is asked from the configuration
        assert (pylo.controller.CONFIG_DEVICE_GROUP, "camera") in requests
    @pytest.mark.usefixtures("controller")
    def test_microscope_and_camera_are_valid(self, tmp_path, controller):
        """Test if microscope and camera are valid objects."""
        self.init_start_program_test(controller, tmp_path)

        # check microscope and camera are valid
        # this test does not work, objects have different classes because they
        # are loaded differently, does not matter in the "real" application
        # assert isinstance(controller.microscope, DummyMicroscope)
        # assert isinstance(controller.camera, DummyCamera)

        # fall back to checking the defining module file name and class name
        assert controller.microscope.__class__.__module__ in os.path.basename(__file__)
        assert controller.microscope.__class__.__name__ == "DummyMicroscope"
        assert controller.camera.__class__.__module__ in os.path.basename(__file__)
        assert controller.camera.__class__.__name__ == "DummyCamera"
    @pytest.mark.usefixtures("controller")
    def test_show_create_measurement_is_executed(self, tmp_path, controller):
        """Test whether the view is instructed to show the create measurement
        view."""
        self.init_start_program_test(controller, tmp_path)

        # shown exactly one time
        assert len(controller.view.shown_create_measurement_times) == 1

        # shown in the correct time order: after init_ready and both device
        # creations, before user_ready
        assert (self.init_ready_times[0] <=
                controller.view.shown_create_measurement_times[0])
        assert (controller.view.shown_create_measurement_times[0] <=
                self.user_ready_times[0])
        assert (controller.microscope.init_time <=
                controller.view.shown_create_measurement_times[0])
        assert (controller.camera.init_time <=
                controller.view.shown_create_measurement_times[0])
@pytest.mark.usefixtures("controller")
def test_measurement_is_valid(self, tmp_path, controller):
    """Test whether a valid measurement object is received (the measurement
    object creation function is tested in test_measurement.py)."""
    self.init_start_program_test(controller, tmp_path)

    # the controller holds a real Measurement instance after the run
    assert isinstance(controller.measurement, pylo.Measurement)
@pytest.mark.usefixtures("controller")
def test_series_ready_after_measurement_is_created(self, tmp_path, controller):
    """Test if the series_ready event is fired after the measurement is
    ready."""
    self.init_start_program_test(controller, tmp_path)

    # contains the request with group at index 0 and key at index 1
    requests = [r[:2] for r in controller.configuration.request_log]
    # the time of the save-directory request is used as a proxy for the
    # measurement creation time (index 2 of a log entry is the time) --
    # presumably this value is requested while the measurement is created
    index = requests.index(("measurement", "save-directory"))
    measurement_time = controller.configuration.request_log[index][2]

    # series_ready fired only after the measurement was created
    assert measurement_time <= self.series_ready_times[0]
def microscope_ready_handler(self, *args):
    """Record the time at which the microscope_ready event fired."""
    now = time.time()
    self.microscope_ready_times.append(now)
def before_record_handler(self, *args):
    """Record the time at which the before_record event fired."""
    now = time.time()
    self.before_record_times.append(now)
def after_record_handler(self, *args):
    """Record the time at which the after_record event fired."""
    now = time.time()
    self.after_record_times.append(now)
def measurement_ready_handler(self, *args):
    """Record the time at which the measurement_ready event fired."""
    now = time.time()
    self.measurement_ready_times.append(now)
@pytest.mark.usefixtures("controller")
def test_by_event_measurement_is_started(self, tmp_path, controller):
    """Test if the measurement fires the events which means it has started."""

    # clear events so no handlers from other tests are left over
    pylo.microscope_ready.clear()
    pylo.before_record.clear()
    pylo.after_record.clear()
    pylo.measurement_ready.clear()

    # clear time logs
    self.microscope_ready_times = []
    self.before_record_times = []
    self.after_record_times = []
    self.measurement_ready_times = []

    # bind handlers that log the firing time of each event
    pylo.microscope_ready["test_controller_handler"] = self.microscope_ready_handler
    pylo.before_record["test_controller_handler"] = self.before_record_handler
    pylo.after_record["test_controller_handler"] = self.after_record_handler
    pylo.measurement_ready["test_controller_handler"] = self.measurement_ready_handler

    self.init_start_program_test(controller, tmp_path)

    # contains the request with group at index 0 and key at index 1
    requests = [r[:2] for r in controller.configuration.request_log]
    # the save-directory request time is used as a proxy for the
    # measurement creation time (index 2 of a log entry is the time)
    index = requests.index(("measurement", "save-directory"))
    measurement_time = controller.configuration.request_log[index][2]

    # events are fired
    assert len(self.microscope_ready_times) == 1
    assert len(self.before_record_times) >= 1
    assert len(self.after_record_times) >= 1
    assert len(self.measurement_ready_times) == 1

    # events are fired after the measurement is created
    assert measurement_time <= min(self.microscope_ready_times)
    assert measurement_time <= min(self.before_record_times)
    assert measurement_time <= min(self.after_record_times)
    assert measurement_time <= min(self.measurement_ready_times)
@pytest.mark.usefixtures("controller")
def test_by_files_measurement_is_started(self, tmp_path, controller):
    """Test if the measurement creates at least one file."""
    self.init_start_program_test(controller, tmp_path)

    files = os.listdir(str(tmp_path))
    # at least one file was written into the save directory
    assert len(files) > 0

    for f in files:
        mtime = os.path.getmtime(os.path.join(str(tmp_path), f))
        # every file was written after the program start and after all the
        # initialization events fired; math.isclose() tolerates the limited
        # resolution of file system modification times
        assert (self.start_time < mtime or
                math.isclose(self.start_time, mtime))
        assert (max(self.before_init_times) < mtime or
                math.isclose(max(self.before_init_times), mtime))
        assert (max(self.init_ready_times) < mtime or
                math.isclose(max(self.init_ready_times), mtime))
        assert (max(self.user_ready_times) < mtime or
                math.isclose(max(self.user_ready_times), mtime))
@pytest.mark.usefixtures("controller")
def test_error_shown_microscope_module_wrong(self, tmp_path, controller):
    """Test if an error is shown when the microscope module could not be
    imported."""

    # set both device values so the controller does not prompt for the one
    # that is not under test
    controller.configuration.setValue(pylo.controller.CONFIG_DEVICE_GROUP,
                                      "microscope",
                                      "NoFileDummyMicroscope")
    controller.configuration.setValue(pylo.controller.CONFIG_DEVICE_GROUP,
                                      "camera", "Dummy Camera")

    with pytest.raises(DummyViewShowsError):
        # DummyView raises DummyViewShowsError when showError() is called
        self.init_start_program_test(controller, tmp_path,
                                     change_microscope=False)

    # the import failure must have been reported through the view
    assert any(
        "Could not import the device 'NoFileDummyMicroscope'" in str(entry[0])
        for entry in controller.view.error_log)
@pytest.mark.usefixtures("controller")
def test_error_shown_microscope_class_wrong(self, tmp_path, controller):
    """Test if an error is shown when the microscope class could not be
    created."""

    # set both device values so the controller does not prompt for the one
    # that is not under test
    controller.configuration.setValue(pylo.controller.CONFIG_DEVICE_GROUP,
                                      "microscope",
                                      "NoClassDummyMicroscope")
    controller.configuration.setValue(pylo.controller.CONFIG_DEVICE_GROUP,
                                      "camera", "Dummy Camera")

    with pytest.raises(DummyViewShowsError):
        # DummyView raises DummyViewShowsError when showError() is called
        self.init_start_program_test(controller, tmp_path,
                                     change_microscope=False)

    # the creation failure must have been reported through the view
    assert any(
        "Could not create the device 'NoClassDummyMicroscope'" in str(entry[0])
        for entry in controller.view.error_log)
@pytest.mark.usefixtures("controller")
def test_error_shown_camera_module_wrong(self, tmp_path, controller):
    """Test if an error is shown when the camera module could not be
    imported."""

    # set both device values so the controller does not prompt for the one
    # that is not under test
    controller.configuration.setValue(pylo.controller.CONFIG_DEVICE_GROUP,
                                      "camera",
                                      "NoFileDummyCamera")
    controller.configuration.setValue(pylo.controller.CONFIG_DEVICE_GROUP,
                                      "microscope", "Dummy Microscope")

    with pytest.raises(DummyViewShowsError):
        # DummyView raises DummyViewShowsError when showError() is called
        self.init_start_program_test(controller, tmp_path,
                                     change_camera=False)

    # the import failure must have been reported through the view
    assert any(
        "Could not import the device 'NoFileDummyCamera'" in str(entry[0])
        for entry in controller.view.error_log)
@pytest.mark.usefixtures("controller")
def test_error_shown_camera_class_wrong(self, tmp_path, controller):
    """Test if an error is shown when the camera class could not be
    created."""

    # set both device values so the controller does not prompt for the one
    # that is not under test
    controller.configuration.setValue(pylo.controller.CONFIG_DEVICE_GROUP,
                                      "camera",
                                      "NoClassDummyCamera")
    controller.configuration.setValue(pylo.controller.CONFIG_DEVICE_GROUP,
                                      "microscope", "Dummy Microscope")

    with pytest.raises(DummyViewShowsError):
        # DummyView raises DummyViewShowsError when showError() is called
        self.init_start_program_test(controller, tmp_path,
                                     change_camera=False)

    # the creation failure must have been reported through the view
    assert any(
        "Could not create the device 'NoClassDummyCamera'" in str(entry[0])
        for entry in controller.view.error_log)
@pytest.mark.usefixtures("controller")
def test_error_shown_create_measurement_incomplete1(self, tmp_path, controller):
    """Test if an error is shown when the view returns an incomplete
    measurement layout (no start setup)."""

    # do not give a start setup, only a series definition
    controller.view.measurement_to_create = (
        {},
        {"variable": "notexisting", "start": 0, "end": 1, "step-width": 1}
    )

    with pytest.raises(DummyViewShowsError):
        # DummyView raises DummyViewShowsError when showError() is called
        self.init_start_program_test(controller, tmp_path)

    # the initialization failure must have been reported through the view
    assert any("The measurement could not be initialized" in str(entry[0])
               for entry in controller.view.error_log)
@pytest.mark.usefixtures("controller")
def test_error_shown_create_measurement_incomplete2(self, tmp_path, controller):
    """Test if an error is shown when the view returns an incomplete
    measurement layout (no series definition)."""

    # give a start setup but no series definition
    controller.view.measurement_to_create = (
        {"measurement-var": 0},
        {}
    )

    with pytest.raises(DummyViewShowsError):
        # DummyView raises DummyViewShowsError when showError() is called
        self.init_start_program_test(controller, tmp_path)

    # the initialization failure must have been reported through the view
    assert any("The measurement could not be initialized" in str(entry[0])
               for entry in controller.view.error_log)
@pytest.mark.usefixtures("controller")
def test_error_shown_create_measurement_incomplete3(self, tmp_path, controller):
    """Test if an error is shown when the view returns an incomplete
    measurement layout (neither start setup nor series definition)."""

    # give neither a start setup nor a series definition
    controller.view.measurement_to_create = (
        {},
        {}
    )

    with pytest.raises(DummyViewShowsError):
        # DummyView raises DummyViewShowsError when showError() is called
        self.init_start_program_test(controller, tmp_path)

    # the initialization failure must have been reported through the view
    assert any("The measurement could not be initialized" in str(entry[0])
               for entry in controller.view.error_log)
def raise_test_exception(self, *args):
    """Raise a generic exception (used to test error handling in events)."""
    message = "TestController: Test exception"
    raise Exception(message)
@pytest.mark.usefixtures("controller")
def test_error_when_exception_in_controller_event(self, tmp_path, controller):
    """Test if an error is shown when there is an exception raised in the
    controller event."""

    def bind_raising_handler():
        # make the init_ready event raise as soon as it fires
        pylo.init_ready["test_controller_raise_exc"] = self.raise_test_exception

    with pytest.raises(DummyViewShowsError):
        # DummyView raises DummyViewShowsError when showError() is called
        self.init_start_program_test(controller, tmp_path,
                                     before_start=bind_raising_handler)

    # the exception text must show up in the view's error log
    assert any("Test exception" in str(entry[0])
               for entry in controller.view.error_log)
@pytest.mark.usefixtures("controller")
def test_error_when_exception_in_measurement_event(self, tmp_path, controller):
    """Test if an error is shown when there is an exception raised in the
    measurement event."""

    def bind_raising_handler():
        # make the after_record event raise as soon as it fires
        pylo.after_record["test_controller_raise_exc"] = self.raise_test_exception

    with pytest.raises(DummyViewShowsError):
        # DummyView raises DummyViewShowsError when showError() is called
        self.init_start_program_test(controller, tmp_path,
                                     before_start=bind_raising_handler)

    # the exception text must show up in the view's error log
    assert any("Test exception" in str(entry[0])
               for entry in controller.view.error_log)
@pytest.mark.usefixtures("controller")
@pytest.mark.parametrize("group,key,for_camera", [
    (pylo.controller.CONFIG_DEVICE_GROUP, "microscope", False),
    (pylo.controller.CONFIG_DEVICE_GROUP, "camera", True),
])
def test_stop_program_exception_stops_in_ask_for_microscope_or_camera(self, tmp_path, controller, group, key, for_camera):
    """Test if the program is stopped if the view raises the StopProgram
    Exception while it is asking for the microscope or camera. This is
    equal to the user clicking the cancel button."""

    # make the view raise StopProgram when it is asked for the tested key
    controller.view.ask_for_response.append(
        ((group, key), self.raise_stop_program)
    )
    # remove the value so the controller has to ask the view for it
    controller.configuration.removeElement(group, key)

    self.init_start_program_test(
        controller,
        tmp_path,
        change_microscope=for_camera,
        change_camera=False
    )

    # only before_init fired, the program stopped before initialization
    assert len(self.before_init_times) == 1
    assert len(self.init_ready_times) == 0
    assert len(self.user_ready_times) == 0
    assert len(self.series_ready_times) == 0

    # assert (isinstance(controller.microscope, pylo.MicroscopeInterface) ==
    #         for_camera)
    # neither a camera nor a measurement object were created
    assert not isinstance(controller.camera, pylo.CameraInterface)
    assert not isinstance(controller.measurement, pylo.Measurement)
@pytest.mark.usefixtures("controller")
def test_stop_program_exception_stops_in_ask_for_measurement(self, tmp_path, controller):
    """Test if the program is stopped if the view raises the StopProgram
    Exception while it is asking for the measurement. This is equal to the
    user clicking the cancel button."""

    # cancel as soon as the view asks for the measurement to create
    controller.view.measurement_to_create = self.raise_stop_program
    self.init_start_program_test(controller, tmp_path)

    # initialization ran, nothing fired after the cancelled dialog
    event_counts = (len(self.before_init_times), len(self.init_ready_times),
                    len(self.user_ready_times), len(self.series_ready_times))
    assert event_counts == (1, 1, 0, 0)

    # the devices exist, but the measurement was never created
    assert isinstance(controller.microscope, pylo.MicroscopeInterface)
    assert isinstance(controller.camera, pylo.CameraInterface)
    assert not isinstance(controller.measurement, pylo.Measurement)
@pytest.mark.usefixtures("controller")
def test_stop_program_stops_current_measurement(self, tmp_path, controller):
    """Test that cancelling in the create-measurement dialog leaves no
    measurement behind.

    NOTE(review): the body is identical to
    test_stop_program_exception_stops_in_ask_for_measurement -- possibly a
    copy-paste placeholder that was meant to stop a *running* measurement;
    confirm the intent.
    """

    # cancel as soon as the view asks for the measurement to create
    controller.view.measurement_to_create = self.raise_stop_program
    self.init_start_program_test(controller, tmp_path)

    # initialization ran, nothing fired after the cancelled dialog
    event_counts = (len(self.before_init_times), len(self.init_ready_times),
                    len(self.user_ready_times), len(self.series_ready_times))
    assert event_counts == (1, 1, 0, 0)

    # the devices exist, but the measurement was never created
    assert isinstance(controller.microscope, pylo.MicroscopeInterface)
    assert isinstance(controller.camera, pylo.CameraInterface)
    assert not isinstance(controller.measurement, pylo.Measurement)
@pytest.mark.slow()
@pytest.mark.usefixtures("controller")
def test_stop_program_loop_stops_program_while_working(self, tmp_path, controller):
    """Test if the program loop is stopped when calling
    Controller::stopProgramLoop() in another thread."""

    # add a listener to the microscope_ready event
    pylo.microscope_ready.clear()
    pylo.measurement_ready.clear()
    self.microscope_ready_times = []
    self.measurement_ready_times = []
    pylo.microscope_ready["test_controller_handler"] = self.microscope_ready_handler
    pylo.measurement_ready["test_controller_handler"] = self.measurement_ready_handler

    # let the microscope take one second to arrange the measurement
    # variable
    measurement_duration_time = 1
    controller.measurement_duration_time = measurement_duration_time

    # program is running
    self.init_start_program_test(controller, tmp_path, wait_for_finish=False)

    # wait some time until the measurement should be started
    time.sleep(measurement_duration_time * 1 / 2)

    # stop the program
    controller.stopProgramLoop()
    controller.waitForProgram()
    end_time = time.time()

    # NOTE(review): this asserts that at least measurement_duration_time
    # passed before the program ended, while the original comment claimed
    # the opposite ("there should not pass that much time") -- presumably
    # stopping waits for the current measurement step to finish; confirm.
    assert self.start_time + measurement_duration_time <= end_time

    # make sure the test is correct, the measurement has started
    assert len(self.before_init_times) == 1
    assert len(self.init_ready_times) == 1
    assert len(self.user_ready_times) == 1
    assert len(self.series_ready_times) == 1
    assert len(self.microscope_ready_times) == 1

    # make sure the measurement has not finished
    assert len(self.measurement_ready_times) == 0
@pytest.mark.slow()
@pytest.mark.usefixtures("controller")
def test_restart_program_loop_works_program_while_working(self, tmp_path, controller):
    """Test if the program loop is restarted when calling
    Controller::restartProgramLoop() in another thread."""

    # add listeners to the microscope_ready and measurement_ready events
    pylo.microscope_ready.clear()
    pylo.measurement_ready.clear()
    self.microscope_ready_times = []
    self.measurement_ready_times = []
    pylo.microscope_ready["test_controller_times"] = self.microscope_ready_handler
    pylo.measurement_ready["test_controller_times"] = self.measurement_ready_handler

    # let the microscope take one second to arrange the measurement
    # variable
    measurement_duration_time = 1
    controller.measurement_duration_time = measurement_duration_time

    # program is running
    self.init_start_program_test(controller, tmp_path, wait_for_finish=False)

    # wait some time until the measurement should be started
    time.sleep(measurement_duration_time * 2 / 3)

    # restart the program while it is working
    restart_time = time.time()
    controller.restartProgramLoop()
    controller.waitForProgram()
    end_time = time.time()

    assert self.start_time < restart_time
    assert restart_time < end_time

    # contains the request with group at index 0 and key at index 1;
    # also collect the request times per "<group>-<key>" pair
    requests = []
    request_times_dict = {}
    for group, key, t in controller.configuration.request_log:
        requests.append((group, key))
        k = "{}-{}".format(group, key)
        if not k in request_times_dict:
            request_times_dict[k] = []
        request_times_dict[k].append(t)

    # all the events must be triggered twice because the program runs twice
    assert len(self.before_init_times) == 2
    assert len(self.init_ready_times) == 2
    assert len(self.user_ready_times) == 2
    assert len(self.series_ready_times) == 2
    assert len(self.microscope_ready_times) == 2

    # showCreateMeasurement() is shown twice
    assert len(controller.view.shown_create_measurement_times) == 2

    # check if microscope and camera are created at least two times, there
    # can be more requests when restarting, etc.
    assert requests.count((pylo.controller.CONFIG_DEVICE_GROUP, "microscope")) >= 2
    assert requests.count((pylo.controller.CONFIG_DEVICE_GROUP, "camera")) >= 2

    # all first events are triggered before the restart
    assert self.before_init_times[0] <= restart_time
    assert self.init_ready_times[0] <= restart_time
    assert self.user_ready_times[0] <= restart_time
    assert self.microscope_ready_times[0] <= restart_time

    # all first requests are made before the restart
    device_group = pylo.controller.CONFIG_DEVICE_GROUP
    assert min(request_times_dict["{}-microscope".format(device_group)]) <= restart_time
    assert min(request_times_dict["{}-camera".format(device_group)]) <= restart_time

    # all second events are triggered after the restart
    assert restart_time <= self.before_init_times[1]
    assert restart_time <= self.init_ready_times[1]
    assert restart_time <= self.user_ready_times[1]
    assert restart_time <= self.microscope_ready_times[1]

    # the measurement finishes only one time (the first run was aborted
    # by the restart)
    assert len(self.measurement_ready_times) == 1
# (removed: non-source boilerplate text accidentally appended during extraction)