from dataclasses import dataclass
from datetime import datetime
from datatypes import BankAccountTransaction, Account, Card
from datatypes import BankCreditCardTransaction
from datatypes import TransactionType, UnknownSubject, ModifiedFlags
@dataclass
class TestClass:
att1: str
att2: dict
def make_test_account():
return Account('TEST_ACCOUNT', '00000000001')
def make_test_card():
return Card('TEST_CARD', '00000000001')
def make_transaction(
        transaction_id=None,
        type=TransactionType.UNKNOWN,
        details=None,
        keywords=None,
        currency='EUR',
        amount=0.0,
        balance=0.0,
        comment='',
        source=None,
        destination=None,
        category=None,
        tags=None
):
    # None is used as the default for mutable arguments to avoid the
    # shared mutable-default pitfall; fresh objects are created below.
    return BankAccountTransaction(
        transaction_id=transaction_id,
        currency=currency,
        amount=amount,
        balance=balance,
        value_date=datetime.now(),
        transaction_date=datetime.now(),
        type=type,
        source=UnknownSubject() if source is None else source,
        destination=UnknownSubject() if destination is None else destination,
        account=make_test_account(),
        card=make_test_card(),
        details={} if details is None else details,
        keywords=[] if keywords is None else keywords,
        comment=comment,
        tags=[] if tags is None else tags,
        category=category,
        flags=ModifiedFlags()
    )
def make_credit_card_transaction(
        transaction_id=None,
        transaction_date=None,
        value_date=None,
        type=TransactionType.UNKNOWN,
        details=None,
        keywords=None,
        currency='EUR',
        amount=0.0,
        comment='',
        source=None,
        destination=None,
        category=None,
        tags=None,
        _seq=None,
):
    return BankCreditCardTransaction(
        transaction_id=transaction_id,
        currency=currency,
        amount=amount,
        # Fall back to now() per date field, matching each parameter to its
        # own field (the original checked the wrong parameter for each).
        value_date=datetime.now() if value_date is None else value_date,
        transaction_date=datetime.now() if transaction_date is None else transaction_date,
        type=type,
        source=UnknownSubject() if source is None else source,
        destination=UnknownSubject() if destination is None else destination,
        card=make_test_card(),
        details={} if details is None else details,
        keywords=[] if keywords is None else keywords,
        comment=comment,
        tags=[] if tags is None else tags,
        category=category,
        flags=ModifiedFlags(),
        _seq=_seq
    )
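# A minimal sketch of using these factories in a test; it assumes the
# dataclass fields above (amount, category, source) are plain attributes.
def test_make_transaction_defaults():
    tx = make_transaction(amount=12.5, comment='coffee')
    assert tx.amount == 12.5
    assert tx.category is None
    assert isinstance(tx.source, UnknownSubject)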
|
from typing import List
from sc2 import BotAI
class Composer(BotAI):
    def __init__(self, bots: List[BotAI]):
        # initialize BotAI state before composing the sub-bots
        super().__init__()
        self.bots = bots
async def on_step(self, iteration):
for bot in self.bots:
bot._prepare_step(self.state)
if iteration == 0:
bot._prepare_first_step()
await bot.on_step(iteration)
def on_start(self):
for bot in self.bots:
bot._prepare_start(self._client, self.player_id, self._game_info, self._game_data)
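# A sketch of wiring the Composer into a python-sc2 match. The run_game /
# maps / Bot imports follow the older python-sc2 API that `from sc2 import
# BotAI` suggests; EconomyBot and ArmyBot are hypothetical sub-bots.
# from sc2 import run_game, maps, Race, Difficulty
# from sc2.player import Bot, Computer
#
# run_game(maps.get("AbyssalReefLE"),
#          [Bot(Race.Terran, Composer([EconomyBot(), ArmyBot()])),
#           Computer(Race.Zerg, Difficulty.Easy)],
#          realtime=False)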
|
import unittest
import sys
import os
import logging
# Make the project root importable before the project imports below need it.
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
from solvers.RightWallFollowerSolver import RightWallFollowerSolver
from test.solverstest.AbstractBaseSolverTest import AbstractBaseSolverTest
class RightWallFollowerSolverTest(AbstractBaseSolverTest, unittest.TestCase):
def setUp(self):
super(RightWallFollowerSolverTest, self).setUp()
self.log = logging.getLogger(__name__)
self.solver = RightWallFollowerSolver()
# This is needed for the individual execution of this test class
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(RightWallFollowerSolverTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 10 12:47:40 2020
@author: TOP Artes
"""
from model.analise import Analise
class ControlAnalise:
def __init__(self):
self.analise = Analise()
def set_plt_text(self, plt_text):
dct_text = {}
if plt_text == 'Inc_ENEM':
dct_text['ylim'] = '(min(y_test)-4500),(max(y_test)+300000)'
dct_text['yscale'] = 'log'
dct_text['ylabel'] = 'Quantidade de Inscrições'
dct_text['title'] = 'da quantidade de inscrições anuais'
        else:
            dct_text['ylim'] = '(min(y_test)-10),(max(y_test)+10)'
            dct_text['yscale'] = 'linear'
            dct_text['ylabel'] = 'Mediana estadual das notas'
            if plt_text == 'Mediana_CN':
                dct_text['title'] = 'das notas de Ciências da Natureza'
            elif plt_text == 'Mediana_CH':
                dct_text['title'] = 'das notas de Ciências Humanas'
            elif plt_text == 'Mediana_LN':
                dct_text['title'] = 'das notas de Linguagens'
            elif plt_text == 'Mediana_MT':
                dct_text['title'] = 'das notas de Matemática'
            elif plt_text == 'Mediana_RD':
                dct_text['title'] = 'das notas de Redação'
return dct_text
    def inscritos_ano(self, DataFrame, predicted, modelo=None):
        # 'modelo' (the fitted regressor) was referenced but never defined;
        # it is now an explicit parameter.
        if predicted:
            df = DataFrame
            regressor = str(modelo).split('(')[0]
            dct_plot = {'title': f'Gráfico de dispersão da quantidade de inscrições anuais por estado.\n'
                                 f'Dados originais do ENEM de 2010 a 2018 e 2019 estimado por {regressor}'}
else:
df = DataFrame[DataFrame['ano']!=2019]
dct_plot = {'title':'Gráfico de dispersão da quantidade de inscrições anuais por estado.\n\
Dados originais do ENEM de 2010 a 2018'}
        # Calls the plotting routine for the state-level ENEM enrollment time series (2010 to 2018)
self.analise.visualizar_inscritos_ano(df, dct_plot)
return
    def estrutura_ano(self, DataFrame, predicted, modelo=None):
        if predicted:
            regressor = str(modelo).split('(')[0]
            lst_anos = [2017, 2018, 2019]
            df = DataFrame[DataFrame['ano'] >= 2017]
            dct_plot = {'regressor': regressor}
else:
lst_anos = list(DataFrame[DataFrame['ano']!=2019]['ano'].unique())
df = DataFrame
dct_plot = {'title':'Histograma da estrutura educacional de cada estado.\n\
Dados originais do ENEM de 2010 a 2018'}
        # Calls the plotting routine for the per-year (2010 to 2018) state educational-structure histograms
self.analise.visualizar_proporcoes(df, lst_anos, dct_plot)
return
    def plotar_previsoes(self, dados):
        """
        Receives the original predictor and target data, the test data and
        the model's prediction (X_test), and prints the plot.
        """
        self.analise.visualizar_comparacao(dados)
        return []
def print_conclusao(self, wilcox, friedman, ranks, models_par, names, cds, average_ranks, lst_models):
self.analise.plot_comparisons(friedman, names, cds[0], cds[1], average_ranks)
self.analise.visualizar_resultados_validacao(wilcox, friedman, models_par, cds, average_ranks, lst_models)
return
|
from datetime import datetime
if __name__ == '__main__':
n = int(input())
arr1 = map(int, input().split())
start = datetime.now()
    arr = sorted(arr1)
    # pop duplicate maxima so arr[-2] ends up as the second-largest value
    while arr[-2] == arr[-1]:
        arr.pop()
    print(arr[-2])
end = datetime.now() - start
print(end)
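# An equivalent, more direct sketch: deduplicate with a set, then take the
# second-largest distinct value (assumes at least two distinct inputs).
# n = int(input())
# values = sorted(set(map(int, input().split())))
# print(values[-2])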
|
import pytest
from config_generator import Renderer
from config_readers import get_admin_config_file, LocalUserConfigReader
class TestConfigGenerator:
@pytest.fixture
def renderer(self):
context = get_admin_config_file('tests/fixtures/renderer_test_config.yaml')
return Renderer(context, './templates')
@pytest.fixture
def required_options(self):
required_options = {'email': ['smtp_host', 'smtp_port'],
'slack': ['slack_webhook_url']}
return required_options
    @classmethod
    def contains_required_options(cls, config, options):
        return all(option in config for option in options)
def test_example(self, renderer, required_options):
reader = LocalUserConfigReader('tests/fixtures/renderer_test_user_rules')
user_configs = reader.get_config_files()
for conf in user_configs:
renderer.add_alerts_options(conf)
for alert in conf['alert']:
if isinstance(alert, dict):
alert_type = list(alert.keys())[0]
config = alert[alert_type]
else:
alert_type = alert
config = conf
assert alert_type in required_options
assert TestConfigGenerator.contains_required_options(config,
required_options[alert_type])
assert f'{alert_type}_id' not in config
|
n=int(input("enter any number:")) #153
a=[int(x) for x in str(n)] #[1,5,3]
total = 0  # avoid shadowing the built-in sum
powers = list(map(lambda y: pow(y, len(a)), a))  # [1,125,27]
for j in powers:
    total += j
print("armstrong number" if n == total else "not armstrong")
|
# test
from data.handpose_data2 import UCIHandPoseDataset
from model.lstm_pm import LSTM_PM
from src.utils import *
# from __future__ import print_function
import argparse
import pandas as pd
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
from collections import OrderedDict
from torch.utils.data import DataLoader
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
# add parameter
parser = argparse.ArgumentParser(description='Pytorch LSTM_PM with Penn_Action')
parser.add_argument('--learning_rate', type=float, default=8e-6, help='learning rate')
parser.add_argument('--batch_size', default=1, type=int, help='batch size for training')
parser.add_argument('--save_dir', default='ckpt', type=str, help='directory of checkpoint')
parser.add_argument('--cuda', default=1, type=int, help='if you use GPU, set cuda = 1,else set cuda = 0')
parser.add_argument('--temporal', default=4, type=int, help='how many temporals you want ')
args = parser.parse_args()
# hyper parameters (this hard-coded value overrides the --temporal CLI default)
temporal = 5
test_data_dir = './dataset/train_data'
test_label_dir = './dataset/train_label'
model_epo = [10, 15, 20, 25, 30, 35, 40, 45, 50]
# load data
test_data = UCIHandPoseDataset(data_dir=test_data_dir, label_dir=test_label_dir, temporal=temporal, train=False)
print('Test dataset total number of images sequence is ----' + str(len(test_data)))
test_dataset = DataLoader(test_data, batch_size=args.batch_size, shuffle=False)
def load_model(model):
# build model
net = LSTM_PM(T=temporal)
if torch.cuda.is_available():
net = net.cuda()
# net = nn.DataParallel(net) # multi-Gpu
    save_path = os.path.join('ckpt', 'ucihand_lstm_pm' + str(model) + '.pth')
state_dict = torch.load(save_path)
net.load_state_dict(state_dict)
return net
# ******************** transfer from multi-GPU model ********************
# create new OrderedDict that does not contain `module.`
# new_state_dict = OrderedDict()
# for k, v in state_dict.items():
# namekey = k[7:] # remove `module.`
# new_state_dict[namekey] = v
# # load params
# return net
# **************************************** test all images ****************************************
print('********* test data *********')
for model in model_epo:
net = load_model(model)
net.eval()
sigma = 0.01
results = []
for i in range(5): #going over the sigmas
result = [] # save sigma and pck
result.append(sigma)
pck_all = []
for step, (images, label_map, center_map, imgs) in enumerate(test_dataset):# going over the temporal data
images = Variable(images.cuda() if args.cuda else images) # 4D Tensor
# Batch_size * (temporal * 3) * width(368) * height(368)
label_map = Variable(label_map.cuda() if args.cuda else label_map) # 5D Tensor
# Batch_size * Temporal * joint * 45 * 45
center_map = Variable(center_map.cuda() if args.cuda else center_map) # 4D Tensor
# Batch_size * 1 * width(368) * height(368)
predict_heatmaps = net(images, center_map) # get a list size: temporal * 4D Tensor
predict_heatmaps = predict_heatmaps[1:]
# calculate pck
pck = lstm_pm_evaluation(label_map, predict_heatmaps, sigma=sigma, temporal=temporal)
pck_all.append(pck)
if step % 100 == 0:
print('--step ...' + str(step))
print('--pck.....' + str(pck))
save_images(label_map, predict_heatmaps, step, epoch=-1, imgs=imgs, train=False, temporal=temporal,pck=pck)
if pck < 0.8:
save_images(label_map, predict_heatmaps, step, epoch=-1, imgs=imgs, train=False, temporal=temporal,pck=pck)
print('sigma ==========> ' + str(sigma))
print('===PCK evaluation in test dataset is ' + str(sum(pck_all) / len(pck_all)))
result.append(str(sum(pck_all) / len(pck_all)))
results.append(result)
sigma += 0.01
results = pd.DataFrame(results)
results.to_csv('ckpt/' + str(model) + 'test_pck.csv')
|
import random
class Node:
def __init__(self, value, parent, data1, color="R"):
self.value = value
self.data = data1
self.parent = parent
self.left = None
self.right = None
self.color = color
def get_uncle(self):
if self.parent is not None and self.parent.parent is not None:
if self.parent == self.parent.parent.left:
return self.parent.parent.right
else:
return self.parent.parent.left
return None
def get_sibling(self):
if self.parent is not None:
if self.parent.right == self:
return self.parent.left
else:
return self.parent.right
return None
def __repr__(self):
parent = None
if self.parent is not None:
parent = self.parent.value
string = f"{self.value}<-({parent})({self.color}){self.data}\n"
if self.left is not None:
string += f"L:{self.left}"
if self.right is not None:
string += f"R:{self.right}"
return string
def __str__(self):
parent = None
if self.parent is not None:
parent = self.parent.value
return f"{self.value}, <-{parent}, {self.color}, {self.data}"
@staticmethod
def node_traversal(node, a):
if node is None:
return
else:
a.append(str(node))
Node.node_traversal(node.left, a)
Node.node_traversal(node.right, a)
class RBT:
    def __init__(self):
        # instance attributes; class-level mutable state would be shared
        # across all trees
        self.root = None
        self.comparisons = 0
def __insert(self, value, node, data1):
if node is None:
self.root = Node(value, None, data1, color="B")
return
if node.value > value:
if node.left is None:
node.left = Node(value, node, data1)
self.fix_violations(node.left)
else:
self.__insert(value, node.left, data1)
else:
if node.right is None:
node.right = Node(value, node, data1)
self.fix_violations(node.right)
else:
self.__insert(value, node.right, data1)
def insert(self, value, data1):
self.__insert(value, self.root, data1)
    def fix_violations(self, node: Node):
while node.parent is not None and node.parent.color != "B" and node.parent.parent is not None and node.color == "R":
g = node.parent.parent
p = node.parent
u = node.get_uncle()
if u is not None and u.color == "R":
self.recolor(p)
self.recolor(g)
self.recolor(u)
if g != self.root:
node = g
else:
if node.value < p.value < g.value:
self.right_rotation(g)
self.recolor(g)
self.recolor(p)
elif node.value >= p.value < g.value:
self.left_rotation(p)
self.right_rotation(g)
self.recolor(g)
self.recolor(node)
elif node.value >= p.value >= g.value:
self.left_rotation(g)
self.recolor(g)
self.recolor(p)
else:
self.right_rotation(p)
self.left_rotation(g)
self.recolor(g)
self.recolor(node)
def recolor(self, node: Node):
if node is None:
return
if node.color == "B" and node != self.root:
node.color = "R"
else:
node.color = "B"
def right_rotation(self, node: Node):
temp = node.left
node.left = temp.right
if node.left is not None:
node.left.parent = node
if node.parent is not None:
if node.parent.right == node:
node.parent.right = temp
else:
node.parent.left = temp
temp.parent = node.parent
else:
self.root = temp
temp.parent = None
temp.right = node
node.parent = temp
def left_rotation(self, node: Node):
temp = node.right
node.right = temp.left
if node.right is not None:
node.right.parent = node
if node.parent is not None:
if node.parent.right == node:
node.parent.right = temp
else:
node.parent.left = temp
temp.parent = node.parent
else:
self.root = temp
temp.parent = None
temp.left = node
node.parent = temp
def __search(self, value, node):
if node is None:
self.comparisons += 1
return
if node.value == value:
self.comparisons += 1
return node
        self.comparisons += 1
        if node.value > value:
            return self.__search(value, node.left)
        else:
            # the right-branch comparison was previously left uncounted
            return self.__search(value, node.right)
def search(self, value):
return self.__search(value, self.root)
def num_of_comparisons(self):
comp = self.comparisons
self.comparisons = 0
return comp
def __delete(self, node):
if node is None:
return
if node.left is None and node.right is None:
if node == self.root:
self.root = None
return
self.fix_delete(node)
return
if node.right is not None:
minn = self.get_min_node(node.right)
temp = node.value
node.value = minn.value
minn.value = temp
temp = node.data
node.data = minn.data
minn.data = temp
self.__delete(minn)
else:
maxn = self.get_max_node(node.left)
temp = node.value
node.value = maxn.value
maxn.value = temp
temp = node.data
node.data = maxn.data
maxn.data = temp
self.__delete(maxn)
def delete(self, value):
node = self.search(value)
self.__delete(node)
def fix_delete(self, node):
count = 0
while True:
if node.color == "R":
if node == node.parent.left:
node.parent.left = None
else:
node.parent.right = None
return
else:
p = node.parent
s = node.get_sibling()
if node == self.root:
return
if s is None or s.color == "B":
if self.delete_check_bbb(s):
if p.color == "B":
self.recolor(s)
if count == 0:
if node == node.parent.left:
count += 1
node.parent.left = None
else:
count += 1
node.parent.right = None
node = p
else:
self.recolor(p)
self.recolor(s)
break
elif node == node.parent.left:
if (s.right is None or s.right.color == "B") and s.left.color == "R":
self.recolor(s)
self.recolor(s.left)
self.right_rotation(s)
else:
if s.parent.color != s.color:
temp = s.color
s.color = s.parent.color
s.parent.color = temp
s.right.color = "B"
self.left_rotation(p)
break
else:
if (s.left is None or s.left.color == "B") and s.right.color == "R":
self.recolor(s)
self.recolor(s.right)
self.left_rotation(s)
else:
if s.parent.color != s.color:
temp = s.color
s.color = s.parent.color
s.parent.color = temp
s.left.color = "B"
self.right_rotation(p)
break
else:
# self.recolor(p)
# self.recolor(s)
if s.parent.color != s.color:
temp = s.color
s.color = s.parent.color
s.parent.color = temp
if p.left == node:
self.left_rotation(p)
else:
self.right_rotation(p)
if node == node.parent.left:
node.parent.left = None
else:
node.parent.right = None
return
@staticmethod
def delete_check_bbb(s):
if s is None:
return True
if s.left is None and s.right is None:
return True
if s.right is None and s.left is not None and s.left.color == "B":
return True
if s.left is None and s.right is not None and s.right.color == "B":
return True
if s.left is not None and s.right is not None and s.left.color == "B" and s.right.color == "B":
return True
return False
@staticmethod
def get_min_node(node):
current = node
while current.left is not None:
current = current.left
return current
@staticmethod
def get_max_node(node):
current = node
while current.right is not None:
current = current.right
return current
def search_data(self, value):
node = self.search(value)
if node is not None:
return node.data.rstrip()
return None
def change_data(self, value, data):
node = self.search(value)
if node is not None:
node.data = data
return True
return False
def __print(self, node):
if node is None:
return
print(node.value, node.color)
self.__print(node.left)
self.__print(node.right)
def print(self):
self.__print(self.root)
# tree = RBT()
#
#
# with open("data.txt", "r") as f:
# data_list = f.readlines()
#
# set_keys1 = set()
# for _ in range(10000):
# num1 = random.randint(1, 10000)
# if num1 not in set_keys1:
# set_keys1.add(num1)
# num11 = random.randint(0, 9999)
# data = data_list[num11]
# tree.insert(num1, data)
# for _ in range(15):
# num = random.randint(1, 10000)
# print(tree.search_data(num))
# print(tree.num_of_comparisons())
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import face_align
from retinaface import RetinaFace
import sklearn
import mxnet as mx
import json
import logging as log
from collections import namedtuple
from abc import ABC, abstractmethod
import sys
import os
import cv2
import numpy as np
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'common'))
class DetectorInterface(ABC):
@abstractmethod
def run_async(self, frames, index):
pass
@abstractmethod
def wait_and_grab(self):
pass
class Detector(DetectorInterface):
"""Wrapper class for detector"""
def __init__(self, model_path='./retinaface-R50/R50', conf=.6, max_num_frames=1):
        self.net = RetinaFace(model_path, 0, 0, 'net3')
        self.confidence = conf
        self.expand_ratio = (1., 1.)
        self.max_num_frames = max_num_frames
        # populated by run_async so wait_and_grab can return the last result
        self.last_detections = []
    def run_async(self, frames, index=None):
        # 'index' is accepted (and ignored) to match DetectorInterface
assert len(frames) <= self.max_num_frames
self.shapes = []
all_detections = []
scales = [720, 1280]
target_size = scales[0]
max_size = scales[1]
for i in range(len(frames)):
im_shape = frames[i].shape
self.shapes.append(im_shape)
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
            all_detections.append(self.net.detect(frames[i], self.confidence,
                                                  scales=[im_scale], do_flip=False))
        self.last_detections = all_detections
        return all_detections
    def wait_and_grab(self):
        """Returns the detections from the most recent run_async call.
        Implementing this also satisfies the DetectorInterface ABC, which the
        original class could not instantiate without it."""
        return self.last_detections
    def get_detections(self, frames):
        """Returns all detections on frames"""
        return self.run_async(frames)
class VectorCNN:
"""Wrapper class for a network returning a vector"""
def __init__(self, image_size=(112, 112), model_path='./model-r100-ii/model,0'):
ctx = mx.gpu(0)
self.model = self.get_model(ctx, image_size, model_path, 'fc1')
    @staticmethod
    def get_model(ctx, image_size, model_str, layer):
        # static: the original signature had no 'self' but was called as a method
_vec = model_str.split(',')
assert len(_vec) == 2
prefix = _vec[0]
epoch = int(_vec[1])
print('loading', prefix, epoch)
sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
all_layers = sym.get_internals()
sym = all_layers[layer+'_output']
model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
#model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], image_size[1]))], label_shapes=[('softmax_label', (args.batch_size,))])
model.bind(
data_shapes=[('data', (1, 3, image_size[0], image_size[1]))])
model.set_params(arg_params, aux_params)
return model
def get_align_input(self, img, points):
nimg = face_align.norm_crop(
img, landmark=points, image_size=112, mode='arcface')
nimg = cv2.cvtColor(nimg, cv2.COLOR_BGR2RGB)
aligned = np.transpose(nimg, (2, 0, 1))
return aligned
def get_feature(self, aligned):
input_blob = np.expand_dims(aligned, axis=0)
data = mx.nd.array(input_blob)
db = mx.io.DataBatch(data=(data,))
self.model.forward(db, is_train=False)
embedding = self.model.get_outputs()[0].asnumpy()
embedding = sklearn.preprocessing.normalize(embedding).flatten()
return embedding
    def forward(self, batch):
        """Performs forward of the underlying network on a given batch.
        Each item is assumed to be a (frame, points) pair, since alignment
        needs the facial landmarks (the original call dropped them)."""
        embedding = []
        for frame, points in batch:
            embedding.append(self.get_feature(self.get_align_input(frame, points)))
        return embedding
class DetectionsFromFileReader(DetectorInterface):
"""Read detection from *.json file.
Format of the file should be:
[
{'frame_id': N,
'scores': [score0, score1, ...],
'boxes': [[x0, y0, x1, y1], [x0, y0, x1, y1], ...]},
...
]
"""
def __init__(self, input_file, score_thresh):
self.input_file = input_file
self.score_thresh = score_thresh
self.detections = []
log.info('Loading {}'.format(input_file))
with open(input_file) as f:
all_detections = json.load(f)
for source_detections in all_detections:
detections_dict = {}
for det in source_detections:
detections_dict[det['frame_id']] = {
'boxes': det['boxes'], 'scores': det['scores']}
self.detections.append(detections_dict)
def run_async(self, frames, index):
self.last_index = index
def wait_and_grab(self):
output = []
for source in self.detections:
valid_detections = []
if self.last_index in source:
for bbox, score in zip(source[self.last_index]['boxes'], source[self.last_index]['scores']):
if score > self.score_thresh:
bbox = [int(value) for value in bbox]
valid_detections.append((bbox, score))
output.append(valid_detections)
return output
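# A quick sketch of the expected file format and reader usage; the file name
# and contents below are hypothetical.
# detections.json: [[{"frame_id": 0, "scores": [0.9], "boxes": [[10, 10, 50, 50]]}]]
# reader = DetectionsFromFileReader('detections.json', score_thresh=0.5)
# reader.run_async(frames=None, index=0)
# print(reader.wait_and_grab())  # [[([10, 10, 50, 50], 0.9)]]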
|
x=int(input("enter a number1"))
y=int(input("enter a number2"))
temp=x
x=y
y=temp
print(x)
print(y)
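# The same swap in idiomatic Python needs no temporary variable:
# x, y = y, x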
|
# -*- coding: utf-8 -*-
import openpyxl as xl
import paramiko
import sys
import json
def _set_cell(ws, row, column, value, error=False):
    # exec_command output arrives as bytes; locally-built error strings are str
    value = value.decode("ISO-8859-1") if isinstance(value, bytes) else value
    ws.cell(row=row, column=column).value = value
ws.cell(row=row, column=column).alignment = xl.styles.Alignment(wrapText=True)
if error:
redFill = xl.styles.PatternFill(start_color='FFFF0000',
end_color='FFFF0000',
fill_type='solid')
ws.cell(row=row, column=column).fill = redFill
def _connect_host(host, username, password, wait_time, ip_gateway=None, port=22):
if ip_gateway:
vm = paramiko.SSHClient()
vm.set_missing_host_key_policy(paramiko.AutoAddPolicy())
vm.connect(ip_gateway, username=username, password=password, auth_timeout=wait_time)
vmtransport = vm.get_transport()
vmchannel = vmtransport.open_channel("direct-tcpip", (host, port), (ip_gateway, port))
jhost = paramiko.SSHClient()
jhost.set_missing_host_key_policy(paramiko.AutoAddPolicy())
jhost.connect(host, username=username, password=password, sock=vmchannel, auth_timeout=wait_time)
else:
jhost = paramiko.SSHClient()
jhost.set_missing_host_key_policy(paramiko.AutoAddPolicy())
jhost.connect(host, username=username, password=password, auth_timeout=wait_time)
stdin, hostname, stderr1 = jhost.exec_command('uname -n 2>/dev/null')
stdin, oslevel, stderr2 = jhost.exec_command('oslevel -s 2>/dev/null || uname -r 2>/dev/null')
hostname = hostname.read()
oslevel = oslevel.read()
if not hostname:
hostname = '###### {}'.format(stderr1.read())
if not oslevel:
oslevel = '###### {}'.format(stderr2.read())
jhost.close()
if ip_gateway:
vm.close()
values = {}
values['hostname'] = hostname
values['oslevel'] = oslevel
return values
if __name__ == "__main__":
info = json.load(open('access.json'))
filename = info['file'] # .xlsx file
gateway = info['gateway'] # Gateway used to reach final host
username = info['username'] # Username for both Gateway and final host
password = info['password'] # Password for both Gateway and final host
wait_time = info['wait_time'] # Waiting time to try connection with Gateway/host
col_ip = info['col_ip'] # number of column on .xlsx where IPs are found
col_start = info['col_start'] # in case host not found, which columns should be written with -
col_end = info['col_end'] # in case host not found, which columns should be written with -
port = info['port'] # port used for both connections
wb = xl.load_workbook(filename)
ws = wb.active
for col in ws.iter_cols(min_row=2, min_col=col_ip, max_col=col_ip):
for cell in col:
if not cell.value:
                print('FINISHED PROCESSING AT ROW {}'.format(cell.row))
sys.stdout.flush()
sys.exit(0)
            print('Processing row {}: '.format(cell.row), end='')
sys.stdout.flush()
if not ws.cell(row=cell.row, column=col_start).value:
                print('CONNECTING to {}, '.format(str(cell.value)), end='')
                sys.stdout.flush()
                try:
                    values = _connect_host(host=str(cell.value),
                                           username=username, password=password,
                                           wait_time=wait_time,
                                           ip_gateway=gateway, port=port)
_set_cell(ws, cell.row, 3, values['hostname'])
_set_cell(ws, cell.row, 4, values['oslevel'])
print "DONE\n".format(cell.row),
sys.stdout.flush()
except Exception as e:
for i in range(col_start,col_end+1):
_set_cell(ws, cell.row, i, str(e), True)
print "CAN'T CONNECT - {}\n".format(str(e)),
sys.stdout.flush()
wb.save(filename)
else:
print "DONE\n".format(cell.row),
sys.stdout.flush()
|
import os
import logging
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from base import LoadFile
class EmbedRank(LoadFile):
"""EmbedRank keyphrase extraction model.
Parameterized example::
import string
import pke
# 1. create an EmbedRank extractor.
extractor = pke.unsupervised.EmbedRank()
# 2. load the content of the document.
extractor.load_document(input='path/to/input',
language='en',
normalization=None)
# 3. select sequences of nouns and adjectives as candidates.
extractor.candidate_selection()
# 4. weight the candidates using EmbedRank method
extractor.candidate_weighting()
# 5. get the 10-highest scored candidates as keyphrases
keyphrases = extractor.get_n_best(n=10)
"""
_embedding_path = None
_embedding_model = None
def __init__(self, embedding_path=None):
try:
import sent2vec # See https://github.com/epfml/sent2vec
except ImportError:
logging.warning('Module sent2vec was not found.')
            logging.warning('Please install using `python -m pip install cython; '
                            'python -m pip install git+https://github.com/epfml/sent2vec` '
                            'to use EmbedRank')
return
super(EmbedRank, self).__init__()
if embedding_path is None:
model_name = 'wiki_bigrams.bin'
self._embedding_path = os.path.join(self._models, model_name)
else:
self._embedding_path = embedding_path
if not os.path.exists(self._embedding_path):
logging.error('Could not find {}'.format(self._embedding_path))
logging.error('Please download "sent2vec_wiki_bigrams" model from '
'https://github.com/epfml/sent2vec#downloading-sent2vec-pre-trained-models.')
logging.error('And place it in {}.'.format(self._models))
logging.error('Or provide an embedding path.')
if EmbedRank._embedding_path is None or EmbedRank._embedding_path != self._embedding_path:
logging.info('Loading sent2vec model')
EmbedRank._embedding_model = sent2vec.Sent2vecModel()
EmbedRank._embedding_model.load_model(self._embedding_path)
self._embedding_model = EmbedRank._embedding_model
EmbedRank._embedding_path = self._embedding_path
logging.info('Done loading sent2vec model')
# Initialize _pos here, if another selection function is used.
self._pos = {'NOUN', 'PROPN', 'ADJ'}
def candidate_selection(self, pos=None):
"""Candidate selection using longest sequences of PoS.
Args:
pos (set): set of valid POS tags, defaults to ('NOUN', 'PROPN',
'ADJ').
"""
if pos is not None:
self._pos = pos
# select sequence of adjectives and nouns
self.longest_pos_sequence_selection(valid_pos=self._pos)
def mmr_ranking(self, document, candidates, l):
"""Rank candidates according to a query
Args:
document (np.array): dense representation of document (query)
candidates (np.array): dense representation of candidates
l (float): ratio between distance to query or distance between
chosen candidates
Returns:
list of candidates rank
"""
def norm(sim, **kwargs):
sim -= sim.min(**kwargs)
sim /= sim.max(**kwargs)
sim = 0.5 + (sim - sim.mean(**kwargs)) / sim.std(**kwargs)
return sim
sim_doc = cosine_similarity(document, candidates)
sim_doc[np.isnan(sim_doc)] = 0.
sim_doc = norm(sim_doc)
sim_doc[np.isnan(sim_doc)] = 0.
sim_can = cosine_similarity(candidates)
sim_can[np.isnan(sim_can)] = 0.
sim_can = norm(sim_can, axis=1)
sim_can[np.isnan(sim_can)] = 0.
sel = np.zeros(len(candidates), dtype=bool)
ranks = [None] * len(candidates)
# Compute first candidate, the second part of the calculation is 0
# as there are no other chosen candidates to maximise distance to
chosen_candidate = (sim_doc * l).argmax()
sel[chosen_candidate] = True
ranks[chosen_candidate] = 0
for r in range(1, len(candidates)):
# Remove already chosen candidates
sim_can[sel] = np.nan
# Compute MMR score
scores = l * sim_doc - (1 - l) * sim_can[:, sel].max(axis=1)
chosen_candidate = np.nanargmax(scores)
# Update output and mask with chosen candidate
sel[chosen_candidate] = True
ranks[chosen_candidate] = r
return ranks
def candidate_weighting(self, l=1, lower=False):
"""Candidate weighting function using distance to document.
Args:
l (float): Lambda parameter for EmbedRank++ Maximal Marginal
Relevance (MMR) computation. Use 1 to compute EmbedRank and 0 to not
use the document, but only the most diverse set of candidates
(defaults to 1).
"""
        # Flatten sentences and remove words with invalid POS
doc = ' '.join(w.lower() if lower else w for s in self.sentences
for i, w in enumerate(s.words)
if s.pos[i] in self._pos)
doc_embed = self._embedding_model.embed_sentence(doc)
cand_name = list(self.candidates.keys())
cand = (self.candidates[k] for k in cand_name)
cand = [' '.join(k.surface_forms[0]) for k in cand]
cand = [k.lower() if lower else k for k in cand]
cand_embed = self._embedding_model.embed_sentences(cand)
rank = self.mmr_ranking(doc_embed, cand_embed, l)
for candidate_id, r in enumerate(rank):
if len(rank) > 1:
# Inverting ranks so the first ranked candidate has the biggest score
score = (len(rank) - 1 - r) / (len(rank) - 1)
else:
score = r
self.weights[cand_name[candidate_id]] = score
|
from src.cdp.Habilidades import Resistencia
from src.util.FabricaNaves import FabricaNaveInimiga
class FabricaNaveFuga(FabricaNaveInimiga):
def __init__(self, figura_nave, figura_explosao, som):
super(FabricaNaveFuga, self).__init__('Nave de Fuga', figura_nave, figura_explosao, som)
self.pontuacao_derrotar = 50
# """---------------ACOES-----------------"""
# abc.override
def move(self):
self.posicao["y"] += self.velocidade["y"]
self.posicao["x"] += self.velocidade["x"]
self.cria_area()
# """--------------ATRIBUTO---------------"""
# abc.override
@staticmethod
def cria_velocidade():
return {"x": 3, "y": 3}
# abc.override
@staticmethod
def cria_resistencia():
return Resistencia.Resistencia(5, 1)
|
from HW1_src import Linear_Function
from HW1_src import Quadratic_Function
from HW1_src import Cubic_Function
import sys
for arg in sys.argv:
    try:
        if int(arg) == 1:
            print("The linear function was called: ")
            Linear_Function(5000)
        elif int(arg) == 2:
            print("The quadratic function was called: ")
            Quadratic_Function(5000)
        elif int(arg) == 3:
            print("The cubic function was called: ")
            Cubic_Function(5000)
    except ValueError:
        if arg == "question1.py":
            pass
        else:
            print("Please either input 1, 2, or 3")
|
# -*- coding: utf-8 -*-
# EYNES- Ingenieria de software - 2019. See LICENSE file for full copyright and licensing details.
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models, _
from odoo.tools.safe_eval import safe_eval
from odoo.exceptions import ValidationError
import logging
import datetime
from datetime import timedelta
_logger = logging.getLogger(__name__)
class ObjectiveWorklog(models.Model):
_name = "objective.worklog"
_description = 'Amount of hours and other info related to an employee\'s objective'
name = fields.Char(
'Name',
required=False,
store=True
)
date_log = fields.Datetime(
        'Assignment Date',
default=fields.Datetime.now,
store=True,
readonly=True
)
amount_time = fields.Float(
'Amount in hours invested',
default=0.0,
store=True
)
notes = fields.Text(
'Additional notes',
store=True
)
objective_id = fields.Many2one(
'employee.objective',
string="Current objective",
store=True
)
|
# nvdashboard version 2
import os
import sys
import re
import time
import datetime
from datetime import datetime as dt
import copy
class nvdashboard2:
def __init__(self, conf={}):
# Information to be acquired when initialized
self.status = {}
self.status['cwd'] = os.getcwd()
self.status['platform'] = sys.platform
self.status['start'] = dt.now()
# Read conf when instantiating
if len(conf) == 0:
print(' conf is empty ')
return
self.conf = conf
# Default member variable name:
# conf,status,result,pers,error_msg,items
# Save value with each run
self.result = {}
# Use pers if you want to save the value across executions
self.pers = {}
# error_msg
self.error_msg = []
# date time when instantiating
print(self.status['start'].strftime('%Y-%m-%d %H:%M:%S'))
# Useful function for List comprehension
def re(self, tg, xlist):
return([x for x in xlist if re.search(tg, x)])
# sleep
def sleep(self, args=[]):
if len(args) == 0:
time.sleep(1)
else:
time.sleep(int(args[0]))
# Set items
def set_items(self, noun="", verb="", override={}):
self.items = {}
items = {}
if noun not in self.conf['nouns'].keys():
print(" noun is not in nouns ")
self.items = {}
return False
else:
if noun != "":
items = copy.deepcopy(self.conf['nouns'][noun])
if verb not in self.conf['verbs'].keys():
print(" verb not in verbs ")
self.items = {}
return False
else:
if verb != "":
# set verb not in noun
for k in self.conf['verbs'][verb].keys():
if k not in items.keys():
items[k] = copy.deepcopy(self.conf['verbs'][verb][k])
def set_common(c, i):
if type(i) == dict:
for k in i.keys():
if (type(i[k]) == dict and
len(i[k]) == 1 and
list(i[k].keys())[0] == 'xxx_common_xxx'):
i[k] = copy.deepcopy(c[k][i[k]['xxx_common_xxx']])
if type(i[k]) == dict and len(i[k]) > 0:
i[k] = set_common(c, i[k])
return(i)
else:
return(i)
items = set_common(self.conf['commons'], items)
# set global not in verb,noun
for k in self.conf['global']:
if k not in items.keys():
items[k] = copy.deepcopy(self.conf['global'][k])
# override function
def ride(o, i):
for k in o:
if (k not in i.keys() or
type(o[k]) != dict or
type(i[k]) != dict):
i[k] = o[k]
else:
i[k] = ride(o[k], i[k])
return(i)
if len(override) > 0:
items = ride(override, items)
# set noun,verb if not in items.keys()
if 'noun' not in items.keys():
items['noun'] = noun
if 'verb' not in items.keys():
items['verb'] = verb
self.items = copy.deepcopy(items)
return True
def do(self, noun="", verb="", do_flag=False, override={}):
self.noun = noun
if len(self.error_msg) > 0:
print('...Error')
for one in self.error_msg:
print(one)
return False
# set items
i_tf = self.set_items(noun, verb, override)
if not i_tf:
return False
if type(do_flag) != bool:
do_flag = False
# Do when do_flag is True
if do_flag:
            for one in self.items['do']:
                xlist = one.split(',')
                # dispatch by method name; getattr avoids eval on built strings
                method = getattr(self, xlist[0])
                if len(xlist) > 1:
                    method(xlist[1:])
                else:
                    method()
            return True
        else:
            print()
            print(' dry run ')
            print(' ds.items ')
            print()
            return True
# Execute if there is an argument for test or exec in items
def test(self, script=""):
if script != "":
exec(script)
try:
if "exec" in self.items.keys():
exec(self.items['exec'])
        except Exception:
            # swallow errors from user-provided snippets; consider logging them
            pass
|
from http.server import HTTPServer, BaseHTTPRequestHandler
from PIL import Image, ImageDraw
import io
TRANSPARENT_BLACK = (0, 0, 0, 128)
class request_handler(BaseHTTPRequestHandler):
def do_GET(self):
if self.path == "/" or self.path == "/index.html":
            try:
                page = open("index.html").read()  # 'file' shadowed a builtin name
                self._write_data(200, page)
            except OSError:
                self._write_data(500, "500")
elif self.path.startswith("/tiles/") and self.path.endswith(".png"):
path_segments = self.path.split("/")
if len(path_segments) == 5:
z = path_segments[2]
x = path_segments[3]
y = path_segments[4][:-4]
self._write_data(200, tile(z, x, y))
else:
self._write_data(404, "404")
else:
self._write_data(404, "404")
def _write_data(self, status, data):
if type(data) == str:
data = bytes(data, "utf-8")
self.send_response(status)
self.end_headers()
self.wfile.write(data)
def tile(z, x, y):
img = Image.new("RGBA", (256, 256), color = (0, 0, 0, 0))
draw = ImageDraw.Draw(img)
# borders
draw.line([(0, 0), (0, 255)], fill=TRANSPARENT_BLACK, width=1)
draw.line([(0, 255), (255, 255)], fill=TRANSPARENT_BLACK, width=1)
draw.line([(255, 255), (255, 0)], fill=TRANSPARENT_BLACK, width=1)
draw.line([(255, 0), (0, 0)], fill=TRANSPARENT_BLACK, width=1)
# text
text = f"z: {z}\nx: {x}\ny: {y}"
(textsize_x, textsize_y) = draw.multiline_textsize(text)
margin = 10
draw.rectangle(
[(margin, margin), (margin* 3 + textsize_x, margin* 3 + textsize_y)],
fill=TRANSPARENT_BLACK
)
draw.multiline_text((margin * 2, margin * 2), text, fill=(255, 255, 255, 255), align="left")
# convert to bytes
imgBytes = io.BytesIO()
img.save(imgBytes, format="PNG")
return imgBytes.getvalue()
if __name__ == "__main__":
httpd = HTTPServer(('localhost', 6371), request_handler)
httpd.serve_forever()
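# With the server running, a tile can be fetched from another process, e.g.:
# import urllib.request
# png = urllib.request.urlopen('http://localhost:6371/tiles/3/2/1.png').read()
# open('tile_3_2_1.png', 'wb').write(png)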
|
from django.contrib import admin
from django.urls import path, include
from django.contrib.auth import views as auth_views
from polls import views
from member import views as m_views
urlpatterns = [
path('admin/', admin.site.urls),
path('join/', m_views.join),
path('login/', m_views.login),
path('logout/', m_views.logout),
path('upload1/', m_views.upload1),
path('download/', m_views.download),
path('polls/', include('polls.urls')),
    # NOTE: these auth_views routes never match because the custom 'login/'
    # and 'logout/' paths above take precedence; rename one pair (for
    # example 'accounts/login/') if both are needed.
    path('login/', auth_views.LoginView.as_view(
        template_name='login.html'),
        name='login'),
    path('logout/',
        auth_views.LogoutView.as_view(),
        name='logout'),
]
|
# -*- coding: utf8 -*-
from scapy.all import sniff, sendp
from scapy.all import Packet
from scapy.all import ShortField, IntField, LongField, BitField
from scapy.all import Ether, IP, ICMP
from mininet.log import info
import time
import sys
import fire
import json
import random
def stringToList(s):
if s == '':
return []
s = s[1:len(s)-1]
    s = s.replace(' ', '')  # TODO: decide whether the spaces should be removed
print(s)
return [str(i) for i in s.split(',')]
'''
To fit the timeline feature, one encoded packet is now sent per call.
'''
def send(src, iface, dsts='', index=0, pow=5, send_pkt=None):
    if send_pkt is None:  # avoid a shared mutable default argument
        send_pkt = []
    dsts = stringToList(dsts)
    num = len(dsts)  # number of broadcast targets
    for i in range(0, num):
        print("dest%d:" % i, dsts[i])
    # sendp is one-to-one, so the broadcast is simulated by sending one packet per target
    filename1 = '/home/shlled/mininet-project-duan/TimeSchedule/Log/msg.txt'  # read the file contents once to keep overhead low
    f1 = open(filename1, 'r')
    buffer = f1.readlines()
    lenth = len(buffer)
    total = lenth
    for i in range(0, num):
        # send only once per target
        time.sleep(1)
        dst = dsts[i]
        now = time.time()
        alpha = buffer[index]  # read the entry at the requested position in the file
        msg = "send_time: " + "%.6f" % float(now) + "total:%d" % total + "index:%d" % index + "data:" + alpha
        send_pkt.append(msg)
        print(msg)  # print the payload being sent
        p = Ether() / IP(src=src, dst=dst) / ICMP() / msg
        sendp(p, iface=iface)  # send the assembled packet
    f1.close()
# Retransmission is deferred; retransmitted packets will be handled later.
# The AP only broadcasts; energy and reward are not considered.
fire.Fire(send)
|
"""
Heber Cooke 10/15/2019
Chapter 4 Exercise 5
Shift Left
takes a binary string and rotates it left: the leftmost bit moves to the rightmost position
"""
# shift Left
num = input("Enter a binary number: ")
#shiftAmount = 2
shiftAmount = int(input("Enter a shift amount: "))
for i in range(0,shiftAmount):
temp = num[0]
    num = num[1:] + temp
print(num)
|
""" Представлен список чисел. Необходимо вывести элементы
исходного списка, значения которых больше предыдущего
элемента.
Подсказка: элементы, удовлетворяющие условию, оформить
в виде списка. Для формирования списка использовать
генератор. """
num_list = [1, 5, 3, 4, 8, 7]
new_list = [elem for elem in num_list if num_list.index(elem) > 0 and elem > num_list[num_list.index(elem)-1]]
print(new_list)
|
from dataprocessing import DataReader
from classifiers import Knn, FFNN, NaiveBayes
from feature_extraction import ClusterPCA, BiclusterExtractor, FeatureCluster
from utils import Pca
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
import time
import csv, datetime
#Set dataset parameters
tune_k = False
dataset = 'communities_crime'
num_clusters = 4
pca_clusters = round(num_clusters/2)
lr = 0.01
dr = DataReader('../data/'+dataset+'.csv')
in_, out = dr.run()
hidden_size = round((num_clusters + len(set(out)))/2)
#Set parameters for classifiers
layers = [hidden_size] #List of number of nodes per hidden layer
list_of_ks = [10,10,10,10,10] #If k param needs to change between different extracted features
#Make class labels consecutive values
for i, o in enumerate(set(out)):
for j, lab in enumerate(out):
if lab == o:
out[j] = i
offset = min(out)
out = out - (offset-1)
#Extract the features
cpca = ClusterPCA(in_, out, method='kmeans', num_clusters=pca_clusters, feats_per_cluster=2)
cpca_feats = cpca.extract_features()
bc = BiclusterExtractor(in_, out, n=num_clusters)
bc_feats = bc.extract_features()
fc = FeatureCluster(in_, out, method="kmeans", num_clusters=num_clusters, _type="soft")
fc_soft_feats = fc.extract_features()
fc2 = FeatureCluster(in_, out, method="kmeans", num_clusters=num_clusters, _type="mixed")
fc_mixed_feats = fc2.extract_features()
print (set(out))
print (len(set(out)))
#Tune k for knn
if tune_k:
all_knn = Knn(in_, out, k=1)
cpca_knn = Knn(cpca_feats, out, k=1)
bc_knn = Knn(bc_feats, out, k=1)
fc_soft_knn = Knn(fc_soft_feats, out, k=1)
fc_mixed_knn = Knn(fc_mixed_feats, out, k=1)
all_knn.plot_k_scores(20)
cpca_knn.plot_k_scores(20)
bc_knn.plot_k_scores(20)
fc_soft_knn.plot_k_scores(20)
fc_mixed_knn.plot_k_scores(20)
else:
features = [in_, cpca_feats, bc_feats, fc_soft_feats, fc_mixed_feats]
feature_labels = ['All', 'CPCA', 'BC', 'FC_Soft', 'FC_Mixed']
file_out_name = dataset + '_results_' + datetime.datetime.now().strftime("%d_%m_%Y:%H:%M") + '_' + str(hidden_size) + '_' + str(lr) + '.csv'
file_out = open(file_out_name, 'w')
fieldnames = ['method', 'classifier', 'score_type', 'mean', 'stdev', 'time', 'all_scores']
writer = csv.writer(file_out)
writer.writerow([num_clusters])
writer.writerow(fieldnames)
for i, feats in enumerate(features):
k = list_of_ks[i]
method = feature_labels[i]
knn = Knn(feats, out, k)
        # use the extracted feature set here too (the original passed the raw
        # inputs, which would make the feature-method comparison meaningless)
        ffnn = FFNN(feats, out, layers)
        nb = NaiveBayes(feats, out)
knn_start = time.time()
print("knn")
k_acc_mean, k_acc_std, k_accs, k_f_mean, k_f_std, k_fs = knn.k_fold_score(10, 'both')
knn_stop = time.time()
ffnn_start = time.time()
print("ffnn")
f_acc_mean, f_acc_std, f_accs, f_f_mean, f_f_std, f_fs = ffnn.k_fold_score(10, 'both')
ffnn_stop = time.time()
nb_start = time.time()
print("nb")
n_acc_mean, n_acc_std, n_accs, n_f_mean, n_f_std, n_fs = nb.k_fold_score(10, 'both')
nb_stop = time.time()
knn_time = knn_stop - knn_start
ffnn_time = ffnn_stop - ffnn_start
nb_time = nb_stop - nb_start
writer.writerow([method, 'knn', 'accuracy', k_acc_mean,
k_acc_std, knn_time, k_accs])
writer.writerow([method, 'knn', 'fscore', k_f_mean,
k_f_std, knn_time, k_fs])
writer.writerow([method, 'ffnn', 'accuracy', f_acc_mean,
f_acc_std, ffnn_time, f_accs])
writer.writerow([method, 'ffnn', 'fscore', f_f_mean,
f_f_std, ffnn_time, f_fs])
writer.writerow([method, 'nb', 'accuracy', n_acc_mean,
n_acc_std, nb_time, n_accs])
writer.writerow([method, 'nb', 'fscore', n_f_mean,
n_f_std, nb_time, n_fs])
#
|
# Implement the reducer for the PageRank algorithm using Hadoop Streaming. Use the simplified algorithm (without random jumps).
# Input and output format: the key is the node id. The value is composite: the PageRank value (rounded to 3 decimal places) and the list of adjacent nodes (comma-separated, in curly braces), separated by tabs.
# The sample reducer run below is for the graph from the lecture (node ids are shown without the 'n' prefix):
# Sample Input:
# 1 0.067 {}
# 1 0.200 {2,4}
# 2 0.067 {}
# 2 0.100 {}
# 2 0.200 {3,5}
# 3 0.067 {}
# 3 0.100 {}
# 3 0.200 {4}
# 4 0.100 {}
# 4 0.200 {}
# 4 0.200 {5}
# 5 0.100 {}
# 5 0.200 {}
# 5 0.200 {1,2,3}
# Sample Output:
# 1 0.067 {2,4}
# 2 0.167 {3,5}
# 3 0.167 {4}
# 4 0.300 {5}
# 5 0.300 {1,2,3}
import sys
prev_node = ''
sum_weight = 0
prev_adjacency = '{}'
for line in sys.stdin:
node, weight, adjacency_str = line.strip().split('\t')
if not prev_node:
if adjacency_str == '{}':
sum_weight += float(weight)
else:
prev_adjacency = adjacency_str
elif node == prev_node:
if adjacency_str == '{}':
sum_weight += float(weight)
else:
prev_adjacency = adjacency_str
    else:
        if prev_adjacency == '{}':
            print('%s\t0\t%s' % (prev_node, prev_adjacency))
        else:
            print('%s\t%.3f\t%s' % (prev_node, sum_weight, prev_adjacency))
        # reset the accumulator for the new key; prev_adjacency must be
        # cleared too, or a node without an adjacency record would reuse
        # the previous node's list
        if adjacency_str == '{}':
            sum_weight = float(weight)
            prev_adjacency = '{}'
        else:
            sum_weight = 0
            prev_adjacency = adjacency_str
    prev_node = node
if prev_adjacency == '{}':
print('%s\t0\t%s' % (prev_node, prev_adjacency))
else:
print('%s\t%.3f\t%s' % (prev_node, sum_weight, prev_adjacency))
|
print("A","\t","B")
for i in range(3,18,2):
for j in range(2,17,2):
print(i,"\t",j)
|
def from_seconds(seconds):
h, seconds = divmod(seconds, 3600)
m, seconds = divmod(seconds, 60)
return '|'.join('{:0>2.0f}'.format(a) for a in (h, m, seconds))
def to_seconds(h, m, s):
return h * 3600 + m * 60 + s
def stat(string):
if not string:
return ''
times = []
total_sum = total_racers = 0
for b in string.split(', '):
seconds = to_seconds(*(int(c) for c in b.split('|')))
total_racers += 1
total_sum += seconds
times.append(seconds)
times.sort()
maximum = times[-1]
minimum = times[0]
q, r = divmod(total_racers, 2)
return 'Range: {} Average: {} Median: {}'.format(
from_seconds(maximum - minimum),
from_seconds(int(total_sum / float(total_racers))),
from_seconds(sum(times[q - 1:q + 1]) / 2 if not r else times[q])
)
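# A quick check of stat() on a sample; the expected output follows from the
# formatting rules above.
print(stat("01|15|59, 1|47|16, 01|17|20, 1|32|34, 2|17|17"))
# Range: 01|01|18 Average: 01|38|05 Median: 01|32|34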
|
import os
import sys
import time
import signal
import logging
from logging.handlers import RotatingFileHandler
import multiprocessing
import atexit
from resource import getrlimit, RLIMIT_NOFILE, RLIM_INFINITY
class DefaultProcess(multiprocessing.Process):
"""
Basic acolyte process class.
"""
def __init__(self,
name='Default Process',
shutdown=None,
config=None):
multiprocessing.Process.__init__(self)
self.name = name
self._shutdown = shutdown
self.daemon = True
self.debug = False
self.config = config
if config:
debug = config.get('debug', 'off')
if debug == 'on':
self.debug = True
fmt = logging.Formatter(config.get('log_format'))
logfile = config.get('logfile', '/dev/null')
max_log_size = int(config.get('max_log_size', 1))
handler = RotatingFileHandler(logfile,
backupCount=5,
maxBytes=max_log_size * 1000000)
handler.setFormatter(fmt)
log = logging.getLogger(self.name)
if debug == 'on':
log.setLevel(logging.DEBUG)
self.debug = True
else:
log.setLevel(logging.INFO)
log.addHandler(handler)
self.log = log
def wait(self, interval):
for i in list(range(0, interval)):
self.check_shutdown()
time.sleep(1)
def check_shutdown(self):
if self._shutdown.is_set():
self.shutdown()
class AbstractDaemon(object):
    def __init__(self,
                 pidfile,
                 name,
                 umask=0o22,
                 work_dir='/',
                 debug='off',
                 files_preserve=None):
        if debug == 'on':
            self.debug = True
        else:
            self.debug = False
        self.umask = umask
        self.name = name
        self.work_dir = work_dir
        self.pidfile = pidfile
        # the original dropped the 'self.' here, so daemonize() would crash
        self.files_preserve = files_preserve if files_preserve is not None else []
def sigterm_handler(self, signum, frame):
sys.exit(0)
def daemonize(self):
try:
os.umask(self.umask)
        except Exception as err:
msg = 'Failed to set umask: Err %s: %s\n' \
% (err.errno, err.strerror)
sys.stderr.write(msg)
sys.exit(1)
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
        except OSError as err:
msg = 'First fork failed. Err %s: %s\n' \
% (err.errno, err.strerror)
sys.stderr.write(msg)
sys.exit(1)
try:
os.setsid()
        except Exception as err:
msg = 'setsid failed. Err %s: %s\n' \
% (err.errno, err.strerror)
sys.stderr.write(msg)
sys.exit(1)
try:
os.chdir(self.work_dir)
        except Exception as err:
msg = 'Change dir to %s failed. Err %s: %s\n' \
% (self.work_dir, err.errno, err.strerror)
sys.stderr.write(msg)
sys.exit(1)
try:
pid = os.fork()
if pid > 0:
sys.exit(1)
        except OSError as err:
msg = 'Second fork failed. Err %s: %s\n' \
% (err.errno, err.strerror)
sys.stderr.write(msg)
sys.exit(1)
try:
rlim_max = getrlimit(RLIMIT_NOFILE)[1]
        except OSError as err:
msg = 'Failed to get RLIMIT_NOFILE. Err %s: %s\n' \
% (err.errno, err.strerror)
sys.stderr.write(msg)
sys.exit(1)
try:
pid = str(os.getpid())
        except Exception as err:
msg = 'Failed to get pid of running process. Err %s: %s\n' \
% (err.errno, err.strerror)
sys.stderr.write(msg)
sys.exit(1)
try:
open(self.pidfile, 'w+').write(pid)
        except Exception as err:
msg = 'Failed to write pidfile. Err %s: %s\n' \
% (err.errno, err.strerror)
sys.stderr.write(msg)
sys.exit(1)
if rlim_max == RLIM_INFINITY:
rlim_max = 1024
for fd in range(3, rlim_max):
if fd in self.files_preserve:
continue
try:
os.close(fd)
except OSError:
pass
if not self.debug:
sys.stdout.flush()
sys.stderr.flush()
new_stdin = open('/dev/null','r')
new_stdout = open('/dev/null', 'a+')
new_stderr = open('/dev/null', 'a+')
os.dup2(new_stdin.fileno(), sys.stdin.fileno())
os.dup2(new_stdout.fileno(), sys.stdout.fileno())
os.dup2(new_stderr.fileno(), sys.stderr.fileno())
signal.signal(signal.SIGTERM, self.sigterm_handler)
atexit.register(self.pidfile_cleanup)
pid = os.getpid()
        open(self.pidfile, 'w+').write("%s\n" % pid)
def run(self):
raise NotImplementedError
def start(self, *args, **kwargs):
print "Starting..."
# Check for a pidfile to see if the daemon already runs
try:
            pf = open(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
msg = 'pidfile %s exists. %s running?' % (self.pidfile, self.name)
print(msg)
sys.exit(1)
self.daemonize()
self.run()
def get_pidfile_content(self, pidfile):
"""
Returns the contents of given pidfile as string.
"""
try:
            pf = open(pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
return pid
    def stop(self, timeout=None):
        # 'timeout' was referenced but never defined; it is now a parameter.
        print('Stopping...')
        pid = self.get_pid()
        if not pid:
            msg = ('Pidfile %s does not exist. '
                   '%s not running?') % (self.pidfile, self.name)
            print(msg)
            if os.path.exists(self.pidfile):
                os.remove(self.pidfile)
            # return on restart
            return
        try:
            os.kill(pid, signal.SIGTERM)
            if timeout:
                timeout = timeout + 1
                step = timeout / 5
                n = 0
                for i in range(0, 5):
                    if n == 0:
                        time.sleep(1)
                    else:
                        time.sleep(step)
                    npid = self.get_pid()  # was an undefined module-level get_pid()
                    if not npid:
                        os.kill(pid, signal.SIGTERM)
        except OSError as err:
            if "No such process" in str(err):
                if os.path.exists(self.pidfile):
                    os.remove(self.pidfile)
            else:
                print(str(err))
                sys.exit(1)
def restart(self):
self.stop()
self.start()
def is_running(self):
pid = self.get_pid()
if pid is None:
print('%s is stopped' % self.name)
elif os.path.exists('/proc/%s' % pid):
print('%s with pid %s is running...' % (self.name, pid))
else:
            print('Pidfile %s exists but %s process not found'
                  % (self.pidfile, self.name))
def pidfile_cleanup(self):
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
def get_pid(self):
try:
            pf = open(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
except SystemExit:
pid = None
return pid
|
def naiveStringSearch(bigString, smallString):
    # for java use string.toCharArray()
    if len(bigString) == 0 or len(smallString) == 0:
        return 0
    length = len(smallString)
    frequency = 0
    # slide a window over bigString; the original advanced the outer index
    # past partial matches, which missed occurrences that start inside them
    for start in range(len(bigString) - length + 1):
        totalMatch = 0
        while totalMatch < length and bigString[start + totalMatch] == smallString[totalMatch]:
            totalMatch += 1
        if totalMatch == length:
            frequency += 1
    return frequency
# print(naiveStringSearch('omlomgomlomgomlomg','omg'))
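# A couple of quick checks, including an overlapping pattern:
print(naiveStringSearch('omlomgomlomgomlomg', 'omg'))  # 3
print(naiveStringSearch('aaaa', 'aa'))  # 3 (overlapping matches count)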
|
from pyecoregen.ecore import EcoreGenerator
from pyecore.resources import ResourceSet, URI
from pyecore.resources.resource import HttpURI
from esdl.esdl import *
import esdl
from xmlresource import XMLResource
import datetime
from energy_system_handler import EnergySystemHandler
def attr_to_dict(eobj):
d = dict()
d['eClass'] = eobj.eClass.__name__
for attr in dir(eobj):
attr_value = eobj.eGet(attr)
if attr_value is not None:
d[attr] = eobj.eGet(attr)
return d
def main():
# create a resourceSet that hold the contents of the esdl.ecore model and the instances we use/create
rset = ResourceSet()
# register the metamodel (available in the generated files)
rset.metamodel_registry[esdl.nsURI] = esdl
rset.resource_factory['esdl'] = lambda uri: XMLResource(uri) # we register the factory for '.esdl' extension and XML serialization
# Create a new EnergySystem
es = EnergySystem(name="mpoc")
instance = Instance(name="test instance")
# example of using an Enum
instance.aggrType = AggrTypeEnum.PER_COMMODITY
es.instance.append( instance )
es.instance[0].area = Area(name="Groningen", id="PV20")
# create a new PV parc with 10 panels
pvparc = PVParc(name="PV parc")
pvparc.numberOfPanels = 10
    # Use datetime to set dates and times
now = datetime.datetime.now()
pvparc.commissioningDate = now
ed = ElectricityDemand(name="E demand")
es.instance[0].area.asset.append(pvparc)
es.instance[0].area.asset.append(ed)
inPort = InPort(id='InPort1')
ed.port.append(inPort)
outPort = OutPort(id='OutPort1', connectedTo=[inPort])
pvparc.port.append(outPort)
# create a new windturbine
turbine = WindTurbine(id=EnergySystemHandler.generate_uuid(),
name='WindTurbine 4',
power=2E6,
fullLoadHours=2000,
height=150.0,
surfaceArea=100,
prodType=RenewableTypeEnum.from_string('RENEWABLE'),
type=WindTurbineTypeEnum.from_string('WIND_ON_LAND'))
es.instance[0].area.asset.append(turbine)
# create new wind search area
search_area_wind = SearchAreaWind(
id=EnergySystemHandler.generate_uuid(),
name="Search Area Wind",
fullLoadHours=1920,
area=2E8
)
es.instance[0].area.potential.append(search_area_wind)
# create new solar search area
search_area_solar = SearchAreaSolar(
id=EnergySystemHandler.generate_uuid(),
name="Search Area Solar",
fullLoadHours=867,
area=2E8
)
es.instance[0].area.potential.append(search_area_solar)
# create new KPIs object
es.instance[0].area.KPIs = KPIs(description="KPIs")
# Create quantity and unit for CO2-emissions
co2_unit = QuantityAndUnitType(
physicalQuantity="EMISSION",
multiplier="MEGA",
# unit="GRAM",
# perMultiplier="",
# perUnit="",
description="Mton (CO2-emissions)",
# perTimeUnit="",
id="mton",
)
# Create CO2-emissions KPI
kpi_co2 = KPI(
name="KPI CO2-emissions",
value=None,
quantityAndUnit = co2_unit
)
# Create quantity and unit for total costs
costs_unit = QuantityAndUnitType(
physicalQuantity="COST",
multiplier="MEGA",
# unit="GRAM",
# perMultiplier="",
# perUnit="",
description="Mln euros (total costs)",
# perTimeUnit="",
id="meur",
)
# Create costs KPI
kpi_costs = KPI(
name="KPI Total costs",
value=None,
quantityAndUnit = costs_unit
)
# Add CO2-emissions and total costs KPIs to KPIs
es.instance[0].area.KPIs.kpi.append(kpi_co2)
es.instance[0].area.KPIs.kpi.append(kpi_costs)
print("Energy system: {}".format(attr_to_dict(es)))
print("OutPort connectedTo: {}".format(outPort.connectedTo))
print("InPort connectedTo: {}".format(inPort.connectedTo))
resource = rset.create_resource(URI('mpoc.esdl'))
resource.append(es)
resource.save()
if __name__ == '__main__':
# Load the ESDL model from GitHub
rset = ResourceSet()
resource = rset.get_resource(HttpURI('https://raw.githubusercontent.com/EnergyTransition/ESDL/master/esdl/model/esdl.ecore'))
esdl_model = resource.contents[0]
# Generate the classes
generator = EcoreGenerator()
generator.generate(esdl_model, ".")
main()
|
from PerfectAlphabetic import PerfectAlphabetic
from PerfectAlphabetic2 import PerfectAlphabetic2
from Test import Test
if __name__ == '__main__':
# perfect_alphabetic = PerfectAlphabetic("resources/cipher.txt")
# perfect_alphabetic.crack_cipher()
# perfect_alphabetic.test()
test1 = PerfectAlphabetic2("resources/cipher.txt")
test1.crack_cipher()
# test = Test()
# test.test()
|
import random,os
clear = lambda: os.system('cls')
class Maze():
def __init__(self,width = 16, height = 16, start_point = (1,0), show_progress = False):
self.mx = width
self.my = height
self.maze = [[1 for x in range(self.mx)] for y in range(self.my)]
self.solved_maze = None
self.startP = start_point
        self.endP = None # end not yet defined
self.pathArr = None
self.solved = False
dx = [0, 1, 0, -1]; dy = [-1, 0, 1, 0] # 4 directions to move in the maze
stack = [self.startP]
while len(stack) > 0:
(cx, cy) = stack[-1]
self.maze[cy][cx] = 0
# find a new cell to add
nlst = [] # list of available neighbors
for i in range(4):
nx = cx + dx[i]
ny = cy + dy[i]
if nx >= 1 and nx < self.mx-1 and ny >= 1 and ny < self.my-1:
if self.maze[ny][nx] == 1:
                        # number of occupied neighbors must be 1
ctr = 0
for j in range(4):
ex = nx + dx[j];
ey = ny + dy[j]
if ex >= 0 and ex < self.mx and ey >= 0 and ey < self.my:
if self.maze[ey][ex] == 0: ctr += 1
if ctr == 1: nlst.append(i)
# if 1 or more neighbors available then randomly select one and move
if len(nlst) > 0:
ir = nlst[random.randint(0, len(nlst) - 1)]
cx += dx[ir]; cy += dy[ir]
stack.append((cx, cy))
else: stack.pop()
            if show_progress:
                clear()
                print(self)
for i in range(len(self.maze[-2])-1,0,-1):
if self.maze[-2][i] == 0:
self.endP = (i,self.my-1) # set exit point
self.maze[-1][i] = 0 # break last wall
break
def start(self):
return self.startP
def end(self):
return self.endP
def solve(self,show_progress = False):
# BEGIN !
Grid = {}
Node.start_n = self.startP
Node.end_n = self.endP
x = self.mx
y = self.my
# CREATE NODES GRID
for yy in range(y):
for xx in range(x):
position = (xx,yy)
Grid[position] = Node(xx,yy)
Grid[position].obstacle = self.maze[yy][xx] == 1
# INIT NEIGHBOURS
for yy in range(y):
for xx in range(x):
node = Grid[(xx,yy)]
for nx,ny in [[1,0],[-1,0],[0,1],[0,-1]]:
try:
node.neighbours += [Grid[(node.x+nx,node.y+ny)]]
                    except KeyError:
                        pass  # neighbour lies outside the grid
start_node = Grid[self.startP]
end_node = Grid[self.endP]
wanderer = pathFinder(start_node,end_node)
#clear()
while wanderer.inProgress:
wanderer.keepGoing()
if show_progress:
clear()
printNodes(Grid,x,y,start_node,end_node,end_node.road)
p = []
for nod in end_node.road:
p += [(nod.x,nod.y)]
self.pathArr = list(reversed(p))
        self.solved_maze = [row[:] for row in self.maze]  # copy rows so marking the path does not mutate the original maze
for (xx,yy) in self.pathArr:
self.solved_maze[yy][xx] = 2
self.solved = True
def __str__(self):
s = ""
m = self.maze
if self.solved:
m = self.solved_maze
        for y,row in enumerate(m):
for x,node in enumerate(row):
if (x,y) == self.startP:
s += "S"
elif (x,y) == self.endP:
s += "E"
else:
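                    # cell glyphs: 0 = open, 1 = wall, 2 = solution path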
s += [" ","█","."][node]
s += "\n"
return s
def mazeArray(self):
return self.maze
def path(self):
return self.pathArr
def solvedMazeArray(self):
return self.solved_maze
class Node:
start_n,end_n = (-1,-1),(-1,-1)
def __init__(self,xx,yy):
self.x = xx
self.y = yy
self.visited = False
self.obstacle = False
self.road = [self]
self.neighbours = []
def __str__(self):
if (self.x,self.y) == self.start_n:
return "S"
if (self.x,self.y) == self.end_n:
return "E"
return ['░',' ','▒','█'][self.visited + 2*self.obstacle]
class pathFinder():
def __init__(self,start,goal):
self.path_table = [start]
self.goal = goal
self.inProgress = True
self.maxSize = -1
self.allIterations = 1
def keepGoing(self):
if not self.goal.visited:
new_path_table = []
for node in self.path_table:
node.visited = True
if not node.obstacle:
for nb in node.neighbours:
if not nb.visited:
new_path_table.extend([nb])
nb.road.extend(node.road)
            self.path_table = new_path_table
else:
self.inProgress = False
def printNodes(grid,gx,gy,s,e,nodes=[]):
for yy in range(gy):
for xx in range(gx):
node = grid[(xx,yy)]
if nodes:
if node in nodes and not node in [s,e]:
print(".",end="")
else:
print(node,end="")
else:
print(node,end="")
print()
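# A minimal usage sketch (hypothetical driver, not part of the original module):
# generate a maze, solve it with the BFS-style pathFinder above, and print the
# solved grid with the path marked by dots.
if __name__ == '__main__':
    maze = Maze(21, 21)
    maze.solve()
    print(maze)
    print("Path length:", len(maze.path()))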
|
import numpy as np
import neworder as no
import pandas as pd # type: ignore
import pytest
def test_invalid() -> None:
with pytest.raises(AssertionError):
no.Space(np.array([]), np.array([]))
with pytest.raises(AssertionError):
no.Space(np.array([0.0]), np.array([0.0]))
with pytest.raises(AssertionError):
no.Space(np.array([0.0, 1.0]), np.array([1.0, -1.0]))
def test_space2d() -> None:
# constrained edges
space2dc = no.Space(np.array([-1.0, -3.0]), np.array([2.0, 5.0]), no.Edge.CONSTRAIN)
point = np.zeros(2)
delta = np.array([0.6, 0.7])
# move point until stuck in corner
for _ in range(100):
point, delta = space2dc.move(point, delta, 1.0)
# check its in corner and not moving
assert point[0] == 2.0
assert point[1] == 5.0
assert delta[0] == 0.0
assert delta[1] == 0.0
# wrapped edges
space2dw = no.Space(np.array([-1.0, -3.0]), np.array([2.0, 5.0]), no.Edge.WRAP)
assert space2dw.dim == 2
points = np.array([[0.,0.],[1.,0.],[0.,1.]])
delta = np.array([0.6, 0.7])
# move point
for _ in range(100):
points, delta = space2dw.move(points, delta, 1.0)
# check distances dont change
d2, _ = space2dw.dists2(points)
assert np.all(d2.diagonal() == 0.0)
assert np.allclose(d2[0], np.array([0., 1., 1.]))
assert np.allclose(d2[1], np.array([1., 0., 2.]))
# check its still in domain and speed unchanged
assert np.all(points[:,0] >= -1.0) and np.all(points[:, 0] < 2.0)
assert np.all(points[:,1] >= -3.0) and np.all(points[:, 1] < 5.0)
assert delta[0] == 0.6
assert delta[1] == 0.7
# bounce edges
space2db = no.Space(np.array([-1.0, -3.0]), np.array([2.0, 5.0]), no.Edge.BOUNCE)
assert space2db.dim == 2
points = np.array([[0.,0.],[1.,0.],[0.,1.]])
deltas = np.array([[0.6, 0.7],[0.6, 0.7],[0.6, 0.7]])
# move points
for _ in range(100):
        points, deltas = space2db.move(points, deltas, 1.0)
# check points still in domain and absolute speed unchanged
assert np.all(points[:,0] >= -1.0) and np.all(points[:, 0] < 2.0)
assert np.all(points[:,1] >= -3.0) and np.all(points[:, 1] < 5.0)
assert np.all(np.abs(deltas[:,0]) == 0.6)
assert np.all(np.abs(deltas[:,1]) == 0.7)
def test_space3d() -> None:
rng = np.random.default_rng(19937)
N = 5
bodies = pd.DataFrame(index=no.df.unique_index(N), data={
"x": rng.random(N) - 0.5,
"y": rng.random(N) - 0.5,
"z": rng.random(N) - 0.5,
"vx": 0.01,
"vy": 0.01,
"vz": 0.01
})
space = no.Space.unbounded(3)
s = np.column_stack((bodies.x, bodies.y, bodies.z))
assert np.all(space.dists(s).diagonal() == 0.0)
assert space.dim == 3
dt = 1.0
(bodies.x, bodies.y, bodies.z), (bodies.vx, bodies.vy, bodies.vz) = space.move((bodies.x, bodies.y, bodies.z), (bodies.vx, bodies.vy, bodies.vz), dt, ungroup=True)
def test_grid() -> None:
with pytest.raises(ValueError):
no.StateGrid(np.empty(shape=(3,3)), no.Edge.UNBOUNDED)
with pytest.raises(ValueError):
no.StateGrid(np.empty(shape=()))
with pytest.raises(ValueError):
no.StateGrid(np.empty(shape=(2, 0)))
state = np.zeros((5,5))
state[0, 0] = 1
state[1, 1] = 2
state[1, -1] = 3
# total neighbours should be 3 in corner, 5 on edge, 8 in middle
g = no.StateGrid(state, no.Edge.CONSTRAIN)
assert np.sum(g.count_neighbours()) == 3
assert np.sum(g.count_neighbours(lambda x: x == 2)) == 8
assert np.sum(g.count_neighbours(lambda x: x == 3)) == 5
assert np.sum(g.count_neighbours(lambda x: x != 0)) == 16
assert g.shift((0, 0), (-1, -1)) == (0, 0)
state = np.zeros((4,4,4))
state[0,0,0] = 1
state[-1,1,-1] = -1
# total neighbours should be 26
g = no.StateGrid(state, no.Edge.WRAP)
assert np.sum(g.count_neighbours()) == 26
assert np.sum(g.count_neighbours(lambda x: x == -1)) == 26
assert np.sum(g.count_neighbours(lambda x: x != 0)) == 52
assert g.shift((0, 0, 0), (-1, -1, -1)) == (3, 3, 3)
g = no.StateGrid(state, no.Edge.BOUNCE)
assert g.shift((0, 0, 0), (-1, -1, -1)) == (1, 1, 1)
|
#I pledge my honor that I have abided by the Stevens Honor System
def sumValues(values):
total=0
for i in values:
total+=int(i)
return total
def main():
values=input("Enter a list of values separated by a comma:")
new_values=values.split(',')
print(sumValues(new_values))
main()
|
"""Core config for admin."""
from django.apps import AppConfig
from django.db.models import signals
from django.utils.translation import gettext_lazy
def load_core_settings():
"""Load core settings.
This function must be manually called (see :file:`urls.py`) in
order to load base settings.
"""
from modoboa.parameters import tools as param_tools
from . import app_settings
from .api.v2 import serializers
param_tools.registry.add(
"global", app_settings.GeneralParametersForm, gettext_lazy("General"))
param_tools.registry.add2(
"global", "core", gettext_lazy("General"),
app_settings.GLOBAL_PARAMETERS_STRUCT,
serializers.CoreGlobalParametersSerializer)
class CoreConfig(AppConfig):
"""App configuration."""
name = "modoboa.core"
verbose_name = "Modoboa core"
def ready(self):
load_core_settings()
# Import these to force registration of checks and signals
from . import checks # NOQA:F401
from . import handlers
signals.post_migrate.connect(handlers.create_local_config, sender=self)
|
import sae
sae.add_vendor_dir('site-packages')
from view import app
application = sae.create_wsgi_app(app)
|
import re
import requests
from utils.FileUtils import FileUtils
import matplotlib.pyplot as plt
from utils.AnalysisTool import AnalysisTool
class SianaUtils(FileUtils, AnalysisTool):
def __init__(self):
super().__init__()
self.config = self.load_yaml('od_config.yaml')['realTime']['sina']
def getRealTimeData(self, stockCode):
r = requests.get(self.config['RealTimeUrl'] + stockCode)
if r.status_code == 200:
res = re.sub(r'"', '', r.text).split('=')[1]
res = dict(zip(self.config['colName'], res.split(',')))
return res
def getRealTimeKData(self, stockCode, scale='5', ma='no', dataLen='1023'):
with requests.Session() as s:
print(self.config['RealTimeKData'].format(stockCode, scale, ma, dataLen))
r = s.get(self.config['RealTimeKData'].format(stockCode, scale, ma, dataLen))
if r.status_code == 200:
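                # NB: the endpoint returns a JS-style assignment; eval() on remote text is fragile and unsafe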
res = eval(r.text.split('=')[2][1:-2])
return res
def testCase(self):
k_data = self.getRealTimeKData('sh601211')
ts_data, _ = self.getClosePrice(k_data)
ts_data = ts_data[-500:]
ps, ds = self.findPeakAndDip(ts_data, False)
ma_20 = self.movingAverage(ts_data, 20)
self.plotData(ts_data, ps, ds, [ma_20])
if __name__ == '__main__':
su = SianaUtils()
su.testCase()
|
#
# Selects Estnltk JSON sentences corresponding to manual evaluations.
# Basically: merges outputs of scripts pick_randomly_from_errs.py
# (applied on a log file) and detect_clause_errors.py (extracted
# erroneous sentences).
#
# Partly based on:
# https://github.com/estnltk/eval_experiments_lrec_2020
#
import re
import argparse
import os, os.path
import warnings
from math import floor
from collections import defaultdict
from random import randint, seed
from estnltk import logger
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=
"Selects Estnltk JSON sentences corresponding to manual evaluations.")
parser.add_argument('input_eval_file', type=str, \
help="File containing manually evaluated sentences. "+\
"Assumingly file that was initially created by "+\
"script pick_randomly_from_errs.py. ")
parser.add_argument('input_json_file', type=str, \
help="File containing all extracted erroneous sentences, which "+\
"should also include sentences used for manual evaluation in "+\
"estnltk JSON format.")
parser.add_argument('-j', '--judgements', dest='add_judgements', default=False, action='store_true', \
help="If set, attempts to pick manual judgements from the file of manual "+
"evaluations, and add to the output as document metadata. "+
"(default: False)" )
parser.add_argument('-d', '--debug', dest='debug_output', default=False, action='store_true', \
help="If set, then prints additional debug output. "+
"(default: False)" )
args = parser.parse_args()
errors_file = args.input_eval_file
json_file = args.input_json_file
add_judgements = args.add_judgements
debug_output = args.debug_output
assert os.path.exists( errors_file ), '(!) Input file {} not found!'.format( errors_file )
assert os.path.exists( json_file ), '(!) Input file {} not found!'.format( json_file )
logger.setLevel( 'INFO' )
if debug_output:
logger.setLevel( 'DEBUG' )
log = logger
# =====================================================================
# Collect sentences from the file of manual evaluations
# =====================================================================
# Collect all errs gaps, e.g.
# ==================================================
    pattern_separator = re.compile(r'^\s*={30,}\s*$')
# ==================================================
# in text with id 700909 (delfi9.xml)
    pattern_text_id = re.compile(r'^\s*in text with id\s+(\d+)\s+(\S+)\s*$')
# ==================================================
# attributive_embedded_clause_wrong_end::150 (?)
    pattern_err_index_1 = re.compile(r'^\s*([^:]+)::(\d+)\s+(\S+)\s*$') # with judgement
    pattern_err_index_2 = re.compile(r'^\s*([^:]+)::(\d+)\s*$') # without judgement
# ==================================================
# ei 7 8 aux
# panusta 8 1 acl:relcl
# , 15 8 punct <--- NEW CLAUSE END / EMBEDDING
# need 16 17 nsubj
    pattern_inside_sent = re.compile(r'^(\S+)\s(\d+)\s(\d+)\s(\S+).*$')
log.info('Collecting manually evaluated errors ...')
all_collected_err_sentences = []
all_collected_judgements = []
all_empty_judgements = []
with open(errors_file, 'r', encoding='utf-8') as in_f:
last_was_err_indx = False
last_was_sep = False
collect_now = False
last_text_id = None
last_fname = None
last_judgement = None
collected = []
for line in in_f:
line = line.strip()
sep_indx_match = pattern_separator.match( line )
text_id_match = pattern_text_id.match( line )
if text_id_match:
last_text_id = text_id_match.group(1)
last_fname = text_id_match.group(2)
err_index_match_1 = pattern_err_index_1.match( line )
if err_index_match_1 and len(collected)==0:
collect_now = True
last_judgement = err_index_match_1.group(3)
continue
err_index_match_2 = pattern_err_index_2.match( line )
if err_index_match_2 and len(collected)==0:
collect_now = True
last_judgement = None
continue
if collect_now:
inside_sent_match = pattern_inside_sent.match( line )
if inside_sent_match:
word = inside_sent_match.group(1)
collected.append(word)
elif len(line) == 0:
if len(collected) > 0 and collected[-1] != '|':
collected.append('|')
if len(collected) > 0:
# check stopping criteria
if text_id_match is not None or sep_indx_match is not None:
# empty collected buffer
all_collected_err_sentences.append( collected )
all_collected_judgements.append(last_judgement)
if last_judgement is None:
all_empty_judgements.append(last_judgement)
log.debug(' '.join(collected))
collected = []
collect_now = False
last_judgement = None
last_line = line
last_was_err_indx = err_index_match_1 is not None or \
err_index_match_2 is not None
last_was_sep = sep_indx_match is not None
if len(collected) > 0:
# empty collected buffer
all_collected_err_sentences.append( collected )
all_collected_judgements.append(last_judgement)
if last_judgement is None:
all_empty_judgements.append(last_judgement)
log.debug(' '.join(collected))
non_empty_judgements = len(all_collected_judgements) - len(all_empty_judgements)
if non_empty_judgements > 0:
        log.info(f'Collected {len(all_collected_err_sentences)} manually evaluated sentences and {len(all_collected_judgements)} manual judgements ({len(all_empty_judgements)} of them empty).')
else:
log.info(f'Collected {len(all_collected_err_sentences)} manually evaluated sentences.')
if add_judgements:
assert len(all_collected_err_sentences) == len(all_collected_judgements)
assert len(all_collected_judgements) == non_empty_judgements, \
'(!) Cannot add judgements because {} of manual evaluation judgements are missing.'.format(len(all_empty_judgements))
# =====================================================================
# Collect all json sentences
# =====================================================================
assert json_file.endswith('.jsonl')
from estnltk.converters import json_to_text
from estnltk.converters import text_to_json
log.info('Collecting annotated sentences ...')
all_text_objs = []
with open(json_file, 'r', encoding='utf-8') as in_f:
for line in in_f:
line = line.strip()
text_obj = json_to_text( line )
# find whether given Text obj is inside
all_text_objs.append( text_obj )
if len(all_text_objs) == 0:
log.error('(!) No sentences found from the given file. Invalid file, perhaps?')
exit(1)
log.info(f'Collected {len(all_text_objs)} sentences from jsonl file.')
# =====================================================================
# Find corresponding json sentences
# =====================================================================
collected = []
found_text_objects = dict()
for text_obj in all_text_objs:
text = text_obj.text
words_layer = [layer for layer in text_obj.layers if 'words' in layer]
assert len(words_layer) > 0, f'No words layer detected in {text_obj.layers}'
words_layer = words_layer[0]
text_words_set = set(list(text_obj[words_layer].text))
text_words_set_len = len(text_words_set)
candidates = []
for esid, err_sent_words in enumerate(all_collected_err_sentences):
err_words_set = set(err_sent_words)
err_words_set_len = len(err_words_set)
common = text_words_set.intersection(err_words_set)
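            # fuzzy match: tolerate up to two words missing from the smaller word set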
if len(common) > 0 and len(common)+2 >= min(text_words_set_len, err_words_set_len):
candidates.append( (esid, len(common)) )
if candidates:
candidates = sorted(candidates, key=lambda x:x[1], reverse=True)
first_key = candidates[0][0]
found_text_objects[ first_key ] = text_obj
log.debug( '' )
first_len = len(set(all_collected_err_sentences[first_key]))
second_len = len(set(text_obj[words_layer].text))
log.debug( f'{all_collected_err_sentences[first_key]!r} --> {text_obj.text!r} || {first_len} {second_len} || {candidates!r}' )
log.debug( '' )
# Check for missing matches
missing = 0
for esid, err_sentence in enumerate( all_collected_err_sentences ):
if esid not in found_text_objects.keys():
log.error(f'(!) No JSON match found for sentence: {err_sentence!r}')
missing += 1
if missing > 0:
log.error(f'(!) No JSON match found for {missing} sentences.')
# =====================================================================
# Write out results
# =====================================================================
in_f_head, in_f_tail = os.path.split(errors_file)
in_f_root, in_f_ext = os.path.splitext(in_f_tail)
assert in_f_ext == '' or in_f_ext.startswith('.')
assert in_f_ext != '.jsonl'
in_f_ext = '.jsonl'
judgements_suffix = '_with_judgements' if add_judgements else ''
out_fname = \
os.path.join(in_f_head, f'{in_f_root}{judgements_suffix}{in_f_ext}')
log.info('Saving into {} ...'.format(out_fname) )
with open(out_fname, 'w', encoding='utf-8') as out_f:
for esid, err_sentence in enumerate(all_collected_err_sentences):
if esid in found_text_objects.keys():
text_obj = found_text_objects[esid]
if add_judgements:
text_obj.meta['_manual_evaluation'] = \
all_collected_judgements[esid]
out_f.write( text_to_json(text_obj) )
if esid + 1 < len(all_collected_err_sentences):
out_f.write('\n')
log.info( 'Done.')
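    # Example invocation (file names are hypothetical):
    #   python select_eval_sentences.py manual_eval.txt erroneous_sentences.jsonl -j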
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import networkx as nx
import os
import logging
import xlwt
from src.mp_common import *
class mp_analyse:
def __init__(self, config = None):
# initialize logger
self.logger = logging.getLogger('metapath')
        if config is None:
return
self.cfg = config
def run(self, model):
# check object class
if not model.__class__.__name__ == 'mp_model':
self.logger.error("could not run analysis: 'model' has to be mp_model instance!")
return False
for output in self.cfg['output']:
parts = output.split(':')
if parts[0] == 'report':
self.logger.info(" * create report")
self.report(model)
elif parts[0] == 'plot':
if len(parts) < 2:
continue
self.logger.info(" * plot %s" % (parts[1]))
self.plot(model, plot = parts[1])
return True
#
# create plots
#
# plot wrapper
def plot(self, model, plot = '', file = None):
# check object class
if not model.__class__.__name__ == 'mp_model':
self.logger.error("could not plot model: 'model' has to be mp_model instance!")
return False
if plot == 'graph':
return self.plot_graph(model, file = file)
if plot == 'network':
return self.plot_network(model, file = file)
if plot == 'knockout':
return self.plot_knockout(model, file = file)
if plot == 'correlation':
return self.plot_correlation(model, file = file)
return False
# plot model graph
def plot_graph(self, model, output = 'file', file = None):
# check object class
if not model.__class__.__name__ == 'mp_model':
self.logger.error("could not plot model: 'model' has to be mp_model instance!")
return False
# set default settings
settings = {
'dpi': 300,
'file_ext': 'png',
'edge_threshold': 0.0,
'edge_weight': 'link_energy',
'node_caption': 'rel_approx',
'edge_zoom': 1}
# set configured settings
for key, value in self.cfg['plot']['model']['graph'].items():
if key in settings:
settings[key] = value
# configure output
if output == 'file':
# get file name
if not file:
file = self.cfg['path'] + 'model-' + str(model.id) + \
'/graph-%s-%s.%s' % (settings['edge_weight'], settings['node_caption'], settings['file_ext'])
# get empty file
file = get_empty_file(file)
# get title
title = \
r'$\mathbf{Network:}\,\mathrm{%s}' % (model.network.cfg['name']) \
+ r',\,\mathbf{Dataset:}\,\mathrm{%s}$' % (model.dataset.cfg['name'])
# get node captions
model_caption, node_caption = model.get_approx(type = settings['node_caption'])
# get edge weights
edge_weight = model.get_weights(type = settings['edge_weight'])
# get model caption
        caption = \
            r'$\mathbf{Approximation:}\,\mathrm{%.1f' % (100 * model_caption) + r'\%}$'
# labels
lbl = {}
lbl['e'] = model.network.node_labels(type = 'e')
lbl['tf'] = model.network.node_labels(type = 'tf')
lbl['s'] = model.network.node_labels(type = 's')
# graph
G = model.network.graph
# calculate sizes
zoom = 1
scale = min(250.0 / max(len(lbl['e']), len(lbl['tf']), len(lbl['s'])), 30.0)
graph_node_size = scale ** 2
graph_font_size = 0.4 * scale
graph_caption_factor = 0.5 + 0.003 * scale
graph_line_width = 0.5
# calculate node positions for 'stack layout'
pos = {}
pos_caption = {}
for node, attr in G.nodes(data = True):
i = 1.0 / len(lbl[attr['params']['type']])
x_node = (attr['params']['type_node_id'] + 0.5) * i
y_node = attr['params']['type_id'] * 0.5
y_caption = (attr['params']['type_id'] - 1) * graph_caption_factor + 0.5
pos[node] = (x_node, y_node)
pos_caption[node] = (x_node, y_caption)
# create figure object
fig = plt.figure()
# draw labeled nodes
for node, attr in G.nodes(data = True):
type = attr['params']['type']
label = attr['label']
weight_sum = 0
for (n1, n2, edge_attr) in G.edges(nbunch = [node], data = True):
weight_sum += np.abs(edge_weight[(n1, n2)])
weight_sum = min(0.01 + 0.3 * weight_sum, 1)
c = 1 - weight_sum
color = {
's': (1, c, c, 1),
'tf': (c, 1, c, 1),
'e': (1, 1, c, 1)
}[type]
# draw node
nx.draw_networkx_nodes(
G, pos,
node_size = graph_node_size,
linewidths = graph_line_width,
nodelist = [node],
node_shape = 'o',
node_color = color)
# draw node label
node_font_size = \
1.5 * graph_font_size / np.sqrt(max(len(node) - 1, 1))
nx.draw_networkx_labels(
G, pos,
font_size = node_font_size,
labels = {node: label},
font_weight = 'normal')
# draw node caption
if not type == 'tf':
nx.draw_networkx_labels(
G, pos_caption,
font_size = 0.65 * graph_font_size,
                    labels = {node: ' $' + '%d' % (100 * node_caption[node]) + r'\%$'},
font_weight = 'normal')
# draw labeled edges
for (v, h) in G.edges():
if edge_weight[(v, h)] < 0:
color = 'red'
else:
color = 'green'
if np.abs(edge_weight[(v, h)]) > settings['edge_threshold']:
nx.draw_networkx_edges(
G, pos,
width = np.abs(edge_weight[(v, h)]) * graph_line_width * settings['edge_zoom'],
edgelist = [(v, h)],
edge_color = color,
alpha = 1)
size = graph_font_size / 1.5
label = ' $' + ('%.2g' % (np.abs(edge_weight[(v, h)]))) + '$'
nx.draw_networkx_edge_labels(
G, pos,
edge_labels = {(v, h): label},
font_color = color,
clip_on = False,
font_size = size, font_weight = 'normal')
# draw title
plt.figtext(.5, .92, title, fontsize = 10, ha = 'center')
# draw caption
        plt.figtext(.5, .06, caption, fontsize = 10, ha = 'center')
plt.axis('off')
# output
if output == 'file':
plt.savefig(file, dpi = settings['dpi'])
elif output == 'screen':
plt.show()
# clear current figure object and release memory
plt.clf()
plt.close(fig)
# plot model knockout test as heatmap
def plot_knockout(self, model, output = 'file', file = None):
# check object class
if not model.__class__.__name__ == 'mp_model':
self.logger.error("could not plot model knockout test: 'model' has to be mp_model instance!")
return False
# set default settings
settings = {
'dpi': 300,
'file_ext': 'png',
'interpolation': 'nearest'}
# set configured settings
for key, value in self.cfg['plot']['model']['knockout'].items():
if key in settings:
settings[key] = value
# configure output
if output == 'file':
# get file name
if not file:
file = self.cfg['path'] + 'model-' + str(model.id) + '/knockout.' + settings['file_ext']
# get empty file
file = get_empty_file(file)
# calculate knockout matrix
data = model.get_knockout_matrix()
title = 'Simulated Gene Knockout Test'
label = model.dataset.cfg['label']
# create figure object
fig = plt.figure()
ax = fig.add_subplot(111)
ax.grid(True)
num = len(label)
cax = ax.imshow(data,
cmap = matplotlib.cm.hot_r,
interpolation = settings['interpolation'],
extent = (0, num, 0, num))
# set ticks and labels
plt.xticks(
np.arange(num) + 0.5,
tuple(label),
fontsize = 9,
rotation = 70)
plt.yticks(
num - np.arange(num) - 0.5,
tuple(label),
fontsize = 9)
# add colorbar
cbar = fig.colorbar(cax)
for tick in cbar.ax.get_yticklabels():
tick.set_fontsize(9)
# draw title
plt.title(title, fontsize = 11)
# output
if output == 'file':
plt.savefig(file, dpi = settings['dpi'])
elif output == 'screen':
plt.show()
# clear current figure object and release memory
plt.clf()
plt.close(fig)
# plot network as graph
def plot_network(self, model, output = 'file', file = None):
# check object class
if not model.__class__.__name__ == 'mp_model':
self.logger.error("could not plot model knockout test: 'model' has to be mp_model instance!")
return False
# get network
network = model.network
# set default settings
settings = {
'dpi': 300,
'file_ext': 'png'}
# set configured settings
for key, value in self.cfg['plot']['network']['graph'].items():
if key in settings:
settings[key] = value
# configure output
if output == 'file':
# get file name
if not file:
file = '%smodel-%s/network-%s.%s' % (self.cfg['path'], model.id, network.cfg['id'], settings['file_ext'])
# get empty file
file = get_empty_file(file)
# plot parameters
dpi = 300
title = r'$\mathbf{Network:}\,\mathrm{%s}$' % (network.cfg['name'])
# labels
lbl = {}
lbl['e'] = network.node_labels(type = 'e')
lbl['tf'] = network.node_labels(type = 'tf')
lbl['s'] = network.node_labels(type = 's')
# graph
G = network.graph
# calculate sizes
zoom = 1
scale = min(250.0 / max(len(lbl['e']), len(lbl['tf']), len(lbl['s'])), 30.0)
graph_node_size = scale ** 2
graph_font_size = 0.4 * scale
graph_caption_factor = 0.5 + 0.003 * scale
graph_line_width = 0.5
# calculate node positions for 'stack layout'
pos = {}
pos_caption = {}
for node, attr in G.nodes(data = True):
i = 1.0 / len(lbl[attr['params']['type']])
x_node = (attr['params']['type_node_id'] + 0.5) * i
y_node = attr['params']['type_id'] * 0.5
y_caption = (attr['params']['type_id'] - 1) * graph_caption_factor + 0.5
pos[node] = (x_node, y_node)
pos_caption[node] = (x_node, y_caption)
# create figure object
fig = plt.figure()
# draw labeled nodes
for node, attr in G.nodes(data = True):
type = attr['params']['type']
label = attr['label']
color = {
's': (1, 0, 0, 1),
'tf': (0, 1, 0, 1),
'e': (1, 1, 0, 1)
}[type]
# draw node
nx.draw_networkx_nodes(
G, pos,
node_size = graph_node_size,
linewidths = graph_line_width,
nodelist = [node],
node_shape = 'o',
node_color = color)
# draw node label
node_font_size = \
1.5 * graph_font_size / np.sqrt(max(len(node) - 1, 1))
nx.draw_networkx_labels(
G, pos,
font_size = node_font_size,
labels = {node: label},
font_weight = 'normal')
# draw unlabeled edges
for (v, h) in G.edges():
nx.draw_networkx_edges(
G, pos,
width = graph_line_width,
edgelist = [(v, h)],
edge_color = 'black',
alpha = 1)
# draw title
plt.figtext(.5, .92, title, fontsize = 10, ha = 'center')
plt.axis('off')
# output
if output == 'file':
plt.savefig(file, dpi = settings['dpi'])
elif output == 'screen':
plt.show()
# clear current figure object and release memory
plt.clf()
plt.close(fig)
# plot dataset correlation as heatmap
def plot_correlation(self, model, output = 'file', file = None):
# check model object class
if not model.__class__.__name__ == 'mp_model':
self.logger.error("could not plot dataset correlation: 'model' has to be mp_model instance!")
return False
# get dataset
dataset = model.dataset
# set default settings
settings = {
'dpi': 300,
'file_ext': 'png',
'interpolation': 'nearest'}
# set configured settings
for key, value in self.cfg['plot']['dataset']['correlation'].items():
if key in settings:
settings[key] = value
# configure output
if output == 'file':
# get file name
if not file:
file = self.cfg['path'] + 'model-%s/dataset-%s-correlation.%s' % (model.id, dataset.cfg['id'], settings['file_ext'])
# get empty file
file = get_empty_file(file)
# create correlation matrix
data = np.corrcoef(dataset.data.T)
title = dataset.cfg['name']
# create figure object
fig = plt.figure()
ax = fig.add_subplot(111)
ax.grid(True)
num = len(dataset.cfg['label'])
cax = ax.imshow(data,
cmap = matplotlib.cm.hot_r,
interpolation = settings['interpolation'],
extent = (0, num, 0, num))
# set ticks and labels
plt.xticks(
np.arange(num) + 0.5,
tuple(dataset.cfg['label']),
fontsize = 9,
rotation = 70)
plt.yticks(
num - np.arange(num) - 0.5,
tuple(dataset.cfg['label']),
fontsize = 9)
# add colorbar
cbar = fig.colorbar(cax)
for tick in cbar.ax.get_yticklabels():
tick.set_fontsize(9)
# draw title
plt.title(title, fontsize = 11)
# output
if output == 'file':
plt.savefig(file, dpi = settings['dpi'])
elif output == 'screen':
plt.show()
# clear current figure object and release memory
plt.clf()
plt.close(fig)
#
# create tables
#
def report(self, model, file = None):
# check object class
if not model.__class__.__name__ == 'mp_model':
self.logger.error("could not create table: 'model' has to be mp_model instance!")
return False
# set default settings
settings = {
'columns': ['knockout_approx']}
# get file name
if not file:
file = self.cfg['path'] + 'model-%s/report.xls' % (model.id)
# get empty file
file = get_empty_file(file)
# start document
book = xlwt.Workbook(encoding="utf-8")
# define common styles
style_sheet_head = xlwt.Style.easyxf(
'font: height 300;')
style_section_head = xlwt.Style.easyxf('font: bold True;')
style_head = xlwt.Style.easyxf(
'pattern: pattern solid, fore_colour gray_ega;'
'borders: bottom thin;'
'font: colour white;')
style_str = xlwt.Style.easyxf('', '')
style_num = xlwt.Style.easyxf('alignment: horizontal left;', '#,###0.000')
style_num_1 = xlwt.Style.easyxf('alignment: horizontal left;', '#,###0.0')
style_num_2 = xlwt.Style.easyxf('alignment: horizontal left;', '#,###0.00')
style_num_3 = xlwt.Style.easyxf('alignment: horizontal left;', '#,###0.000')
style_border_left = xlwt.Style.easyxf('borders: left thin;')
style_border_bottom = xlwt.Style.easyxf('borders: bottom thin;')
sheet = {}
#
# EXCEL SHEET 'UNITS'
#
sheet['units'] = book.add_sheet("Units")
row = 0
# write sheet headline
sheet['units'].row(row).height = 390
sheet['units'].row(row).write(0, 'Units', style_sheet_head)
row +=2
# write section headline
sheet['units'].row(row).write(0, 'Unit', style_section_head)
sheet['units'].row(row).write(3, 'Data', style_section_head)
sheet['units'].row(row).write(5, 'Parameter', style_section_head)
sheet['units'].row(row).write(7, 'Effect', style_section_head)
row += 1
# write column headline
columns = [ {
'label': 'id', 'col': 0,
'info': 'node_id', 'type': 'string', 'style': style_str
}, {
'label': 'class', 'col': 1,
'info': 'node_type', 'type': 'string', 'style': style_str
}, {
'label': 'label', 'col': 2,
'info': 'node_label', 'type': 'string', 'style': style_str
}, {
'label': 'mean', 'col': 3,
'info': 'data_mean', 'type': 'number', 'style': style_num
}, {
'label': 'sdev', 'col': 4,
'info': 'data_sdev', 'type': 'number', 'style': style_num
}, {
'label': 'bias', 'col': 5,
'info': 'model_bias', 'type': 'number', 'style': style_num
}, {
'label': 'sdev', 'col': 6,
'info': 'model_sdev', 'type': 'number', 'style': style_num
}, {
'label': 'rel approx [%]', 'col': 7,
'info': 'model_rel_approx', 'type': 'number', 'style': style_num_1
}, {
'label': 'abs approx [%]', 'col': 8,
'info': 'model_abs_approx', 'type': 'number', 'style': style_num_1
}]
if 'knockout_approx' in settings['columns']:
columns.append({
'label': 'knockout', 'col': 9,
'info': 'model_knockout_approx', 'type': 'number', 'style': style_num_1
})
for cell in columns:
sheet['units'].row(row).write(cell['col'], cell['label'], style_head)
row += 1
# write unit information
nodes = model.network.nodes()
# get simulation info
model_rel_approx, node_rel_approx = model.get_approx(type = 'rel_approx')
model_abs_approx, node_abs_approx = model.get_approx(type = 'abs_approx')
if 'knockout_approx' in settings['columns']:
node_knockout_approx = model.get_knockout_approx()
for node in nodes:
# create dict with info
info = {}
# get node information
network_info = model.network.node(node)
info['node_id'] = node
info['node_type'] = network_info['params']['type']
info['node_label'] = network_info['label']
# get data and model information
machine_info = model.machine.unit(node)
if machine_info['type'] == 'visible':
info['data_mean'] = machine_info['data']['mean']
info['data_sdev'] = machine_info['data']['sdev']
info['model_bias'] = machine_info['params']['bias']
info['model_sdev'] = machine_info['params']['sdev']
info['model_rel_approx'] = node_rel_approx[node] * 100
info['model_abs_approx'] = node_abs_approx[node] * 100
else:
info['model_bias'] = machine_info['params']['bias']
if 'knockout_approx' in settings['columns']:
info['model_knockout_approx'] = node_knockout_approx[node] * 100
# write cell content
for cell in columns:
if not cell['info'] in info:
continue
if cell['type'] == 'string':
sheet['units'].row(row).write(
cell['col'], info[cell['info']], cell['style'])
elif cell['type'] == 'number':
sheet['units'].row(row).set_cell_number(
cell['col'], info[cell['info']], cell['style'])
row += 1
#
# EXCEL SHEET 'LINKS'
#
sheet['links'] = book.add_sheet("Links")
row = 0
# write sheet headline
sheet['links'].row(row).height = 390
sheet['links'].row(row).write(0, 'Links', style_sheet_head)
row +=2
# write section headline
sheet['links'].row(row).write(0, 'Source', style_section_head)
sheet['links'].row(row).write(3, 'Target', style_section_head)
sheet['links'].row(row).write(6, 'Parameter', style_section_head)
sheet['links'].row(row).write(7, 'Effect', style_section_head)
row += 1
# write column headline
columns = [ {
'label': 'id', 'col': 0,
'info': 'src_node_id', 'type': 'string', 'style': style_str
}, {
'label': 'class', 'col': 1,
'info': 'src_node_type', 'type': 'string', 'style': style_str
}, {
'label': 'label', 'col': 2,
'info': 'src_node_label', 'type': 'string', 'style': style_str
},{
'label': 'id', 'col': 3,
'info': 'tgt_node_id', 'type': 'string', 'style': style_str
}, {
'label': 'class', 'col': 4,
'info': 'tgt_node_type', 'type': 'string', 'style': style_str
}, {
'label': 'label', 'col': 5,
'info': 'tgt_node_label', 'type': 'string', 'style': style_str
}, {
'label': 'weight', 'col': 6,
'info': 'weight', 'type': 'number', 'style': style_num
}, {
'label': 'energy', 'col': 7,
'info': 'energy', 'type': 'number', 'style': style_num
} ]
for cell in columns:
sheet['links'].row(row).write(cell['col'], cell['label'], style_head)
row += 1
# write link information
edges = model.network.edges()
# link knockout simulation
edge_energy = model.get_weights(type = 'link_energy')
for (src_node, tgt_node) in edges:
# create dict with info
info = {}
# get source node information
network_info_src = model.network.node(src_node)
info['src_node_id'] = src_node
info['src_node_type'] = network_info_src['params']['type']
info['src_node_label'] = network_info_src['label']
# get target node information
network_info_tgt = model.network.node(tgt_node)
info['tgt_node_id'] = tgt_node
info['tgt_node_type'] = network_info_tgt['params']['type']
info['tgt_node_label'] = network_info_tgt['label']
# simulation
info['energy'] = edge_energy[(src_node, tgt_node)]
# get data and model information
machine_info = model.machine.link((src_node, tgt_node))
if not machine_info == {}:
info['weight'] = machine_info['params']['weight']
# write cell content
for cell in columns:
if not cell['info'] in info:
continue
if cell['type'] == 'string':
sheet['links'].row(row).write(
cell['col'], info[cell['info']], cell['style'])
elif cell['type'] == 'number':
sheet['links'].row(row).set_cell_number(
cell['col'], info[cell['info']], cell['style'])
row += 1
book.save(file)
|
#heap sort
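# In-place heapsort: BUILD_MAX_HEAP arranges the array as a max-heap, then the
# root (current maximum) is repeatedly swapped to the end of the shrinking
# unsorted region and MAX_HEAPIFY restores the heap property.
# O(n log n) time, O(1) extra space apart from recursion.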
def HEAPSORT(arr):
heapSize = len(arr)
BUILD_MAX_HEAP(arr, heapSize)
for i in range(len(arr) - 1, 0, -1):
        arr[i], arr[0] = arr[0], arr[i]  # move current maximum to the end
heapSize = heapSize - 1
MAX_HEAPIFY(arr, 0, heapSize)
def MAX_HEAPIFY(arr, i, heapSize):
left = 2 * i + 1
right = 2 * i + 2
largest = i
if left < heapSize and arr[left] > arr[i]:
largest = left
if right < heapSize and arr[right] > arr[largest]:
largest = right
if largest != i:
        arr[i], arr[largest] = arr[largest], arr[i]
MAX_HEAPIFY(arr, largest, heapSize)
def BUILD_MAX_HEAP(arr, heapSize):
for i in range(len(arr)//2, -1, -1):
MAX_HEAPIFY(arr, i, heapSize)
arr = [4, 1, 3, 2, 16, 9, 10, 14, 8, 7]
HEAPSORT(arr)
print('Sorted Array:', arr)
|
import sys
import os
import random
import exceptions
from tstorm.utils import range
from tstorm.utils import test
class TestsError(exceptions.Exception):
pass
class Tests:
def __init__(self, data):
self.list_keys = {
'i':'get_id()', 'id':'get_id()',
't':'get_test_type()', 'type':'get_test_type()',
'r':'is_regression()', 'regression':'is_regression()',
'rfc':'get_rfc()',
'idenpotent':'is_idenpotent()',
'range':'get_range()',
'n':'get_name()', 'name':'get_name()',
'd':'get_description()', 'description':'get_description()'}
self.data = data
self.tests = {}
def __print_ids(self, node=[], run=''):
print 'ID RFC'
for key, value in self.tests.items():
services = list(set(node) & set(value.get_node()))
if len(services) != 0:
if run == 'sanity':
if value.get_test_type() == 'DT':
print '%s %s' % (value.get_id(), value.get_rfc())
elif run == 'stress':
if value.get_test_type() == 'LT':
print '%s %s' % (value.get_id(), value.get_rfc())
else:
if value.get_test_type() != 'DT':
print '%s %s' % (value.get_id(), value.get_rfc())
def __build_header_format(self, info):
if len(info) == 0:
raise TestsError('Input is wrong')
elif not type(info) is dict:
raise TestsError('Input is wrong')
elif 'f' not in info.keys():
print 'ID RFC'
else:
msg = ''
for x in info['f']:
msg += x + ' '
print msg
def __build_body_format(self, value, info):
if len(info) == 0:
raise TestsError('Input is wrong')
elif not type(info) is dict:
raise TestsError('Input is wrong')
elif 'f' not in info.keys():
print '%s %s' % (value.get_id(), value.get_rfc())
else:
msg = ''
for x in info['f']:
msg += eval('value.' + self.list_keys[x]) + ' '
print msg
def __print_ids_with_filters(self,info={},node=[],run=''):
filter_info = []
self.__build_header_format(info)
for key, value in self.tests.items():
services = list(set(node) & set(value.get_node()))
if len(services) == 0:
continue
if run == 'sanity':
if value.get_test_type() != 'DT':
continue
elif run == 'stress':
if value.get_test_type() != 'LT':
continue
elif value.get_test_type() == 'DT':
continue
if 't' in info.keys() and 'r' in info.keys() and 'i' in info.keys():
for x in info['t']:
if x == value.get_test_type() and \
str(info['r']).lower() == str(value.is_regression()).lower() and \
str(info['i']).lower() == str(value.is_idenpotent()).lower():
filter_info.append(value.get_id())
self.__build_body_format(value, info)
elif 't' in info.keys() and 'r' in info.keys():
for x in info['t']:
if x == value.get_test_type() and \
str(info['r']).lower() == str(value.is_regression()).lower():
filter_info.append(value.get_id())
self.__build_body_format(value, info)
elif 't' in info.keys() and 'i' in info.keys():
for x in info['t']:
if x == value.get_test_type():
#print 'uffa %s %s' % (str(info['i']).lower(), str(value.is_idenpotent()).lower())
if str(info['i']).lower() == str(value.is_idenpotent()).lower():
filter_info.append(value.get_id())
self.__build_body_format(value, info)
elif 'r' in info.keys() and 'i' in info.keys():
if str(info['r']).lower() == str(value.is_regression()).lower() and \
                   str(info['i']).lower() == str(value.is_idenpotent()).lower():
filter_info.append(value.get_id())
self.__build_body_format(value, info)
elif 't' in info.keys():
for x in info['t']:
if x == value.get_test_type():
filter_info.append(value.get_id())
self.__build_body_format(value, info)
elif 'r' in info.keys():
if str(info['r']).lower() == str(value.is_regression()).lower():
filter_info.append(value.get_id())
self.__build_body_format(value, info)
elif 'i' in info.keys():
if str(info['i']).lower() == str(value.is_idenpotent()).lower():
filter_info.append(value.get_id())
self.__build_body_format(value, info)
else:
filter_info.append(value.get_id())
self.__build_body_format(value, info)
if 'o' in info.keys():
df = open(info['o'], 'w')
for id in filter_info:
df.write(id + '\n')
df.close()
def get_info(self, info = {}, run='', node=[]):
if len(info) == 0:
self.__print_ids(node=node,run=run)
else:
self.__print_ids_with_filters(info=info,node=node,run=run)
def get_methods(self, tests, run='', node=[]):
methods = {}
for key, value in tests.items():
services = list(set(node) & set(value.get_node()))
if len(services) == 0:
continue
if run == 'sanity':
if 'DT' == value.get_test_type():
methods[key] = value
else:
continue
elif run == 'stress':
if 'LT' == value.get_test_type():
methods[key] = value
else:
continue
elif 'DT' != value.get_test_type():
methods[key] = value
return methods
def get_sanity_methods(self, tests):
sanity_methods = {}
for key, value in tests.items():
if 'DT' == value.get_test_type():
sanity_methods[key] = value
return sanity_methods
def get_stress_methods(self, tests):
stress_methods = {}
for key, value in tests.items():
if 'LT' == value.get_test_type():
stress_methods[key] = value
return stress_methods
def get_valid_tests(self, release):
for data_key, data_value in self.data.items():
for val in data_value[3]:
if range.Range(val[1]).is_included(release):
test_structure = test.TestStructure(data_value, val[0], val[1])
multi_entry = False
for tests_key in self.tests.keys():
if data_key == tests_key:
multi_entry = True
break
if multi_entry:
self.tests[data_key+str(random.random())[0:5]] = test_structure
else:
self.tests[data_key] = test_structure
return self.tests
|
from app import app
from decouple import config
if __name__ == "__main__":
app.run(debug=config('DEBUG') == 'true', host='0.0.0.0', port=config('PORT'))
|
import datetime
import jsonpickle
from django.http import HttpResponse
from django.shortcuts import render, redirect
# Create your views here.
from library.models import *
def login_view(request):
if request.method == 'GET':
return render(request, "login.html")
else:
name= request.POST.get('name')
pwd = request.POST.get('pwd')
userList = Manager.objects.filter(user=name,pwd=pwd)
        message = 'Incorrect username or password'
if userList:
request.session["user"]=jsonpickle.dumps(userList[0])
return redirect("/library/main/")
return render(request,'login.html',{'message':message})
def main_view(request):
infos = Bookinfo.objects.all().order_by()
# booktype = Bookinfo.objects.filter(btid=bid)
# print(infos)
# borrow = Bookinfo.objects.get().borrow_set.all().count()
return render(request,"main.html",{'infos':infos})
def bookBorrow_view(request):
if request.method == 'GET':
return render(request,'bookBorrow.html')
else:
barcode = request.POST.get('barcode')
key = request.POST.get('inputkey')
radio = request.POST.get('f')
btime = request.POST.get('inputbtime')
bactime = request.POST.get('inputbacktime')
submit = request.POST.get('submit3')
submit4 = request.POST.get('submit4')
submit5 = request.POST.get('submit5')
if barcode == '':
readers = ''
else:
readers = Readerinfo.objects.filter(barcode=barcode)
print(readers)
        if not readers:  # no barcode given or no matching reader
            readers = ''
        else:
            readers = Readerinfo.objects.get(barcode=barcode)
if key == '':
infos = ''
else:
if radio == 'isbn':
infos = Bookinfo.objects.get(isbn=key)
                if submit5 == '完成归还':  # '完成归还' = "complete return"
Borrow.objects.filter(rid=readers, bid__isbn=key).update(ifback=1)
if radio == 'bookname':
infos = Bookinfo.objects.get(bname=key)
if submit5 == '完成归还':
Borrow.objects.filter(rid=readers,bid__bname=key).update(ifback=1)
        if submit == '完成借阅':  # '完成借阅' = "complete borrowing"
if btime == '' or bactime == '':
borrowtime = ''
                backtime = ''
else:
borrowtime = datetime.datetime.strptime(btime, '%Y-%m-%d')
backtime = datetime.datetime.strptime(bactime, '%Y-%m-%d')
Borrow.objects.create(rid= readers,bid = infos,borrowtime = borrowtime,backtime = backtime,operator = '张老汉',ifback = 0)
        if submit4 == '完成续借':  # '完成续借' = "complete renewal"
if btime == '' or bactime == '':
borrowtime = ''
                backtime = ''
else:
borrowtime = datetime.datetime.strptime(btime, '%Y-%m-%d')
backtime = datetime.datetime.strptime(bactime, '%Y-%m-%d')
Borrow.objects.filter(rid= readers,bid = infos,borrowtime = borrowtime,operator = '张老汉',ifback = 0).update(backtime = backtime,)
return render(request,"bookBorrow.html",{'readers':readers,'infos':infos,'barcode':barcode,
'key':key,'borrowtime':btime,'backtime':bactime})
def borrowQuery_view(request):
# infos,page = page_num(num,10)
if request.method == 'GET':
return render(request,"borrowQuery.html")
else:
key = request.POST.get('key')
flagb = request.POST.get('flagb')
flaga = request.POST.get('flaga')
sdate = request.POST.get('sdate')
edate = request.POST.get('edate')
if not flaga == 'a':
if flagb == 'b':
infos = Borrow.objects.filter(borrowtime__gte=sdate, borrowtime__lte=edate)
return render(request, "borrowQuery.html", {'infos': infos})
return render(request,'borrowQuery.html')
if flagb == 'b':
if request.POST.get('method') == 'isbn':
infos = Borrow.objects.filter(bid__bookcode=key, borrowtime__gte=sdate, borrowtime__lte=edate)
elif request.POST.get('method') == 'bname':
infos = Borrow.objects.filter(bid__bname=key, borrowtime__gte=sdate, borrowtime__lte=edate)
elif request.POST.get('method') == 'barcode':
infos = Borrow.objects.filter(rid__barcode=key, borrowtime__gte=sdate, borrowtime__lte=edate)
elif request.POST.get('method') == 'rname':
infos = Borrow.objects.filter(rid__rname=key, borrowtime__gte=sdate, borrowtime__lte=edate)
return render(request, "borrowQuery.html", {'infos': infos})
else:
if request.POST.get('method') == 'isbn':
infos = Borrow.objects.filter(bid__isbn=key)
elif request.POST.get('method') == 'bname':
infos = Borrow.objects.filter(bid__bname=key)
elif request.POST.get('method') == 'barcode':
infos = Borrow.objects.filter(rid__barcode=key)
elif request.POST.get('method') == 'rname':
infos = Borrow.objects.filter(rid__rname=key)
return render(request, "borrowQuery.html", {'infos': infos})
def bremind_view(request):
borrows = Borrow.objects.all()
return render(request,"bremind.html",{'borrows':borrows})
def pwd_Modify_view(request):
if request.method == 'GET':
return render(request, "pwd_Modify.html")
else:
name= request.POST.get('name')
oldpwd = request.POST.get('oldpwd')
pwd1 = request.POST.get('pwd1')
pwd = request.POST.get('pwd')
manager = Manager.objects.filter(user = name, pwd = oldpwd)
if manager.count() == 1:
manager.update( pwd=pwd1)
            message = 'Password changed successfully'
return render(request, 'pwd_Modify.html', {'message': message})
else:
            message = 'Incorrect username or original password'
return render(request, 'pwd_Modify.html', {'message': message})
|
"""Plot the environment varying hyper-parameters."""
import matplotlib.pyplot as plt
import gym
import numpy as np
import test_envs.gridworld
def plot():
height = 10
width = 10
probs_a = np.linspace(0, 0.50, 1)
probs_b = np.linspace(0, 0.25, 1)
if probs_a.size * probs_b.size == 1:
env = get_env(
path_noise_prob=probs_a[0],
blind_switch_prob=probs_b[0],
height=height,
width=width,
)
render = env.render("rgb_array")
plt.imshow(render, aspect="auto")
plt.scatter(env.start[1], env.start[0], color="red")
traps_y, traps_x = zip(*list(env.traps))
goals_y, goals_x = zip(*list(env.goals))
plt.scatter(traps_x, traps_y, c=[(1.0, 0.4, 0.0)], marker="X", s=128)
plt.scatter(goals_x, goals_y, c=[(0.0, 0.4, 1.0)], marker="*", s=128)
plt.axis("off")
plt.tight_layout()
plt.show()
return
fig, axes = plt.subplots(probs_a.size, probs_b.size, sharex=True, sharey=True)
fontsize = 24
fig.text(
0.5,
0.01,
"Probabilidade de movimento ruídoso [0.0, 0.5]",
ha="center",
fontsize=fontsize,
)
fig.text(
0.01,
0.5,
"Probabilidade de desativar busca guiada [0.0, 0.25]",
va="center",
rotation="vertical",
fontsize=fontsize,
)
for i, pa in enumerate(probs_a):
for j, pb in enumerate(probs_b):
subplot = 1 + i * probs_b.size + j
ax = axes[probs_a.size - i - 1][j]
print(
f"Rendering subplot {subplot} / {probs_a.size * probs_b.size} "
f"(Pa = {pa:.2f}, Pb = {pb:.2f})...",
end=" ",
)
env = get_env(
path_noise_prob=pa, blind_switch_prob=pb, height=height, width=width
)
render = env.render("rgb_array")
ax.imshow(render, aspect="auto")
ax.get_yaxis().set_visible(False)
ax.get_xaxis().set_visible(False)
plt.plot()
print("Done")
plt.tight_layout()
plt.subplots_adjust(hspace=0.05, wspace=0.05)
plt.show()
def get_env(
path_noise_prob: float,
blind_switch_prob: float,
width: int = 32,
height: int = 32,
random_state: int = 16,
):
assert 0 <= path_noise_prob <= 1
assert 0 <= blind_switch_prob <= 1
assert width > 0
assert height > 0
env = gym.make(
"Gridworld-v0",
height=height,
width=width,
path_noise_prob=path_noise_prob,
blind_switch_prob=blind_switch_prob,
)
env.seed(random_state)
env.reset()
return env
if __name__ == "__main__":
plot()
|
# -*- coding: utf-8 -*-
from collections import defaultdict
class Solution:
def toOccurrences(self, s):
result = defaultdict(list)
for i, c in enumerate(s):
result[c].append(i)
return result.values()
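    # Two strings are isomorphic iff they induce the same occurrence pattern:
    # comparing the sorted per-character index lists checks this regardless of
    # which concrete characters are used.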
def isIsomorphic(self, s, t):
return sorted(self.toOccurrences(s)) == sorted(self.toOccurrences(t))
if __name__ == "__main__":
solution = Solution()
assert solution.isIsomorphic("egg", "add")
assert not solution.isIsomorphic("foo", "bar")
assert solution.isIsomorphic("paper", "title")
|
from flask_wtf import Form  # the flask.ext namespace was removed in Flask 1.0
from wtforms import TextField, TextAreaField, PasswordField, SelectField, ValidationError, HiddenField
from wtforms.fields.html5 import DateField
from wtforms.widgets import HiddenInput
#from wtforms_html5 import DateField
from wtforms.validators import Required, Length, Email, EqualTo
from models import Admin, Timer
import datetime
# Here are helper classes for all the forms in the html templates. They make it easier for the view to retrieve and
# input data to/from the forms as discrete objects and interface with the db.
class NewPasswordMixin():
"""
Form for new password with validation
"""
password = PasswordField('Password:', [
Required('You need to supply a password'),
Length(min = 8, max = 20, message="password must be minimum 8 characters and maximum 20"),
EqualTo('confirm', message = 'Passwords must match')
],
)
confirm = PasswordField('Please confirm the password:',
)
class DateForm(Form):
"""
Form for inputting start and end dates
"""
localDateIn = DateField()
localDateOut = DateField()
# the following fields help convert the dates input in the user's
# local time into UTC, so they are the actual dates being queried
    dateIn = DateField(widget=HiddenInput())
    dateOut = DateField(widget=HiddenInput())
class TimestampForm(Form):
"""
Form for inputting timestamps
"""
localdatestamp = DateField()
    localtimestamp = TextField(validators=[Required("Please supply a time")], default="00:00")
# The following field gets input from both previous fields converted to UTC and
# concatenated and is the one actually being queried
timestamp = HiddenField()
class LoginForm(Form):
"""
Form for login with validation and db retrieval of user
"""
username = TextField('username', [Required()])
password = PasswordField('password', [Required()])
def validate_login(self, field):
"""
Makes sure the user is in the database and the password matches
"""
user = self.get_user()
if user is None:
raise ValidationError('invalid user')
if user.password != self.password.data:
raise ValidationError('Invalid password')
def get_user(self):
"""
Query the database for the user input in the form
"""
return Admin.query.filter_by(name=self.username.data).first()
class RegistrationForm(Form, NewPasswordMixin):
"""
Form for user registration including validation and check for duplicates
"""
name = TextField('name', [
Required(message="Name is required"),
Length(min=4, max=35, message="Name needs to be longer than 4 and shorter than 35 characters"),
],
)
email = TextField('email', [
Required(message="Email is required"),
Length(min=6, max=50),
Email()
],
)
role = SelectField('role', choices = [(0, 'Inactive'),(1, 'Admin'),(2, 'Supervisor')], coerce=int)
timezone = HiddenField()
def validate_login(self, field):
"""
Query db to check for duplicates
"""
if Admin.query.filter_by(name = self.name.data).count() > 0:
raise ValidationError('Duplicate username')
class AdminModifyForm(Form):
"""
Form to modify an user's profile
"""
name = TextField('name')
email = TextField('email')
role = SelectField('role', choices = [(0, 'Inactive'), (1, 'Admin'), (2, 'Supervisor')], coerce=int)
timezone = HiddenField()
class SwitchAdminForm(Form):
"""
Form to hand over support to another user and leave some notes
"""
notes = TextAreaField('notes')
class ForgotPasswordForm(Form):
"""
Form to ask for a password reset link
"""
email = TextField('email', [
Required(message="Email is required"),
Email()
],
)
# The next few forms don't add anything to the class they are inheriting from but make it easier
# to add anything to them in the future
class ResetPasswordForm(Form, NewPasswordMixin):
"""
Form to reset password, inherits from the new password mixin without adding anything at the moment
"""
pass
class ChangePasswordForm(Form, NewPasswordMixin):
"""
Form to change password, inherits from the new password mixin without adding anything at the moment
"""
pass
class TakeOverConfirmForm(Form):
"""
Form to confirm take over shift
"""
pass
|
import numpy as np
import plot
import itertools
from pprint import pprint
class Strip:
def __init__(self, ID, RC, RCNum, inputArray, length):
self.ID = ID
self.RC = RC
self.RCNum = RCNum
self.inputArray = inputArray
self.length = length
self.elements = dict()
self.workingsArray = [np.nan] * length
self.workingsPermutations = dict()
self.outputArray = [np.nan] * length
self.complete = 0
self.noOfElements = 1
for i in range(len(self.workingsArray)):
self.workingsArray[i] = []
class Element:
def __init__(self, ID, minimumLength, type):
self.ID = ID
self.minimumLength = minimumLength
self.type = type # 1 = marked, 0 = blank
self.complete = 0
self.unitsIdentified = 0
class WorkingsPermutations:
def __init__(self, ID, workingsArray):
self.ID = ID
self.active = 1
self.identified = 0
self.workingsArray = workingsArray
def setup(rows, columns):
# first test to see if solvable, e.g. sum of rows = sum of cols
sumCheck(rows, columns)
# setup initial strips
strips = dict()
# rows
for i in range(len(rows)):
dictID = "R" + str(i)
strips[dictID] = Strip(dictID, "R", i, rows[i], len(columns))
# cols
for i in range(len(columns)):
dictID = "C" + str(i)
strips[dictID] = Strip(dictID, "C", i, columns[i], len(rows))
# set up elements
for i in strips:
if strips[i].inputArray[0] == 0: # special case if strip is 0 e.g. blank
            strips[i].elements[0] = Element(0, strips[i].length, 0)
else:
# first element could be zero length
            strips[i].elements[0] = Element(0, 0, 0)
x = 1 # counter
for j in range(0, len(strips[i].inputArray)):
if j != 0: # already put in place first element above, otherwise, put in space with minimum length of 1:
                    strips[i].elements[x] = Element(x, 1, 0)
x += 1
                strips[i].elements[x] = Element(x, strips[i].inputArray[j], 1)
x += 1
# final element could be zero length
            strips[i].elements[x] = Element(x, 0, 0)
strips[i].noOfElements = x+1
# set up maximum lengths
totalMinimumLength = 0
for j in strips[i].elements:
totalMinimumLength = totalMinimumLength + strips[i].elements[j].minimumLength
strips[i].minimumLength = totalMinimumLength
# print totalMinimumLength
for j in strips[i].elements:
if strips[i].elements[j].type == 1:
strips[i].elements[j].maximumLength = strips[i].elements[j].minimumLength
else:
strips[i].elements[j].maximumLength = strips[i].length - totalMinimumLength + strips[i].elements[
j].minimumLength
return strips
def sumCheck(rows, columns):
rowsSum = np.nansum(np.nansum(rows))
columnsSum = np.nansum(np.nansum(columns))
if rowsSum != columnsSum:
raise Exception("error - rowsSum != columnsSum", rowsSum, columnsSum)
return
def firstPass():
# first check of strips following setup
# all permutations of elements calculated (elementPermutations) and added to elementList
# stripPermutations then calculated based on product of elementList
# possibleArray is a temporary list that shows possible positions of elements based on minimum length
# all permutations of possibleArray are considered and then recorded
for i in strips:
#printStrip(strips[i].RC, strips[i].ID)
if strips[i].inputArray[0] == 0: # special case if strip is 0 e.g. blank
            strips[i].workingsPermutations[0] = WorkingsPermutations(0, [0] * strips[i].length)
else: # calculate permutations
elementList = []
for j in strips[i].elements:
# create element permutations
elementPermutations = []
for k in range(strips[i].elements[j].minimumLength, (strips[i].elements[j].maximumLength + 1)):
elementSubPermutations = []
for l in range(k):
elementSubPermutations.append(strips[i].elements[j].ID)
elementPermutations.append(elementSubPermutations)
                strips[i].elements[j].permutations = elementPermutations
# append elementPermutations into elementList for strip
elementList.append(elementPermutations)
strips[i].elementList = elementList
            # stripPermutations then calculated as the product of elementList
            stripPermutations = list(itertools.product(*elementList))
            strips[i].stripPermutations = stripPermutations
            # only permutations with exact length of strip are possible
            # flatten lists first, then check lengths
            counter = 0
            for j in stripPermutations:
                flattened = list(itertools.chain(*j))
                #print(j, len(j), flattened, len(flattened))
                if len(flattened) == strips[i].length:
                    # only permutations of same length can be possible and added as possible workingsArray
                    strips[i].workingsPermutations[counter] = WorkingsPermutations(counter, flattened)
                    counter += 1
buildWorkingsArray(strips[i])
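# Illustrative sketch (not part of the solver): for a length-5 strip with clue [2],
# setup() yields three elements -- blank 0 (len 0..3), marked 1 (len 2), blank 2
# (len 0..3) -- and itertools.product enumerates candidate layouts; only tuples
# that flatten to exactly length 5 survive as workings permutations, e.g.
#   ([], [1, 1], [2, 2, 2])  -> [1, 1, 2, 2, 2]
#   ([0], [1, 1], [2, 2])    -> [0, 1, 1, 2, 2]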
def buildWorkingsArray(strip):
# takes current WorkingsPermutations of a strip and returns the WorkingsArray
#printStrip(strip.ID)
masterWorkingsArray = [[] for i in range(strip.length)]
for i in strip.workingsPermutations:
#print(masterWorkingsArray)
#print(strip.workingsPermutations[i].workingsArray)
if strip.workingsPermutations[i].active == 1: # only consider permutations that are active
for j in range(len(strip.workingsPermutations[i].workingsArray)):
possibleElement = strip.workingsPermutations[i].workingsArray[j]
#print(j, strip.workingsPermutations[i].workingsArray[j], "possibleElement - ", possibleElement)
# j = location in workingsArray
# if not in workingsArray, and doesn't = NaN, then add to workingsArray
if possibleElement not in masterWorkingsArray[j] and not np.isnan(possibleElement):
#print("add to masterWorkingsArray", j, possibleElement, masterWorkingsArray)
#print("masterWorkingsArray[j]", masterWorkingsArray[j])
masterWorkingsArray[j].append(possibleElement)
#print("masterWorkingsArray - ", masterWorkingsArray)
#print("masterWorkingsArray - ", masterWorkingsArray)
strip.workingsArray = masterWorkingsArray
return
def checkTable():
# check the strips, attempt to solve table
for i in strips:
if strips[i].complete == 0: # ignore strips that are complete
for j in range(len(strips[i].workingsArray)): # check contents of workings array
if len(strips[i].workingsArray[j]) == 1: # if only 1 option, then must be correct - mark!
element = strips[i].workingsArray[j][0]
mark(strips[i], j, strips[i].elements[element].type, element)
                else: # are all options in the workings array of the same type? (blanks have even IDs, marked runs odd)
                    odd = all(isItOdd(e) for e in strips[i].workingsArray[j])
                    even = all(not isItOdd(e) for e in strips[i].workingsArray[j])
                    if odd:
                        mark(strips[i], j, 1) # mark, but we don't know which element yet
                    if even:
                        mark(strips[i], j, 0) # mark, but we don't know which element yet
def printStrip(ID):
# print a strip - handy debug function
print("---", ID, "---")
"""
print("ID:", strips[ID].ID, "RC:", strips[ID].RC, "RCNum:", strips[ID].RCNum)
print("inputArray:", strips[ID].inputArray)
print("length:", strips[ID].length)
print("workingsArray:", strips[ID].workingsArray)
print("outputArray:", strips[ID].outputArray)
print("complete:", strips[ID].complete)
"""
pprint(vars(strips[ID]))
print("---", ID, "- WorkingsPermutations ---")
for j in strips[ID].workingsPermutations:
pprint(vars(strips[ID].workingsPermutations[j]))
print("---", ID, "- Elements ---")
for j in strips[ID].elements:
pprint(vars(strips[ID].elements[j]))
print("------")
def mark(strip, location, type, element = np.nan):
# marks a unit in a strip at location, with type, and element if known
if strip.outputArray[location] == 0 and type == 1:
print("Error")
printStrip(strip.ID)
raise Exception("Mark error - trying to mark a unit with type 1 that is already marked as type 0")
elif strip.outputArray[location] == 1 and type == 0:
print("Error")
printStrip(strip.ID)
raise Exception("Mark error - trying to mark a unit with type 0 that is already marked as type 1")
elif strip.outputArray[location] == type: # already marked, so do nothing
# print("already marked")
pass
else: # mark the unit
# print "marked - location:", location, "type:", type
strip.outputArray[location] = type # mark the unit
removeWorkings(strip, location, type, element)
checkPermutations(strip)
if strip.RC == 'R' and showPlot == 1:
plot.addFrameToPlotFigure(rows, columns, strips, figure, showWorkings)
markCorrespondingStrip(strip, location, type)
checkStripComplete(strip)
def markCorrespondingStrip(strip, location, type): # find corresponding unit in row/column strip
# e.g. if RC = C, strip.id = 8 and location = 9, then look for RC = R, ID = location, location = ID
# print("correspondingStrip -"), strip.ID, location, strip.RC
# print strips
if strip.RC == 'R':
correspondingDictID = "C" + str(location)
elif strip.RC == 'C':
correspondingDictID = "R" + str(location)
if correspondingDictID in strips:
# print "found correspondingDictID", correspondingDictID
mark(strips[correspondingDictID], strip.RCNum, type)
def checkStripComplete(strip): # check strip to see if it is complete
# print "check"
stripInputSum = np.nansum(strip.inputArray)
stripOutputSum = np.nansum(strip.outputArray)
if stripInputSum == stripOutputSum:
for i in range(len(strip.outputArray)):
# print "removing nan", strip.outputArray[i], np.nan
if np.isnan(strip.outputArray[i]):
# print "removing nan", strip.outputArray[i]
mark(strip, i, 0)
recalculatedInputArray = convertOutputArrayToInputArray(strip.outputArray)
#print(strip.ID, strip.inputArray, recalculatedInputArray)
if compareLists(strip.inputArray, recalculatedInputArray): # strip is complete
strip.complete = 1
else:
print("Error")
printStrip(strip.ID)
print("recalculatedInputArray:", recalculatedInputArray, "vs inputArray:", strip.inputArray, "outputArray:", strip.outputArray)
raise Exception("Strip is showing as complete but calculated outputArray does not reconcile with InputArray")
def convertOutputArrayToInputArray(outputArray):
outputString = ''.join(str(i) for i in outputArray)
newOutputArray = outputString.split('0')
newInputArray = []
for i in newOutputArray:
if len(i) == 0:
pass
else:
newInputArray.append(len(i))
if len(newInputArray) == 0:
newInputArray.append(0)
return(newInputArray)
def compareLists(a, b):
#print(a, len(a), b, len(b))
if len(a) == len(b):
for i in range(len(a)):
if a[i] == b[i]:
pass
else:
return False
else:
return False
return True
def isItOdd(x):
if x % 2 > 0:
return True
else:
return False
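# Note: setup() assigns element IDs alternately blank/marked, so blanks always
# carry even IDs and marked runs odd IDs; isItOdd therefore recovers a cell's
# type from any candidate element ID, e.g. isItOdd(3) -> True means "marked".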
def removeWorkings(strip, location, type, element):
# after a cell has been marked, update and removes workings from said cell
if not np.isnan(element): # if element is known, then simply replace workingsArray with the given element
strip.workingsArray[location] = [element]
else: # if element isn't known, then remove possible elements that don't have the same type
for i in range(len(strip.workingsArray[location]) -1, -1, -1): # if type = 0, then only elements with an even number can be correct
if isItOdd(strip.workingsArray[location][i]) and type == 0:
del strip.workingsArray[location][i]
elif not isItOdd(strip.workingsArray[location][i]) and type == 1:
del strip.workingsArray[location][i]
def checkPermutations(strip):
# workingsArray has potentially been modified following a unit being marked
# as such cycle through permutations to see which no longer fit
for i in range(len(strip.outputArray)):
if not np.isnan(strip.outputArray[i]): # e.g. it has been marked/identified
possibleElements = strip.workingsArray[i]
#print("### possibleElements", possibleElements, i)
#printStrip(strip.ID)
for j in strip.workingsPermutations:
if strip.workingsPermutations[j].workingsArray[i] not in possibleElements:
if strip.workingsPermutations[j].identified == 1:
print("Error")
print(strip.workingsPermutations[j])
printStrip(strip.ID)
raise Exception("Error - identified permutation trying to be set inactive")
strip.workingsPermutations[j].active = 0
# if only 1 permutation left active, then it must be the identified permutation
counter = 0
identifiedPermutation = 0
for i in strip.workingsPermutations:
if strip.workingsPermutations[i].active == 1:
counter += 1
identifiedPermutation = i
if counter == 1:
if strip.workingsPermutations[identifiedPermutation].active == 0:
print("Error")
print(identifiedPermutation)
printStrip(strip.ID)
raise Exception("Permutation error - trying to set inactive permutation as identified")
else:
strip.workingsPermutations[identifiedPermutation].identified = 1
if counter == 0:
print("Error")
printStrip(strip.ID)
raise Exception("Permutation error - no permutations available")
buildWorkingsArray(strip) # rebuildWorkingsArray
def output():
output = []
for i in strips:
if strips[i].RC =='R':
output.append(strips[i].outputArray)
return output
def checkResults():
# check that sum of inputArray == sum of outputArray
for i in strips:
checkStripComplete(strips[i])
def solver(inputRows, inputColumns, inputShowPlot):
global showPlot, showWorkings, strips, rows, columns
rows = np.array(inputRows)
columns = np.array(inputColumns)
showPlot = inputShowPlot
showWorkings = True
strips = setup(rows, columns)
"""
for i in strips:
printStrip(strips[i].ID)
print(strips[i].ID, strips[i].RC, strips[i].inputArray)
for j in strips[i].elements:
print(strips[i].elements[j].ID, strips[i].elements[j].minimumLength, strips[i].elements[j].maximumLength)
"""
if showPlot == 1: # frame 0 shows set up
global figure
figure = plot.setupPlotFigure(rows, columns, strips, showWorkings)
firstPass()
#printStrip("C0")
#printStrip("R7")
#return
if showPlot == 1 and showWorkings == 1: # frame 1 will then shows workings
plot.addFrameToPlotFigure(rows, columns, strips, figure, showWorkings)
print("--------start of first pass---------")
"""
for i in strips:
printStrip(strips[i].ID)
"""
print("--------end of first pass---------")
i = 1
longstop = 15
tableComplete = 0
while i < longstop:
if tableComplete == 1: i = longstop # use this to do one final check!
checkTable()
# have all strips been completed?
tableComplete = 1
for j in strips:
if strips[j].complete == 0: tableComplete = 0
i += 1
if showPlot == 1: # final frame for completeness
plot.addFrameToPlotFigure(rows, columns, strips, figure, showWorkings)
plot.showPlotFigure(figure)
print("--------start of final results---------")
"""
for i in strips:
printStrip(strips[i].ID)
"""
#for i in strips:
#print(strips[i].ID, strips[i].complete, strips[i].outputArray)
if tableComplete == 1:
checkResults()
print(i)
return ["tableComplete", output()]
else:
print(i)
return ["tableNotComplete", output()]
|
import numpy as np
import gc
from scipy import io
from scipy.stats import multivariate_normal
from save_csv import results_to_csv
def sparse_to_np(sparse):
temp = []
for samp in range(sparse.shape[0]):
row = sparse[samp].toarray()[0]
temp.append(row)
return np.asarray(temp)
def permute_dictionaries(data, labels, rand=25):
    perm = np.random.RandomState(seed=rand).permutation(data.shape[0])
    return data[perm], labels[perm]
gc.enable()
spam_data = io.loadmat("spam-data/spam_data.mat")
print("Loaded spam data.")
training_data = sparse_to_np(spam_data["training_data"])
training_labels = spam_data["training_labels"]
training_data, training_labels = permute_dictionaries(training_data, training_labels)
training_data, validation_data = training_data[:4138], training_data[4138:]
training_labels, validation_labels = training_labels[:4138], training_labels[4138:]
classes = [0, 1]
n, features = training_data.shape
print("\nTraining data: ", training_data.shape)
print("Training data labels: ", training_labels.shape)
print("Validation data: ", validation_data.shape)
print("Validation labels: ", validation_labels.shape)
def empirical_mean(partitioned_data):
return {k : np.sum(partitioned_data[k], 0, keepdims=True).transpose() / len(partitioned_data[k]) for k in classes}
def empirical_cov(partitioned_data):
return {k : np.cov(partitioned_data[k].T, bias=True) for k in classes}
def calc_priors(partitioned_data, total):
return {k: partitioned_data[k].shape[0] / total for k in classes}
def partition_data(data, labels):
partitioned = {k: [] for k in classes}
for sample_num in range(data.shape[0]):
k = labels[sample_num][0]
sample_features = data[sample_num]
partitioned[k].append(sample_features)
for k in classes:
partitioned[k] = np.asarray(partitioned[k])
return partitioned
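# Minimal sketch (illustrative, not part of the original run): partition_data
# groups rows by label so per-class statistics can be computed, e.g.
# demo = partition_data(np.array([[1., 0.], [0., 1.], [2., 2.], [3., 3.]]),
#                       np.array([[0], [1], [0], [1]]))
# demo[0] -> array([[1., 0.], [2., 2.]]); demo[1] -> array([[0., 1.], [3., 3.]])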
def error_rate(prediction, actual):
assert len(prediction) == len(actual)
return np.count_nonzero(prediction - actual) / prediction.shape[0]
def classify(distributions, samples, priors):
all_predictions = {}
for key in samples.keys():
predictions = []
for sample in samples[key]:
ll = {k: 0 for k in classes}
for k in classes:
sample = np.array(sample)
ll[k] = distributions[k].logpdf(sample) + np.log(priors[k])
predictions.append(max(ll, key=lambda key: ll[key]))
all_predictions[key] = predictions
return all_predictions
def pool_cov(covariances, priors):
cov = np.zeros(covariances[0].shape)
for k in classes:
cov += priors[k] * covariances[k]
return cov
def LDA(means, covariances, priors, inputs, c=0.0):
pooled_cov = pool_cov(covariances, priors)
pooled_cov += np.eye(features) * c * np.trace(pooled_cov)
distributions = {k: multivariate_normal(means[k].flatten(), pooled_cov, allow_singular=True) for k in classes}
return classify(distributions, inputs, priors)
def QDA(means, covariances, priors, inputs, c=0.0):
temp_covariances, distributions = {}, {}
for k in classes:
temp_covariances[k] = np.eye(features) * c * np.trace(covariances[k]) + covariances[k]
distributions[k] = multivariate_normal(means[k].flatten(), temp_covariances[k], allow_singular=True)
return classify(distributions, inputs, priors)
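# Both classifiers shrink each covariance toward a scaled identity,
# Sigma_c = Sigma + c * trace(Sigma) * I, which keeps the Gaussians
# well-conditioned when features are numerous relative to samples; the
# shrinkage strength c is tuned by cross-validation further below.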
"""------------------------------------------------------------------------------------------------------------------"""
"""------------------------------------------------------------------------------------------------------------------"""
"""------------------------------------------------------------------------------------------------------------------"""
def test_QDA(training_data, training_labels, validation_data, validation_labels, c=0.0):
partitioned_training_data = partition_data(training_data, training_labels)
means = empirical_mean(partitioned_training_data)
covariances = empirical_cov(partitioned_training_data)
priors = calc_priors(partitioned_training_data, training_data.shape[0])
samples = {'validation' : validation_data}
predictions = QDA(means, covariances, priors, samples, c)
return error_rate(np.array([predictions['validation']]).T, validation_labels)
def test_LDA(training_data, training_labels, validation_data, validation_labels, c=0.0):
partitioned_training_data = partition_data(training_data, training_labels)
means = empirical_mean(partitioned_training_data)
covariances = empirical_cov(partitioned_training_data)
priors = calc_priors(partitioned_training_data, training_data.shape[0])
samples = {'validation' : validation_data}
predictions = LDA(means, covariances, priors, samples, c)
return error_rate(np.array([predictions['validation']]).T, validation_labels)
#print(test_QDA(training_data, training_labels, validation_data, validation_labels, .00064))
#print(test_LDA(training_data, training_labels, validation_data, validation_labels))
def kaggle(c):
data = sparse_to_np(spam_data["training_data"])
labels = spam_data["training_labels"]
test_data = sparse_to_np(spam_data["test_data"])
partitioned_data = partition_data(data, labels)
means = empirical_mean(partitioned_data)
partitioned_covariances = empirical_cov(partitioned_data)
priors = calc_priors(partitioned_data, len(data))
samples = {'training' : data}
predictions = QDA(means, partitioned_covariances, priors, samples, c)
train_predictions = predictions['training']
#test_predictions = predictions['test']
print(error_rate(np.array([train_predictions]).T, labels))
#results_to_csv(np.array(test_predictions))
#return
#print(kaggle(.00064))
#0.0004833252779120348 train error with 5000 @ .00064 no prior weighting (~95% test accuracy)
def opt_c_value(training_data, training_labels, validation_data, validation_labels, c_values):
results = {}
for c in c_values:
results[c] = k_fold(training_data, training_labels, 5, c)
print("Error rate ", results[c], " achieved with c value: ", c)
best_c = min(results, key=lambda key: results[key])
print("Optimal c_value was ", best_c, " with error: ", results[best_c])
return best_c
def k_fold(data, labels, k, c):
    data, labels = permute_dictionaries(data, labels, np.random.randint(0,10000))
    data_partitions = np.array_split(data, k)
    label_partitions = np.array_split(labels, k)
    errors = []
    for fold in range(k): # separate loop variable so k keeps its value for the final average
        validation_data = data_partitions[0]
        validation_labels = label_partitions[0]
        training_data = np.concatenate(data_partitions[1:])
        training_labels = np.concatenate(label_partitions[1:])
        error = test_QDA(training_data, training_labels, validation_data, validation_labels, c)
        # rotate the partitions so each fold serves once as the validation set
        data_partitions = data_partitions[1:] + data_partitions[:1]
        label_partitions = label_partitions[1:] + label_partitions[:1]
        errors.append(error)
    return sum(errors) / k
#opt_c_value(training_data, training_labels, validation_data, validation_labels, np.arange(.0006, .0007, .0001))
|
from django.forms import ModelForm
from models import Author
class AuthorForm(ModelForm):
class Meta:
model = Author
exclude = ('user',)
|
def quick_sort(arr, left, right):
    # sort arr[left..right] in place by partitioning around a middle pivot
    index = partition(arr, left, right)
    if left < index - 1:
        quick_sort(arr, left, index-1)
    if index < right:
        quick_sort(arr, index, right)
    return arr
def partition(arr, left, right):
    # Hoare-style partition: returns the first index of the right-hand half
    pivot = arr[(left + right) // 2]
    while left <= right:
        while arr[left] < pivot: # skip elements already on the correct side
            left += 1
        while arr[right] > pivot:
            right -= 1
        if left <= right: # swap the out-of-place pair and advance both cursors
            arr[left], arr[right] = arr[right], arr[left]
            left += 1
            right -= 1
    return left
a = [6,5,4,3,2,1]
print(quick_sort(a, 0, 5))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 28 14:31:12 2018
@author: SD
"""
from flask import Flask as manhas,render_template,request
import urllib.request
import json
import random
import pickle
from sphere_engine import CompilersClientV3
from sphere_engine.exceptions import SphereEngineException
import time
import bs4 as bs
import urllib.request as req
T = True
F = False
# define access parameters
accessToken = 'd5549293157f874363dd97c218f73272'
endpoint = '59e166d8.compilers.sphere-engine.com'
# initialization
client = CompilersClientV3(accessToken, endpoint)
app = manhas(__name__)
CONNECTION_LIST = []
connection_dict = {}
problem = open('/home/as/Desktop/Code Royal/Wt/Problem.pickle','rb')
problem_list = pickle.load(problem)
problem.close()
input_file = open('/home/as/Desktop/Code Royal/Wt/Input.pickle','rb')
sari_input = pickle.load(input_file)
input_file.close()
output_file = open('/home/as/Desktop/Code Royal/Wt/Output.pickle','rb')
sari_output = pickle.load(output_file)
output_file.close()
language_compiler = {'C++' : 44 , 'Java' : 10 , 'Python3' : 116}
def idgen(code,lan,inp):
source = code
compiler = language_compiler[lan]
input_val = inp
try:
response = client.submissions.create(code, compiler, input_val)
z = response['id']
z = int(z)
#print(z)
return z
except SphereEngineException as e:
return -1
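# Illustrative (commented) usage, assuming the Sphere Engine credentials above
# are valid: submit a Python3 snippet, then poll for its output.
# sub_id = idgen('print(int(input()) * 2)', 'Python3', '21')
# if sub_id != -1:
#     print(outget(sub_id)) # expected to print '42' once the run finishes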
def outget(ID):
#out = client.submissions.get(z,F,T,T,F,F)
#print(out)
#time.sleep(20)
#str_out = client.submissions.getStream(ID, 'output')
#print(type(ID))
link = 'http://59e166d8.compilers.sphere-engine.com/api/v3/submissions/'+str(ID)+'/output?access_token=d5549293157f874363dd97c218f73272'
time.sleep(2)
#time.sleep(8)
sou = req.urlopen(link).read()
soup = bs.BeautifulSoup(sou,'lxml')
#print(soup.text)
return soup.text
#str_out = soup.text()
#print(str_out)
#return str_out
def problem_get(problem_detail):
#num = random.randint(0,19)
num = 0
problem_statment = problem_list['Problem Statement'][num]
problem_Constraints = problem_list['Constraints'][num]
problem_input = problem_list['Input'][num]
problem_input = problem_input[7:]
problem_output = problem_list['Output'][num]
problem_sample_input = problem_list['Sample Input'][num]
problem_sample_output = problem_list['Sample Output'][num]
problem_detail.append(problem_statment)
problem_detail.append(problem_Constraints)
problem_detail.append(problem_input)
problem_detail.append(problem_output)
problem_detail.append(problem_sample_input)
problem_detail.append(problem_sample_output)
return
@app.route('/')
def start():
return render_template("sdl.html")
@app.route('/compete',methods=['POST'])
def compete():
#print("hello")
print(request.environ['REMOTE_ADDR'])
problem_detail = []
problem_get(problem_detail)
#print(len(problem_detail))
return render_template("main.html",problem = problem_detail)
@app.route('/check',methods=['POST','GET'])
def check1():
problem_detail = []
problem_get(problem_detail)
lan = request.form['language']
code = request.form['message']
inp = request.form['input1']
#print(lan + code + inp)
id_gen = idgen(code,lan,inp)
#id_gen = 67853084
if id_gen != -1 :
output = outget(id_gen)
else :
output = "Error generated"
#output = '3'
return render_template("test.html",problem = problem_detail,out = output,code = code,lan = lan,inp = inp)
@app.route('/final_submit',methods=['POST'])
def submit1():
problem_detail = []
problem_get(problem_detail)
lan = request.form['language']
code = request.form['message']
#num = random.randint(0,19)
num = 0
inp = sari_input[num]
#print(inp)
#print("yoo")
exp_out = sari_output[num]
#print(exp_out)
id_gen = idgen(code,lan,inp)
output = ''
if id_gen != -1 :
output = outget(id_gen)
else :
output = "Error generated"
#print("------------------yoo--------------------")
#print(output)
status = 'Hello'
color = 'blue'
if output == exp_out:
status = "Accepted"
color = "green"
else:
status = "Wrong Answer"
color = "red"
#print(status)
return render_template("final.html",status = status)
if __name__ == "__main__": #checking the which file to run
app.run(host='0.0.0.0',debug=False,port=9600)
|
import theano.tensor as T
def masked_loss_func(loss_function):
""" Return function that computes loss only for those targets that are not -1."""
def masked_loss_fn(predictions, targets):
assert targets.ndim == 1
target_mask = T.neq(targets, -1)
valid_inds = T.nonzero(target_mask)
return loss_function(predictions[valid_inds], targets[valid_inds])
return masked_loss_fn
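# Illustrative usage (assumed setup): wrap Theano's categorical cross-entropy so
# that padded targets marked -1 do not contribute to the loss.
# masked_ce = masked_loss_func(T.nnet.categorical_crossentropy)
# cost = masked_ce(predictions, targets).mean()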
|
from flask import Blueprint
from flask import jsonify
from flask import request
from google.oauth2 import service_account
from google.auth.transport.requests import AuthorizedSession
from google.cloud import datastore
from google.cloud import bigquery
from google.cloud import storage
import logging
import uuid
import json
import urllib3
import urllib
import socket
import requests
import os
import dataflow_pipeline.massive as pipeline
import cloud_storage_controller.cloud_storage_controller as gcscontroller
from datetime import datetime, timedelta
import dataflow_pipeline.banco_agrario.gestiones_tempus_beam as gestiones_tempus_beam
import time
import sys
gestiones_api_banco_agricola = Blueprint('gestiones_api_banco_agricola', __name__) #[[[[[[[[[[[[[[[[[[***********************************]]]]]]]]]]]]]]]]]]
fecha = time.strftime('%Y%m%d')
fecha_fi = time.strftime('%Y%m%d')
fecha_c = str(fecha)
fecha = fecha_c[0:4] + "-" + fecha_c[4:6] + "-" + fecha_c[6:8]
KEY_REPORT = "api"
hora_ini = "06:00:00" ## En esta parte se define la hora inicial, la cual siempre sera a partir de las 7am
hora_fin = "23:00:00" ## Aqui convertimos la hora en string para la compatibilidad
""" En la siguiente parte se realizara la extraccion de la ruta donde se almacenara el JSON convertido """
Ruta = ("/192.168.20.87", "media")[socket.gethostname()=="contentobi"]
ext = ".csv"
ruta_completa = "/"+ Ruta +"/BI_Archivos/GOOGLE/Banco agricola/" + fecha + ext
"""Comenzamos con el desarrollo del proceso que ejecutaremos para la actualizacion del API """
@gestiones_api_banco_agricola.route("/" + "gestiones", methods=['POST','GET'])
def gestiones():
reload(sys)
sys.setdefaultencoding('utf8')
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-banco_agricola')
gcs_path = 'gs://ct-banco_agricola'
sub_path = "gesiones_tempus/"
output = gcs_path + "/" + sub_path + fecha + ext
blob = bucket.blob(sub_path + fecha + ext)
client = bigquery.Client()
QUERY = (
'SELECT Client_Service, Auth_Key, Content_Type, Authorization, User_ID FROM `contento-bi.unificadas.auth` ')
query_job = client.query(QUERY)
rows = query_job.result()
QUERY2 = (
'SELECT Id_Back_Hist FROM `contento-bi.banco_agricola.gestiones_tempus` ORDER BY CAST(ID_BACK_HIST AS INT64) DESC LIMIT 1 ')
query_job = client.query(QUERY2)
rows2 = query_job.result()
for row in rows2:
Id_Back_Hist = row.Id_Back_Hist
print("id en el que empieza la consulta")
print (int(Id_Back_Hist)+1)
    try:
        os.remove(ruta_completa) # delete the local copy on aries
    except:
        print("Nothing to delete on aries")
    try:
        blob.delete() # delete the copy in Cloud Storage
    except:
        print("Nothing to delete in storage")
# try:
# QUERY3 = ("DELETE FROM `contento-bi.banco_agricola.gestiones_tempus` WHERE 1=1")
# query_job = client.query(QUERY3)
# rows3 = query_job.result()
# except:
# print QUERY3
"""A continuacion, se procedera con abrir el archivo y realizar la escritura del JSON """
file = open(ruta_completa,"a")
for row in rows:
url = "https://prd.contentotech.com/services/microservices/bi/index.php/Rest_Bi_Reports/backOffice"
        print ('URL is ' + url)
headers = {
'Client-Service': row.Client_Service,
'Auth-Key': row.Auth_Key,
'Content-Type': row.Content_Type,
'Authorization': row.Authorization,
'User-ID': str(row.User_ID),
}
# ----------------------------------------------
        # extraction always starts at the number immediately after the pointer value:
# puntero = str(1)
puntero = str(Id_Back_Hist)
payload = '{"id":"' + str(puntero) + '"}'
datos = requests.request("POST", url, headers=headers, data=payload)
print(datos)
print("payload: " + payload)
# print ('Los datos son ' + str(datos.text))
i = datos.json()
cant_filas = len(i)
print("Cant Filas encontradas: " + str(cant_filas))
if len(datos.content) < 50:
continue
else:
i = datos.json()
for rown in i:
file.write(
str(rown["Id_Back_Hist" ]).encode('utf-8').replace('\r\n', '').replace('\n', ' ').replace('\r', '').replace(' ', '').replace('|', '')+"|"+
str(rown["Id_gestion" ]).encode('utf-8').replace('\r\n', '').replace('\n', ' ').replace('\r', '').replace(' ', '').replace('|', '')+"|"+
str(rown["Id_Campana" ]).encode('utf-8').replace('\r\n', '').replace('\n', ' ').replace('\r', '').replace(' ', '').replace('|', '')+"|"+
str(rown["Nombre_Campana" ]).encode('utf-8').replace('\r\n', '').replace('\n', ' ').replace('\r', '').replace(' ', '').replace('|', '')+"|"+
str(rown["Documento" ]).encode('utf-8').replace('\r\n', '').replace('\n', ' ').replace('\r', '').replace(' ', '').replace('|', '')+"|"+
str(rown["Num_Obligacion" ]).encode('utf-8').replace('\r\n', '').replace('\n', ' ').replace('\r', '').replace(' ', '').replace('|', '')+"|"+
str(rown["Id_Est_Back" ]).encode('utf-8').replace('\r\n', '').replace('\n', ' ').replace('\r', '').replace(' ', '').replace('|', '')+"|"+
str(rown["Nombre_Estado" ]).encode('utf-8').replace('\r\n', '').replace('\n', ' ').replace('\r', '').replace(' ', '').replace('|', '')+"|"+
str(rown["Fecha_Modif" ]).encode('utf-8').replace('\r\n', '').replace('\n', ' ').replace('\r', '').replace(' ', '').replace('|', '')+"|"+
str(rown["Usuario_Modif" ]).encode('utf-8').replace('\r\n', '').replace('\n', ' ').replace('\r', '').replace(' ', '').replace('|', '')+"|"+
str(rown["Nombre_usuario" ]).encode('utf-8').replace('\r\n', '').replace('\n', ' ').replace('\r', '').replace(' ', '').replace('|', '')+"|"+
str(rown["Observacion" ]).encode('utf-8').replace('\r\n', '').replace('\n', ' ').replace('\r', '').replace(' ', '').replace('|', '')+"\n")
file.close()
blob.upload_from_filename(ruta_completa)
ejecutar = gestiones_tempus_beam.run(output, KEY_REPORT) #[[[[[[[[[[[[[[[[[[***********************************]]]]]]]]]]]]]]]]]]
return("Gestion Finalizada con exito" + puntero + "filas cargadas" )
|
import time
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
# import modules
from bs4 import BeautifulSoup
import chromedriver_binary
# specify the chromedriver PATH (when it sits in the same folder as this Python file)
# driver_path = '/usr/local/bin/chromedriver'
options = webdriver.ChromeOptions()
options.add_argument('--user-data-dir=/Users/nakamurasatoshi/Desktop/chrome/aaa')
options.add_argument('--profile-directory=Default2') # omitting this line selects the Default profile folder
driver = webdriver.Chrome(options=options) #, executable_path=driver_path
# launch Chrome
# driver.maximize_window() # maximize the window
# KuroNet login URL
url = 'https://mp.ex.nii.ac.jp/kuronet/'
driver.get(url)
time.sleep(5)
driver.find_element_by_xpath('//*[@onclick="dashboard();"]').click()
time.sleep(5)
html = driver.page_source.encode('utf-8')
soup = BeautifulSoup(html, "lxml")
trs = soup.find("tbody").find_all("tr")
# run the reservations
for i in range(len(trs)):
index = len(trs) - (1 + i)
tr = trs[index]
print(str(i+1) + "/" + str(len(trs)) + " sequence")
tds = tr.find_all("td")
td3 = tds[3]
if len(td3.find_all("div")) == 1:
sequence = "https://mp.ex.nii.ac.jp" + td3.find("a").get("href")
time.sleep(1)
driver.get(sequence)
# driver.back()
# time.sleep(2)
# id = sequence.split("=")[1].split("&")[0]
# driver.get("https://mp.ex.nii.ac.jp/api/kuronet/index#" + id)
# close all browser windows
driver.quit()
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^update_db/', views.update_db, name='update database'),
url(r'^functions/', views.functions, name='functions'),
url(r'^hilg_attack/', views.hilg_attack, name='hilg attack')
]
|
# project/views.py
# IMPORT
from flask.ext.sqlalchemy import SQLAlchemy
from functools import wraps
from forms import RegisterForm, LoginForm, RequestTakeoffForm
from flask import Flask, flash, redirect, render_template, \
request, session, url_for
import datetime
# from sqlalchemy.exc import IntegrityError
# import pdb
import io
import csv
# CONFIG
app = Flask(__name__)
app.config.from_object('_config')
db = SQLAlchemy(app)
from models import Takeoff, User, Plan
###################################
########### HELPER FUNCTIONS ######
###################################
def login_required(test):
@wraps(test)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return test(*args, **kwargs)
else:
flash('You need to login first')
return redirect(url_for('login'))
return wrap
def flash_errors(form):
for field, errors in form.errors.items():
for error in errors:
flash(u"Error in the %s field - %s" % (
getattr(form, field).label.text, error), 'error')
def open_takeoffs():
return db.session.query(Takeoff).order_by(Takeoff.due_datetime.desc())
def open_plans():
return db.session.query(Plan).order_by(Plan.id.desc())
###################################
########## ROUTE HANDLING #########
###################################
@app.route('/register/', methods=['GET', 'POST'])
def register():
error = None
form = RegisterForm(request.form)
if form.validate_on_submit():
if "@alpha-omegainc.com" in form.user_email.data:
new_user = User(
form.user_name.data,
form.user_email.data,
form.user_password.data,
'1',
'0'
)
else:
flash('You need to use your Alpha-Omega Email')
return render_template('register.html', form=form, error=error)
try:
db.session.add(new_user)
db.session.commit()
flash('Thank You for Registering. Please Login.')
return redirect(url_for('login'))
except:
error = 'That username and/or email already exists'
return render_template('register.html', form=form,
error=error)
return render_template('register.html', form=form, error=error)
@app.route('/', methods=['GET', 'POST'])
def login():
error = None
form = LoginForm(request.form)
    if request.method == 'POST':
        if form.validate_on_submit():
            user = User.query.filter_by(user_name=request.form['user_name']).first()
            # check the user exists before touching its attributes
            if user is None or user.user_password != request.form['user_password']:
                error = 'Invalid Username or Password'
            elif user.active != 1:
                error = 'Account is not active, contact admin'
            else:
                session['logged_in'] = True
                session['user_id'] = user.id
                flash('Welcome ' + request.form['user_name'] + '!')
                return redirect(url_for('takeoffs'))
    return render_template('login.html', form=form, error=error)
@app.route('/takeoffs/')
@login_required
def takeoffs():
print(open_takeoffs())
return render_template(
'takeoffs.html',
open_takeoffs=open_takeoffs(),
)
@app.route('/plans/')
@login_required
def plans():
print(open_plans())
return render_template(
'plans.html',
open_plans=open_plans(),
)
@app.route('/loadtakeoff/')
@login_required
def upload_takeoff():
return render_template('loadtakeoff.html')
@app.route('/requesttakeoff/')
def request_takeoff():
error = None
form = RequestTakeoffForm(request.form)
return render_template('requesttakeoff.html', form=form, error=error)
@app.route('/logout/')
@login_required
def logout():
session.pop('logged_in', None)
session.pop('user_id', None)
flash('Goodbye!')
return redirect(url_for('login'))
#############################################
############## ACTION FUNCTIONS #############
#############################################
@app.route('/readupload', methods=["POST"])
@login_required
def read_upload():
userID = ""
planName = ""
startDate = ""
endDate = ""
pitches = []
combos = []
options = []
elevations = []
planName = ""
marketID = ""
builderID = ""
trelloID = ""
complexity = 0
def row_data_collector(elev, option):
if (elev, option) == ('', ''):
return combos
elif (elev, option) not in combos:
combos.append((elev, option))
return combos
    def date_parser(date):
        new_date = date
        check_months = date.split('/')
        if len(check_months[0]) == 1: # zero-pad a single-digit month so strptime's %m matches
            new_date = '0' + new_date
        return datetime.datetime.strptime(new_date, '%m/%d/%Y~%I:%M:%S %p')
def pitch_counter(pitch, pitches):
if pitch not in pitches:
pitches.append(pitch)
return pitches
# Count Total Number of Elevations and Options
def count_elevations_and_options(elevopt):
for row in elevopt:
if "Base" in row:
elevations.append(row[0])
elif row not in options:
options.append(row)
def load_plan():
new_plan = Plan(
planName,
builderID,
marketID
)
db.session.add(new_plan)
db.session.commit()
return Plan.query.filter_by(plan_name=planName).first().id
def load_takeoff(planID):
new_takeoff = Takeoff(
planName,
startDate,
startDate,
endDate,
userID,
trelloID,
planID,
str(len(elevations)),
str(len(options)),
str(len(pitches)),
complexity
)
db.session.add(new_takeoff)
db.session.commit()
f = request.files['data_file']
if not f:
return "No file"
stream = io.StringIO(f.stream.read().decode("UTF8"), newline=None)
my_file_reader = csv.reader(stream)
next(my_file_reader)
for row in my_file_reader:
if row[1] == "START JOB":
jobData = row[9].split("_")
planName = jobData[0]
startDate = date_parser(jobData[1])
userID = jobData[2]
builderData = jobData[3].split(":")
builderID = builderData[0]
marketData = jobData[4].split(":")
marketID = marketData[0]
trelloID = jobData[5]
elif row[1] == "END JOB":
endDate = date_parser(row[9])
elif row[1] == "Section":
complexity += 1
else:
combos = row_data_collector(row[2], row[3])
if 'Shing' in row[1]:
pitches = pitch_counter(row[4], pitches)
count_elevations_and_options(combos)
load_takeoff(load_plan())
flash('Takeoff Uploaded successfully')
return redirect(url_for('upload_takeoff'))
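# Illustrative (assumed) shape of an uploaded CSV row, reverse-engineered from
# the parser above; every field and separator here is an assumption:
# ['', 'START JOB', '', '', '', '', '', '', '',
#  'PlanA_3/14/2019~1:05:00 PM_42_7:Acme Builders_3:Dallas_trelloCardId']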
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
N = int(input())
list1 = input().split()
M = int(input())
list2 = input().split()
list3 = set(list1).symmetric_difference(set(list2))
list3 = list(map(int, list3))
for item in sorted(list3):
print(item)
|
from artiq.experiment import *
import numpy as np
import os
import time
class Run2(EnvExperiment):
"""2020-11-11-zEEMAN test-854"""
def build(self):
self.setattr_device("core")
self.setattr_device("ttl0")
self.setattr_device("ttl1")
self.setattr_device("ttl2")
self.setattr_device("ttl4")
self.setattr_device("ttl6")
self.setattr_device("ttl8")
self.setattr_device("ttl10")
self.setattr_device("ttl12")
self.setattr_device("ttl14")
self.setattr_device("ttl16")
self.setattr_device("ttl18")
self.setattr_device("ttl20")
self.setattr_device("ttl21")
self.setattr_device("ttl24")
self.setattr_device("ttl26")
self.setattr_device("ttl28")
self.setattr_device("ttl30")
self.setattr_device("ttl32")
self.setattr_device("urukul2_ch1")
self.setattr_device("urukul2_ch2")
self.setattr_device("urukul2_cpld")#定义设备
def prepare(self):
self.parameter=self.get_dataset("para")
#self.Rabi_Start=self.get_dataset("Run_Uint.Rabi.Start")
        #self.Rabi_End=self.get_dataset("Run_Uint.Rabi.End")
        #self.Rabi_Step=self.get_dataset("Run_Uint.Rabi.Step")
self.Zeeman_Frequency=self.get_dataset("Run_Uint.Zeeman.Start")
self.Zeeman_Frequency_End=self.get_dataset("Run_Uint.Zeeman.End")
self.Zeeman_Frequency_Step=self.get_dataset("Run_Uint.Zeeman.Step")
#self.Zeeman_Repeat=self.get_dataset("Run_Uint.Zeeman.Repeat")
#self.Zeeman_Threshould=self.get_dataset("Run_Uint.Zeeman.Threshould")
#self.Rabi_Threshould=self.get_dataset("Run_Uint.Rabi.Threshould")
#self.Preparation_Frequency=self.get_dataset("Run_Uint.Preparation.Frequency")
#self.Preparation_Attenuation=self.get_dataset("Run_Uint.Preparation.Attenuation")
#self.Zeeman_Attenuation=self.get_dataset("Run_Uint.Zeeman.Attenuation")
self.length=int((self.Zeeman_Frequency_End-self.Zeeman_Frequency)/(self.Zeeman_Frequency_Step/1000))
@kernel
def run(self):
print(66)
self.core.reset()
        # reset the timeline to avoid RTIO underflow errors
delay(2*ms)
self.urukul2_cpld.init()
self.urukul2_ch2.init()
self.urukul2_ch1.init()
        self.urukul2_ch2.sw.on()# controls the three 729 light frequencies and powers
self.urukul2_ch1.sw.on()
self.ttl2.input()
self.ttl1.input()
self.ttl0.input()
self.ttl4.output()
self.ttl6.output()
self.ttl8.output()
self.ttl10.output()
self.ttl18.output()
self.ttl12.output()
self.ttl14.output()
self.ttl16.output()
self.ttl20.output()
self.ttl21.output()
delay(50*ms)
print("ok1")
self.set_dataset("Time", np.full(1000, np.nan), broadcast=True)
self.set_dataset("Photon_Counts", np.full(1000, np.nan), broadcast=True)
delay(100*ms)
        #self.urukul0_ch0.set(self.Preparation_Frequency*MHz)# set the 729 state-preparation frequency
        #self.urukul0_ch0.set_att(self.Preparation_Attenuation)# set the 729 state-preparation power
        #self.urukul0_ch1.set_att(self.Zeeman_Attenuation)# set the 729 Zeeman-scan power
#delay(50*ms)
try:
if self.parameter==2:
#self.length=int((self.Zeeman_Frequency_End-self.Zeeman_Frequency)/(self.Zeeman_Frequency_Step/1000))
self.set_dataset("FrequncyList", np.full(self.length, np.nan), broadcast=True)
self.set_dataset("D_List", np.full(self.length, np.nan), broadcast=True)
self.set_dataset("Data", np.full(self.length, np.nan), broadcast=True)
delay(5*s)
print(self.Zeeman_Frequency)
print(self.Zeeman_Frequency_End)
print(self.Zeeman_Frequency_Step/1000)
t=0
while self.Zeeman_Frequency<self.Zeeman_Frequency_End:
#print(self.Zeeman_Frequency)
delay(200*ms)
self.urukul2_ch1.set(self.Zeeman_Frequency*MHz)
self.urukul2_ch1.set_att(4.0)
delay(5*ms)
a=0
delay(5*ms)
#print(1)
for i in range(1000):
#try:
#delay(1*ms)
#self.core.reset()
                        t_end=self.ttl1.gate_rising(20*ms)# record rising edges from now until the gate time in parentheses elapses
t_edge=self.ttl1.timestamp_mu(t_end)
#rtio = self.core.get_rtio_counter_mu()
#now = now_mu()
#print(rtio-now)
                        if t_edge>0:# if a rising edge of the trigger signal was detected
self.core.reset()
at_mu(t_edge)
delay(5*ms)
#print(i)
                            self.ttl20.off()# open the 397 double pass
                            self.ttl4.off()# open the high-power AOM on 397-Z
                            self.ttl8.on()# open low-power 397-Z for Doppler cooling
                            self.ttl10.on()# close 397-x&y Doppler cooling
                            delay(2*ms)
                            self.ttl20.on()# close the 397 double pass
                            self.ttl6.on()# close the 854 double pass
                            self.ttl21.on()# output low-power 854
                            self.ttl18.off()# open the 729 double-pass AOM
                            delay(2*ms)
                            #self.ttl21.off()# output level 0 to shut the 854 off completely
                            self.ttl18.on()# close the 729 double-pass AOM
                            self.ttl20.off()# open the 397 double-pass AOM
                            self.ttl10.off()# open the 397 x&y AOM
                            self.ttl8.off()# open high-power 397-z for state detection
gate_end_mu = self.ttl0.gate_rising(5700*us)
                            # record and count rising edges during the detection window
delay(1.5*ms)
num_rising_edges=self.ttl0.count(gate_end_mu)
#self.set_dataset("Photon_Count",num_rising_edges, broadcast=True)
                            self.ttl6.off()# open the 854 double pass
                            self.ttl21.off()# open high-power 854
#self.set_dataset("Photon_Count",num_rising_edges, broadcast=True)
if num_rising_edges>0:
a+=1
delay(1*ms)
self.set_dataset("Photon_Count",num_rising_edges, broadcast=True)
self.mutate_dataset("Photon_Counts", i, num_rising_edges)
self.mutate_dataset("Time", i, i)
#except RTIOUnderflow:
                            # "Error for time" would be printed on an RTIO timing overflow
#print("Error for time")
                    self.ttl20.off()# open the 397 double pass
                    self.ttl10.off()# open the 397 x&y AOM
                    self.ttl6.off()# open the 854 double pass
                    #self.ttl22.off()# open high-power 854
D=1-a/100
self.mutate_dataset("FrequncyList", t, self.Zeeman_Frequency)
self.mutate_dataset("D_List", t, D)
t+=1
self.Zeeman_Frequency+=self.Zeeman_Frequency_Step/1000
except:
            self.ttl18.on()# close the 729 double-pass AOM
            self.ttl20.off()# open the 397 double-pass AOM
            self.ttl10.off()# open the 397 x&y AOM
            self.ttl8.off()# open high-power 397-z for state detection
def analyze(self):
#print(44)
try:
name=time.strftime("%F")
filename="E:/data/"+str(name)
os.mkdir(filename)
except:
pass
D_List=self.get_dataset("D_List")
FrequncyList=self.get_dataset("FrequncyList")
name1=time.strftime("%H-%M-%S")+"-Zeeman"
filename1=filename+"/"+str(name1)
file=open(filename1+".txt","a")
str4="Fre"
str5="Jump"
str6=str4+" "+str5+"\n"
file.write(str6)
for i in range(self.length):
str1=str(D_List[i])
str2=str(FrequncyList[i])
str3=str2+" "+str1+"\n"
file.write(str3)
file.close()
|
from sqlalchemy import create_engine
import secret
from app import configured_app
from models.base_model import db
from models.board import Board
from models.reply import Reply
from models.topic import Topic
from models.user import User
from models.like import Like
def reset_database():
    # MySQL root now authenticates via socket by default rather than with a password
url = 'mysql+pymysql://root:{}@localhost/?charset=utf8mb4'.format(
secret.database_password
)
e = create_engine(url, echo=True)
with e.connect() as c:
c.execute('DROP DATABASE IF EXISTS bbs')
c.execute('CREATE DATABASE bbs CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci')
c.execute('USE bbs')
db.metadata.create_all(bind=e)
def generate_fake_data():
    # users
form = dict(
username='密码123',
password='123',
image='/images/1.jpg'
)
u1 = User.register(form)
form = dict(
username='密码1234',
password='1234',
image='/images/2.png'
)
u2 = User.register(form)
form = dict(
username='密码abc',
password='abc',
image='/images/4.jpg'
)
u3 = User.register(form)
    # boards
form1 = dict(
title='吐槽'
)
b1 = Board.new(form1)
form2 = dict(
title='水区'
)
b2 = Board.new(form2)
form3 = dict(
title='干货'
)
b3 = Board.new(form3)
    # topic content
with open('markdown_demo.md', encoding='utf8') as f:
content = f.read()
topic_form1 = dict(
title='吐槽 demo',
board_id=b1.id,
content=content
)
topic_form2 = dict(
title='水区 demo',
board_id=b2.id,
content=content
)
topic_form3 = dict(
title='干货 demo',
board_id=b3.id,
content=content
)
for i in range(1):
print('begin topic <{}>'.format(i))
t1 = Topic.new(topic_form1, u1.id)
t2 = Topic.new(topic_form2, u2.id)
t3 = Topic.new(topic_form3, u3.id)
t4 = Topic.new(topic_form1, u1.id)
t5 = Topic.new(topic_form2, u2.id)
t6 = Topic.new(topic_form3, u3.id)
t7 = Topic.new(topic_form1, u1.id)
t8 = Topic.new(topic_form2, u2.id)
t9 = Topic.new(topic_form3, u3.id)
reply_form = dict(
content="""
        | table | table |
        | - | - |
        | table | table |
""",
)
for i in range(3):
reply_form['topic_id'] = i + 2
Reply.new(reply_form, u1.id)
Reply.new(reply_form, u2.id)
Reply.new(reply_form, u3.id)
if __name__ == '__main__':
app = configured_app()
with app.app_context():
reset_database()
        generate_fake_data()
|
#
# @lc app=leetcode.cn id=665 lang=python3
#
# [665] Non-decreasing Array
#
# @lc code=start
from typing import List

class Solution:
    def checkPossibility(self, nums: List[int]) -> bool:
        counter = 0
        nums.insert(0, -10**5 - 1) # sentinel smaller than any allowed value, so nums[i-1] always exists
        for i in range(1, len(nums) - 1):
            if nums[i] > nums[i + 1]:
                counter += 1
                if counter == 2:
                    nums.pop(0)
                    return False
                # greedy repair: prefer lowering nums[i]; raise nums[i+1] only when forced
                if nums[i - 1] <= nums[i + 1]:
                    nums[i] = nums[i - 1]
                else:
                    nums[i + 1] = nums[i]
        nums.pop(0)
        return True
# @lc code=end
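# Quick sanity checks (illustrative):
# Solution().checkPossibility([4, 2, 3]) # True  -- lower the 4 to 2
# Solution().checkPossibility([4, 2, 1]) # False -- would need two changes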
|
import tensorflow as tf
import learn.data as data
from learn.settings import *
from learn.utils import *
batch_size = 30
alpha = .001
beta = 0.01
# def fully_connected_deprecated(dataset, labels, hidden_size):
# train_dataset, train_labels, \
# valid_dataset, valid_labels, \
# test_dataset, test_labels = data.divide(dataset, labels)
#
# graph = tf.Graph()
# with graph.as_default():
#
# # input data for training
# tf_train_dataset = tf.placeholder(tf.float32,
# shape=(batch_size, FEATURE_SIZE))
# tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, NUM_LABELS))
#
# tf_valid_dataset = tf.constant(valid_dataset)
# tf_test_dataset = tf.constant(test_dataset)
#
# theta_1 = tf.Variable(tf.truncated_normal([FEATURE_SIZE, hidden_size]))
# biases_1 = tf.Variable(tf.zeros([hidden_size]))
#
# theta_2 = tf.Variable(tf.truncated_normal([hidden_size, NUM_LABELS]))
# biases_2 = tf.Variable(tf.zeros(NUM_LABELS))
#
# logits_1 = tf.matmul(tf_train_dataset, theta_1) + biases_1
# hidden_layer = tf.nn.relu(logits_1)
# logits_2 = tf.matmul(hidden_layer, theta_2) + biases_2
#
# regularizer = tf.nn.l2_loss(theta_1) + tf.nn.l2_loss(theta_2)
# loss = tf.reduce_mean(
# tf.nn.softmax_cross_entropy_with_logits(logits=logits_2, labels=tf_train_labels)
# ) + beta * regularizer
#
# optimizer = tf.train.GradientDescentOptimizer(alpha).minimize(loss)
#
# train_prediction = tf.nn.softmax(logits_2)
#
# logits_1 = tf.matmul(tf_valid_dataset, theta_1) + biases_1
# hidden_layer = tf.nn.relu(logits_1)
# logits_2 = tf.matmul(hidden_layer, theta_2) + biases_2
# valid_prediction = tf.nn.softmax(logits_2)
#
# logits_1 = tf.matmul(tf_test_dataset, theta_1) + biases_1
# hidden_layer = tf.nn.relu(logits_1)
# logits_2 = tf.matmul(hidden_layer, theta_2) + biases_2
# test_prediction = tf.nn.softmax(logits_2)
#
# # num_steps = train_labels.shape[0] // batch_size
# num_steps = 60001
#
# with tf.Session(graph=graph) as session:
# tf.global_variables_initializer().run()
# print("Initialization complete, start iterations...")
# for step in range(num_steps):
# offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# batch_data = train_dataset[offset:offset+batch_size, :]
# batch_labels = train_labels[offset:offset+batch_size, :]
# feed = {
# tf_train_dataset: batch_data,
# tf_train_labels: batch_labels
# }
# _, cost, predictions = session.run(
# [optimizer, loss, train_prediction],
# feed_dict=feed
# )
# if step % 2000 == 0:
# print('Mini batch loss at step {}: {:.1f}'.format(step, cost))
# print('Training accuracy: {:.1f}%'.format(accuracy(predictions, batch_labels)))
# print('Validation accuracy: {:.1f}%'.format(accuracy(valid_prediction.eval(), valid_labels)))
# print()
# print('Training complete!')
# print('Test accuracy: {:.1f}%'.format(accuracy(test_prediction.eval(), test_labels)))
#
# return theta_1, biases_1, theta_2, biases_2
def fully_connected_(dataset, labels, neurons, iterations=4001):
train_dataset, train_labels, \
valid_dataset, valid_labels, \
test_dataset, test_labels = data.divide(dataset, labels)
feature_size = train_dataset.shape[1]
num_labels = train_labels.shape[1]
hidden_layers = len(neurons)
graph = tf.Graph()
with graph.as_default():
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, feature_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
theta_list = []
bias_list = []
n = feature_size
for layer in range(hidden_layers + 1):
if layer < hidden_layers:
l = neurons[layer]
else:
l = num_labels
            theta = tf.Variable(tf.truncated_normal([n, l], stddev=.1))
            bias = tf.Variable(tf.constant(1.0, shape=[l]))
            theta_list.append(theta)
            bias_list.append(bias)
n = l
def forward_prop(training_data):
for layer in range(hidden_layers):
hypo = tf.matmul(training_data, theta_list[layer]) + bias_list[layer]
logits = tf.nn.relu(hypo)
training_data = logits
logits = tf.matmul(training_data, theta_list[hidden_layers]) + bias_list[hidden_layers]
return logits
logits = forward_prop(tf_train_dataset)
regularizer = 0
for theta in theta_list:
regularizer += tf.nn.l2_loss(theta)
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits=logits,
labels=tf_train_labels
)
) + beta * regularizer
optimizer = tf.train.GradientDescentOptimizer(alpha).minimize(loss)
train_prediction = tf.nn.softmax(logits)
valid_logits = forward_prop(tf_valid_dataset)
valid_prediction = tf.nn.softmax(valid_logits)
test_logits = forward_prop(tf_test_dataset)
test_prediction = tf.nn.softmax(test_logits)
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print('Initialization complete, start training...')
for step in range(iterations):
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
batch_data = train_dataset[offset:offset + batch_size, :]
batch_labels = train_labels[offset:offset + batch_size, :]
feed = {
tf_train_dataset: batch_data,
tf_train_labels: batch_labels
}
_, cost, predictions = session.run(
[optimizer, loss, train_prediction],
feed_dict=feed
)
if step % (iterations // 10) == 0:
print('Mini batch loss at step {}: {:.1f}'.format(step, cost))
print('Training accuracy: {:.1f}%'.format(accuracy(predictions, batch_labels)))
print('Validation accuracy: {:.1f}%'.format(accuracy(valid_prediction.eval(), valid_labels)))
print()
print('Training complete!')
print('Test accuracy: {:.1f}%'.format(accuracy(test_prediction.eval(), test_labels)))
return theta_list, bias_list
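# Illustrative (commented) call, assuming `dataset` and `labels` are numpy
# arrays prepared by learn.data with one-hot labels:
# theta_list, bias_list = fully_connected_(dataset, labels, neurons=[256, 64])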
|
import mcpi.minecraft as minecraft
import mcpi.block as block
mc = minecraft.Minecraft.create()
# draws a line of 5 stone blocks in front of the player
[x,y,z] = mc.player.getPos()
i = 0
while i < 5:
mc.setBlock(x+i,y,z+3,block.STONE)
i += 1
|
import random
deck = [['b',1],['b',1],['b',1],['b',2],['b',2],['b',3],['b',3],['b',4],
['w',1],['w',1],['w',1],['w',2],['w',2],['w',3],['w',3],['w',4]]
del deck[0:6]
def shuffle(deck):
    myDeck = [] # new list that collects the cards in shuffled order
    while len(deck) != 0: # keep going until every card has been moved
        rnd = random.choice(deck) # pick a random remaining card
        myDeck.append(rnd) # add it to the shuffled deck
        deck.remove(rnd) # and remove it from the original deck
    return myDeck
print(shuffle(deck))
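# Equivalent one-liner with the standard library, kept for reference:
# random.shuffle(deck) # shuffles in place instead of building a new list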
|
import numpy
import random
import time
from cubelib import mywireframe
from cubelib import emulator
# TODO:
# shiftPlane(axis, plane, delta)
# moves the plane along the axis by delta steps, if it exceeds dimensions, just clear it out, don't rotate.
# swapPlanes(axis1, plane1, axis2, plane2)
# rain should set random LEDs on the first plane (not a lot of them)
# and shift the plane along that axis by one step --- Fixed
#
# THINK:
# The python code keeps sending a 125 byte string to redraw the
# cube as often as it can, this contains 1000 bit values that the MSP
# handles. Now, in our code we have been using time.sleep() a lot.
# We probably can have a counter that each of these functions uses to
# advance its steps, and then increment / decrement that
# counter according to music
def wireframeCubeCenter(cube,size):
if size % 2 == 1:
size = size+1
half = size/2
start = cube.dimension/2 - half
end = cube.dimension/2 + half - 1
for x in range(0,cube.dimension):
for y in range(0,cube.dimension):
for z in range(0,cube.dimension):
cube.set_led(x,y,z,0)
for x in (start,end):
for y in (start,end):
for z in range(start,end+1):
cube.set_led(x,y,z)
cube.set_led(x,z,y)
cube.set_led(z,x,y)
def wireframeCube(cube,START,END):
x0,y0,z0 = START
x1,y1,z1 = END
print "start:",START,"end:",END
for x in range(0,cube.dimension):
for y in range(0,cube.dimension):
for z in range(0,cube.dimension):
cube.set_led(x,y,z,0)
for x in (x0,x1):
for y in (y0,y1):
if z0<z1:
for z in range(z0,z1+1):
cube.set_led(x,y,z)
print x,y,z, "set-1st condition"
else:
for z in range(z1,z0+1):
cube.set_led(x,y,z)
print x,y,z, "set-2nd condition"
for x in (x0,x1):
for z in (z0,z1):
if y0<y1:
for y in range(y0,y1+1):
cube.set_led(x,y,z)
print x,y,z, "Set - 1st"
else:
for y in range(y1,y0+1):
cube.set_led(x,y,z)
print x,y,z, "Set - 2nd"
for y in (y0,y1):
for z in (z0,z1):
if x0<x1:
for x in range(x0,x1+1):
cube.set_led(x,y,z)
print x,y,z, "SET - 1st"
else:
for x in range(x1,x0+1):
cube.set_led(x,y,z)
print x,y,z, "SET - 2nd"
def solidCubeCenter(cube,size):
if size % 2 == 1:
size = size+1
half = size/2
start = cube.dimension/2 - half
end = cube.dimension/2 + half
for x in range(0,cube.dimension):
for y in range(0,cube.dimension):
for z in range(0,cube.dimension):
cube.set_led(x,y,z,0)
for i in range(start,end):
for j in range(start,end):
for k in range(start,end):
cube.set_led(i,j,k)
def solidCube(cube,START,END):
x0,y0,z0 = START
x1,y1,z1 = END
for x in range(0,cube.dimension):
for y in range(0,cube.dimension):
for z in range(0,cube.dimension):
cube.set_led(x,y,z,0)
for i in range(x0,x1+1):
for j in range(y0,y1+1):
for k in range(z0,z1+1):
cube.set_led(i,j,k)
def setPlane(cube,axis,x,level = 1):
plane = level
if isinstance(level, int):
plane = numpy.array([[level]*10]*10, dtype=bool)
if axis == 1:
for i in range(0,cube.dimension):
for j in range(0,cube.dimension):
cube.set_led(x,i,j,plane[i][j])
elif axis == 2:
for i in range(0,cube.dimension):
for j in range(0,cube.dimension):
cube.set_led(i,x,j,plane[i][j])
else:
for i in range(0,cube.dimension):
for j in range(0,cube.dimension):
cube.set_led(i,j,x,plane[i][j])
def shiftPlane(cube,axis,plane,delta):
if axis == 1:
for i in range(0,cube.dimension):
for j in range(0,cube.dimension):
try:
cube.set_led(plane+delta,i,j,cube.get_led(plane,i,j))
cube.set_led(plane,i,j,0)
except:
cube.set_led(plane,i,j,0)
elif axis == 2:
for i in range(0,cube.dimension):
for j in range(0,cube.dimension):
try:
cube.set_led(i,plane+delta,j,cube.get_led(i,plane,j))
cube.set_led(i,plane,j,0)
except:
cube.set_led(i,plane,j,0)
else:
for i in range(0,cube.dimension):
for j in range(0,cube.dimension):
try:
cube.set_led(i,j,plane+delta,cube.get_led(i,j,plane))
cube.set_led(i,j,plane,0)
except:
cube.set_led(i,j,plane,0)
#def swapPlane(cube,axis,plane1,plane2):
def randPlane(cube,minimum,maximum):
array = numpy.array([[0]*cube.dimension]*cube.dimension,dtype = 'bool')
for i in range(minimum,maximum):
x = random.choice([i for i in range(0,cube.dimension)])
y = random.choice([i for i in range(0,cube.dimension)])
array[x][y] = 1
return array
def wireframeExpandContract(cube,start=(0,0,0)):
(x0, y0, z0) = start
    # Step direction along each axis: grow away from whichever corner we start in.
    dx = 1 if x0 == 0 else -1
    dy = 1 if y0 == 0 else -1
    dz = 1 if z0 == 0 else -1
    # Expand the wireframe cube out of the starting corner...
    for i in range(0,cube.dimension):
        wireframeCube(cube,(x0,y0,z0),(x0+dx*i,y0+dy*i,z0+dz*i))
        time.sleep(0.1)
        cube.redraw()
    # ...then contract it into a randomly chosen corner.
    max_coord = cube.dimension - 1
    corners = [0,max_coord]
    x0 = random.choice(corners)
    y0 = random.choice(corners)
    z0 = random.choice(corners)
    dx = 1 if x0 == 0 else -1
    dy = 1 if y0 == 0 else -1
    dz = 1 if z0 == 0 else -1
    for j in range(0,cube.dimension):
        i = cube.dimension - j - 1
        wireframeCube(cube,(x0,y0,z0),(x0+dx*i,y0+dy*i,z0+dz*i))
        cube.redraw()
        time.sleep(0.1)
    return (x0, y0, z0) # return the final coordinate
def rain(cube,counter,minimum,maximum,axis=3):
    shiftCube(cube,axis,1)
    setPlane(cube,axis,cube.dimension-1,randPlane(cube,minimum,maximum))
def planeBounce(cube,axis,counter):
i = counter%20
if i:
if i<10: #to turn off the previous plane
setPlane(cube,axis,i-1,0)
elif i>10:
setPlane(cube,axis,20-i,0)
if i<10:
setPlane(cube,axis,i)
elif i>10:
setPlane(cube,axis,19-i)
def square(cube,size,translate=(0,0)):
x0,y0 = translate
array = numpy.array([[0]*cube.dimension] * cube.dimension)
for i in range(0,size):
for j in range(0,size):
array[i+x0][j+y0] = 1
return array
def distance(point1,point2):
x0,y0 = point1
x1,y1 = point2
return numpy.sqrt((x0-x1)**2 + (y0-y1)**2)
def circle(cube,radius,translate=(0,0)):
x1,y1 = translate
array = numpy.array([[0]*cube.dimension] * cube.dimension)
for i in range(0,2*radius):
for j in range(0,2*radius):
if distance((i,j),(radius,radius))<=radius:
array[i+x1][j+y1] = 1
return array
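# Example (hedged sketch): the 2-D pattern helpers combine with setPlane, e.g.
# a filled circle of radius 3 drawn on the top plane:
#
#     setPlane(cube, 3, 9, circle(cube, 3, translate=(2, 2)))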
def wierdshape(cube,diagonal,translate=(0,0)):
x1,y1 = translate
array = numpy.array([[0]*cube.dimension] * cube.dimension)
if diagonal%2 == 0:
diagonal-=1
for y in range(0,diagonal):
for x in range(0,diagonal):
if(y>=diagonal/2):
if(x<=diagonal/2):
if(x>=y):
array[x][y] = 1
else:
if(x<=y):
array[x][y] = 1
else:
if(x<=diagonal/2):
if(x+y>=diagonal/2):
array[x][y] = 1
else:
if(x+y<=diagonal/2):
array[x][y] = 1
return array
def fillCube(cube,level=1):
for x in range(0,cube.dimension):
for y in range(0,cube.dimension):
for z in range(0,cube.dimension):
cube.set_led(x,y,z,level)
def voxel(cube,counter,point):
x,y = point
if(counter==0):
fillCube(cube,0)
for x in range(0,cube.dimension):
for y in range(0,cube.dimension):
cube.set_led(x,y,random.choice([0,cube.dimension-1]))
if counter%9==0:
x = random.choice([i for i in range(0,cube.dimension)])
y = random.choice([i for i in range(0,cube.dimension)])
if cube.get_led(x,y,counter%9)==1:
cube.set_led(x,y,counter%9+1)
cube.set_led(x,y,counter%9,0)
else:
cube.set_led(x,y,8-(counter%9))
cube.set_led(x,y,9-(counter%9),0)
return (x,y)
def shiftCube(cube,axis,delta):
    for x in range(0,cube.dimension):
        for y in range(0,cube.dimension):
            for z in range(0,cube.dimension-delta):
                if axis == 3:
                    cube.set_led(x,y,z,cube.get_led(x,y,z+delta))
                    cube.set_led(x,y,z+delta,0)
                elif axis == 2:
                    cube.set_led(x,z,y,cube.get_led(x,z+delta,y))
                    cube.set_led(x,z+delta,y,0)
                elif axis == 1:
                    cube.set_led(z,x,y,cube.get_led(z+delta,x,y))
                    cube.set_led(z+delta,x,y,0)
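# Example (hedged sketch): shift every lit voxel one layer along the z axis,
# as rain() does each frame to make its drops fall.
#
#     shiftCube(cube, 3, 1)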
def pyramids(cube,counter,axis = 3):
    if(counter%20 <cube.dimension):
        size = counter%10 + 1
        translate = (cube.dimension - size)/2
        setPlane(cube,axis,cube.dimension-1,square(cube,size,(translate,translate)))
        shiftCube(cube,axis,1)
    else:
        size = 9 - (counter-10)%10
        translate = (cube.dimension - size)/2
        setPlane(cube,axis,cube.dimension-1,square(cube,size,(translate,translate)))
        shiftCube(cube,axis,1)
print "counter = ",counter,"size=",size
def sine_wave(cube,counter):
fillCube(cube,0)
center = (cube.dimension-1)/2.0
for x in range(0,cube.dimension):
for y in range(0,cube.dimension):
dist = distance((x,y),(center,center))
cube.set_led(x,y,int(counter%10+numpy.sin(dist+counter)))
def side_waves(cube,counter):
fillCube(cube,0)
    # The wave origin moves on a unit circle as counter advances.
    origin_x=numpy.sin(counter)
    origin_y=numpy.cos(counter)
    for x in range(0,10):
        for y in range(0,10):
            z=int(numpy.sin(numpy.sqrt(((x-origin_x)*(x-origin_x))+((y-origin_y)*(y-origin_y))))+counter%10)
            cube.set_led(x,y,z)
def fireworks(cube,n):
    # Pick a random launch point near the centre of the cube.
    origin_x = random.randint(0,3) + 2
    origin_y = random.randint(0,3) + 2
    origin_z = random.randint(0,3) + 5
    # Particles: position x,y,z and movement dx,dy,dz
particles = [[None for _ in range(6)] for _ in range(n)]
print particles
#shoot a particle up in the air value was 600+500
for e in range(0,origin_z):
cube.set_led(origin_x,origin_y,e,1);
time.sleep(.05+.02*e);
cube.redraw()
fillCube(cube,0)
for f in range(0,n):
#Position
particles[f][0] = origin_x
particles[f][1] = origin_y
particles[f][2] = origin_z
rand_x = random.choice([i for i in range(0,200)])
rand_y = random.choice([i for i in range(0,200)])
rand_z = random.choice([i for i in range(0,200)])
try:
#Movement
particles[f][3] = 1-rand_x/100.0 #dx
particles[f][4] = 1-rand_y/100.0 #dy
particles[f][5] = 1-rand_z/100.0 #dz
except:
print "f:",f
#explode
for e in range(0,25):
slowrate = 1+numpy.tan((e+0.1)/20)*10
gravity = numpy.tan((e+0.1)/20)/2
for f in range(0,n):
particles[f][0] += particles[f][3]/slowrate
particles[f][1] += particles[f][4]/slowrate
particles[f][2] += particles[f][5]/slowrate;
particles[f][2] -= gravity;
cube.set_led(int(particles[f][0]),int(particles[f][1]),int(particles[f][2]))
        cube.redraw()
        time.sleep(0.05) # time.sleep() takes seconds; pause briefly each frame
def T():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for i in range(3,7):
for j in range(3,10):
plane[i][j] = 1
return plane
def E():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for j in range(4,7):
plane[i][j] = 1
for j in range(8,10):
plane[i][j] = 1
for i in range(0,3):
for j in range(0,10):
plane[i][j] = 1
return plane
def B():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,2):
plane[i][j] = 1
for j in range(4,6):
plane[i][j] = 1
for j in range(8,10):
plane[i][j] = 1
for j in range(0,10):
for i in range(0,3):
plane[i][j] = 1
for i in range(7,10):
plane[i][j] = 1
plane[9][0] = 0
plane[9][9] = 0
return plane
def A():
plane = numpy.array([[0]*10] *10)
for i in range(0,10):
for j in range(0,2):
plane[i][j] = 1
for j in range(4,7):
plane[i][j] = 1
for j in range(0,10):
for i in range(0,3):
plane[i][j] = 1
for i in range(7,10):
plane[i][j] = 1
return plane
def C():
plane = numpy.array([[0]*10] *10)
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for j in range(7,10):
plane[i][j] = 1
for i in range(0,3):
for j in range(0,10):
plane[i][j] = 1
return plane
def D():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,2):
plane[i][j] = 1
for j in range(8,10):
plane[i][j] = 1
for j in range(0,10):
for i in range(0,2):
plane[i][j] = 1
for i in range(8,10):
plane[i][j] = 1
plane[9][0] = 0
plane[9][9] = 0
return plane
def F():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for j in range(4,7):
plane[i][j] = 1
for i in range(0,3):
for j in range(0,10):
plane[i][j] = 1
return plane
def H():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(4,7):
plane[i][j] = 1
for i in range(0,3):
for j in range(0,10):
plane[i][j] = 1
for i in range(7,10):
for j in range(0,10):
plane[i][j] = 1
return plane
def G():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for j in range(7,10):
plane[i][j] = 1
for i in range(0,3):
for j in range(0,10):
plane[i][j] = 1
for i in range(7,10):
for j in range(4,10):
plane[i][j] = 1
for i in range(4,10):
for j in range(4,6):
plane[i][j] = 1
return plane
def J():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for i in range(3,7):
for j in range(3,10):
plane[i][j] = 1
for i in range(0,3):
for j in range(7,10):
plane[i][j] = 1
return plane
def K():
plane = numpy.array([[0]*10]*10)
for j in range(0,10):
for i in range(0,2):
plane[i][j] = 1
for i in range(0,10):
for j in range(0,10):
if(i == j):
plane[i][5+j/2] = 1
try:
plane[i-1][4+j/2] = 1
plane[i+1][4+j/2] = 1
except:
print "Blaaah"
if(i+j==9):
plane[i][j/2] = 1
try:
plane[i-1][j/2] = 1
plane[i+1][j/2] = 1
except:
print "Blaaah"
plane[9][5] = 0
plane[9][4] = 0
return plane
def L():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(7,10):
plane[i][j] = 1
for i in range(0,3):
for j in range(0,10):
plane[i][j] = 1
return plane
def M():
plane = numpy.array([[0]*10] * 10)
for i in range(0,2):
for j in range(0,10):
plane[i][j] = 1
for i in range(8,10):
for j in range(0,10):
plane[i][j] = 1
#for i in range(4,7):
#for j in range(0,10):
# plane[i][j] = 1
for i in range(0,10):
for j in range(0,10):
if(i == j):
plane[i/2][j] = 1
try:
plane[i/2][j-1] = 1
plane[i/2][j+1] = 1
except:
print "Blaaah"
if(i+j==9):
plane[5 + i/2][j] = 1
try:
plane[5+i/2][j-1] = 1
plane[5+i/2][j+1] = 1
except:
print "Blaaah"
return plane
def N():
plane = numpy.array([[0]*10] * 10)
for i in range(0,3):
for j in range(0,10):
plane[i][j] = 1
for i in range(7,10):
for j in range(0,10):
plane[i][j] = 1
for i in range(0,10):
for j in range(0,10):
if(i == j):
plane[i][j] = 1
try:
plane[i][j-1] = 1
plane[i][j+1] = 1
except:
print "Blaaah"
return plane
def O():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for j in range(7,10):
plane[i][j] = 1
for j in range(0,10):
for i in range(0,3):
plane[i][j] = 1
for i in range(7,10):
plane[i][j] = 1
return plane
def P():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,2):
plane[i][j] = 1
for j in range(4,7):
plane[i][j] = 1
for i in range(0,3):
for j in range(0,10):
plane[i][j] = 1
for i in range(7,10):
for j in range(0,4):
plane[i][j] = 1
return plane
def Q():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,2):
plane[i][j] = 1
for j in range(8,10):
plane[i][j] = 1
for j in range(0,10):
for i in range(0,2):
plane[i][j] = 1
for i in range(8,10):
plane[i][j] = 1
for i in range(5,10):
for j in range(5,10):
if(i == j):
plane[i][j] = 1
try:
plane[i][j-1] = 1
plane[i][j+1] = 1
except:
print "Blaaah"
return plane
def R():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for j in range(4,6):
plane[i][j] = 1
for i in range(0,3):
for j in range(0,10):
plane[i][j] = 1
for i in range(7,10):
for j in range(0,4):
plane[i][j] = 1
for i in range(0,10):
for j in range(0,10):
if(i == j):
plane[i][5+j/2] = 1
try:
plane[i-1][4+j/2] = 1
plane[i+1][4+j/2] = 1
except:
print "Blaaah"
return plane
def I():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for j in range(7,10):
plane[i][j] = 1
for i in range(3,7):
for j in range(3,10):
plane[i][j] = 1
return plane
def S():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for j in range(4,7):
plane[i][j] = 1
for j in range(8,10):
plane[i][j] = 1
for i in range(0,3):
for j in range(0,7):
plane[i][j] = 1
for i in range(7,10):
for j in range(4,10):
plane[i][j] = 1
return plane
def U():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(7,10):
plane[i][j] = 1
for j in range(0,10):
for i in range(0,3):
plane[i][j] = 1
for i in range(7,10):
plane[i][j] = 1
return plane
def V():
plane = numpy.array([[0]*10] * 10)
for i in range(0,10):
for j in range(0,10):
if(i == j):
plane[i/2][j] = 1
try:
plane[i/2][j-1] = 1
plane[i/2][j+1] = 1
except:
print "Blaaah"
if(i+j==9):
plane[5 + i/2][j] = 1
try:
plane[5+i/2][j-1] = 1
plane[5+i/2][j+1] = 1
except:
print "Blaaah"
plane[0][9] = 0
plane[9][9] = 0
return plane
def W():
plane = numpy.array([[0]*10] * 10)
for i in range(0,2):
for j in range(0,10):
plane[i][j] = 1
for i in range(8,10):
for j in range(0,10):
plane[i][j] = 1
#for i in range(4,7):
#for j in range(0,10):
# plane[i][j] = 1
for i in range(0,10):
for j in range(0,10):
if(i == j):
plane[5+i/2][j] = 1
try:
plane[5+i/2][j+2] = 1
plane[5+i/2][j+1] = 1
except:
print "Blaaah"
if(i+j==9):
plane[i/2][j] = 1
try:
plane[i/2][j+2] = 1
plane[i/2][j+1] = 1
except:
print "Blaaah"
return plane
def X():
plane = numpy.array([[0]*10]*10)
for i in range(0,10):
for j in range(0,10):
if(i == j):
plane[i][j] = 1
try:
plane[i][j-1] = 1
plane[i][j+1] = 1
except:
print "Blaaah"
if(i+j == 9):
plane[i][j] = 1
try:
plane[i][j-1] = 1
plane[i][j+1] = 1
except:
print "Blaaah"
return plane
def Y():
plane = numpy.array([[0]*10]*10)
for i in range(0,10):
for j in range(0,5):
if(i == j):
plane[i][j] = 1
try:
plane[i][j-1] = 1
plane[i][j+1] = 1
except:
print "Blaaah"
if(i+j == 9):
plane[i][j] = 1
try:
plane[i][j-1] = 1
plane[i][j+1] = 1
except:
print "Blaaah"
for i in range(4,6):
for j in range(5,10):
plane[i][j] = 1
plane[0][9] = 0
plane[0][0] = 0
return plane
def Z():
plane = numpy.array([[0]*10]*10)
for i in range(0,10):
for j in range(0,10):
if(i+j == 9):
plane[i][j] = 1
try:
plane[i][j-1] = 1
plane[i][j+1] = 1
except:
print "Blaaah"
for i in range(0,10):
for j in range(0,3):
plane[i][j] = 1
for j in range(7,10):
plane[i][j] = 1
return plane
# Map each supported character to the function that draws its letter plane.
LETTER_PLANES = {
    'A': A, 'B': B, 'C': C, 'D': D, 'E': E, 'F': F, 'G': G, 'H': H, 'I': I,
    'J': J, 'K': K, 'L': L, 'M': M, 'N': N, 'O': O, 'P': P, 'Q': Q, 'R': R,
    'S': S, 'T': T, 'U': U, 'V': V, 'W': W, 'X': X, 'Y': Y, 'Z': Z,
}
def stringPrint(cube,string,counter=0,axis = 3):
    if counter%10 ==0:
        fillCube(cube,0)
        letter = LETTER_PLANES.get(string[(counter/10)%len(string)])
        if letter is not None:
            setPlane(cube,axis,9,letter())
    else:
        shiftCube(cube,axis,1)
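# Example (hedged sketch): call once per animation tick; a new letter is drawn
# on the top plane every 10 ticks and scrolls through the cube in between.
#
#     stringPrint(cube, "HELLO", counter)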
def stringfly(cube,axis):
shiftCube(cube,axis,1)
def technites(cube,counter,axis = 3):
    # Spell out "TECHNITES": a new letter every 10 ticks, scrolling in between.
    letters = [T,E,C,H,N,I,T,E,S]
    if counter%10 == 0:
        fillCube(cube,0)
        setPlane(cube,axis,9,letters[(counter%90)/10]())
    else:
        stringfly(cube,axis)
def moveFaces(cube):
Z0 = numpy.array([[0]*cube.dimension]*cube.dimension)
Z9 = numpy.array([[0]*cube.dimension]*cube.dimension)
X0 = numpy.array([[0]*cube.dimension]*cube.dimension)
X9 = numpy.array([[0]*cube.dimension]*cube.dimension)
for i in range(1,cube.dimension):
for j in range(0,cube.dimension):
X0[i-1][j] = cube.get_led(i,j,0)
for j in range(0,cube.dimension):
X0[9][j] = cube.get_led(9,j,0)
for i in range(0,cube.dimension-1):
for j in range(0,cube.dimension):
Z0[i+1][j] = cube.get_led(0,j,i)
for j in range(0,cube.dimension):
Z0[0][j] = cube.get_led(0,j,0)
for i in range(0,cube.dimension-1):
for j in range(0,cube.dimension):
X9[i+1][j] = cube.get_led(i,j,9)
for j in range(0,cube.dimension):
X9[0][j] = cube.get_led(0,j,9)
for i in range(1,cube.dimension):
for j in range(0,cube.dimension):
Z9[i-1][j] = cube.get_led(9,j,i)
for j in range(0,cube.dimension):
Z9[9][j] = cube.get_led(9,j,9)
fillCube(cube,0)
setPlane(cube,3,0,X0)
setPlane(cube,1,0,Z0)
setPlane(cube,3,9,X9)
setPlane(cube,1,9,Z9)
|
import logging
from peewee import logger
from contextlib import contextmanager
class QueryLogHandler(logging.Handler):
def __init__(self, *args, **kwargs):
self._queries = []
super().__init__(*args, **kwargs)
def emit(self, record):
self._queries.append(record)
def queries(self, ignore_txn=False):
queries = [x.msg for x in self._queries]
if ignore_txn:
skips = ('BEGIN', 'ROLLBACK', 'COMMIT', 'SAVEPOINT', 'RELEASE')
queries = [q for q in queries if not q[0].startswith(skips)]
return queries
@contextmanager
def assert_query_count(num, ignore_txn=False):
qh = QueryLogHandler()
logger.setLevel(logging.DEBUG)
logger.addHandler(qh)
try:
qc0 = len(qh.queries(ignore_txn=ignore_txn))
yield qh
finally:
logger.removeHandler(qh)
qc1 = len(qh.queries(ignore_txn=ignore_txn))
assert (qc1 - qc0) == num
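# Example usage (hedged sketch): `User` is a stand-in for any peewee model
# whose database logs its queries through peewee's logger.
#
#     with assert_query_count(1):
#         list(User.select())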
def assert_queries_equal(queries, expected, db):
queries.sort()
expected.sort()
for i in range(len(queries)):
sql, params = queries[i]
expected_sql, expected_params = expected[i]
expected_sql = (expected_sql
.replace('`', db.quote_char)
.replace('%%', db.interpolation))
assert sql == expected_sql
assert params == expected_params
|
import gurobipy as gp
from gurobipy import GRB
from itertools import product, combinations
import os
from abc import ABC, abstractmethod
from project_thesis.model.BaseModelInput import BaseModelInput
class BaseModel(ABC):
def __init__(self, model_input: BaseModelInput, **kwargs):
"""
Formulation of mathematical problem in gurobi framework
:param model_input: ModelInput object with input variables for the model
"""
self.m = gp.Model("TOP")
self._ = model_input
# Setting a computational time limit for the model
if kwargs.get("time_limit", None):
self.m.Params.TimeLimit = kwargs.get("time_limit", None)
# Cartesian Products
self.cart_locs = list(product(self._.locations, repeat=2))
self.cart_loc_v = list(product(self._.locations, self._.service_vehicles))
self.cart_loc_loc_v = list(
product(self._.locations, self._.locations, self._.service_vehicles)
)
self.cart_loc_v_not_depot = list(
product(
[loc for loc in self._.locations if loc != self._.depot],
self._.service_vehicles,
)
)
self.cart_loc_v_scooters = list(
product(
[loc for loc in self._.locations if loc in self._.scooters],
self._.service_vehicles,
)
)
# Subset
subset_param = kwargs.get("subsets", None)
if subset_param is not None:
min_subset, max_subset, radius = subset_param
self.subsets = self.create_subsets(min_subset, max_subset, radius)
else:
self.subsets = subset_param
# Init variables
        # x_ijv - 1 if, for service vehicle v, a visit to location i is followed by a visit to location j; 0 otherwise
self.x = self.m.addVars(self.cart_loc_loc_v, vtype=GRB.BINARY, name="x")
        # y_iv - 1 if location i is visited by service vehicle v; 0 otherwise
self.y = self.m.addVars(self.cart_loc_v, vtype=GRB.BINARY, name="y")
        # p_iv - 1 if service vehicle v picks up a scooter at location i; 0 otherwise
self.p = self.m.addVars(self.cart_loc_v_scooters, vtype=GRB.BINARY, name="p")
# u_iv - position of location i for service vehicle v route
self.u = self.m.addVars(
self.cart_loc_v_not_depot, vtype=GRB.CONTINUOUS, name="u"
)
# l_iv - load (number of scooters) when entering location i
self.l = self.m.addVars(self.cart_loc_v, vtype=GRB.CONTINUOUS, name="l")
self.symmetry = kwargs.get("symmetry", [None])
self.valid_inequalities = kwargs.get("valid_inequalities", [None])
if kwargs.get("setup", True):
self.setup()
def get_parameters(self):
return self._
def setup(self):
self.set_objective()
self.set_constraints()
@abstractmethod
def set_objective(self):
pass
@abstractmethod
def to_string(self, short_name=True):
pass
@staticmethod
@abstractmethod
def get_input_class():
pass
def set_constraints(self):
        # Guarantee that each service vehicle starts and ends at the depot.
self.m.addConstr(
gp.quicksum(
self.x[(self._.depot, j, v)] for j, v in self.cart_loc_v_not_depot
)
== self._.num_service_vehicles,
"must_visit_depot_first",
)
self.m.addConstr(
gp.quicksum(
self.x[(i, self._.depot, v)] for i, v in self.cart_loc_v_not_depot
)
== self._.num_service_vehicles,
"must_visit_depot_end",
)
# Ensure that every location is visited at most once.
self.m.addConstrs(
(
gp.quicksum(self.y[(k, v)] for v in self._.service_vehicles) <= 1
for k in self._.locations
if k != self._.depot
),
"only_one_visit_pr_scooter",
)
        # Ensure that each vehicle's battery capacity is not exceeded
self.m.addConstrs(
(
gp.quicksum(self.y[(k, v)] for k in self._.scooters)
<= self._.battery_capacity[v]
for v in self._.service_vehicles
),
"battery_capacity",
)
        # Guarantee the connectivity of each service vehicle path
self.m.addConstrs(
(
gp.quicksum(self.x[(i, k, v)] for i in self._.locations)
== self.y[(k, v)]
for k, v in self.cart_loc_v
),
"connectivity_inn",
)
self.m.addConstrs(
(
gp.quicksum(self.x[(k, j, v)] for j in self._.locations)
== self.y[(k, v)]
for k, v in self.cart_loc_v
),
"connectivity_out",
)
# Ensure that the length of the paths does not exceed the shift
self.m.addConstrs(
(
gp.quicksum(
self._.time_cost[(i, j)] * self.x[(i, j, v)]
for i, j in self.cart_locs
)
<= self._.shift_duration
for v in self._.service_vehicles
),
"time_constraints",
)
# Ensure that no scooters can be picked up in a demand zone
self.m.addConstrs(
(
gp.quicksum(
self.p[(i, v)]
for i in self._.zone_scooters[z]
if i in self._.scooters
for v in self._.service_vehicles
)
== 0
for z in self._.demand_zones
)
)
# Ensure that we cannot pick up more than the excess scooters in a zone
self.m.addConstrs(
gp.quicksum(
self.p[(i, v)]
for v in self._.service_vehicles
for i in self._.zone_scooters[z]
)
<= self._.deviation_from_optimal_state[z]
for z in self._.supply_zones
)
# Scooter capacity management
self.m.addConstrs(
(
self.l[(i, v)]
+ self.p[(i, v)]
- self.l[(j, v)]
- self._.scooter_capacity[v] * (1 - self.x[(i, j, v)])
<= 0
for i, j, v in self.cart_loc_loc_v
if i in self._.scooters and j != i
),
"vehicle_capacity_pick_up_less",
)
self.m.addConstrs(
(
self.l[(i, v)]
+ self.p[(i, v)]
- self.l[(j, v)]
+ self._.scooter_capacity[v] * (1 - self.x[(i, j, v)])
>= 0
for i, j, v in self.cart_loc_loc_v
if i in self._.scooters and j != i
),
"vehicle_capacity_pick_up_greater",
)
self.m.addConstrs(
(
self.l[(i, v)]
- self.y[(i, v)]
- self.l[(j, v)]
- self._.scooter_capacity[v] * (1 - self.x[(i, j, v)])
<= 0
for i, j, v in self.cart_loc_loc_v
if i in self._.delivery and j != i
),
"vehicle_capacity_delivery_less",
)
self.m.addConstrs(
(
self.l[(i, v)]
- self.y[(i, v)]
- self.l[(j, v)]
+ self._.scooter_capacity[v] * (1 - self.x[(i, j, v)])
>= 0
for i, j, v in self.cart_loc_loc_v
if i in self._.delivery and j != i
),
"vehicle_capacity_delivery_greater",
)
self.m.addConstrs(
(
self.l[(i, v)] <= self._.scooter_capacity[v]
for i, v in self.cart_loc_v
if i != self._.depot
),
"vehicle_capacity_cap",
)
self.m.addConstrs(
(self.l[(self._.depot, v)] == 0 for v in self._.service_vehicles),
"vehicle_capacity_depot_in",
)
self.m.addConstrs(
(
self.l[(i, v)]
- self._.scooter_capacity[v] * (1 - self.x[(self._.depot, i, v)])
<= 0
for i, v in self.cart_loc_v_not_depot
),
"vehicle_capacity_depot_out",
)
# Subtour elimination
self.m.addConstrs(
(
self.u[(i, v)]
<= gp.quicksum(self.x[j, k, v] for j, k in self.cart_locs if j != k)
for i, v in self.cart_loc_v_not_depot
),
"subtours_1",
)
self.m.addConstrs(
(
self.u[i, v] - self.u[j, v] + 1
<= (self._.num_locations - 1) * (1 - self.x[i, j, v])
for i, j, v in self.cart_loc_loc_v
if i != self._.depot and j != self._.depot
),
"subtours_2",
)
        if None not in self.symmetry:
for symmetry_const in self.symmetry:
for i, constr in enumerate(
self.get_symmetry_constraints()[symmetry_const]
):
self.m.addConstrs(constr, f"symmetry{i}_{constr}")
        if None not in self.valid_inequalities:
for valid_const in self.valid_inequalities:
for i, constr in enumerate(
self.get_valid_inequalities_constraints()[valid_const]
):
self.m.addConstrs(constr, f"{valid_const}_{i}")
def optimize_model(self):
self.m.optimize()
def print_solution(self):
# Print solution
for v in self.m.getVars():
if v.x > 0:
print(f"{v.varName}: {v.x}")
print(f"Obj: {self.m.objVal}")
print(f"Obj: {self.m.objVal}")
def print_model(self, delete_file=True):
self.m.write("model.lp")
with open("model.lp") as f:
for line in f.readlines():
print(line)
if delete_file:
os.remove("model.lp")
def get_symmetry_constraints(self):
return {
"number_of_arcs": [
(
(
gp.quicksum(self.x[(i, j, v)] for i, j in self.cart_locs)
>= gp.quicksum(self.x[(i, j, v + 1)] for i, j in self.cart_locs)
)
for v in range(self._.num_service_vehicles - 1)
)
],
"number_of_visits": [
(
(
gp.quicksum(self.y[(i, v)] for i in self._.locations)
>= gp.quicksum(self.y[(i, v + 1)] for i in self._.locations)
)
for v in range(self._.num_service_vehicles - 1)
)
],
"total_time_used": [
(
(
gp.quicksum(
self._.time_cost[(i, j)] * self.x[(i, j, v)]
for i, j in self.cart_locs
)
>= gp.quicksum(
self._.time_cost[(i, j)] * self.x[(i, j, v + 1)]
for i, j in self.cart_locs
)
)
for v in range(self._.num_service_vehicles - 1)
)
],
"advanced": [
(
gp.quicksum(self.y[(i, v)] for v in range(i)) <= 1
for i in range(1, self._.num_service_vehicles + 1)
),
(
self.y[(i, v)]
<= gp.quicksum(
self.y[(p, s)]
for p in range(v - 1, i)
for s in range(v - 1, min(p, self._.num_service_vehicles))
)
for i in self._.locations
if i not in [0, 1]
for v in self._.service_vehicles
if v != 0
),
],
}
def get_valid_inequalities_constraints(self):
return {
"back_and_forth": [
(
gp.quicksum(self.x[(i, j, v)] for v in self._.service_vehicles)
+ gp.quicksum(self.x[(j, i, v)] for v in self._.service_vehicles)
<= 1
for i, j in self.cart_locs
if i < j
)
],
"subtour_in_set": [
(
gp.quicksum(
self.x[(i, j, v)]
for i in s
for j in s
if i != j
for v in self._.service_vehicles
)
<= len(s) - 1
for s in self.subsets
)
],
"arcs_less_then_locations": [
(
gp.quicksum(self.x[(i, j, v)] for i, j, v in self.cart_loc_loc_v)
<= self._.num_locations + self._.num_service_vehicles
for i in range(1)
)
],
}
def create_subsets(self, min_size, max_size, radius=5):
subsets = []
for i in range(min_size, max_size + 1):
for j in range(1, self._.num_locations):
sets_for_j = []
for k in range(j, self._.num_locations):
if self._.time_cost[(j, k)] <= radius:
sets_for_j.append(k)
if len(sets_for_j) > 0:
subsets.extend(list(combinations(sets_for_j, i)))
return subsets
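# Illustrative sketch only (not part of the project): the minimal surface a
# concrete model has to implement on top of BaseModel.
class _ExampleModel(BaseModel):
    def set_objective(self):
        # e.g. maximize the number of scooters picked up
        self.m.setObjective(
            gp.quicksum(self.p[(i, v)] for i, v in self.cart_loc_v_scooters),
            GRB.MAXIMIZE,
        )
    def to_string(self, short_name=True):
        return "EX" if short_name else "ExampleModel"
    @staticmethod
    def get_input_class():
        return BaseModelInput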
|
'''
'''
import argparse
import hashlib
import logging
import os, os.path
import sys
import time
import zipfile
logging.basicConfig(
format='%(levelname)8s:: %(message)s'
)
logger = logging.getLogger('debug')
try:
from io import StringIO
except ImportError:
try:
from cStringIO import StringIO
except ImportError:
        from StringIO import StringIO
def add_entry_to_archive (path, zfp, zip_manifest, stats):
abspath = os.path.abspath(path)
if os.path.isdir(abspath):
for entry in os.listdir(abspath):
entry_path = os.path.join(abspath, entry)
add_entry_to_archive(entry_path, zfp, zip_manifest, stats)
elif os.path.isfile(abspath):
stat = os.stat(abspath)
filesize = stat.st_size
try:
with open(abspath, 'rb') as fp:
checksum = hashlib.sha1(fp.read()).hexdigest()
        except (IOError, OSError):
logger.error('Error opening `%s`', abspath)
stats['errors']['files'].append(abspath)
else:
zip_manifest.write(u'{} {} {}\n'.format(checksum, filesize, abspath))
zfp.write(abspath)
stats['total_size'] += filesize
logger.debug('%s %s %s', checksum, filesize, abspath)
else:
stats['errors']['unknown'].append(abspath)
def add_files_from_manifest (manifest, zfp, zip_manifest, stats):
try:
with open(manifest) as fp:
for line in fp:
line = line.strip()
if line == '' or line[0] in ['#', ';']:
continue
add_entry_to_archive(line, zfp, zip_manifest, stats)
except IOError:
logger.error('Error opening manifest file `%s`', manifest)
stats['errors']['manifests'].append(manifest)
def format_filesize (filesize):
formatted = ''
for suffix in ['B', 'KB', 'MB', 'GB', 'TB']:
if filesize < 1024:
formatted = '{0:.2f} {1:}'.format(filesize, suffix)
break
filesize /= 1024.0
return formatted
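# Worked examples (hedged):
#   format_filesize(512)         -> '512.00 B'
#   format_filesize(2048)        -> '2.00 KB'
#   format_filesize(5 * 1024**2) -> '5.00 MB'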
def main ():
parser = argparse.ArgumentParser()
parser.add_argument('-q', '--quiet', action='store_true', default=False)
parser.add_argument('zipfile')
parser.add_argument('manifest', nargs='+')
args = parser.parse_args()
filename, extension = os.path.splitext(args.zipfile)
if args.quiet:
logger.setLevel('ERROR')
else:
logger.setLevel('DEBUG')
    # Ensure the archive name ends in .zip; the name may contain strftime patterns.
    if extension != '.zip':
        zip_filename = time.strftime('{}.zip'.format(filename))
    else:
        zip_filename = time.strftime(args.zipfile)
if os.path.isfile(zip_filename):
sys.stderr.write('ERROR: {} already exists.\n'.format(zip_filename))
sys.exit(-1)
stats = {
'errors': {
'manifests': [],
'files': [],
'unknown': [],
'dirs': [],
},
'total_size': 0,
}
logger.info('Writing data to %s', zip_filename)
started = time.time()
zip_manifest = StringIO()
with zipfile.ZipFile(zip_filename, 'w', zipfile.ZIP_DEFLATED) as zfp:
for i, manifest in enumerate(args.manifest):
add_files_from_manifest(manifest, zfp, zip_manifest, stats)
zip_manifest.seek(0)
zfp.writestr('MANIFEST/archive.txt',
zip_manifest.getvalue().encode('utf-8'))
delta = time.time() - started
stat = os.stat(zip_filename)
filesize = stat.st_size
ratio = 100 * filesize / float(stats['total_size'])
print('\nDone in {0:.2f} seconds, summary:'.format(delta))
print(' Total filesize: {0:12d} ({1})'.format(stats['total_size'],
format_filesize(stats['total_size'])))
    print('  Archive filesize: {0:12d} ({1}) {2:.1f}%'.format(filesize,
format_filesize(filesize), ratio))
for error_type, entries in stats['errors'].items():
if entries:
print('\n Some entries generated {} errors:'.format(error_type))
for e in entries:
print(' {}'.format(e))
if __name__ == '__main__':
main()
|
import random
from typing import Optional, List
import discord
from redbot.core import commands
from .randomstuff import (
kisslist, slaplist, punchlist, cuddlelist, sadlist, patlist, huglist, licklist, bitelist,
middlefingerlist, twerklist, dancelist, crylist, meowlist, rawrlist, angrylist, shylist,
blushlist, killlist, karatelist, rektlist, hungrylist, thirstylist, happylist, greetlist,
wavelist, hornylist, marrylist, praylist, curselist, smokelist, lewdlist, sleepylist,
lazylist, thinklist, richlist, poorlist, nomlist, pokelist, booplist, highfivelist,
ticklelist, bullylist, toxiclist, trashlist, popcornlist, lovelist, spanklist,
)
class Roleplaying(commands.Cog):
"""
Simple roleplaying cog by Mucski.
"""
def __init__(self, bot):
self.bot = bot
def img_grab(
self,
options: List[str],
action_self: str,
action_targetted: str,
author: discord.Member,
member: Optional[discord.Member],
):
if member is None:
description = f"{author.mention} {action_self}"
else:
description = f"{author.mention} {action_targetted} {member.mention}"
e = discord.Embed(description=description)
img = random.choice(options)
e.set_image(url=img)
return e
@commands.command()
async def kiss(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(kisslist, "sends kisses", "kisses", ctx.author, member))
@commands.command()
async def punch(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(punchlist, "punches", "punches", ctx.author, member))
@commands.command()
async def cuddle(self, ctx, member: discord.Member = None):
await ctx.send(
embed=self.img_grab(cuddlelist, "cuddles", "cuddles with", ctx.author, member)
)
@commands.command()
async def hug(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(huglist, "wants to hug", "hugs", ctx.author, member))
@commands.command()
async def pat(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(patlist, "sends pats", "pats", ctx.author, member))
@commands.command()
async def slap(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(slaplist, "slaps", "slaps", ctx.author, member))
@commands.command()
async def sad(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(sadlist, "is sad", "is sad at", ctx.author, member))
@commands.command()
async def lick(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(licklist, "licks", "licks", ctx.author, member))
@commands.command()
async def bite(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(bitelist, "bites", "bites", ctx.author, member))
@commands.command()
async def middlefinger(self, ctx, member: discord.Member = None):
await ctx.send(
embed=self.img_grab(
middlefingerlist, "flips off everyone", "flips off", ctx.author, member
)
)
@commands.command()
async def twerk(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(twerklist, "twerks", "twerks on", ctx.author, member))
@commands.command()
async def dance(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(dancelist, "dances", "dances with", ctx.author, member))
@commands.command()
async def cry(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(crylist, "cries", "cries", ctx.author, member))
@commands.command()
async def meow(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(meowlist, "meows", "meows at", ctx.author, member))
@commands.command()
async def rawr(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(rawrlist, "rawrs", "rawrs towards", ctx.author, member))
@commands.command()
async def angry(self, ctx, member: discord.Member = None):
await ctx.send(
embed=self.img_grab(angrylist, "is angry", "is angry at", ctx.author, member)
)
@commands.command()
async def shy(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(shylist, "is shy", "is shy at", ctx.author, member))
@commands.command()
async def blush(self, ctx, member: discord.Member = None):
await ctx.send(
embed=self.img_grab(blushlist, "blushes", "blushes because of", ctx.author, member)
)
@commands.command()
async def kill(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(killlist, "kills", "kills", ctx.author, member))
@commands.command()
async def karate(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(karatelist, "kicks", "kicks", ctx.author, member))
@commands.command()
async def rekt(self, ctx, member: discord.Member = None):
await ctx.send(
embed=self.img_grab(rektlist, "says: Get rekt!", "rekts", ctx.author, member)
)
@commands.command()
async def hungry(self, ctx, member: discord.Member = None):
await ctx.send(
embed=self.img_grab(hungrylist, "is hungry", "is hungry for", ctx.author, member)
)
@commands.command()
async def thirsty(self, ctx, member: discord.Member = None):
await ctx.send(
embed=self.img_grab(thirstylist, "is thirsty", "is thirsty for", ctx.author, member)
)
@commands.command()
async def happy(self, ctx, member: discord.Member = None):
await ctx.send(
embed=self.img_grab(happylist, "is happy", "is happy for", ctx.author, member)
)
@commands.command()
async def greet(self, ctx, member: discord.Member = None):
await ctx.send(
embed=self.img_grab(greetlist, "greets everyone 👋", "greets", ctx.author, member)
)
@commands.command()
async def wave(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(wavelist, "waves", "waves at", ctx.author, member))
@commands.command()
@commands.is_nsfw()
async def horny(self, ctx, member: discord.Member = None):
await ctx.send(
embed=self.img_grab(hornylist, "is horny", "is horny for", ctx.author, member)
)
@commands.command()
async def marry(self, ctx, member: discord.Member = None):
await ctx.send(
embed=self.img_grab(marrylist, "wants to marry", "marries", ctx.author, member)
)
@commands.command()
async def pray(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(praylist, "prays", "prays to", ctx.author, member))
@commands.command()
async def curse(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(curselist, "curses", "curses at", ctx.author, member))
@commands.command()
async def smoke(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(smokelist, "smokes", "smokes with", ctx.author, member))
@commands.command()
@commands.is_nsfw()
async def lewd(self, ctx, member: discord.Member = None):
await ctx.send(
embed=self.img_grab(lewdlist, "feels lewd", "feels lewd towards", ctx.author, member)
)
@commands.command(aliases=["tired"])
async def sleepy(self, ctx, member: discord.Member = None):
await ctx.send(
embed=self.img_grab(sleepylist, "is sleepy", "wants to sleep with", ctx.author, member)
)
@commands.command()
async def lazy(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(lazylist, "is lazy", "is lazy", ctx.author, member))
@commands.command()
async def think(self, ctx, member: discord.Member = None):
await ctx.send(
embed=self.img_grab(thinklist, "is thinking", "is thinking with", ctx.author, member)
)
@commands.command()
async def rich(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(richlist, "is rich", "is rich", ctx.author, member))
@commands.command()
async def poor(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(poorlist, "is poor", "is poor", ctx.author, member))
@commands.command()
async def nom(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(nomlist, "noms", "noms on", ctx.author, member))
@commands.command()
async def poke(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(pokelist, "pokes", "pokes", ctx.author, member))
@commands.command()
async def boop(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(booplist, "boops", "boops", ctx.author, member))
@commands.command()
async def highfive(self, ctx, member: discord.Member = None):
await ctx.send(
embed=self.img_grab(highfivelist, "high fives", "high fives", ctx.author, member)
)
@commands.command()
async def tickle(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(ticklelist, "tickles", "tickles", ctx.author, member))
@commands.command()
async def bully(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(bullylist, "is a bully", "bullies", ctx.author, member))
@commands.command()
async def toxic(self, ctx, member: discord.Member = None):
await ctx.send(
embed=self.img_grab(toxiclist, "is toxic", "is toxic towards", ctx.author, member)
)
@commands.command()
async def trash(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(trashlist, "trashes", "trashes", ctx.author, member))
@commands.command()
async def popcorn(self, ctx, member: discord.Member = None):
await ctx.send(
embed=self.img_grab(
popcornlist, "is eating popcorn", "is eating popcorn with", ctx.author, member
)
)
@commands.command()
async def love(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(lovelist, "loves", "loves", ctx.author, member))
@commands.command()
@commands.is_nsfw()
async def spank(self, ctx, member: discord.Member = None):
await ctx.send(embed=self.img_grab(spanklist, "spanks", "spanks", ctx.author, member))
|
from selenium import webdriver
from time import sleep
# import xlrd
import random
import os
import time
import sys
sys.path.append("..")
# import email_imap as imap
# import json
import re
# from urllib import request, parse
from selenium.webdriver.support.ui import Select
# import base64
import Chrome_driver
import email_imap as imap
import name_get
import db
import selenium_funcs
import Submit_handle
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
import pyrobot
def web_submit_(submit,chrome_driver,debug=0):
# test
if debug == 1:
site = 'http://www.baidu.com'
submit['Site'] = site
# js = 'window.location.href="%s"'(submit['Site'])
chrome_driver.get(submit['Site'])
chrome_driver.maximize_window()
chrome_driver.refresh()
sleep(5000)
def test():
# db.email_test()
# date_of_birth = Submit_handle.get_auto_birthday('')
Mission_list = ['10023']
excel = 'Ukchoujiang'
Excel_name = [excel,'']
Email_list = ['hotmail.com','outlook.com','yahoo.com','aol.com','gmail.com']
submit = db.read_one_excel(Mission_list,Excel_name,Email_list)
# [print(item,':',submit[excel][item]) for item in submit[excel] if submit[excel][item]!=None]
[print(item,':',submit[excel][item]) for item in submit[excel] if item == 'homephone']
submit['Mission_Id'] = '10023'
phone = submit[excel]['homephone']
phone = Submit_handle.get_uk_phone1(phone)
print(phone)
chrome_driver = Chrome_driver.get_chrome(submit)
web_submit(submit,chrome_driver,1)
def web_submit(submit,chrome_driver,debug=0):
# Mission_list = ['10023']
# excel = 'Ukchoujiang'
# Excel_name = [excel,'']
# Email_list = ['hotmail.com','outlook.com','yahoo.com','aol.com','gmail.com']
# submit = db.read_one_excel(Mission_list,Excel_name,Email_list)
# [print(item,':',submit[excel][item]) for item in submit[excel] if submit[excel][item]!=None]
# [print(item,':',submit[excel][item]) for item in submit[excel] if item == 'homephone']
# submit['Mission_Id'] = '10023'
# chrome_driver = Chrome_driver.get_chrome(submit)
# url = 'http://dategd.com/index.html'
# chrome_driver.close()
# chrome_driver = Chrome_driver.get_chrome_normal(submit)
submit['Site'] = 'http://dategd.com/index.html'
sleep(30)
flag = db.update_plan_status(1,submit['ID'])
print("submit['ID']",submit['ID'])
print('Status in Mission_Id 10064 after set:',flag)
chrome_driver.get(submit['Site'])
sleep(3)
handles=chrome_driver.window_handles
print(handles)
page_source = chrome_driver.page_source
a = page_source.find('Block</span>')
b = page_source.find('id="',a)
c = page_source.find('"',b+4)
element = page_source[b+4:c]
print(element)
# print(page_source)
handle = chrome_driver.current_window_handle
# '//*[@id="_nk54g1x38z2o"]/div[3]/span[2]'
for i in range(10):
try:
chrome_driver.find_element_by_id(element).click()
break
except:
sleep(1)
# chrome_driver.find_element_by_partial_link_text('Allow').click()
sleep(3)
handles=chrome_driver.window_handles
print(handles)
for i in handles:
if i != handle:
chrome_driver.switch_to.window(i)
# url = 'https://newsadsppush.com/v1/iframe-vac/63581.html?webmaster_id=63581&host=dategd.com&&isIframe=true&deviceId=t_dz2icinqupdm&locker_source=direct&n=1'
# chrome_driver.get(url)
for i in range(30):
try:
chrome_driver.find_element_by_xpath('/html/body/div/div').click()
break
except:
sleep(1)
print('==========')
robot = pyrobot.Robot()
Keys_ = pyrobot.Keys()
# robot.key_press(Keys.tab)
# robot.key_press(Keys.tab)
# robot.key_press(Keys.tab)
ActionChains(chrome_driver).key_down(Keys.TAB).key_up(Keys.TAB).perform()
ActionChains(chrome_driver).key_down(Keys.TAB).key_up(Keys.TAB).perform()
ActionChains(chrome_driver).key_down(Keys.TAB).key_up(Keys.TAB).perform()
robot.key_press(Keys_.enter)
# ActionChains(chrome_driver).key_down(Keys.TAB).key_up(Keys.TAB).perform()
# ActionChains(chrome_driver).key_down(Keys.TAB).key_up(Keys.TAB).perform()
# ActionChains(chrome_driver).key_down(Keys.TAB).key_up(Keys.TAB).send_keys(Keys.ENTER).perform()
# ActionChains(chrome_driver).send_keys(Keys.ENTER).perform()
print('++++')
sleep(30)
# isSucess=chrome_driver.switch_to.alert.text
# print(isSucess)
    # # Confirm (accept the alert)
# chrome_driver.switch_to.alert.accept()
# chrome_driver.find_element_by_partial_link_text('Allow').click()
sleep(30)
return 0
def cpl():
submit = {}
chrome_driver = Chrome_driver.get_chrome_normal()
web_submit(submit,chrome_driver)
if __name__=='__main__':
cpl()
|
from __future__ import absolute_import, print_function
import tweepy
import json
import folium
from folium import plugins
import time
import sys
import csv
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import threading
# Get authentication keys from file
f = open("keys.txt", "r")
# == OAuth Authentication ==
consumer_key = f.readline().rstrip()
consumer_secret = f.readline().rstrip()
access_token = f.readline().rstrip()
access_token_secret = f.readline().rstrip()
f.close()
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Read data from csv and add to heatmap
def HeatMap():
try:
df = pd.read_csv('data.csv')
arr = df[['latitude', 'longitude']].values
m = folium.Map(location=[34.0522, -118.2437], zoom_start=10)
m.add_child(plugins.HeatMap(arr, radius=15))
m.save('map.html')
print("Map generated")
except Exception as e:
print(e)
# TODO PING FUNCTION
# TODO CLEAN CSV
# Run func every `sec` seconds by re-arming a threading.Timer
def set_interval(func, sec):
def func_wrapper():
set_interval(func, sec)
func()
t = threading.Timer(sec, func_wrapper)
t.start()
return t
# Refresh heatmap every x seconds
x = 60
set_interval(HeatMap, x)
# Keep the csv file open for the lifetime of the stream; a `with` block would
# close it before the listener below ever writes a row.
csv_file = open('data.csv', mode='a')
fieldnames = ['latitude', 'longitude', 'date_created']
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
class StreamListener(tweepy.StreamListener):
def __init__(self):
super(StreamListener, self).__init__()
def on_connect(self):
print("Connected to streaming API")
def on_data(self, data):
try:
# Get data from tweet
datajson = json.loads(data)
created_at = datajson["created_at"]
# Longitude first, then latitude
if(datajson["coordinates"]):
# Coordinates from tweet
coordinates = datajson["coordinates"]["coordinates"]
latitude = coordinates[1]
longitude = coordinates[0]
# Check if latitude and longitude are floats
if(isinstance(latitude, float) and isinstance(longitude, float)):
# Write to csv file
writer.writerow({'latitude': latitude, 'longitude': longitude, 'date_created': created_at})
# Ping(latitude, longitude)
print(latitude, longitude)
return True
except Exception as e:
print(e)
def on_error(self, status_code):
        print('Encountered error with status code:', status_code, file=sys.stderr)
return False
# return True # Don't kill the stream
def on_timeout(self):
        print('Timeout...', file=sys.stderr)
return False
# return True # Don't kill the stream
# Streams tweets from box around Los Angeles, CA area
GEOBOX_LA = [-119.2279,33.4263,-116.8997,34.7189]
sapi = tweepy.streaming.Stream(auth, StreamListener())
sapi.filter(locations=GEOBOX_LA)
# SHOULD NOT GET HERE, STREAM CRASHED
print("Stream stopped")
|
#!/usr/bin/env python3
# coding: utf-8
import os
__author__ = 'Hanzhiyun'
print('中文测试正常!')  # "Chinese output works!" -- checks the console handles UTF-8
os.system('pause')
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import inspect
import os
import re
import shlex
from enum import Enum
from typing import Iterable, Pattern, Sequence
from pants.option.errors import ParseError
from pants.util.eval import parse_expression
from pants.util.memo import memoized_method
from pants.util.strutil import softwrap
class UnsetBool:
"""A type that can be used as the default value for a bool typed option to indicate un-set.
In other words, `bool`-typed options with a `default=UnsetBool` that are not explicitly set will
have the value `None`, enabling a tri-state.
:API: public
"""
def __init__(self) -> None:
raise NotImplementedError(
"UnsetBool cannot be instantiated. It should only be used as a sentinel type."
)
@classmethod
def coerce_bool(cls, value: type[UnsetBool] | bool | None, default: bool) -> bool:
if value is None:
return default
if value is cls:
return default
assert isinstance(value, bool)
return value
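def _coerce_bool_examples() -> None:
    """Hedged sketch: the tri-state behaviour of coerce_bool."""
    assert UnsetBool.coerce_bool(None, default=True) is True
    assert UnsetBool.coerce_bool(UnsetBool, default=False) is False
    assert UnsetBool.coerce_bool(True, default=False) is True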
def target_option(s: str) -> str:
"""Same type as 'str', but indicates a single target spec.
:API: public
TODO(stuhood): Eagerly convert these to Addresses: see https://rbcommons.com/s/twitter/r/2937/
"""
return s
def _normalize_directory_separators(s: str) -> str:
"""Coalesce runs of consecutive instances of `os.sep` in `s`, e.g. '//' -> '/' on POSIX.
The engine will use paths or target addresses either to form globs or to string-match against, and
including the directory separator '/' multiple times in a row e.g. '//' produces an equivalent
glob as with a single '/', but produces a different actual string, which will cause the engine to
fail to glob file paths or target specs correctly.
TODO: give the engine more control over matching paths so we don't have to sanitize the input!
"""
return os.path.normpath(s)
def dir_option(s: str) -> str:
"""Same type as 'str', but indicates string represents a directory path.
:API: public
"""
return _normalize_directory_separators(s)
def file_option(s: str) -> str:
"""Same type as 'str', but indicates string represents a filepath.
:API: public
"""
return _normalize_directory_separators(s)
def dict_with_files_option(s):
"""Same as 'dict', but fingerprints the file contents of any values which are file paths.
For any value which matches the path of a file on disk, the file path is not fingerprinted -- only
its contents.
:API: public
"""
return DictValueComponent.create(s)
def shell_str(s: str) -> str:
"""A member_type for strings that should be split upon parsing through `shlex.split()`.
For example, the option value `--foo --bar=val` would be split into `['--foo', '--bar=val']`,
and then the parser will safely merge this expanded list with any other values defined for the
option.
:API: public
"""
return s
def workspace_path(s: str) -> str:
"""Same type as 'str', but indicates string represents a directory path that is relative to
either the build root, or a BUILD file if prefix with `./`.
:API: public
"""
if s.startswith("/"):
raise ParseError(
softwrap(
f"""
Invalid value: `{s}`. Expected a relative path, optionally in the form
`./relative/path` to make it relative to the BUILD files rather than the build root.
"""
)
)
return s
def memory_size(s: str | int | float) -> int:
"""A string that normalizes the suffixes {GiB, MiB, KiB, B} into the number of bytes.
:API: public
"""
if isinstance(s, (int, float)):
return int(s)
if not s:
raise ParseError("Missing value.")
original = s
s = s.lower().strip()
try:
return int(float(s))
except ValueError:
pass
invalid = ParseError(
softwrap(
f"""
Invalid value: `{original}`. Expected either a bare number or a number with one of
`GiB`, `MiB`, `KiB`, or `B`.
"""
)
)
def convert_to_bytes(power_of_2) -> int:
try:
return int(float(s[:-3]) * (2**power_of_2)) # type: ignore[index]
        except (TypeError, ValueError):
raise invalid
if s.endswith("gib"):
return convert_to_bytes(30)
elif s.endswith("mib"):
return convert_to_bytes(20)
elif s.endswith("kib"):
return convert_to_bytes(10)
elif s.endswith("b"):
try:
return int(float(s[:-1]))
        except (TypeError, ValueError):
raise invalid
raise invalid
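def _memory_size_examples() -> None:
    """Hedged sketch: worked examples of the suffix normalization above."""
    assert memory_size("1GiB") == 2**30
    assert memory_size("256MiB") == 256 * 2**20
    assert memory_size("10") == 10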
def _convert(val, acceptable_types):
"""Ensure that val is one of the acceptable types, converting it if needed.
:param val: The value we're parsing (either a string or one of the acceptable types).
:param acceptable_types: A tuple of expected types for val.
:returns: The parsed value.
:raises :class:`pants.options.errors.ParseError`: if there was a problem parsing the val as an
acceptable type.
"""
if isinstance(val, acceptable_types):
return val
try:
return parse_expression(val, acceptable_types)
except ValueError as e:
raise ParseError(str(e)) from e
def _convert_list(val, member_type, is_enum):
converted = _convert(val, (list, tuple))
if not is_enum:
return converted
return [item if isinstance(item, member_type) else member_type(item) for item in converted]
def _flatten_shlexed_list(shlexed_args: Sequence[str]) -> list[str]:
"""Convert a list of shlexed args into a flattened list of individual args.
For example, ['arg1 arg2=foo', '--arg3'] would be converted to ['arg1', 'arg2=foo', '--arg3'].
"""
return [arg for shlexed_arg in shlexed_args for arg in shlex.split(shlexed_arg)]
class ListValueComponent:
"""A component of the value of a list-typed option.
One or more instances of this class can be merged to form a list value.
A component consists of values to append and values to filter while constructing the final list.
Each component may either replace or modify the preceding component. So that, e.g., a config
file can append to and/or filter the default value list, instead of having to repeat most
of the contents of the default value list.
"""
REPLACE = "REPLACE"
MODIFY = "MODIFY"
# We use a regex to parse the comma-separated lists of modifier expressions (each of which is
# a list or tuple literal preceded by a + or a -). Note that these expressions are technically
# a context-free grammar, but in practice using this regex as a heuristic will work fine. The
# values that could defeat it are extremely unlikely to be encountered in practice.
# If we do ever encounter them, we'll have to replace this with a real parser.
@classmethod
@memoized_method
def _get_modifier_expr_re(cls) -> Pattern[str]:
# Note that the regex consists of a positive lookbehind assertion for a ] or a ),
# followed by a comma (possibly surrounded by whitespace), followed by a
# positive lookahead assertion for [ or (. The lookahead/lookbehind assertions mean that
# the bracket/paren characters don't get consumed in the split.
return re.compile(r"(?<=\]|\))\s*,\s*(?=[+-](?:\[|\())")
@classmethod
def _split_modifier_expr(cls, s: str) -> list[str]:
# This check ensures that the first expression (before the first split point) is a modification.
if s.startswith("+") or s.startswith("-"):
return cls._get_modifier_expr_re().split(s)
return [s]
@classmethod
def merge(cls, components: Iterable[ListValueComponent]) -> ListValueComponent:
"""Merges components into a single component, applying their actions appropriately.
This operation is associative: M(M(a, b), c) == M(a, M(b, c)) == M(a, b, c).
"""
# Note that action of the merged component is MODIFY until the first REPLACE is encountered.
# This guarantees associativity.
action = cls.MODIFY
appends = []
filters = []
for component in components:
if component._action is cls.REPLACE:
appends = component._appends
filters = component._filters
action = cls.REPLACE
elif component._action is cls.MODIFY:
appends.extend(component._appends)
filters.extend(component._filters)
else:
raise ParseError(f"Unknown action for list value: {component._action}")
return cls(action, appends, filters)
def __init__(self, action: str, appends: list, filters: list) -> None:
self._action = action
self._appends = appends
self._filters = filters
@property
def val(self) -> list:
ret = list(self._appends)
for x in self._filters:
# Note: can't do ret.remove(x) because that only removes the first instance of x.
ret = [y for y in ret if y != x]
return ret
@property
def action(self):
return self._action
@classmethod
def create(cls, value, member_type=str) -> ListValueComponent:
"""Interpret value as either a list or something to extend another list with.
Note that we accept tuple literals, but the internal value is always a list.
:param value: The value to convert. Can be an instance of ListValueComponent, a list, a tuple,
a string representation of a list or tuple (possibly prefixed by + or -
indicating modification instead of replacement), or any allowed member_type.
May also be a comma-separated sequence of modifications.
"""
if isinstance(value, cls): # Ensure idempotency.
return value
if isinstance(value, bytes):
value = value.decode()
if isinstance(value, str):
comma_separated_exprs = cls._split_modifier_expr(value)
if len(comma_separated_exprs) > 1:
return cls.merge([cls.create(x) for x in comma_separated_exprs])
action = cls.MODIFY
appends: Sequence[str] = []
filters: Sequence[str] = []
is_enum = inspect.isclass(member_type) and issubclass(member_type, Enum)
if isinstance(value, (list, tuple)): # Ensure we can handle list-typed default values.
action = cls.REPLACE
appends = value
elif value.startswith("[") or value.startswith("("):
action = cls.REPLACE
appends = _convert_list(value, member_type, is_enum)
elif value.startswith("+[") or value.startswith("+("):
appends = _convert_list(value[1:], member_type, is_enum)
elif value.startswith("-[") or value.startswith("-("):
filters = _convert_list(value[1:], member_type, is_enum)
elif is_enum and isinstance(value, str):
appends = _convert_list([value], member_type, True)
elif isinstance(value, str):
appends = [value]
else:
appends = _convert(f"[{value}]", list)
if member_type == shell_str:
appends = _flatten_shlexed_list(appends)
filters = _flatten_shlexed_list(filters)
return cls(action, list(appends), list(filters))
def __repr__(self) -> str:
return f"{self._action} +{self._appends} -{self._filters}"
class DictValueComponent:
"""A component of the value of a dict-typed option.
One or more instances of this class can be merged to form a dict value.
Each component may either replace or extend the preceding component. So that, e.g., a config
file can extend the default value of a dict, instead of having to repeat it.
"""
REPLACE = "REPLACE"
EXTEND = "EXTEND"
@classmethod
def merge(cls, components: Iterable[DictValueComponent]) -> DictValueComponent:
"""Merges components into a single component, applying their actions appropriately.
This operation is associative: M(M(a, b), c) == M(a, M(b, c)) == M(a, b, c).
"""
# Note that action of the merged component is EXTEND until the first REPLACE is encountered.
# This guarantees associativity.
action = cls.EXTEND
val = {}
for component in components:
if component.action is cls.REPLACE:
val = component.val
action = cls.REPLACE
elif component.action is cls.EXTEND:
val.update(component.val)
else:
raise ParseError(f"Unknown action for dict value: {component.action}")
return cls(action, val)
def __init__(self, action: str, val: dict) -> None:
self.action = action
self.val = val
@classmethod
def create(cls, value) -> DictValueComponent:
"""Interpret value as either a dict or something to extend another dict with.
:param value: The value to convert. Can be an instance of DictValueComponent, a dict,
or a string representation (possibly prefixed by +) of a dict.
"""
if isinstance(value, bytes):
value = value.decode()
if isinstance(value, cls): # Ensure idempotency.
action = value.action
val = value.val
elif isinstance(value, dict): # Ensure we can handle dict-typed default values.
action = cls.REPLACE
val = value
elif value.startswith("{"):
action = cls.REPLACE
val = _convert(value, dict)
elif value.startswith("+{"):
action = cls.EXTEND
val = _convert(value[1:], dict)
else:
raise ParseError(f"Invalid dict value: {value}")
return cls(action, dict(val))
def __repr__(self) -> str:
return f"{self.action} {self.val}"
|
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import user_passes_test
def super_user_required(view_func=None,
redirect_field_name=REDIRECT_FIELD_NAME,
login_url='login'):
"""
Decorator for views that checks that the user is logged in and is a super
user, redirecting to the login page if necessary.
    This is a modified version of the @staff_member_required decorator.
"""
actual_decorator = user_passes_test(
lambda u: u.is_active and u.is_superuser,
login_url=login_url,
redirect_field_name=redirect_field_name
)
if view_func:
return actual_decorator(view_func)
return actual_decorator
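# Usage sketch (assumes a configured Django project; `secret_view` is an
# illustrative name, not from this file):
#
# from django.http import HttpResponse
#
# @super_user_required
# def secret_view(request):
#     return HttpResponse("superusers only")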
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-03 09:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Bonus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('client_code', models.CharField(max_length=50, verbose_name='Код')),
('balance', models.DecimalField(decimal_places=2, max_digits=15, verbose_name='Баланс')),
('expires', models.DateTimeField(default=None, null=True, verbose_name='Дата действия')),
],
options={
'verbose_name': 'Бонус',
'verbose_name_plural': 'Бонусы',
},
),
migrations.CreateModel(
name='Card',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('client_code', models.CharField(max_length=50, verbose_name='Код')),
('number', models.CharField(max_length=15, verbose_name='Номер')),
('embossed_number', models.CharField(max_length=6, verbose_name='Эмбоссированный номер')),
],
options={
'verbose_name': 'Карта',
'verbose_name_plural': 'Карты',
},
),
migrations.CreateModel(
name='Client',
fields=[
('guid', models.CharField(max_length=50, verbose_name='GUID')),
('code', models.CharField(max_length=50, primary_key=True, serialize=False, verbose_name='Код')),
('full_name', models.CharField(max_length=100, verbose_name='ФИО')),
],
options={
'verbose_name': 'Клиент',
'verbose_name_plural': 'Клиенты',
},
),
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('client_code', models.CharField(db_index=True, max_length=50, verbose_name='Код клиента')),
('type', models.CharField(choices=[('email', 'E-mail'), ('phone', 'Phone')], max_length=5, verbose_name='Тип')),
('value', models.CharField(max_length=100, verbose_name='Значение')),
],
options={
'verbose_name': 'Контакт',
'verbose_name_plural': 'Контакты',
},
),
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dt_from', models.DateTimeField(verbose_name='Дата начала')),
('dt_to', models.DateTimeField(db_index=True, verbose_name='Дата окончания')),
('imported', models.BooleanField(default=False, verbose_name='Импортирован')),
('name', models.CharField(max_length=60, verbose_name='Название')),
],
options={
'ordering': ['-dt_to'],
'verbose_name': 'Файл выгрузки',
'verbose_name_plural': 'Файлы выгрузки',
},
),
migrations.CreateModel(
name='PrechargedCard',
fields=[
('number', models.CharField(max_length=15, primary_key=True, serialize=False, verbose_name='Номер')),
('embossed_number', models.CharField(max_length=6, unique=True, verbose_name='Эмбоссированный номер')),
],
options={
'verbose_name': 'Предначисленная карта',
'verbose_name_plural': 'Предначисленные карты',
},
),
migrations.AlterIndexTogether(
name='contact',
index_together=set([('type', 'value')]),
),
migrations.AlterIndexTogether(
name='bonus',
index_together=set([('client_code', 'expires')]),
),
]
|
from datetime import date, timedelta
from pandas_datareader import data as pdr
import fix_yahoo_finance as yf
import os
import pandas
import util
import time
yf.pdr_override()
startdate = date.today() - timedelta(days=4)
def getDataFromYahoo(astock):
data = None
try:
data = pdr.get_data_yahoo([astock], start=str(startdate.isoformat()), end=str(date.today().isoformat()))
    except Exception:
        try:
            # Retry once; the Yahoo endpoint occasionally fails on the first request.
            data = pdr.get_data_yahoo([astock], start=str(startdate.isoformat()), end=str(date.today().isoformat()))
except Exception as e:
print (str(e))
return None
data.drop(columns = ["Adj Close", "Volume"], inplace=True)
for idx,row in data.iterrows():
for label in ["Open", "Close", "High", "Low"]:
data.at[idx, label] = round(data.at[idx, label], 3)
return data
pulled = False
def updateCsv(astock, directory = "../new"):
global pulled
path = "{}/{}/{}.csv".format(os.getcwd(), directory, astock)
loaded = None
# last = str(time.ctime(os.path.getmtime(path)))
# if "Feb 27" in last:
# return
if not os.path.exists(path):
util.pullNewCsvFromYahoo([astock], directory)
pulled = True
return
loaded = pandas.read_csv(path)
lastdate = loaded.tail(1)["Date"].item()
data = getDataFromYahoo(astock)
if data is None:
return
appending = False
for idx,row in data.iterrows():
cdate = str(idx.to_pydatetime()).split(" ")[0]
if appending:
with open(path, "a") as f:
opend = data.at[idx, "Open"]
high = data.at[idx, "High"]
low = data.at[idx, "Low"]
closed = data.at[idx, "Close"]
avg = round((float(opend) + float(high) +
float(low) + float(closed))/4, 4)
f.write("{},{},{},{},{},{}\n".format(cdate, opend, high, low, closed, avg))
if cdate == lastdate:
appending = True
stocks = util.getStocks("IVV", andEtfs=True)
#for astock in stocks:
# updateCsv(astock)
util.pullNewCsvFromYahoo(stocks, "../new")
# updateCsv(astock, directory = "ijh")
#if pulled:
util.saveJsonData(stocks, "ijh")
|
from eth_rpc_client import Client # ethereum-rpc-client
class Eth(object):
def __init__(self,rpc_port):
self.rpc_ip = '127.0.0.1'
self.rpc_port = rpc_port
self.rpc_eth = Client(self.rpc_ip,self.rpc_port)
def getnewaddress(self):
return self.rpc_eth.get_coinbase()
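# Usage sketch (assumes a local Ethereum node with its JSON-RPC interface
# listening on the given port; 8545 is the conventional default):
#
# eth = Eth(8545)
# print(eth.getnewaddress())  # returns the node's coinbase address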
|
from flask import Flask, render_template,request
app = Flask(__name__)
@app.route('/',methods=["GET"])
def inicio():
return render_template("formulario.html")
@app.route("/procesar", methods=["post"])
def procesar_formulario():
passwd = request.form.get("pass_control")
if passwd == "asdasd":
return render_template("datos.html", datos=request.form)
else:
return render_template("error.html", error="Contraseña incorrecta")
app.run(debug=True)
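# With the app running locally, the form endpoint can be exercised from the
# command line (field name taken from the code above; assumes the Flask
# default port 5000):
#
#   curl -X POST -d "pass_control=asdasd" http://127.0.0.1:5000/procesar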
|
import json
import os, django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Adang_project.settings")
django.setup()
from mainapp.models import Product, Category
from redis import Redis
red = Redis(host='192.168.111.128', port=7000)
# red.set("name", "Mr_lee")
# red.set("age", 18)
#
# age = red.get("age")
# print(age)
#
# red.lpush("hobby1", "football", "basketball")
# hobby = red.lrange("hobby1", 0, -1)
# print(hobby)
def mydefault(u):
    # JSON fallback serializer: converts Category model instances to plain dicts.
    if isinstance(u, Category):
        return {"id": u.id, "name": u.name, "parent_id": u.parent_id}
product = list(Category.objects.all().values())
# print(product)
print(type(product))
user_dump = json.dumps(product,default=mydefault)
# print(user_dump)
# print(type(user_dump))
#
#
red.set('userlist',user_dump)
s=red.get("userlist")
print(s)
print(type(s))
results = json.loads(s.decode("utf-8"))
print(type(results))
print(results)
|
"""
Copyright 1999 Illinois Institute of Technology
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL ILLINOIS INSTITUTE OF TECHNOLOGY BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of Illinois Institute
of Technology shall not be used in advertising or otherwise to promote
the sale, use or other dealings in this Software without prior written
authorization from Illinois Institute of Technology.
"""
import sys
import json
import os
import traceback
from musclex import __version__
try:
from ..utils.file_manager import getImgFiles
from ..modules.EquatorImage import EquatorImage
from ..utils.image_processor import *
from ..csv_manager import EQ_CSVManager
except: # for coverage
from utils.file_manager import getImgFiles
from modules.EquatorImage import EquatorImage
from utils.image_processor import *
from csv_manager import EQ_CSVManager
class EquatorWindowh:
"""
Window displaying all information of a selected image.
    This window contains 3 tabs: image, fitting, results.
"""
def __init__(self, filename, inputsettings, delcache, lock=None, dir_path=None, imgList=None, currentFileNumber=None, fileList=None, ext=None, settingspath=os.path.join('musclex', 'settings', 'eqsettings.json')):
"""
:param filename: selected file name
:param inputsettings: flag for input setting file
:param delcache: flag for deleting cache
:param settingspath: setting file directory
"""
self.version = __version__
self.editableVars = {}
self.bioImg = None # Current EquatorImage object
self.default_img_zoom = None # default zoom calculated after processing image
# self.img_zoom = None # Params for x and y ranges of displayed image in image tab
self.graph_zoom = None # Params for x and y ranges of displayed graph in fitting tab
self.function = None # Current active function
self.in_batch_process = False
self.fixedIntArea = None
self.orientationModel = None
self.modeOrientation = None
if dir_path is not None:
self.dir_path, self.imgList, self.currentImg, self.fileList, self.ext = dir_path, imgList, currentFileNumber, fileList, ext
else:
self.dir_path, self.imgList, self.currentImg, self.fileList, self.ext = getImgFiles(str(filename), headless=True)
if len(self.imgList) == 0:
self.inputerror()
return
self.inputsettings=inputsettings
self.delcache=delcache
self.settingspath=settingspath
self.lock = lock
self.onImageChanged() # Toggle window to process current image
def inputerror(self):
"""
Display input error to screen
"""
self.statusPrint('Invalid Input')
self.statusPrint("Please select non empty failedcases.txt or an image\n\n")
def onImageChanged(self):
"""
This will create a new EquatorImage object for the new image
Process the new image if there's no cache.
"""
fileName = self.imgList[self.currentImg]
file=fileName+'.info'
cache_path = os.path.join(self.dir_path, "eq_cache", file)
cache_exist = os.path.isfile(cache_path)
if self.delcache:
if os.path.isfile(cache_path):
os.remove(cache_path)
#prevInfo = self.bioImg.info if self.bioImg is not None else None
self.bioImg = EquatorImage(self.dir_path, fileName, self, self.fileList, self.ext)
self.bioImg.skeletalVarsNotSet = not ('isSkeletal' in self.bioImg.info and self.bioImg.info['isSkeletal'])
self.bioImg.extraPeakVarsNotSet = not ('isExtraPeak' in self.bioImg.info and self.bioImg.info['isExtraPeak'])
        settings = self.getSettings()
self.statusPrint("Settings in onImageChange before update")
self.statusPrint(settings)
# Process new image
if 'paramInfo' in settings:
paramInfo = settings['paramInfo']
#settings.pop('paramInfo')
self.processImage(paramInfo)
else:
self.processImage()
self.statusPrint('---------------------------------------------------')
        if self.inputsettings and cache_exist and not self.delcache:
            self.statusPrint('cache exists, provided setting file was not used')
        elif self.inputsettings and (not cache_exist or self.delcache):
            self.statusPrint('setting file provided and used for fitting')
        elif not self.inputsettings and cache_exist and not self.delcache:
            self.statusPrint('cache exists, no fitting was performed')
        elif not self.inputsettings and (self.delcache or not cache_exist):
            self.statusPrint('fitting with default settings')
self.statusPrint('---------------------------------------------------')
def processImage(self, paramInfo=None):
"""
Process Image by getting all settings and call process() of EquatorImage object
Then, write data
"""
if self.bioImg is None:
return
settings = self.getSettings()
self.statusPrint("Settings in processImage:")
self.statusPrint(settings)
try:
self.bioImg.process(settings, paramInfo)
except Exception:
self.statusPrint('Unexpected error')
msg = 'Please report the problem with error message below and the input image\n\n'
msg += "Error : " + str(sys.exc_info()[0]) + '\n\n' + str(traceback.format_exc())
self.statusPrint(msg)
raise
self.updateParams()
# acquire the lock
if self.lock is not None:
self.lock.acquire()
self.csvManager = EQ_CSVManager(self.dir_path) # Create a CSV Manager object
self.csvManager.writeNewData(self.bioImg)
self.csvManager.writeNewData2(self.bioImg)
# release the lock
if self.lock is not None:
self.lock.release()
def updateParams(self):
"""
Update the parameters
"""
info = self.bioImg.info
if 'orientation_model' in info:
self.orientationModel = info['orientation_model']
if self.bioImg.quadrant_folded:
cx, cy = self.bioImg.info['center']
xlim, ylim = self.bioImg.initialImgDim
xlim, ylim = int(xlim/2), int(ylim/2)
self.default_img_zoom = [(cx-xlim, cx+xlim), (cy-ylim, cy+ylim)]
def getSettings(self):
"""
Get all settings for EquatorImage process() from widgets
:return: settings (dict)
"""
        default_settings = {
            "left_fix_sigmac": 1.0, "right_fix_sigmac": 1.0,
            "left_fix_sigmas": 0.0001, "right_fix_sigmas": 0.0001, "fix_k": 0,
            "orientation_model": 0, "model": "Gaussian", "isSkeletal": False,
            "isExtraPeak": False, "mask_thres": 0.0, "90rotation": False,
            "blank_mask": False}
        settings = default_settings
        settingspath = self.settingspath
        if self.inputsettings:
            try:
                with open(settingspath) as f:
                    settings = json.load(f)
            except Exception:
                self.statusPrint("Can't load setting file")
                self.inputsettings = False
        # Iterate over a snapshot of the keys: popping from a dict while
        # iterating over its live keys() view raises RuntimeError on Python 3.
        for k in list(settings.keys()):
            if self.isDynamicParameter(k):
                settings.pop(k)
        return settings
def isDynamicParameter(self, paramName):
'''
        Checks whether a parameter is dynamically handled by the fitting mechanism
:param paramName: Name of the parameter to be checked
:return: bool True if it is in the dynamic parameter list
'''
dynamicParams = ['Speak', 'left_area', 'right_area']
for p in dynamicParams:
if p in paramName:
return True
return False
def statusPrint(self, text):
"""
Print the text in the window or in the terminal depending on if we are using GUI or headless.
:param text: text to print
:return: -
"""
if text != "":
pid = os.getpid()
ptext = "[Process "+str(pid)+"] "+str(text)
print(ptext)
else:
print(text)
|
# Clustering of timeseries data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import umap
from sklearn.manifold import TSNE
from sklearn.cluster import AgglomerativeClustering
from sklearn.preprocessing import RobustScaler, StandardScaler
import hdbscan
from src.models.dynamic_time_warping import *
class DataPreprocess(object):
def __init__(self):
pass
def read_data(self, sku_labels, features):
print('Reading in the data...')
self.labels = pd.read_csv(sku_labels)
self.features = pd.read_csv(features)
self.features.dropna(axis=1, inplace=True)
return self.labels, self.features
def scale_data(self, df, scaler):
scale = scaler
skus = df['id']
df.set_index('id', inplace=True)
X = scale.fit_transform(df)
return X
class DimensionalityReduction(object):
def __init__(self):
pass
def run_dimred(self, features, dimred):
print('Running Dimentionality Reduction...')
projection = dimred.fit_transform(features)
return projection
class Clustering(object):
def __init__(self):
pass
def cluster(self, dimred, clustering_algo):
print('Clustering...')
clusters_fit = clustering_algo.fit_predict(dimred[[0,1]])
return clusters_fit
def main():
    subset = 'none'
    df = pd.read_csv('extracted_features.csv')
    df.set_index('id', inplace=True)
    df.dropna(axis=1, inplace=True)
    # `Preprocessing` is assumed to be provided by the wildcard import of
    # src.models.dynamic_time_warping above.
    pp = Preprocessing()
feat = pd.read_csv('aggregate_products.csv')
pivot = pp.pivot_table(feat)
    sorted_pivot = pp.sort_nas(pivot)  # avoid shadowing the built-in sorted()
    pivot_nans, nans, pivot_no_nans, no_nans = pp.split_nans(sorted_pivot, df)
scaler = StandardScaler()
if subset == 'nan':
use_df = nans
elif subset == 'no_nans':
use_df = no_nans
elif subset == 'none':
use_df = df
print('There are {} samples'.format(len(use_df)))
X = scaler.fit_transform(use_df)
dr = DimensionalityReduction()
    # The reducer used here is UMAP (the t-SNE import above is unused in this path).
    reducer = umap.UMAP(n_neighbors=2, min_dist=0.0, n_components=10)
    embedding = dr.run_dimred(X, reducer)
    plot_df = pd.DataFrame(embedding).join(df.reset_index())
cl = Clustering()
clus_algo = hdbscan.HDBSCAN(min_cluster_size=50)
clusters_fit = cl.cluster(plot_df, clus_algo)
tsne_cluster = plot_df.join(pd.DataFrame(clusters_fit), rsuffix='clus')
    tsne_cluster.rename(columns={0: 'tsne1', 1: 'tsne2', '0clus': 'cluster'},
                        inplace=True)
print('Outputting...')
out_df = tsne_cluster[['id', 'cluster']]
out_df.to_csv('tsne_clusters.csv', index=False)
if __name__ == '__main__':
main()
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dependency tests for the `tensorboard.summary` APIs.
This test is isolated in its own file to avoid depending on TensorFlow (either
directly or transitively), since we need to test the *absence* of a TF dep.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import unittest
class SummaryV2DepTest(unittest.TestCase):
def test_summary_v2_has_no_immediate_tf_dep(self):
# Check that we can import the module (multiple ways) and list and reference
# symbols from it without triggering a tensorflow import.
import tensorboard.summary.v2
from tensorboard.summary import v2 as summary_v2
print(dir(summary_v2))
print(summary_v2.scalar)
self.assertEqual('notfound', sys.modules.get('tensorflow', 'notfound'))
if __name__ == '__main__':
unittest.main()
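# The "absence of import" check used above, in isolation: sys.modules only
# lists modules that have actually been imported, so a sentinel default
# distinguishes "never imported" from "imported". Sketch (would fail in an
# environment where tensorflow has already been imported):
#
# import sys
# assert sys.modules.get('tensorflow', 'notfound') == 'notfound'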
|
from discord.ext.commands import Bot
import os
from keep_online import keep_online
from host_address import get_Host_name_IP
import logging
logger = logging.getLogger('discord')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
startup_extensions = ["general", "games","general_bot"]
BOT_PREFIX = "?a"
token = os.environ.get("TOKEN")
client = Bot(command_prefix=BOT_PREFIX)
@client.event
async def on_ready():
print("--------\nOnline")
print('logged in as: {} \nBot Id is: {}\n--------'.format(client.user,client.user.id))
@client.command()
async def load(extension_name : str):
"""Loads an extension."""
try:
client.load_extension(extension_name)
except (AttributeError, ImportError) as e:
await client.say("```py\n{}: {}\n```".format(type(e).__name__, str(e)))
return
await client.say("{} loaded.".format(extension_name))
@client.command()
async def unload(extension_name : str):
"""Unloads an extension."""
client.unload_extension(extension_name)
await client.say("{} unloaded.".format(extension_name))
@client.event
async def on_message( message):
if message.author == client.user:
return
if message.content.startswith('?hi'):
msg = "Hello {0.author.mention}".format(message)
await client.send_message(message.channel, msg)
elif message.content.startswith('?bot'):
await client.send_message(message.channel, "Yes")
await client.process_commands(message)
get_Host_name_IP()
keep_online()
if __name__ == "__main__":
for extension in startup_extensions:
try:
client.load_extension(extension)
except Exception as e:
exc = '{}: {}'.format(type(e).__name__, e)
print('Failed to load extension {}\n{}'.format(extension, exc))
client.run(token)
|
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from django.forms.models import model_to_dict
from core.utils import model_to_dict_verbose
from core.models import TimestampedModel, Address, UUIDModel, UseCaseModel, WhenInterestedModel
from core.pricing import MIN_HOME_VALUE, MAX_HOME_VALUE
from custom_auth.models import Client
from .utils import send_new_inquiry_email
class Inquiry(UseCaseModel, UUIDModel, WhenInterestedModel, TimestampedModel):
class Meta:
verbose_name_plural = 'inquiries'
# META DATA
client = models.OneToOneField(Client, on_delete=models.CASCADE, related_name='inquiry')
ip_address = models.GenericIPAddressField(null=True)
# there are also `created_at` and `modified_at` fields from the TimestampedModel
# abstract base class
# HOME DATA
PROPERTY_TYPES = (
('sf', 'Single-Family Home'),
('mf', 'Multi-Family Home'),
('co', 'Condo, Townhouse or Apartment'),
('va', 'Vacation or Rental Property'),
)
property_type = models.CharField(max_length=2, choices=PROPERTY_TYPES)
RENT_TYPES = (
('no', 'No'),
('under_14', 'Yes, 14 days or fewer per year'),
('over_14', 'Yes, more than 14 days per year'),
)
rent_type = models.CharField(max_length=8, choices=RENT_TYPES)
address = models.ForeignKey(Address, null=True, on_delete=models.SET_NULL)
primary_residence = models.BooleanField()
TEN_YEAR_DURATION_TYPES = (
('over_10', 'Yes, more than 10 years'),
('10_or_less', 'No, 10 years or fewer'),
('dont_know', "Don't know"),
)
ten_year_duration_prediction = models.CharField(
max_length=10, choices=TEN_YEAR_DURATION_TYPES, default='10_or_less'
)
home_value = models.PositiveIntegerField(
validators=[MinValueValidator(MIN_HOME_VALUE),
MaxValueValidator(MAX_HOME_VALUE)]
)
# HOMEOWNER DATA
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
referrer_name = models.CharField(max_length=60, blank=True)
household_debt = models.PositiveIntegerField(validators=[MaxValueValidator(99999999)])
notes = models.CharField(max_length=1000, blank=True)
def __str__(self):
return "Inquiry submission by {0} {1} for {2}".format(
self.first_name, self.last_name, self.address
)
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
# notify staff that there is a new Inquiry
send_new_inquiry_email(self.first_name, self.last_name)
@property
def full_name_short(self):
return self.first_name + ' ' + self.last_name
@property
def as_dict(self):
return model_to_dict(self)
@property
def as_dict_verbose(self):
return model_to_dict_verbose(self)
|
#coding:utf-8
def script(s, player=None):
from NaoQuest.objective import Objective
from NaoCreator.setting import Setting
from NaoQuest.quest import Quest
if not player:
Setting.error("Error in execution of post_script of objective \"choix\": player is None")
return
if hasattr(s, "kw_answer"):
if s.kw_answer == "qcm" or s.kw_answer == "QCM":
new_qst = Quest(player.current_scenario.inner_name, "qcm")
new_qst.point = 0
else:
new_qst = Quest(player.current_scenario.inner_name, "info")
        num_branches = len(player.current_quest.next_quests)
        new_qst.branch_id = num_branches + 1
player.current_quest.next_quests.append(new_qst)
|
#!/usr/bin/python3
BUILD_DIRECTORY = "../build/"
COMPONENTS_DIRECTORY = "components/"
BUILD_COMPONENTS_DIRECTORY = BUILD_DIRECTORY + COMPONENTS_DIRECTORY
LOGS_DIRECTORY = "logs/"
BUILD_LOGS_DIRECTORY = BUILD_DIRECTORY + LOGS_DIRECTORY
ENGINE_DIRECTORY = "engine/"
BUILD_COMPONENTS_ENGINE_DIRECTORY = BUILD_COMPONENTS_DIRECTORY + ENGINE_DIRECTORY
WEB_UI_DIRECTORY = "web-ui/"
DIST_DIRECTORY = "dist/machine-learning-swissknife"
BUILD_COMPONENTS_WEB_UI_DIST_DIRECTORY = BUILD_COMPONENTS_DIRECTORY + WEB_UI_DIRECTORY + DIST_DIRECTORY
COMPONENTS_WEB_UI_DIST_DIRECTORY = COMPONENTS_DIRECTORY + WEB_UI_DIRECTORY + DIST_DIRECTORY
SERVICE_JAR = "mlsk-service-impl.jar"
UI_JAR = "mlsk-ui-jar-with-dependencies.jar"
CONFIGURATION_FILE = "mlsk.ini"
PORT_SECTION = "port"
PORTS_SECTION = "ports"
SERVICE_SECTION = "SERVICE"
ENGINE_SECTION = "ENGINE"
WEB_UI_SECTION = "WEB_UI"
SERVICE_PORT_OPTION = "service_port"
ENGINE_PORTS_OPTION = "engine_ports"
WEB_UI_PORT_OPTION = "webui_port"
ANGULAR_SERVER_PORT_OPTION = "${SERVER_PORT}"
|
f = open ("myfile.txt", "w")
s = input ("enter text:")
f.write(s)
f.close ()
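# A short follow-up sketch: read the file back to confirm the write.
with open("myfile.txt") as f:
    print(f.read())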
|
import tkinter, os
import tkinter.messagebox
from tkinter.filedialog import askopenfilename
from PyPDF2 import PdfFileMerger
window = tkinter.Tk()
window.title("PDF Merger")
files_path_list = []
def merge():
option_selected = var_int.get()
result_file_name = var_string.get()
if not result_file_name:
tkinter.messagebox.showinfo("Failed", "Input file name")
else:
        pdfs = ['1.pdf', '2.pdf'] if option_selected == 1 else files_path_list
try:
merger = PdfFileMerger()
for pdf in pdfs:
merger.append(pdf)
merger.write(result_file_name+".pdf")
merger.close()
tkinter.messagebox.showinfo("Success", "Files merged.")
if option_selected == 1:
rename(result_file_name)
        except Exception:
tkinter.messagebox.showinfo("Failed", "Something went wrong.")
else:
reset()
def rename(file_name):
os.rename("1.pdf", file_name+"-1.pdf")
os.rename("2.pdf", file_name+"-2.pdf")
def select_files():
midFrame.pack()
filename = askopenfilename(initialdir="./", filetypes=[('pdf file', '*.pdf')], title = "Choose a file.")
if filename:
files_path_list.append(filename)
label_added.grid(column=0, row=1, columnspan=3)
if files_path_list:
label_added.config(text=("\n".join(files_path_list)))
add_more = tkinter.Button(midFrame, text="Add another", command = select_files)
add_more.grid(column=1, row=0)
def reset():
entry.delete(0, 'end')
midFrame.pack_forget()
label_added.config(text=(""))
radio_1.select()
del files_path_list[:]
var_string = tkinter.StringVar()
var_int = tkinter.IntVar()
topFrame = tkinter.Frame(window)
topFrame.pack()
radio_1 = tkinter.Radiobutton(topFrame, text="Default", variable=var_int, value=1, command = reset)
radio_1.grid(column=0, row=0)
radio_1.select()
radio_2 = tkinter.Radiobutton(topFrame, text="Choose files", variable=var_int, value=2, command=select_files)
radio_2.grid(column=1, row=0)
midFrame = tkinter.Frame(window)
label_added = tkinter.Label(midFrame)
bottomFrame = tkinter.Frame(window)
bottomFrame.pack(side="bottom")
label_1 = tkinter.Label(bottomFrame, text="New file name")
label_1.grid(column=0, row=1)
entry = tkinter.Entry(bottomFrame, textvariable = var_string)
entry.grid(column=1, row=1)
mergebutton = tkinter.Button(bottomFrame, text ="Merge", command = merge)
mergebutton.grid(column=0, row=2,columnspan=2)
window.mainloop()
|
from AESAlgorithm import process_key, SubBytes, ShiftRows, print_hex, InvShiftRows, InvSubBytes, MixColumns,\
InvMixColumns, AddRoundKey, KeyExpansionInv, sbox, isbox, print_binary, print_ascii
from Color import Color
class AESCrack:
def __init__(self, cipher_path):
self.cipher_path = cipher_path
f = open(cipher_path, "r")
self.cipher = f.read()
f.close()
# print(self.cipher)
self.c1, self.c2, self.c3, self.c4,\
self.c5, self.c6, self.c7, self.c8,\
self.c9 = self.cipher.split('\n')
# print(self.c1)
# print(self.c2)
# print(self.c3)
# print(self.c4)
# print(self.c5)
# print(self.c6)
# print(self.c7)
# print(self.c8)
# print(self.c9)
# print("")
def find_spaces(self, c1, c2):
space = " "
space_hex = int(space.encode().hex(), 16)
space_hex_s = sbox[space_hex]
state1 = InvSubBytes(InvShiftRows(c1))
state2 = InvSubBytes(InvShiftRows(c2))
# k_mix = SubBytes(ShiftRows(k))
state1_m = InvShiftRows(InvMixColumns(state1))
state2_m = InvShiftRows(InvMixColumns(state2))
        res_hex12 = AddRoundKey(state1_m, state2_m)  # the XOR cancels out the key k
        # res_hex12 = AddRoundKey(state1, state2)  # the XOR cancels out the key k
        # res_hex12 = InvShiftRows(InvMixColumns(res_hex12))  # restore the correct order, obtaining sbox[plain1] XOR sbox[plain2]
"""k = [
[0x00, space_hex_s, 0x00, 0x00],
[0x00, 0x00, 0x00, 0x00],
[0x00, 0x00, 0x00, 0x00],
[0x00, 0x00, 0x00, 0x00]
]"""
"""k = [
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
]"""
"""
state = AddRoundKey(state, k)
state = InvMixColumns(state)
# state = InvMixColumns(state)
# state = AddRoundKey(state, InvMixColumns(k))
state = InvShiftRows(state)
state = InvSubBytes(state)
"""
k = [
[space_hex, space_hex, space_hex, space_hex],
[space_hex, space_hex, space_hex, space_hex],
[space_hex, space_hex, space_hex, space_hex],
[space_hex, space_hex, space_hex, space_hex]
]
"""k = [
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
]"""
print_hex("k", k)
res_k = state1
# res_k = AddRoundKey(res_k, k) # plain1 XOR k
# res_k = InvShiftRows(InvMixColumns(res_k))
# k = InvMixColumns(k)
# res_k = InvMixColumns(res_k)
# k = InvMixColumns(k)
res_k = AddRoundKey(res_k, k)
res_k = InvMixColumns(res_k)
res_k = InvShiftRows(res_k)
res_k = InvSubBytes(res_k)
print_hex("InvSubBytes", res_k)
print_ascii("ascii", res_k)
# print_hex("res_hex12", res_hex12)
print_binary("res_hex12", res_hex12)
space = " "
space_hex = int(space.encode().hex(), 16)
space_hex_s = sbox[space_hex]
# space_binary = bin(space_hex)[2:].zfill(8)
# space_binary_s = bin(space_hex_s)[2:].zfill(8)
res_xor12_s = [[None for j in range(4)] for i in range(4)]
for i, word in enumerate(res_hex12):
for j, byte in enumerate(word):
res_xor12_s[i][j] = byte ^ space_hex_s
print_binary("res_xor12_s", res_xor12_s)
final_s = InvSubBytes(res_xor12_s)
print_binary("final_s", final_s)
dict = {}
idx = 0
for word in final_s:
for byte in word:
# if byte >= 0x20 and byte <= 0x7A:
                if byte == 0x20 or (byte >= 0x41 and byte <= 0x5A) or (byte >= 0x61 and byte <= 0x7A):
                    # we have a valid ASCII character (space or letter)
dict[idx] = chr(byte)
idx += 1
# print(dict)
return dict
def find_all_spaces(self):
list = [process_key(self.c1), process_key(self.c2), process_key(self.c3), process_key(self.c4),
process_key(self.c5), process_key(self.c6), process_key(self.c7), process_key(self.c8),
process_key(self.c9)]
dict_spaces = {}
for i in range(16):
dict_spaces[i] = 0
for i in range(9):
for j in range(i+1, 9):
# print(f"i: {i}, j: {j}")
c1 = list[i]
c2 = list[j]
dict = self.find_spaces(c1, c2)
for key in dict:
dict_spaces[key] += 1
print(dict_spaces)
def test_key(self):
key = "deadbeef12345678deadbeef12345678"
k = process_key(key)
r1 = self.test_decrypt(process_key(self.c1), k)
r2 = self.test_decrypt(process_key(self.c2), k)
r3 = self.test_decrypt(process_key(self.c3), k)
r4 = self.test_decrypt(process_key(self.c4), k)
r5 = self.test_decrypt(process_key(self.c5), k)
r6 = self.test_decrypt(process_key(self.c6), k)
r7 = self.test_decrypt(process_key(self.c7), k)
r8 = self.test_decrypt(process_key(self.c8), k)
r9 = self.test_decrypt(process_key(self.c9), k)
print_ascii("r1", r1)
print_ascii("r2", r2)
print_ascii("r3", r3)
print_ascii("r4", r4)
print_ascii("r5", r5)
print_ascii("r6", r6)
print_ascii("r7", r7)
print_ascii("r8", r8)
print_ascii("r9", r9)
# Dear Joan Daemen, I wanted to let you know that AES is great but can be simplified to 1 round
# without loosing any security. Best wishes, Blaise.
    # Since this is an e-mail, the text very likely starts with "Dear Name Surname";
    # in our case "Dear Joan Daemen", which happens to be exactly 16 characters long
    # (the AES key length). It is enough to run the steps from the plaintext forward
    # to MixColumns, and backwards from the ciphertext to InvSubBytes, to end up with
    # the relation plain XOR key = cipher; XOR-ing with the guessed plaintext then
    # yields the key. With the key just obtained, simply apply it (see test_key) to
    # all the blocks (the mode is ECB) to recover the text of the e-mail.
def test2(self):
plain1 = "Dear Joan Daemen"
cipher1 = self.c1
p1 = process_key(self.ascii_to_hex(plain1))
p1 = SubBytes(p1)
p1 = ShiftRows(p1)
p1 = MixColumns(p1)
c1 = process_key(cipher1)
c1 = InvShiftRows(c1)
c1 = InvSubBytes(c1)
res = [[None for j in range(4)] for i in range(4)]
for i, word in enumerate(res):
for j, byte in enumerate(word):
res[i][j] = p1[i][j] ^ c1[i][j]
print_hex("res", res)
print_ascii("res ascii", res)
    # This attempt was based on computing c1 XOR c2 in order to obtain
    # plain_text1 XOR plain_text2. From there the goal was to locate the spaces,
    # although the sbox function mixes the characters well and it is not easy to
    # pinpoint exactly where a space occurs. The idea was to XOR all the blocks
    # pairwise so as to get a more precise statistic of the space positions, and
    # then start decrypting parts of the text and of the key until both could be
    # reassembled; however, this takes quite a lot of time.
def test(self):
# t1 = "HELLO NEW WORLD!"
# t2 = "YOU'RE WELCOMEEE"
# t2 = "HELLOONEW WORLD!"
# t1 = "A AAAAAAAAAAAAAA"
# t2 = "AR RRRRRRRRRRRRR"
# t1 = "AAAAAAAAAAAAAAAC"
# t2 = "ABCDEFGHILMNOPQ "
t1 = "ABCDEFGHIJKLMNOP"
t2 = "W "
# plain_key = "000102030405060708090a0b0c0d0e0f"
plain_key = "000102030405060708090a0b0c0d0e0f"
h1 = self.ascii_to_hex(t1)
h2 = self.ascii_to_hex(t2)
k = process_key(plain_key)
h1 = process_key(h1)
h2 = process_key(h2)
c1 = self.test_cipher(h1, k)
c2 = self.test_cipher(h2, k)
# print_hex("h1", h1)
print("decrypt", self.state_to_ascii(self.test_decrypt(c1, k)))
a = self.find_spaces(c1, c2)
print(a)
# self.find_all_spaces()
"""
# c1 = InvMixColumns(InvShiftRows(InvSubBytes(c1)))
# c2 = InvMixColumns(InvShiftRows(InvSubBytes(c2)))
c1 = InvSubBytes(InvShiftRows(c1))
c2 = InvSubBytes(InvShiftRows(c2))
# c1 ^ k - c2 ^ k
# c1^c2 !!
res_hex12 = AddRoundKey(c1, c2)
print_hex("res_hex12", res_hex12)
# res_hex12 = InvSubBytes(InvShiftRows(InvMixColumns(res_hex12)))
res_hex12 = InvShiftRows(InvMixColumns(res_hex12))
print_hex("res_hex12", res_hex12)
print_binary("res_hex12", res_hex12)
        # when 2 characters match, the hexadecimal value 52 comes out (i.e. "R" in ASCII)
space = " "
space_hex = int(space.encode().hex(), 16)
space_binary = bin(space_hex)[2:].zfill(8)
print(space_binary)
space_hex_s = sbox[space_hex]
space_binary_s = bin(space_hex_s)[2:].zfill(8)
print(space_binary_s)
res_xor12_s = [[None for j in range(4)] for i in range(4)]
for i, word in enumerate(res_hex12):
for j, byte in enumerate(word):
res_xor12_s[i][j] = byte ^ space_hex_s
final_s = InvSubBytes(res_xor12_s)
print_hex("final_s", final_s)
print("decrypt", self.state_to_ascii(final_s))
"""
"""
print("decrypt", self.state_to_ascii(res_hex12))
print("\n")
dict12 = self.check_spaces(res_hex12)
self.test_word_by_position(dict12, c1, True)
self.test_word_by_position(dict12, c2, True)
print("\n")
"""
def test_cipher(self, block, k):
state = block
state = SubBytes(state)
state = ShiftRows(state)
state = MixColumns(state)
state = AddRoundKey(state, k)
state = SubBytes(state)
state = ShiftRows(state)
return state
def test_decrypt(self, block, k):
state = block
state = InvShiftRows(state)
state = InvSubBytes(state)
state = AddRoundKey(state, k)
state = InvMixColumns(state)
# state = InvMixColumns(state)
# state = AddRoundKey(state, InvMixColumns(k))
state = InvShiftRows(state)
state = InvSubBytes(state)
return state
def crack(self):
"""
plain_text = "00112233445566778899aabbccddeeff"
plain_key = "000102030405060708090a0b0c0d0e0f"
t = process_key(plain_text)
k = process_key(plain_key)
state = t
print_hex("Start:", state)
state = SubBytes(state)
print_hex("SubBytes:", state)
state = ShiftRows(state)
print_hex("ShiftRows:", state)
state = MixColumns(state)
print_hex("MixColumns:", state)
state = AddRoundKey(state, k)
print_hex("AddRoundKey:", state)
state = SubBytes(state)
print_hex("SubBytes:", state)
state = ShiftRows(state)
print_hex("ShiftRows:", state)
# state = InvShiftRows(state)
# state = InvSubBytes(state)
# state = AddRoundKey(state, k)
# state = InvMixColumns(state)
# state = InvShiftRows(state)
# state = InvSubBytes(state)
state = InvShiftRows(state)
state = InvSubBytes(state)
state = InvMixColumns(state)
state = AddRoundKey(state, InvMixColumns(k))
state = InvShiftRows(state)
state = InvSubBytes(state)
print_hex("AddRoundKey:", state)
"""
"""
k1 = SubBytes(k)
k2 = ShiftRows(k1)
print_hex("k2", k2)
inv_k = InvShiftRows(InvSubBytes(k2))
print_hex("inv_k", inv_k)
"""
# c1 = process_key(self.c1)
# c1 = InvMixColumns(InvShiftRows(InvSubBytes(c1)))
c1 = InvMixColumns(InvShiftRows(InvSubBytes(process_key(self.c1))))
c2 = InvMixColumns(InvShiftRows(InvSubBytes(process_key(self.c2))))
c3 = InvMixColumns(InvShiftRows(InvSubBytes(process_key(self.c3))))
c4 = InvMixColumns(InvShiftRows(InvSubBytes(process_key(self.c4))))
c5 = InvMixColumns(InvShiftRows(InvSubBytes(process_key(self.c5))))
c6 = InvMixColumns(InvShiftRows(InvSubBytes(process_key(self.c6))))
c7 = InvMixColumns(InvShiftRows(InvSubBytes(process_key(self.c7))))
c8 = InvMixColumns(InvShiftRows(InvSubBytes(process_key(self.c8))))
c9 = InvMixColumns(InvShiftRows(InvSubBytes(process_key(self.c9))))
# print_hex("c1", c1)
# print_hex("c2", c2)
# print_hex("c3", c3)
# print_hex("c4", c4)
# print_hex("c5", c5)
# print_hex("c6", c6)
# print_hex("c7", c7)
# print_hex("c8", c8)
# print_hex("c9", c9)
res_hex12 = AddRoundKey(c1, c2)
res_hex12 = InvSubBytes(InvShiftRows(res_hex12))
print_hex("res_hex12", res_hex12)
print("\n")
dict12 = self.check_spaces(res_hex12)
self.test_word_by_position(dict12, c1, True)
self.test_word_by_position(dict12, c2, True)
print("\n")
def check_spaces(self, state):
hex_str = ""
for word in state:
for byte in word:
hex_str += (hex(byte)[2:].zfill(2))
hex_list = [hex_str[i:i + 2] for i in range(0, len(hex_str), 2)]
binary_list = [bin(int(h, 16))[2:].zfill(8) for h in hex_list]
dict = {}
idx = 0
for binary in binary_list:
first_2_bit = binary[0:2]
if first_2_bit == "01":
dict[idx] = ' '
idx += 1
return dict
def test_word_by_position(self, dict, state, check_print):
idx = 0
Nb = len(state)
new_state = [[None for j in range(4)] for i in range(Nb)]
for i, word in enumerate(state):
for j, byte in enumerate(word):
if idx in dict:
word = dict[idx]
hex_word = self.ascii_to_hex(word)
new_state[i][j] = byte ^ int(hex_word, 16)
else:
new_state[i][j] = byte
idx += 1
new_state = InvSubBytes(InvShiftRows(new_state))
dict_res = {}
s = ""
idx = 0
for word in state:
for byte in word:
# h = hex(byte)[2:].zfill(2)
h = chr(byte)
if idx in dict:
dict_res[idx] = h
h = Color.GREEN + h + Color.RESET
s += h
idx += 1
if check_print:
print(s)
print(dict_res)
return new_state
"""
hex_str = ""
for word in state:
for byte in word:
hex_str += (hex(byte)[2:].zfill(2))
dict_res = {}
hex_list = [hex_str[i:i + 2] for i in range(0, len(hex_str), 2)]
idx = 0
res = ""
for h in hex_list:
if idx in dict:
word = dict[idx]
hex_word = self.ascii_to_hex(word)
hex_new = self.xor_new(h, hex_word)
word_new = self.hex_to_ascii(hex_new)
res += Color.GREEN + word_new + Color.RESET
dict_res[idx] = word_new
else:
res += h
idx += 1
if check_print:
print(res)
print(dict_res)
return res
"""
def xor_new(self, hex1, hex2):
n = 2
array1 = [hex1[i:i + n] for i in range(0, len(hex1), n)]
array2 = [hex2[i:i + n] for i in range(0, len(hex2), n)]
return "".join(hex(int(x, 16) ^ int(y, 16))[2:].zfill(2) for x, y in zip(array1, array2))
def ascii_to_hex(self, ascii):
return ascii.encode("ascii").hex()
def hex_to_ascii(self, hex):
n = 2
hex_list = [hex[i:i + n] for i in range(0, len(hex), n)]
return "".join(chr(int(h, 16)) for h in hex_list)
def state_to_ascii(self, state):
s = ""
for word in state:
for byte in word:
s += chr(byte)
return s
|
class Student(object):
def __init__(self,name,score):
self.__name = name
self.__score = score
def print_score(self):
print('%s: %s' % (self.__name,self.__score))
def get_grade(self):
if self.__score >= 90:
return 'A'
elif self.__score >= 60:
return 'B'
else:
return 'C'
def get_name(self):
return self.__name
def get_score(self):
return self.__score
def set_score(self,score):
if 0 <= score <= 100:
self.__score = score
else:
raise ValueError('bad score')
bart = Student('Bart Simpson', 59)
lisa = Student('Lisa Simpson', 87)
bart.print_score()
lisa.print_score()
print(lisa.get_name(),lisa.get_grade())
class Animal(object):
"""docstring for Animal"""
def run(self):
print('Animal is running...')
class Dog(Animal):
"""docstring for Dog"""
def run(self):
print('Dog is running...')
class Cat(Animal):
"""docstring for Cat"""
pass
dog = Dog()
dog.run()
cat = Cat()
cat.run()
|
from django.template.loader import render_to_string
from .serializers import EventsSerializer
"""Serializers that are used by older versions of the API"""
class EventsV1Serializer(EventsSerializer):
def attr_date(self, obj):
o = {
"human": render_to_string("osmcal/date.l10n.txt", {"event": obj}).strip(),
"whole_day": obj.whole_day,
"start": str(obj.start_localized.replace(tzinfo=None)),
}
if obj.end:
o["end"] = str(obj.end_localized.replace(tzinfo=None))
return o
|
from django.contrib import auth
from django.contrib.auth.models import UNUSABLE_PASSWORD, \
SiteProfileNotAvailable
from django.utils.translation import ugettext_lazy as _
from google.appengine.ext import db
from string import ascii_letters, digits
import hashlib, random
def gen_hash(password, salt=None, algorithm='sha512'):
hash = hashlib.new(algorithm)
hash.update(password)
if salt is None:
salt = ''.join([random.choice(ascii_letters + digits) for _ in range(8)])
hash.update(salt)
return (algorithm, salt, hash.hexdigest())
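# gen_hash returns an (algorithm, salt, hexdigest) triple; the stored password
# below is the '$'-joined form "algorithm$salt$hexdigest", and verification
# recomputes the digest with the stored salt. Sketch (Python 2, matching this
# module's era):
#
#   algorithm, salt, digest = gen_hash('secret')
#   assert gen_hash('secret', salt, algorithm)[2] == digest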
class UserTraits(db.Model):
last_login = db.DateTimeProperty(verbose_name=_('last login'))
date_joined = db.DateTimeProperty(auto_now_add=True,
verbose_name=_('date joined'))
is_active = db.BooleanProperty(default=False, verbose_name=_('active'))
is_staff = db.BooleanProperty(default=False,
verbose_name=_('staff status'))
is_superuser = db.BooleanProperty(default=False,
verbose_name=_('superuser status'))
password = db.StringProperty(default=UNUSABLE_PASSWORD,
verbose_name=_('password'))
@property
def id(self):
# Needed for compatibility
return str(self.key())
def __unicode__(self):
return unicode(self.key().id_or_name())
def __str__(self):
return unicode(self).encode('utf-8')
def is_authenticated(self):
return True
def is_anonymous(self):
return False
def check_password(self, password):
if not self.has_usable_password():
return False
algorithm, salt, hash = self.password.split('$')
return hash == gen_hash(password, salt, algorithm)[2]
def set_password(self, password):
self.password = '$'.join(gen_hash(password))
def set_unusable_password(self):
# Sets a value that will never be a valid hash
self.password = UNUSABLE_PASSWORD
def has_usable_password(self):
return self.password != UNUSABLE_PASSWORD
@classmethod
    def make_random_password(cls, length=16,
            allowed_chars='abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789'):
"""
Generates a random password with the given length and given allowed_chars.
"""
# Note that default value of allowed_chars does not have "I" or letters
# that look like it -- just to avoid confusion.
from random import choice
return ''.join([choice(allowed_chars) for i in range(length)])
def get_profile(self):
"""
Returns site-specific profile for this user. Raises
SiteProfileNotAvailable if this site does not allow profiles.
"""
if not hasattr(self, '_profile_cache'):
from django.conf import settings
if not settings.AUTH_PROFILE_MODULE:
raise SiteProfileNotAvailable
try:
appname, modelname = settings.AUTH_PROFILE_MODULE.rsplit('.', 1)
for app in settings.INSTALLED_APPS:
if app.endswith('.' + appname):
appname = app
break
model = getattr(
__import__(appname + '.models', {}, {}, ['']),
modelname)
self._profile_cache = model.all().filter('user =', self).get()
except ImportError:
raise SiteProfileNotAvailable
return self._profile_cache
def get_group_permissions(self):
"""
Returns a list of permission strings that this user has through
his/her groups. This method queries all available auth backends.
"""
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_group_permissions"):
permissions.update(backend.get_group_permissions(self))
return permissions
def get_all_permissions(self):
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_all_permissions"):
permissions.update(backend.get_all_permissions(self))
return permissions
def has_perm(self, perm):
"""
Returns True if the user has the specified permission. This method
queries all available auth backends, but returns immediately if any
backend returns True. Thus, a user who has permission from a single
auth backend is assumed to have permission in general.
"""
# Inactive users have no permissions.
if not self.is_active:
return False
# Superusers have all permissions.
if self.is_superuser:
return True
# Otherwise we need to check the backends.
for backend in auth.get_backends():
if hasattr(backend, "has_perm"):
if backend.has_perm(self, perm):
return True
return False
def has_perms(self, perm_list):
"""Returns True if the user has each of the specified permissions."""
for perm in perm_list:
if not self.has_perm(perm):
return False
return True
def has_module_perms(self, app_label):
"""
Returns True if the user has any permissions in the given app
label. Uses pretty much the same logic as has_perm, above.
"""
if not self.is_active:
return False
if self.is_superuser:
return True
for backend in auth.get_backends():
if hasattr(backend, "has_module_perms"):
if backend.has_module_perms(self, app_label):
return True
return False
def get_and_delete_messages(self):
messages = []
for m in self.message_set:
messages.append(m.message)
m.delete()
return messages
class EmailUserTraits(UserTraits):
def email_user(self, subject, message, from_email=None):
"""Sends an e-mail to this user."""
from django.core.mail import send_mail
send_mail(subject, message, from_email, [self.email])
def __unicode__(self):
return self.email
class EmailUser(EmailUserTraits):
email = db.EmailProperty(required=True, verbose_name=_('e-mail address'))
# This can be used to distinguish between banned users and unfinished
# registrations
is_banned = db.BooleanProperty(default=False,
verbose_name=_('banned status'))
class User(EmailUserTraits):
"""Default User class that mimics Django's User class."""
username = db.StringProperty(required=True, verbose_name=_('username'))
email = db.EmailProperty(verbose_name=_('e-mail address'))
first_name = db.StringProperty(verbose_name=_('first name'))
last_name = db.StringProperty(verbose_name=_('last name'))
|
import sfml as _sf
# Mapping to readable key
_key_mapping = {
_sf.Keyboard.A : 'a',
_sf.Keyboard.B : 'b',
_sf.Keyboard.C : 'c',
_sf.Keyboard.D : 'd',
_sf.Keyboard.E : 'e',
_sf.Keyboard.F : 'f',
_sf.Keyboard.G : 'g',
_sf.Keyboard.H : 'h',
_sf.Keyboard.I : 'i',
_sf.Keyboard.J : 'j',
_sf.Keyboard.K : 'k',
_sf.Keyboard.L : 'l',
_sf.Keyboard.M : 'm',
_sf.Keyboard.N : 'n',
_sf.Keyboard.O : 'o',
_sf.Keyboard.P : 'p',
_sf.Keyboard.Q : 'q',
_sf.Keyboard.R : 'r',
_sf.Keyboard.S : 's',
_sf.Keyboard.T : 't',
_sf.Keyboard.U : 'u',
_sf.Keyboard.V : 'v',
_sf.Keyboard.W : 'w',
_sf.Keyboard.X : 'x',
_sf.Keyboard.Y : 'y',
_sf.Keyboard.Z : 'z',
_sf.Keyboard.UP : 'up',
_sf.Keyboard.DOWN : 'down',
_sf.Keyboard.LEFT : 'left',
_sf.Keyboard.RIGHT : 'right',
_sf.Keyboard.SPACE : 'space',
_sf.Keyboard.RETURN : 'enter',
_sf.Keyboard.ESCAPE : 'escape',
_sf.Keyboard.L_ALT : 'alt',
_sf.Keyboard.R_ALT : 'alt',
_sf.Keyboard.L_CONTROL : 'ctrl',
_sf.Keyboard.R_CONTROL : 'ctrl',
_sf.Keyboard.L_SHIFT : 'shift',
_sf.Keyboard.R_SHIFT : 'shift',
}
# inverse mapping to a list of *real* keys
_inverse_key_mapping = {}
for key, value in _key_mapping.items():  # .items() works on both Python 2 and 3
_inverse_key_mapping.setdefault(value, []).append(key)
def _real_keys(key):
assert key in _inverse_key_mapping
return iter(_inverse_key_mapping[key])
_color_mapping = {
'red' : _sf.Color.RED,
'blue' : _sf.Color.BLUE,
'cyan' : _sf.Color.CYAN,
'black' : _sf.Color.BLACK,
'white' : _sf.Color.WHITE,
'green' : _sf.Color.GREEN,
'yellow' : _sf.Color.YELLOW,
'magenta' : _sf.Color.MAGENTA,
'transparent' : _sf.Color.TRANSPARENT,
}
def _to_actual_color(color):
if isinstance(color, str):
if color not in _color_mapping:
raise ValueError("Invalid color")
return _color_mapping[color]
elif isinstance(color, int):
b = (color >> (8 * 0)) & 0xff
g = (color >> (8 * 1)) & 0xff
r = (color >> (8 * 2)) & 0xff
color = r, g, b
return _sf.Color(*color)
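# _to_actual_color accepts a named color, a packed 0xRRGGBB integer, or an
# (r, g, b) tuple. Sketch (requires pySFML to be importable):
#
#   _to_actual_color('red')
#   _to_actual_color(0xFF8800)
#   _to_actual_color((255, 136, 0))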
class Listener(object):
def update(self):
pass
def key_pressed(self, key):
pass
def key_released(self, key):
pass
class Window(object):
def __init__(self, size=(800, 600), title=__name__, fps=40, icon=None,
closable=True, resizable=False, mouse=True, vsync=True,
fullscreen=False, enable_key_repeat=False):
style = 0
if closable: style |= _sf.Style.CLOSE
if resizable: style |= _sf.Style.RESIZE
if fullscreen: style |= _sf.Style.FULLSCREEN
self._impl = _sf.RenderWindow(_sf.VideoMode(*size), title, style)
self._impl.key_repeat_enabled = enable_key_repeat
self._impl.framerate_limit = fps
self._impl.vertical_synchronization = vsync
self._impl.mouse_cursor_visible = mouse
if icon is not None:
icon_image = _sf.Image.from_file(icon)
self._impl.icon = icon_image.pixels
self.graphics = Graphics(self)
def mouse_position(self):
return _sf.Mouse.get_position(self._impl)
def set_active(self, active):
self._impl.active = active
def display(self):
self._impl.display()
def clear(self):
self._impl.clear()
def close(self):
self._impl.close()
def is_opened(self):
return self._impl.is_open
def draw(self, drawable):
drawable.render(self.graphics)
@property
def width(self):
return self._impl.width
    @width.setter
    def width(self, w):
        # The setter must reuse the property's name, or assignment to .width fails.
        self._impl.width = w
    @property
    def height(self):
        return self._impl.height
    @height.setter
    def height(self, h):
        self._impl.height = h
class App(Listener):
def __init__(self):
super(App, self).__init__()
self.windows = []
self.key_events = []
def bind_key_event(self, f, bind_key=None, on_press=True):
if not callable(f):
raise TypeError("Bind object '%s' should be callable" % f)
self.key_events.append((bool(on_press), bind_key, f))
    def key_event(self, bind_key=None, on_press=True):
        # Decorator form of bind_key_event; the decorated function is returned
        # unchanged, so functools.wraps is not needed here.
        def closure(f):
            self.bind_key_event(f, bind_key=bind_key, on_press=on_press)
            return f
        return closure
def is_pressed(self, key):
if key in _inverse_key_mapping:
kb = _sf.Keyboard
return any(kb.is_key_pressed(k) for k in _real_keys(key))
return False
def is_released(self, key):
if key in _inverse_key_mapping:
kb = _sf.Keyboard
return any(not kb.is_key_pressed(k) for k in _real_keys(key))
return False
def _key_events_bound_to(self, key, on_press):
def decide_if_callback(key, bind):
if bind is None:
return True
elif isinstance(bind, (tuple, list)):
return key in bind
elif isinstance(bind, str):
return key == bind
else:
raise TypeError("Invalid bind '%s'" % bind)
for p, bind, callback in self.key_events:
if p == on_press:
if decide_if_callback(key, bind):
callback()
def run(self):
while self.windows:
closed = []
for window in self.windows:
for event in window._impl.events:
if type(event) is _sf.CloseEvent:
window.close()
elif type(event) is _sf.KeyEvent:
if event.code not in _key_mapping:
continue
key = event.code
if event.pressed:
action = _key_mapping[key]
self._key_events_bound_to(action, True)
self.key_pressed(action)
else:
action = _key_mapping[key]
self._key_events_bound_to(action, False)
self.key_released(action)
self.update()
window.clear()
self.render_to(window)
window.display()
if not window.is_opened():
closed.append(window)
for window in closed:
self.windows.remove(window)
def create_window(self, dtype=Window, *args, **kwargs):
win = dtype(*args, **kwargs)
self.windows.append(win)
return win
def render_to(self, window):
pass
class Graphics(object):
def __init__(self, window):
self.window = window
self.graphic_states = []
self.push_state()
def _draw(self, drawable):
assert isinstance(drawable, _sf.Drawable)
self.window._impl.draw(drawable, self.graphic_states[-1])
def translate(self, x, y):
state = self.graphic_states[-1]
state.transform.translate(_sf.Vector2(x, y))
def push_state(self):
state = _sf.RenderStates()
if len(self.graphic_states) > 0:
old_state = self.graphic_states[-1]
state.transform.combine(old_state.transform)
self.graphic_states.append(state)
def pop_state(self):
self.graphic_states.pop()
if len(self.graphic_states) == 0:
self.push_state()
def draw(self, drawable):
drawable.render(self)
class Drawable(object):
def render(self, graphics):
msg = "'render' method should be defined in custom Drawables"
raise NotImplementedError(msg)
class Image(Drawable):
    def __init__(self, filename):
        super(Image, self).__init__()
tx = _sf.Texture.from_file(filename)
self._impl = _sf.Sprite(tx)
def render(self, graphics):
graphics._draw(self._impl)
size = property(lambda x: x._impl.size)
class StatesApp(App):
def __init__(self):
super(StatesApp, self).__init__()
self.states = {}
self.current = None
try:
self.init_states()
except NotImplementedError:
msg = 'Current state should be set in init_states'
raise NotImplementedError(msg)
def init_states(self):
raise NotImplementedError
def add_state(self, state_id, state):
if state_id in self.states:
raise KeyError('State %s already defined' % state_id)
self.states[state_id] = state
def switch_state(self, state_id):
if state_id not in self.states:
raise KeyError('Undefined state %s' % state_id)
s = self.states[state_id]
if self.current is not None:
self.current.leave()
self.current = s
self.current.enter()
self.current.alive = True
def set_state(self, state_id):
if state_id not in self.states:
raise KeyError('Undefined state %s' % state_id)
self.current = self.states[state_id]
self.current.enter()
def update(self):
self.current.update()
if not self.current.alive:
self.switch_state(self.current.next_state)
def render_to(self, window):
self.current.render_to(window)
class BaseState(Listener):
def __init__(self):
super(BaseState, self).__init__()
self.alive = True
self.next_state = None
def switch_state(self, state_id):
self.alive = False
self.next_state = state_id
def enter(self): pass
def leave(self): pass
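# Usage sketch (assumes pySFML is installed; class names taken from above):
#
# app = App()
# app.create_window(size=(640, 480), title="demo")
# app.run()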
|
# -*- coding: utf-8 -*-
from collections import Counter
from typing import List
class Solution:
def makeEqual(self, words: List[str]) -> bool:
counts = Counter()
for word in words:
counts.update(word)
return all(count % len(words) == 0 for count in counts.values())
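# Why this works: the words can be made equal iff every character's total count
# across all words is divisible by len(words). For ["abc", "aabc", "bc"] the
# pooled counts are a:3, b:3, c:3 with len(words) == 3, so each letter splits
# evenly into three copies of "abc".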
if __name__ == "__main__":
solution = Solution()
assert solution.makeEqual(["abc", "aabc", "bc"])
assert not solution.makeEqual(["ab", "a"])
|
# -*- coding: utf-8 -*-
"""Tests for v1 API viewsets."""
from __future__ import unicode_literals
from datetime import datetime
from json import dumps, loads
from pytz import UTC
import mock
from webplatformcompat.history import Changeset
from webplatformcompat.models import Browser, Feature, Version
from webplatformcompat.v2.viewsets import ViewFeaturesViewSet
from .base import APITestCase, NamespaceMixin
from ..test_viewsets import (
TestCascadeDeleteGeneric, TestUserBaseViewset, TestViewFeatureBaseViewset)
class TestBrowserViewset(APITestCase):
"""Test common viewset functionality through the browsers viewset."""
def test_get_browser_detail(self):
browser = self.create(
Browser,
slug='firefox',
name={'en': 'Firefox'},
note={'en': 'Uses Gecko for its web browser engine'})
url = self.full_api_reverse('browser-detail', pk=browser.pk)
response = self.client.get(url)
history_pk = browser.history.get().pk
expected_content = {
'links': {'self': url},
'data': {
'id': str(browser.pk),
'type': 'browsers',
'attributes': {
'slug': 'firefox',
'name': {'en': 'Firefox'},
'note': {'en': 'Uses Gecko for its web browser engine'},
},
'relationships': {
'versions': {
'data': [],
'links': {
'self': url + '/relationships/versions',
'related': url + '/versions',
},
},
'history_current': {
'data': {
'type': 'historical_browsers',
'id': str(history_pk),
},
'links': {
'self': url + '/relationships/history_current',
'related': url + '/history_current',
},
},
'history': {
'data': [
{'type': 'historical_browsers',
'id': str(history_pk)},
],
'links': {
'self': url + '/relationships/history',
'related': url + '/history',
},
}
},
},
}
actual_content = loads(response.content.decode('utf-8'))
self.assertDataEqual(expected_content, actual_content)
def test_get_browser_list(self):
firefox = self.create(
Browser,
slug='firefox', name={'en': 'Firefox'},
note={'en': 'Uses Gecko for its web browser engine'})
chrome = self.create(Browser, slug='chrome', name={'en': 'Chrome'})
url = self.full_api_reverse('browser-list')
response = self.client.get(url)
firefox_history_id = str(firefox.history.get().pk)
chrome_history_id = str(chrome.history.get().pk)
expected_content = {
'data': [
{
'links': {
'self': '%s/%s' % (url, firefox.pk),
},
'id': str(firefox.pk),
'type': 'browsers',
'attributes': {
'slug': 'firefox',
'name': {'en': 'Firefox'},
'note': {
'en': 'Uses Gecko for its web browser engine',
},
},
'relationships': {
'versions': {
'data': [],
},
'history_current': {
'data': {
'type': 'historical_browsers',
'id': firefox_history_id,
},
},
'history': {
'data': [
{'type': 'historical_browsers',
'id': firefox_history_id},
],
},
},
}, {
'links': {
'self': '%s/%s' % (url, chrome.pk),
},
                    'id': str(chrome.pk),
'type': 'browsers',
'attributes': {
'slug': 'chrome',
'name': {'en': 'Chrome'},
'note': None,
},
'relationships': {
'versions': {
'data': [],
},
'history_current': {
'data': {
'type': 'historical_browsers',
'id': chrome_history_id,
},
},
'history': {
'data': [
{'type': 'historical_browsers',
'id': chrome_history_id},
],
},
},
},
],
'links': {
'self': url,
'prev': None,
'next': None,
},
'meta': {
'count': 2,
},
}
actual_content = loads(response.content.decode('utf-8'))
self.assertDataEqual(expected_content, actual_content)
def test_get_browsable_api(self):
browser = self.create(Browser)
url = self.api_reverse('browser-list')
response = self.client.get(url, HTTP_ACCEPT='text/html')
history_pk = browser.history.get().pk
expected_data = {
'count': 1,
'previous': None,
'next': None,
'results': [{
'id': browser.pk,
'slug': '',
'name': None,
'note': None,
'history': [history_pk],
'history_current': history_pk,
'versions': [],
}]}
self.assertDataEqual(response.data, expected_data)
self.assertTrue(response['content-type'].startswith('text/html'))
def test_get_related_versions_empty(self):
browser = self.create(Browser)
url = self.full_api_reverse('browser-versions', pk=browser.pk)
response = self.client.get(url)
expected_content = {
'links': {
'self': url,
'next': None,
'prev': None,
},
'data': [],
'meta': {'count': 0}
}
actual_content = response.content.decode('utf-8')
self.assertJSONEqual(actual_content, expected_content)
def test_get_related_versions_populated(self):
browser = self.create(Browser)
version1 = self.create(Version, browser=browser, version='1.0')
url = self.full_api_reverse('browser-versions', pk=browser.pk)
response = self.client.get(url)
vhistory = str(version1.history.all()[0].history_id)
expected_content = {
'links': {
'self': url,
'next': None,
'prev': None,
},
'data': [
{
'links': {
'self': self.full_api_reverse(
'version-detail', pk=version1.pk)
},
'id': str(version1.pk),
'type': 'versions',
'attributes': {
'version': '1.0',
'status': 'unknown',
'release_notes_uri': None,
'release_day': None,
'retirement_day': None,
'note': None,
'order': 0,
},
'relationships': {
'browser': {
'data': {
'type': 'browsers',
'id': str(browser.pk),
},
},
'supports': {
'data': [],
},
'history_current': {
'data': {
'type': 'historical_versions',
'id': vhistory,
},
},
'history': {
'data': [{
'type': 'historical_versions',
'id': vhistory,
}],
},
},
},
],
'meta': {'count': 1},
}
actual_content = response.content.decode('utf-8')
self.assertJSONEqual(actual_content, expected_content)
def test_get_relationship_versions_empty(self):
browser = self.create(Browser)
url = self.full_api_reverse(
'browser-relationships-versions', pk=browser.pk)
response = self.client.get(url)
expected_content = {
'links': {
'self': url,
'related': self.full_api_reverse(
'browser-versions', pk=browser.pk)
},
'data': [],
}
actual_content = response.content.decode('utf-8')
self.assertJSONEqual(actual_content, expected_content)
def test_get_relationship_versions_populated(self):
browser = self.create(Browser)
version1 = self.create(Version, browser=browser, version='1.0')
url = self.full_api_reverse(
'browser-relationships-versions', pk=browser.pk)
response = self.client.get(url)
expected_content = {
'links': {
'self': url,
'related': self.full_api_reverse(
'browser-versions', pk=browser.pk)
},
'data': [
{'type': 'versions', 'id': str(version1.pk)},
],
}
actual_content = response.content.decode('utf-8')
self.assertJSONEqual(actual_content, expected_content)
def test_get_related_history_current(self):
browser = self.create(
Browser, slug='browser', name={'en': 'A Browser'},
_history_date=datetime(2015, 12, 23, 16, 40, 18, 648045, UTC))
browser_url = self.full_api_reverse('browser-detail', pk=browser.pk)
url = self.full_api_reverse('browser-history-current', pk=browser.pk)
history = browser.history.all()[0]
response = self.client.get(url)
self.assertEqual(200, response.status_code, url)
expected_content = {
'links': {
'self': url
},
'data': {
'id': str(history.history_id),
'type': 'historical_browsers',
'attributes': {
'event': 'created',
'date': self.dt_json(history.history_date),
'archive_data': {
'id': str(browser.id),
'type': 'browsers',
'attributes': {
'slug': 'browser',
'name': {'en': 'A Browser'},
'note': None,
},
'relationships': {
'history_current': {
'data': {
'type': 'historical_browsers',
'id': str(history.pk),
},
},
'versions': {'data': []}
},
'links': {'self': browser_url},
},
},
'relationships': {
'browser': {
'data': {'type': 'browsers', 'id': str(browser.pk)},
'links': {
'self': self.full_api_reverse(
'historicalbrowser-relationships-browser',
pk=history.pk),
'related': self.full_api_reverse(
'historicalbrowser-browser', pk=history.pk)
},
},
'changeset': {
'data': {
'type': 'changesets',
'id': str(history.history_changeset_id),
},
'links': {
'self': self.full_api_reverse(
'historicalbrowser-relationships-changeset',
pk=history.pk),
'related': self.full_api_reverse(
'historicalbrowser-changeset', pk=history.pk)
},
},
}
},
}
actual_content = response.content.decode('utf-8')
self.assertJSONEqual(actual_content, expected_content)
def test_get_related_history(self):
browser = self.create(
Browser, slug='browser', name={'en': 'A Browser'})
self.create(Browser, slug='other', name={'en': 'Other Browser'})
url = self.api_reverse('browser-history', pk=browser.pk)
history = browser.history.all()[0]
response = self.client.get(url)
self.assertEqual(200, response.status_code, url)
response_data = loads(response.content.decode('utf8'))
self.assertEqual(response_data['meta']['count'], 1)
self.assertEqual(
response_data['data'][0]['id'], str(history.history_id))
def test_post_minimal(self):
self.login_user()
data = {'slug': 'firefox', 'name': '{"en": "Firefox"}'}
response = self.client.post(self.api_reverse('browser-list'), data)
self.assertEqual(201, response.status_code, response.data)
browser = Browser.objects.get()
history_pk = browser.history.get().pk
expected_data = {
'id': browser.pk,
'slug': 'firefox',
'name': {'en': 'Firefox'},
'note': None,
'history': [history_pk],
'history_current': history_pk,
'versions': [],
}
self.assertDataEqual(response.data, expected_data)
@mock.patch('webplatformcompat.signals.update_cache_for_instance')
def test_put_as_json_api(self, mock_update):
"""If content is application/vnd.api+json, put is partial."""
browser = self.create(
Browser, slug='browser', name={'en': 'Old Name'})
data = dumps({
'data': {
'id': str(browser.pk),
'type': 'browsers',
'attributes': {
'name': {
'en': 'New Name'
}
}
}
})
url = self.api_reverse('browser-detail', pk=browser.pk)
mock_update.reset_mock()
response = self.client.put(
url, data=data, content_type='application/vnd.api+json')
self.assertEqual(200, response.status_code, response.data)
histories = browser.history.all()
expected_data = {
'id': browser.pk,
'slug': 'browser',
'name': {'en': 'New Name'},
'note': None,
'history': [h.pk for h in histories],
'history_current': histories[0].pk,
'versions': [],
}
self.assertDataEqual(response.data, expected_data)
mock_update.assert_has_calls([
mock.call('User', self.user.pk, mock.ANY),
mock.call('Browser', browser.pk, mock.ANY),
])
self.assertEqual(mock_update.call_count, 2)
@mock.patch('webplatformcompat.signals.update_cache_for_instance')
def test_put_in_changeset(self, mock_update):
browser = self.create(
Browser, slug='browser', name={'en': 'Old Name'})
changeset = Changeset.objects.create(user=self.user)
data = dumps({
'browsers': {
'name': {
'en': 'New Name'
}
}
})
url = self.api_reverse('browser-detail', pk=browser.pk)
url += '?use_changeset=%s' % changeset.pk
mock_update.reset_mock()
mock_update.side_effect = Exception('not called')
response = self.client.put(
url, data=data, content_type='application/vnd.api+json')
self.assertEqual(200, response.status_code, response.data)
def test_put_as_json(self):
"""If content is application/json, put is full put."""
browser = self.create(
Browser, slug='browser', name={'en': 'Old Name'})
data = {'name': '{"en": "New Name"}'}
url = self.api_reverse('browser-detail', pk=browser.pk)
response = self.client.put(url, data=data)
self.assertEqual(200, response.status_code, response.data)
histories = browser.history.all()
expected_data = {
'id': browser.pk,
'slug': 'browser',
'name': {'en': 'New Name'},
'note': None,
'history': [h.pk for h in histories],
'history_current': histories[0].pk,
'versions': [],
}
self.assertDataEqual(response.data, expected_data)
@mock.patch('webplatformcompat.signals.update_cache_for_instance')
def test_delete(self, mock_update):
self.login_user(groups=['change-resource', 'delete-resource'])
browser = self.create(Browser, slug='firesux', name={'en': 'Firesux'})
url = self.api_reverse('browser-detail', pk=browser.pk)
mock_update.reset_mock()
response = self.client.delete(url)
self.assertEqual(204, response.status_code, response.content)
self.assertFalse(Browser.objects.filter(pk=browser.pk).exists())
mock_update.assert_has_calls([
mock.call('User', self.user.pk, mock.ANY),
mock.call('Browser', browser.pk, mock.ANY),
])
self.assertEqual(mock_update.call_count, 2)
def test_delete_not_allowed(self):
self.login_user()
browser = self.create(
Browser, slug='browser', name={'en': 'Old Name'})
url = self.api_reverse('browser-detail', pk=browser.pk)
response = self.client.delete(url)
self.assertEqual(403, response.status_code)
expected_data = {
'detail': 'You do not have permission to perform this action.'
}
self.assertDataEqual(response.data, expected_data)
@mock.patch('webplatformcompat.signals.update_cache_for_instance')
def test_delete_in_changeset(self, mock_update):
self.login_user(groups=['change-resource', 'delete-resource'])
browser = self.create(
Browser, slug='internet_exploder',
name={'en': 'Internet Exploder'})
url = self.api_reverse('browser-detail', pk=browser.pk)
url += '?use_changeset=%d' % self.changeset.id
mock_update.reset_mock()
mock_update.side_effect = Exception('not called')
response = self.client.delete(url)
self.assertEqual(204, response.status_code, response.content)
self.assertFalse(Browser.objects.filter(pk=browser.pk).exists())
def test_options(self):
self.login_user()
browser = self.create(Browser)
url = self.api_reverse('browser-detail', pk=browser.pk)
response = self.client.options(url)
self.assertEqual(200, response.status_code, response.content)
expected_keys = {'actions', 'description', 'name', 'parses', 'renders'}
self.assertEqual(set(response.data.keys()), expected_keys)
def test_query_reserved_namespace_is_error(self):
"""Test that an unknown, lowercase query parameter is an error."""
url = self.api_reverse('browser-list')
response = self.client.get(url, {'foo': 'bar'})
self.assertEqual(400, response.status_code, response.content)
expected = {
'errors': [{
'status': '400',
'detail': 'Query parameter "foo" is invalid.',
'source': {'parameter': 'foo'}
}]
}
self.assertEqual(expected, loads(response.content.decode('utf8')))
def test_unreserved_query_is_ignored(self):
"""Test that unknown but unreserved query strings are ignored."""
url = self.api_reverse('browser-list')
params = {
'camelCase': 'ignored',
'hyphen-split': 'ignored',
'low_line': 'ignored',
}
response = self.client.get(url, params)
self.assertEqual(200, response.status_code, response.content)
self.assertEqual(0, response.data['count'])
def test_page_params_is_ok(self):
"""
Test that pagination params are OK.
bug 1243128 will change these to page[number] and page[size].
"""
for number in range(5):
self.create(Browser, slug='slug%d' % number)
url = self.api_reverse('browser-list')
pagination = {'page': 2, 'page_size': 2}
response = self.client.get(url, pagination)
self.assertEqual(200, response.status_code, response.content)
self.assertEqual(5, response.data['count'])
def assert_param_not_implemented(self, key, value):
"""Assert that a valid but optional parameter is not implemented."""
url = self.api_reverse('browser-list')
response = self.client.get(url, {key: value})
self.assertEqual(400, response.status_code, response.content)
expected = {
'errors': [{
'status': '400',
'detail': 'Query parameter "%s" is not implemented.' % key,
'source': {'parameter': key}
}]
}
self.assertEqual(expected, loads(response.content.decode('utf8')))
def test_param_include_not_implemented(self):
"""
Confirm parameter include is unimplemented.
TODO: bug 1243190, use param 'include' for included resources.
"""
self.assert_param_not_implemented('include', 'versions')
def test_param_fields_unimplemented(self):
"""
Confirm JSON API v1.0 parameter 'fields' is unimplemented.
TODO: bug 1252973, use param 'fields' for sparse fieldsets.
"""
self.assert_param_not_implemented('fields', 'name')
self.assert_param_not_implemented('fields[browsers]', 'slug,name')
def test_param_sort_unimplemented(self):
"""
Confirm JSON API v1.0 parameter 'sort' is unimplemented.
        TODO: use param 'sort' for sorting.
"""
self.assert_param_not_implemented('sort', 'name')
class TestFeatureViewSet(APITestCase):
"""Test FeatureViewSet."""
def test_filter_by_slug(self):
parent = self.create(Feature, slug='parent', name={'en': 'Parent'})
feature = self.create(
Feature, slug='feature', parent=parent, name={'en': 'A Feature'})
self.create(Feature, slug='other', name={'en': 'Other'})
response = self.client.get(
self.api_reverse('feature-list'), {'filter[slug]': 'feature'})
self.assertEqual(200, response.status_code, response.data)
self.assertEqual(1, response.data['count'])
self.assertEqual(feature.id, response.data['results'][0]['id'])
def test_filter_by_parent(self):
parent = self.create(Feature, slug='parent', name={'en': 'Parent'})
feature = self.create(
Feature, slug='feature', parent=parent, name={'en': 'A Feature'})
self.create(Feature, slug='other', name={'en': 'Other'})
response = self.client.get(
self.api_reverse('feature-list'),
{'filter[parent]': str(parent.id)})
self.assertEqual(200, response.status_code, response.data)
self.assertEqual(1, response.data['count'])
self.assertEqual(feature.id, response.data['results'][0]['id'])
def test_filter_by_no_parent(self):
parent = self.create(Feature, slug='parent', name={'en': 'Parent'})
self.create(
Feature, slug='feature', parent=parent, name={'en': 'The Feature'})
other = self.create(Feature, slug='other', name={'en': 'Other'})
response = self.client.get(
self.api_reverse('feature-list'), {'filter[parent]': ''})
self.assertEqual(200, response.status_code, response.data)
self.assertEqual(2, response.data['count'])
self.assertEqual(parent.id, response.data['results'][0]['id'])
self.assertEqual(other.id, response.data['results'][1]['id'])
def test_filter_by_unknown_param(self):
"""Test that filtering by an unknown parameter is an error."""
response = self.client.get(
self.api_reverse('feature-list'), {'filter[unknown]': 'value'})
self.assertEqual(400, response.status_code, response.data)
expected_content = {
'errors': [{
'detail': 'Unknown filter "unknown" requested.',
'status': '400'
}]
}
actual_content = response.content.decode('utf-8')
self.assertJSONEqual(actual_content, expected_content)
def test_get_related_parent_null(self):
feature = self.create(Feature)
url = self.full_api_reverse('feature-parent', pk=feature.pk)
response = self.client.get(url)
expected_content = {
'links': {'self': url},
'data': None,
}
actual_content = response.content.decode('utf-8')
self.assertJSONEqual(actual_content, expected_content)
def test_get_related_parent_set(self):
parent = self.create(Feature, slug='parent')
feature = self.create(Feature, slug='feature', parent=parent)
url = self.full_api_reverse('feature-parent', pk=feature.pk)
response = self.client.get(url)
phistory = str(parent.history.all()[0].history_id)
expected_content = {
'links': {
'self': url
},
'data': {
'id': str(parent.pk),
'type': 'features',
'attributes': {
'slug': 'parent',
'name': None,
'experimental': False,
'mdn_uri': None,
'obsolete': False,
'stable': True,
'standardized': True,
},
'relationships': {
'children': {
'data': [{
'type': 'features',
'id': str(feature.pk),
}],
'links': {
'self': self.full_api_reverse(
'feature-relationships-children',
pk=parent.pk),
'related': self.full_api_reverse(
'feature-children', pk=parent.pk),
},
},
'parent': {
'data': None,
'links': {
'self': self.full_api_reverse(
'feature-relationships-parent',
pk=parent.pk),
'related': self.full_api_reverse(
'feature-parent', pk=parent.pk),
},
},
'supports': {
'data': [],
'links': {
'self': self.full_api_reverse(
'feature-relationships-supports',
pk=parent.pk),
'related': self.full_api_reverse(
'feature-supports', pk=parent.pk),
},
},
'references': {
'data': [],
'links': {
'self': self.full_api_reverse(
'feature-relationships-references',
pk=parent.pk),
'related': self.full_api_reverse(
'feature-references', pk=parent.pk),
},
},
'history_current': {
'data': {
'type': 'historical_features',
'id': phistory,
},
'links': {
'self': self.full_api_reverse(
'feature-relationships-history-current',
pk=parent.pk),
'related': self.full_api_reverse(
'feature-history-current', pk=parent.pk),
},
},
'history': {
'data': [{
'type': 'historical_features',
'id': phistory,
}],
'links': {
'self': self.full_api_reverse(
'feature-relationships-history',
pk=parent.pk),
'related': self.full_api_reverse(
'feature-history', pk=parent.pk),
},
},
},
},
}
actual_content = response.content.decode('utf-8')
self.assertJSONEqual(actual_content, expected_content)
def test_get_relationship_parent_null(self):
feature = self.create(Feature)
url = self.full_api_reverse(
'feature-relationships-parent', pk=feature.pk)
response = self.client.get(url)
expected_content = {
'links': {
'self': url,
'related': self.full_api_reverse(
'feature-parent', pk=feature.pk)
},
'data': None,
}
actual_content = response.content.decode('utf-8')
self.assertJSONEqual(actual_content, expected_content)
def test_get_relationship_parent_set(self):
parent = self.create(Feature, slug='parent')
feature = self.create(Feature, slug='feature', parent=parent)
url = self.full_api_reverse(
'feature-relationships-parent', pk=feature.pk)
response = self.client.get(url)
expected_content = {
'links': {
'self': url,
'related': self.full_api_reverse(
'feature-parent', pk=feature.pk)
},
'data': {
'type': 'features', 'id': str(parent.pk),
},
}
actual_content = response.content.decode('utf-8')
self.assertJSONEqual(actual_content, expected_content)
def test_set_parent_to_null(self):
parent = self.create(Feature, slug='parent')
feature = self.create(Feature, slug='feature', parent=parent)
url = self.full_api_reverse('feature-detail', pk=feature.pk)
data = dumps({
'data': {
'id': str(feature.pk),
'type': 'features',
'relationships': {
'parent': {
'data': None
}
}
}
})
response = self.client.patch(
url, data=data, content_type='application/vnd.api+json')
self.assertEqual(200, response.status_code, response.content)
actual_data = loads(response.content.decode('utf-8'))
self.assertIsNone(
actual_data['data']['relationships']['parent']['data'])
def test_set_relationship_parent_to_null(self):
parent = self.create(Feature, slug='parent')
feature = self.create(Feature, slug='feature', parent=parent)
url = self.full_api_reverse(
'feature-relationships-parent', pk=feature.pk)
data = dumps({'data': None})
response = self.client.patch(
url, data=data, content_type='application/vnd.api+json')
self.assertEqual(200, response.status_code, response.content)
expected_content = {
'links': {
'self': url,
'related': self.full_api_reverse(
'feature-parent', pk=feature.pk)
},
'data': None,
}
actual_content = response.content.decode('utf-8')
self.assertJSONEqual(actual_content, expected_content)
def test_set_relationship_parent(self):
parent = self.create(Feature, slug='parent')
feature = self.create(Feature, slug='feature')
url = self.full_api_reverse(
'feature-relationships-parent', pk=feature.pk)
data = dumps({
'data': {
'type': 'features', 'id': str(parent.pk),
}
})
response = self.client.patch(
url, data=data, content_type='application/vnd.api+json')
self.assertEqual(200, response.status_code, response.content)
expected_content = {
'links': {
'self': url,
'related': self.full_api_reverse(
'feature-parent', pk=feature.pk)
},
'data': {
'type': 'features', 'id': str(parent.pk),
},
}
actual_content = response.content.decode('utf-8')
self.assertJSONEqual(actual_content, expected_content)
def test_set_children(self):
feature = self.create(Feature, slug='feature')
child1 = self.create(Feature, slug='child1', parent=feature)
child2 = self.create(Feature, slug='child2', parent=feature)
url = self.full_api_reverse('feature-detail', pk=feature.pk)
new_children = [
{'type': 'features', 'id': str(child2.pk)},
{'type': 'features', 'id': str(child1.pk)},
]
data = dumps({
'data': {
'id': str(feature.pk),
'type': 'features',
'relationships': {
'children': {
'data': new_children,
}
}
}
})
response = self.client.patch(
url, data=data, content_type='application/vnd.api+json')
self.assertEqual(200, response.status_code, response.content)
actual_data = loads(response.content.decode('utf-8'))
self.assertEqual(
actual_data['data']['relationships']['children']['data'],
new_children)
def test_set_relationship_children(self):
feature = self.create(Feature, slug='feature')
child1 = self.create(Feature, slug='child1', parent=feature)
child2 = self.create(Feature, slug='child2', parent=feature)
url = self.full_api_reverse(
'feature-relationships-children', pk=feature.pk)
new_children = [
{'type': 'features', 'id': str(child2.pk)},
{'type': 'features', 'id': str(child1.pk)},
]
data = dumps({'data': new_children})
response = self.client.patch(
url, data=data, content_type='application/vnd.api+json')
self.assertEqual(200, response.status_code, response.content)
expected = {
'data': new_children,
'links': {
'self': url,
'related': self.full_api_reverse(
'feature-children', pk=feature.pk)
}
}
self.assertJSONEqual(response.content.decode('utf8'), expected)
class TestHistoricalBrowserViewset(APITestCase):
"""Test common historical viewset functionality through browsers."""
def setUp(self):
self.browser = self.create(
Browser, slug='browser', name={'en': 'A Browser'},
_history_date=datetime(2014, 8, 25, 20, 50, 38, 868903, UTC))
self.history = self.browser.history.all()[0]
def test_get_historical_browser_detail(self):
url = self.full_api_reverse(
'historicalbrowser-detail', pk=self.history.pk)
response = self.client.get(url, HTTP_ACCEPT='application/vnd.api+json')
self.assertEqual(200, response.status_code, response.data)
expected_json = {
'links': {'self': url},
'data': {
'id': str(self.history.pk),
'type': 'historical_browsers',
'attributes': {
'date': self.dt_json(self.browser._history_date),
'event': 'created',
'archive_data': {
'id': str(self.browser.pk),
'type': 'browsers',
'attributes': {
'slug': 'browser',
'name': {'en': 'A Browser'},
'note': None,
},
'relationships': {
'history_current': {
'data': {
'type': 'historical_browsers',
'id': str(self.history.pk),
},
},
'versions': {'data': []},
},
'links': {
'self': self.full_api_reverse(
'browser-detail', pk=self.browser.pk)
}
},
},
'relationships': {
'browser': {
'data': {
'type': 'browsers', 'id': str(self.browser.pk)},
'links': {
'self': url + '/relationships/browser',
'related': url + '/browser',
},
},
'changeset': {
'data': {
'type': 'changesets',
'id': str(self.history.history_changeset_id),
},
'links': {
'self': url + '/relationships/changeset',
'related': url + '/changeset',
},
},
},
},
}
actual_json = loads(response.content.decode('utf-8'))
self.assertDataEqual(expected_json, actual_json)
def test_related_browser(self):
url = self.full_api_reverse(
'historicalbrowser-browser', pk=self.history.pk)
response = self.client.get(url, HTTP_ACCEPT='application/vnd.api+json')
self.assertEqual(200, response.status_code, response.data)
def test_relationships_browser(self):
url = self.full_api_reverse(
'historicalbrowser-relationships-browser', pk=self.history.pk)
response = self.client.get(url, HTTP_ACCEPT='application/vnd.api+json')
self.assertEqual(200, response.status_code, response.data)
class TestCascadeDelete(NamespaceMixin, TestCascadeDeleteGeneric):
"""Test cascading deletes."""
class TestUserViewset(NamespaceMixin, TestUserBaseViewset):
"""Test users/me UserViewSet."""
class TestViewFeatureViewset(NamespaceMixin, TestViewFeatureBaseViewset):
"""Test helper functions on ViewFeaturesViewSet."""
def setUp(self):
super(TestViewFeatureViewset, self).setUp()
self.view = ViewFeaturesViewSet()
|
#! /usr/bin/python3
from nes_py.wrappers import JoypadSpace
import gym_super_mario_bros
from gym_super_mario_bros.actions import SIMPLE_MOVEMENT
env = gym_super_mario_bros.make('SuperMarioBros-1-1-v0')
env = JoypadSpace(env, SIMPLE_MOVEMENT)
done = True
t = 0
def print_info(info, reward):
global t
t += 1
if not t % 100:
print(info, reward)
for step in range(5000):
if done:
state = env.reset()
    action = env.action_space.sample()
    state, reward, done, info = env.step(action)
print_info(info, reward)
env.render()
env.close()
|
import json, os, mysql.connector
from dotenv import load_dotenv
from mysql.connector import pooling, Error
load_dotenv()
connection_pool = pooling.MySQLConnectionPool(
    pool_name="attractionInfo",
    pool_size=5,
    host=os.getenv("DBhost"),
    user=os.getenv("DBuser"),
    password=os.getenv("DBpw"),
    database=os.getenv("DB")
)
mydb = connection_pool.get_connection()
mycursor = mydb.cursor(buffered=True)
filename = "taipei-attractions.json"
with open(filename) as file:
data = json.load(file)
landlist = data["result"]["results"]
# print(data["result"]["results"][0]["stitle"])
# print(data["result"]["results"][0])
for x in landlist:
getInfo = x["info"]
getStitle = x["stitle"]
getMRT = x["MRT"]
getCAT2 = x["CAT2"]
getFile = x["file"].split("http")
resFile = []
    for fileUrl in getFile:
        if fileUrl.endswith(("jpg", "JPG", "PNG", "png")):
            resFile.append("http" + fileUrl)
    resFile = ",".join(resFile)
getXbody = x["xbody"]
getAddress = x["address"]
getLongitude = x["longitude"]
getLatitude = x["latitude"]
mycursor.execute("INSERT INTO information (name, category, description, address, transport, mrt, latitude, longitude, images) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)",(getStitle, getCAT2, getXbody, getAddress, getInfo, getMRT, getLatitude, getLongitude, resFile))
mydb.commit()
mydb.close()
|
#!/usr/bin/python
import logging
import os
import glob
import xml.etree.cElementTree as ElementTree
logger = logging.getLogger("Notification")
QUEUE_ITEM_SEPERATOR = "###"
class QueueItem:
def __init__(self):
self.attempts = 0
self.filecreated = 0
self.case_id = ""
self.customer_id = 0
self.customer_name = ""
self.stage = 0
self.ticket_number = ""
def print_data(self):
logger.info("Queue Item: Customer: %s (%s), Attempts: %s, Ticket Number: %s" %
(self.customer_name, self.customer_id,
str(self.attempts), self.ticket_number))
class Queue:
def __init__(self, case_id, dir_path = None):
logger.info("Creating %s instance for incident id: %s." % (self.__class__.__name__, case_id))
self.data = QueueItem()
self.case_id = case_id
if not dir_path:
self.dir_path = os.path.join("data","queue")
else:
self.dir_path = dir_path
if not os.path.exists(self.dir_path):
os.makedirs(self.dir_path)
self.file_path = os.path.join(self.dir_path, str(case_id) + ".q")
self.load_data()
self.data.print_data()
# def get_queue_data(self, file_path):
# attempts = 0
# customer_id = 0
# customer_name = ""
# try:
# if os.path.exists(file_path):
# with open(file_path, "r") as casefile:
# line = casefile.readline()
# elements = line.split(QUEUE_ITEM_SEPERATOR)
# if(elements and len(elements) >= 3):
# if unicode(elements[0]).isnumeric():
# attempts = int(elements[0])
# else:
# logger.error("Garbage data (%s) found in queue file %s"
# % (line, file_path))
# if unicode(elements[1]).isnumeric():
# customer_id = int(elements[1])
# else:
# logger.error("Garbage data (%s) found in queue file %s"
# % (line, file_path))
# customer_name = elements[2]
#
# except Exception, e:
# logger.exception("Failed to get exception information for file %s" % file_path, e)
#
# return (attempts, customer_id, customer_name)
#
# def get_attempt(self, file_path):
# attempts = 0
# try:
# if os.path.exists(file_path):
# with open(file_path, "r") as casefile:
# line = casefile.readline()
# elements = line.split(QUEUE_ITEM_SEPERATOR)
# if(elements and len(elements) > 0):
# if unicode(elements[0]).isnumeric():
# attempts = int(elements[0])
# else:
# logger.error("Garbage data (%s) found in queue file %s"
# % (line, file_path))
# except Exception, e:
# logger.exception("Failed to get attempt details for file %s" % file_path, e)
#
# return attempts
def parse_node(self, element, item_text, default_value = ""):
item_node = element.findall(item_text)
if item_node and len(item_node) > 0:
return item_node[0].text
return str(default_value)
def load_data(self):
if not os.path.exists(self.file_path):
return
try:
            tree = ElementTree.parse(self.file_path)
            root_node = tree.getroot()
if not root_node:
return
self.data.attempts = int( self.parse_node(root_node, "attempt", 0) )
self.data.customer_id = self.parse_node(root_node, "cid")
self.data.customer_name = self.parse_node(root_node, "cname")
self.data.stage = int(self.parse_node(root_node, "stage", 0))
self.data.ticket_number = self.parse_node(root_node, "ticket")
self.data.filecreated = os.path.getctime(self.file_path)
self.data.case_id = self.case_id
        except Exception as ex:
logger.exception(ex)
def write_data(self):
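        # Serialize the queue item as a small XML document, e.g. (illustrative):
        # <queue><attempt>1</attempt><cid>42</cid><cname>Acme</cname>
        #        <stage>2</stage><ticket>T-100</ticket></queue>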
root = ElementTree.Element("queue")
ElementTree.SubElement(root, "attempt").text = str(self.data.attempts)
ElementTree.SubElement(root, "cid").text = str(self.data.customer_id)
ElementTree.SubElement(root, "cname").text = str(self.data.customer_name)
ElementTree.SubElement(root, "stage").text = str(self.data.stage)
ElementTree.SubElement(root, "ticket").text = str(self.data.ticket_number)
tree = ElementTree.ElementTree(root)
tree.write(self.file_path)
def add(self, customer_id, customer_name, ticket_number, stage):
logger.error("Adding Case (%d) for customer %s (%s) in queue at %s stage"
% (self.case_id, customer_name, customer_id, stage))
self.data.attempts = self.data.attempts + 1
self.data.customer_id = customer_id
self.data.customer_name = customer_name
self.data.stage = stage
self.data.ticket_number = ticket_number
self.write_data()
def remove(self, case_id):
logger.info("Removing case %s from queue." %
(self.case_id))
if os.path.exists(self.file_path):
os.remove(self.file_path)
|
'''
For all symbols used in Python, refer to https://shop.learncodethehardway.org/paid/python3/ex37.html
'''
|
import gym
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras import losses
import keras
model = Sequential([
Dense(32, input_shape=(84, )),
Activation('relu'),
Dense(10),
Activation('softmax'),
])
sgd = keras.optimizers.SGD(learning_rate=0.01, momentum=0.0, nesterov=False)
model.compile(loss=losses.mean_absolute_error, optimizer=sgd, metrics=['accuracy'])
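# Note: the model above is compiled but never trained or queried below;
# the loop simply samples random actions from the environment.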
env = gym.make('StarGunner-v0')
env = gym.wrappers.AtariPreprocessing(env)
env.reset()
space = env.action_space
obs = []
done = False
while not done:
env.render()
obv, reward, done, info = env.step(env.action_space.sample()) # take a random action
obs.append((obv, reward))
env.close()
|
import re
s = "The Rain in Spain"
search_pattern = r"The.*Spain$"
x = re.search(search_pattern, s)
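# re.search returns a Match object if the pattern occurs anywhere in the
# string (here: 'The' followed by anything, ending in 'Spain'), else None.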
print(x)
|
from common.run_method import RunMethod
import allure
@allure.step("用户/钉钉/h5免密登录")
def dingAuth_h5_periodicalAuth_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "用户/钉钉/h5免密登录"
url = f"/service-user/dingAuth/h5/periodicalAuth"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("教师基本功大赛/钉钉登录")
def dingAuth_h5_basicSkillAuth_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "教师基本功大赛/钉钉登录"
url = f"/service-user/dingAuth/h5/basicSkillAuth"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
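# Hypothetical usage sketch (the parameter name and response shape are
# illustrative, not taken from the actual service contract):
# res = dingAuth_h5_periodicalAuth_get(params={"authCode": "xxx"})
# print(res)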
|