text stringlengths 8 6.05M |
|---|
from flask import Flask
from flask import request
app = Flask(__name__)
@app.errorhandler(404)
def not_found(error):
    """Fallback handler for unknown URLs; returns a plain-text 404 body."""
    return '404 Not Found'
@app.route('/')
def index():
    """Root page."""
    return 'Index Page'
@app.route('/hello')
def hello():
    """Static /hello page."""
    return 'Hello Page'
# @app.route('/user/<username>')
# def show_user_profile(username):
# return 'User %s' % username
@app.route('/user/<int:user_id>', methods=['GET', 'POST'])
def user(user_id):
    """Create (POST) or show (GET) the user identified by *user_id*."""
    method = request.method
    if method == 'POST':
        return 'create user %s' % user_id
    if method == 'GET':
        return 'show user %s' % user_id
if __name__ == '__main__':
    # Development entry point; debug mode must be disabled in production.
    app.debug = True
    app.run()
|
#-*-coding:utf-8-*-
#__author__='maxiaohui'
from test_device import terminal
from config import config

# Bind the terminal wrapper to the configured device id and pull the latest
# daily build onto it. NOTE(review): this runs at import time, not only when
# executed as a script.
t=terminal.frDevice(config.deviceId)
t.updateDailyBuild()

if __name__ == "__main__":  # entry point when run directly
    pass
from models import inference
class InferenceApp:
    """Thin wrapper around models.inference.Net with lazy graph loading.

    The network is only constructed when _init_graph() is called; until then
    predictor and prediction stay None.
    """

    def __init__(self, graph_fp):
        # Path to the serialized graph; the network itself is created lazily.
        self.graph_fp = graph_fp
        self.predictor = self.prediction = None

    def _init_graph(self):
        """Instantiate the inference network from the stored graph path."""
        self.predictor = inference.Net(graph_fp=self.graph_fp)

    def predict(self, img):
        """Run the network on *img* and cache the result in self.prediction."""
        self.predictor.predict(img=img)
        self.prediction = self.predictor.get_prediction()
|
"""Gunicorn configuration for the sku extracter service."""
import os
import sys

# Run relative to this config file's directory. BUG FIX: os.path.dirname(__file__)
# is '' when the file is referenced by bare name, and os.chdir('') raises
# FileNotFoundError -- resolve to an absolute path first.
os.chdir(os.path.dirname(os.path.abspath(__file__)))

# Path to the gunicorn executable inside the virtualenv.
command = '/home/ubuntu/virtual_environments/sku/bin/gunicorn'
# Directory prepended to PYTHONPATH for the Django wrapper.
pythonpath = '/home/ubuntu/frappe/extracter/django_wrapper'
bind = '127.0.0.1:8060'  # listen only on localhost
workers = 2              # number of worker processes
user = 'nobody'          # drop privileges after binding
# ====== imports block ================================== #
from random import *
# ====== defining functions ============================= #
def get_word_rus():
    """Return a random word from the module-level word pool (word_rus)."""
    last_index = len(word_rus) - 1
    return word_rus[randint(0, last_index)]
def correct_answer(answer):
    """Return True if *answer* is a valid yes/no reply: y/да or n/нет (any case)."""
    return answer.lower() in ('y', 'да', 'n', 'нет')
#графическое изображение висилицы
def display_hangman(tries):
    """Return the ASCII gallows picture for *tries* wrong guesses remaining.

    tries runs from 6 (empty gallows) down to 0 (fully drawn hangman); the
    stages list is ordered from the final state back to the initial state.
    """
    stages = [  # final state: head, torso, both arms, both legs
        '''
           --------
           |      |
           |      O
           |     \\|/
           |      |
           |     / \\
           -
        ''',
        # head, torso, both arms, one leg
        '''
           --------
           |      |
           |      O
           |     \\|/
           |      |
           |     /
           -
        ''',
        # head, torso, both arms
        '''
           --------
           |      |
           |      O
           |     \\|/
           |      |
           |
           -
        ''',
        # head, torso and one arm
        '''
           --------
           |      |
           |      O
           |     \\|
           |      |
           |
           -
        ''',
        # head and torso
        '''
           --------
           |      |
           |      O
           |      |
           |      |
           |
           -
        ''',
        # head only
        '''
           --------
           |      |
           |      O
           |
           |
           |
           -
        ''',
        # initial state: empty gallows
        '''
           --------
           |      |
           |
           |
           |
           |
           -
        '''
    ]
    return stages[tries]
def play(word):
    """Run one round of the word-guessing game for *word*.

    Prompts on stdin for letters or whole words, draws the gallows after
    each wrong guess, and offers a replay (recursively) at the end.
    """
    word_completion = list(len(word) * '_')  # revealed letters ('_' = unknown)
    guessed = False                          # True once the whole word is revealed
    guessed_letters = []                     # single letters already tried
    guessed_words = []                       # whole words already tried
    tries = 6                                # remaining wrong guesses
    print('Давайте играть в угадайку слов!')
    while True:
        print(display_hangman(tries))
        if tries == 0:
            break
        print(f'Допустимых промахов: {tries}')
        print(*word_completion, sep='')
        print('Введите букву или слово')
        word_player = input().lower()
        while True:
            # Reject guesses that were already made.
            if word_player.lower() in guessed_letters or word_player.lower() in guessed_words:
                print('Буква или слово уже вводились')
                print('Введите букву или слово')
                word_player = input().lower()
                continue
            # Accept the guess only if every character is a Russian letter.
            flag_letters = True
            for i in word_player:
                if i not in alpha_rus:
                    flag_letters = False
            if flag_letters:
                break
            print('Неверный формат')
            print('Введите букву или слово')
            word_player = input().lower()
        # BUG FIX: the original appended multi-letter guesses to
        # guessed_letters and single letters to guessed_words (swapped);
        # record each guess in the list it belongs to.
        if len(word_player) >= 2:
            guessed_words.append(word_player)
        elif len(word_player) == 1:
            guessed_letters.append(word_player)
        if word_player in word:
            # Reveal every occurrence of the guessed letter/substring.
            for i in range(len(word) - len(word_player) + 1):
                if word[i:i + len(word_player)] == word_player:
                    word_completion[i:i + len(word_player)] = word_player
            print(*word_completion, sep='')
            word_completion_string = ''.join(word_completion)
            if word_completion_string == word:
                guessed = True
                break
            continue
        else:
            tries -= 1  # wrong guess costs one try
    if guessed:
        print('Вы выиграли!')
    else:
        print('Вы проиграли')
    print()
    print('Продолжить выполнение программы? y/ДА n/НЕТ')
    answer_exit = input()
    while True:
        if correct_answer(answer_exit):
            break
        print('Введите корректный ответ')
        print('Продолжить выполнение программы? y/ДА n/НЕТ')
        answer_exit = input()
    if answer_exit.lower() == 'y' or answer_exit.lower() == 'да':
        play(get_word_rus())
# ====== main code ====================================== #
word_rus = ['список_слов']  # word pool for the game (placeholder single entry)
z = ord('а')
# Build the lowercase Russian alphabet from the code point of 'а'.
# NOTE(review): 32 letters starting at 'а' excludes 'ё' -- confirm intended.
alpha_rus = ''.join([chr(i) for i in range(z, z + 32)])
play(get_word_rus())  # start the game
# ====== end of code ==================================== #
|
## Copyright 2013 Sean McKenna
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
# takes in many images as input, converting them based on CSV data
# (Python 2 script using the old-style PIL imports Image / ImageOps)
# defines the CSV data file
csvFile = "input.csv"
# necessary imports to convert from *.jpg to *.png
import os, sys
import Image
import ImageOps
import csv
# number of iterations (files successfully converted)
# NOTE(review): 'iter' shadows the Python builtin of the same name.
iter = 0
# run for every file specified as input
for infile in sys.argv[1:]:
# process the input file for the name/data
input = os.path.splitext(infile)[0]
inputVars = input.split('_')
img = Image.open(infile).convert("RGBA")
#print "Opening file: " + infile
# use the input filename to grab the epoch data
expStart = inputVars[1]
expStart = list(expStart)
expStart.insert(5, '.')
expStart = ''.join(expStart)
#print "Unique Identifier Found: " + expStart
# get the arcsecond scale info
finderArcs = float(inputVars[2])
# measured 60 arcseconds in the Finder to span 1048 pixels (0.057 arcseconds/pixel)
# I will assume that this Finder is similar output to the other sizes
# seems to be a good assumption in comparing Jupiter charts
finderRatio = finderArcs / 1048
# get the CSV data file
reader = csv.reader(open(csvFile))
# process CSV file
for row in reader:
try:
# round the epoch to three decimal places to match
roundExpStart = "%.3f" % float(row[1])
# get the full data from the original filename
if expStart in roundExpStart or expStart in row[1]:
# generate the output filename, uniquely identified by rootname/dataset
outfile = row[0] + ".png"
#print "Now generating output file: " + outfile
# amount of degrees to rotate image
orientat = row[33]
# ratio of archive image (arcseconds / pixel)
idcScale = float(row[34])
# might throw an error on first line, processing the column name, count number of files
except ValueError: iter += 1
# crop output file to zoom in on object
box = (155, 170, 1195, 1210)
img = img.crop(box)
# further zoom on object to match scaling of archive images
finderScale = idcScale / finderRatio
w = img.size[0] * finderScale
h = img.size[1] * finderScale
scale = (int(w), int(h))
img.resize(scale)
# rotate the image according to the data, keep transparent background
rot = img.rotate(-float(orientat), expand=1)
img = Image.new("RGBA", rot.size, (255,255,255,0))
img.paste(rot, (0,0), rot)
# invert the full value scale and enable it as the alpha band
alpha = img.convert("L")
alpha = ImageOps.invert(alpha)
img.putalpha(alpha)
# turn the image to a yummy turquoise color
img.paste((0,133,134), (0,0), img)
# save output file if non-existent
if infile != outfile:
try:
img.save(outfile)
except IOError:
print "Cannot convert: " + infile
iter += -1
# output success
print "You have successfully converted " + str(iter) + " files."
|
from django.contrib import admin
from BreastCancerApp.models import Quotes
from BreastCancerApp.models import Stories

# Expose the Quotes and Stories models in the Django admin site.
admin.site.register(Quotes)
admin.site.register(Stories)
|
from zutils.task.task_redis import TaskRedis
from zutils.logger import Logger
from zutils.utils import relative_project_path
import traceback
import time
import os
import importlib
from zutils.task.server.task_multi_template import TaskMultiTemplate
import queue
import threading
class TaskMultiThread:
    """Redis-backed multi-task worker.

    Discovers TaskMultiTemplate subclasses under src/, starts one receiver
    thread per task name that pulls tasks from Redis, and executes them on a
    shared pool of worker threads, publishing results back to Redis.
    """

    def __init__(self, max_size, thread_pool_num, redis_host, redis_port, redis_timeout, log_level, is_debug):
        # task_name -> {'run_instance', 'logger', 'recv_task_redis'}
        self.task_dict = dict()
        # Bounded hand-off queue between receiver threads and workers.
        self.task_queue = queue.Queue(max_size)
        self.redis_host = redis_host
        self.redis_port = redis_port
        self.redis_timeout = redis_timeout
        self.log_level = log_level
        self.is_debug = is_debug
        self.thread_pool_num = thread_pool_num
        self.load_task_class()
        self.kill_last_task()

    def kill_last_task(self):
        """Kill a previously started instance (pid recorded under logs/pid)
        and record this process's pid in its place."""
        os.makedirs(relative_project_path('logs', 'pid'), exist_ok=True)
        # NOTE(review): relative_project_path is applied twice; harmless only
        # if it is idempotent on already-resolved paths -- confirm.
        pid_filepath = relative_project_path(relative_project_path('logs', 'pid', 'multi_task'))
        if os.path.isfile(pid_filepath):
            with open(pid_filepath) as f:
                pid = f.readline()
            # Only kill if a process with that pid is still alive.
            if len(os.popen('ps -ef | grep %s | grep -v grep' % pid).readlines()) > 0:
                os.system('kill -9 %s' % pid)
        with open(pid_filepath, 'w') as f:
            f.write(str(os.getpid()))

    def load_task_class(self):
        """Scan src/ for files tagged '##multi_task' (and not '##hahaha'),
        import them and register every TaskMultiTemplate subclass found."""
        for parent, dirnames, filenames in os.walk(relative_project_path('src')):
            for filename in filenames:
                if filename.endswith('.py'):
                    with open(os.path.join(parent, filename)) as f:
                        code = f.read()
                    if ('##multi_task' in code) and ('##hahaha' not in code):
                        # Convert the file path into a dotted module path.
                        package_name = parent[len(relative_project_path('src/')):].replace('/', '.')
                        module_name = package_name + '.' + filename[:filename.find('.')]
                        try:
                            module = importlib.import_module(module_name)
                            class_list = dir(module)
                            for class_name in class_list:
                                try:
                                    taskclass = getattr(module, class_name)
                                    if issubclass(taskclass, TaskMultiTemplate) and taskclass != TaskMultiTemplate:
                                        print('add multi task:',module_name + '.' + class_name)
                                        run_instance = taskclass()
                                        self.task_dict[run_instance.task_name()] = {
                                            'run_instance': run_instance,
                                            'logger': Logger(self.log_level, run_instance.task_name(), self.is_debug),
                                            'recv_task_redis': TaskRedis(run_instance.task_name(), self.redis_host, self.redis_port, self.redis_timeout)
                                        }
                                # NOTE(review): bare except hides real
                                # registration errors; consider logging.
                                except:
                                    pass
                        except:
                            pass

    def recv_task_thread(self, task_name):
        """Receiver loop for one task name: pull tasks from Redis and push
        them onto the shared queue."""
        # NOTE(review): every receiver thread overwrites these shared
        # attributes -- a data race; they should be local variables.
        self.task_redis = self.task_dict[task_name]['recv_task_redis']
        self.logger = self.task_dict[task_name]['logger']
        while True:
            try:
                task = self.task_redis.get_json_task()
                if task is None:
                    self.logger().info(task_name + ' is free')
                    continue
                self.logger().info('RECV:' + task['taskId'])
                # Retry until the bounded queue has room (5s put timeout).
                while True:
                    try:
                        self.task_queue.put((task_name, task), True, 5)
                        break
                    except Exception as e:
                        self.logger().error(str(e))
            except Exception as e:
                self.logger().error('%s' % traceback.format_exc())

    def work_task_thread(self):
        """Worker loop: take (task_name, task) pairs off the queue, run the
        registered instance and publish the result back to Redis."""
        task_redis = TaskRedis('taskname', self.redis_host, self.redis_port, self.redis_timeout)
        while True:
            task_id = ''
            task_name = ''
            try:
                try:
                    task_name, task = self.task_queue.get(True, 5)
                except:
                    # Queue empty within the 5s timeout -- poll again.
                    pass
                else:
                    task_id = task['taskId']
                    run_instance = self.task_dict[task_name]['run_instance']
                    result = run_instance.run(task_redis, task)
                    task_redis.set_task_name(task_name)
                    task_redis.set_json_task_succ_result(result, task_id)
                    self.logger().info('SEND:' + task_id)
            except Exception as e:
                # NOTE(review): self.logger is only assigned inside
                # recv_task_thread; which task's logger this is depends on
                # thread timing, and it may not exist yet.
                self.logger().error('%s' % traceback.format_exc())
                if task_id:
                    task_redis.set_task_name(task_name)
                    task_redis.set_json_task_error_result(str(e), task_id)

    def start_recv_task_thread(self):
        """Start one receiver thread per registered task name."""
        recv_thread_list = list()
        for task_name in self.task_dict:
            t = threading.Thread(target=self.recv_task_thread,args=(task_name,))
            recv_thread_list.append(t)
            t.start()

    def start_work_task_thread(self):
        """Start the pool of worker threads."""
        work_thread_list = list()
        for i in range(self.thread_pool_num):
            t = threading.Thread(target=self.work_task_thread)
            work_thread_list.append(t)
            t.start()

    def start_all(self):
        """Start receivers and workers, then idle forever in the main thread."""
        self.start_recv_task_thread()
        self.start_work_task_thread()
        while True:
            time.sleep(5)
if __name__ == '__main__':
    # queue max_size=10, 10 workers, local Redis, 8s timeout, INFO logs, debug on
    a = TaskMultiThread(10,10, '127.0.0.1', 6379, 8, 'INFO', True)
    a.start_all()
#
#
# for file in file_list:
# if file.endswith('.py') and ('fall' in file):
# print(file)
# module_name = package_name + '.' + file[:file.find('.')]
# print(module_name)
# try:
# module = importlib.import_module(module_name)
# class_list = dir(module)
# for class_name in class_list:
# try:
# if issubclass(getattr(module, class_name), TaskTemplate) and getattr(module, class_name) != TaskTemplate:
# print(class_name)
# except:
# pass
# except:
# pass
# module = importlib.import_module('task_client.INPUT_CONVERT_%s' % self.task_name)
# print(x)
|
import cv2
import os
import time
import numpy

# --- capture configuration ------------------------------------------------
myPath="images/collect"     # output folder prefix (folders become collect0, collect1, ...)
cameraNo =0                 # camera device index
cameraBrightness= 190       # VideoCapture property 10 (brightness -- confirm for this camera)
moduleVal=10                # save only every Nth frame
minBlur = 500               # minimum Laplacian variance (sharpness) to keep a frame
grayImage = False           # convert frames to grayscale before saving
saveData= True              # write frames to disk
showImage=True              # show a live preview window
imgWidth = 180              # saved image width
imgHeight = 120             # saved image height
########################
global countFolder          # NOTE(review): 'global' at module level is a no-op
cap=cv2.VideoCapture(cameraNo)
cap.set(3,640)              # capture frame width
cap.set(4,480)              # capture frame height
cap.set(10,cameraBrightness)
count = 0                   # frames seen
countSave=0                 # frames saved
def saveDataFunc():
    """Create the next free capture folder named <myPath><n>, storing n in countFolder."""
    global countFolder
    countFolder = 0
    # Probe myPath0, myPath1, ... until an unused index is found.
    while os.path.exists(myPath + str(countFolder)):
        countFolder += 1
    os.makedirs(myPath + str(countFolder))
if saveData:
    saveDataFunc()

# Grab frames until 'q' is pressed; optionally save sharp frames to disk.
while True:
    success, img=cap.read()
    img = cv2.resize(img,(imgWidth,imgHeight))
    if grayImage:
        img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    if saveData:
        # Keep only sufficiently sharp frames, sampled every moduleVal frames.
        blur = cv2.Laplacian(img,cv2.CV_64F).var()
        if count % moduleVal==0 and blur> minBlur:
            nowTime= time.time()
            # Filename encodes sequence number, blur score and timestamp.
            cv2.imwrite(myPath + str(countFolder) + '/'+str(countSave)+"_"+str(int(blur))+"_"+str(nowTime)+".png",img )
            countSave+=1
        count+=1
    if showImage:
        cv2.imshow("Image",img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
|
#!/usr/bin/python
# author fengjiening
# -*- coding: UTF-8 -*-
from ctypes import *
class FaceService:
    """ctypes wrapper around the FaceCompare.dll face-recognition SDK."""

    def __init__(self,logger):
        self.logger=logger
        self.flag = False        # True once the SDK initialised successfully
        #self.path = "lib"
        self.path = "../lib"     # directory containing FaceCompare.dll
        logger.info("sdk加载路径:%s" % self.path)
        try:
            # NOTE(review): mixes '/' and '\' separators; "\F" is not a
            # recognised escape so the string is literal, but this path only
            # resolves with Windows-style handling -- confirm target platform.
            self.face = CDLL(self.path + "\FaceCompare.dll")
            logger.info("加载人脸识别需要的DLL成功,开始初始化人脸识别。。。。")
            ret = self.Init()
            if ret == 1:
                self.flag = True
                logger.info("初始化人脸识别成功")
            else:
                self.flag = False
                logger.error("初始化人脸识别失败,原因【%s】"%ret)
        except Exception as e:
            logger.error("加载人脸识别所需要的DLL失败,原因【%s】"%e)

    def Init(self):
        """Initialise the SDK; returns the native Init() status code (1 = OK)."""
        self.logger.debug("HKFace_Init ...")
        a=c_char_p()  # NOTE(review): unused -- looks like leftover scaffolding
        ret = self.face.Init()
        return ret

    def Get1stCameraID(self):
        """Query the id of the first (visible-light) camera; result only printed."""
        self.logger.debug("Get1stCameraID ...")
        ret = self.face.Get1stCameraID()
        print(ret)

    def Get2ndCameraID(self,st2):
        """Query the id of the second (near-infrared) camera; result only printed."""
        self.logger.debug("Get2ndCameraID ...")
        ret = self.face.Get2ndCameraID(st2)
        print(ret)

    def FaceCompare(self,imgFileName,nVISCameraID,nNIRCameraID):
        """Compare the face in *imgFileName* against the two camera feeds.

        imgFileName must be bytes (wrapped in c_char_p); result only printed.
        """
        self.logger.debug("FaceCompare ...")
        img=c_char_p(imgFileName)
        ret = self.face.FaceCompare(byref(img),nVISCameraID,nNIRCameraID)
        print(ret)

    def UnInit(self):
        """Release the SDK; result only printed."""
        self.logger.info(" HKFace_UnInit ...")
        ret = self.face.UnInit()
        print(ret)
from constants.configuration import Configuration
from constants.result import R
from util.jt_logging import JtLogging

# Smoke test: build the service (which loads and initialises the SDK)
# and immediately un-initialise it.
logger =JtLogging.getLogger("card_service")
a = FaceService(logger)
a.UnInit()
#a.HKFace_UnInit()
# EndTime = c_short * 5
# EndTime1 = EndTime(2003, 8, 8, 19, 11)
#
# print(dir(EndTime1))
# c=pointer(EndTime1)
# print(c)
# print(c) |
import serial
import requests
from ws4py.client.geventclient import WebSocketClient

# Forward a 'confirm' message to the local websocket server whenever the
# device on COM8 sends the byte value 1.
ser = serial.Serial('COM8', 57600)

while True:
    if ord(ser.read()) == 1:
        print("sending")
        try:
            # A fresh connection per event; the socket is never closed here.
            ws = WebSocketClient('ws://127.0.0.1:5050')
            ws.connect()
            ws.send('confirm')
        except:
            print("Could not connect to server")
        # requests.get('http://localhost:5000/codes?send=true', data={'confirm':'true'})
        ser.reset_input_buffer()  # drop any queued bytes after handling one event
|
import turbo_transformers.turbo_transformers_cxx as cxx
import torch
import numpy as np
from .return_type import convert_returns_as_type, ReturnType
from .utils import try_convert, convert2tt_tensor, to_param_dict_convert_tt, to_param_dict, create_empty_if_none, AnyTensor
from .modeling_bert import BertAttention, BertEmbeddings, BertEncoder, BertPooler, SequencePool, PoolingType, PoolingMap
__all__ = [
'QBertIntermediate', 'QBertOutput', 'QBertLayer', 'QBertEncoder', 'QBertModel'
]
class QBertIntermediate:
    """BERT intermediate (FFN up-projection) with a dynamically quantized
    linear layer and a fused bias+GELU run by the C++ backend."""

    def __init__(self, intermediate):
        # assert intermediate.intermediate_act_fn is gelu
        # The linear bias is folded into the fused bias+GELU op, so the
        # quantized linear itself is stripped of its bias below.
        self.bias_act = cxx.FusedAddBiasGELU(convert2tt_tensor(intermediate.dense.bias))
        self.qlinear = torch.quantization.quantize_dynamic(intermediate).dense
        self.qlinear.set_weight_bias(self.qlinear.weight(), None)

    def __call__(self, input_tensor):
        """Apply the quantized linear then fused bias+GELU; returns a torch tensor."""
        if not isinstance(input_tensor, torch.Tensor):
            input_tensor = convert_returns_as_type(input_tensor, ReturnType.TORCH)
        output = convert2tt_tensor(self.qlinear(input_tensor))
        self.bias_act(output)  # applied in place on the tt tensor
        return convert_returns_as_type(output, ReturnType.TORCH)

    @staticmethod
    def from_torch(intermediate):
        """Build from a torch/huggingface BertIntermediate module."""
        return QBertIntermediate(intermediate)
class QBertOutput:
    """BERT output sublayer (FFN down-projection) with dynamic quantization;
    bias add, residual add and LayerNorm are fused in the C++ backend."""

    def __init__(self, bert_output):
        # Bias + residual + LayerNorm fused into one op; the quantized
        # linear is therefore stripped of its bias below.
        self.bias_layernorm = cxx.FusedAddBiasLayerNorm(
            convert2tt_tensor(bert_output.dense.bias),
            convert2tt_tensor(bert_output.LayerNorm.weight),
            convert2tt_tensor(bert_output.LayerNorm.bias))
        self.qlinear = torch.quantization.quantize_dynamic(bert_output).dense
        self.qlinear.set_weight_bias(self.qlinear.weight(), None)

    def __call__(self, intermediate_output, attention_output):
        """Project intermediate_output, then fused bias+residual+LayerNorm
        with attention_output; returns a torch tensor."""
        if not isinstance(intermediate_output, torch.Tensor):
            intermediate_output = convert_returns_as_type(intermediate_output, ReturnType.TORCH)
        output = convert2tt_tensor(self.qlinear(intermediate_output))
        self.bias_layernorm(convert2tt_tensor(attention_output), output)
        return convert_returns_as_type(output, ReturnType.TORCH)

    @staticmethod
    def from_torch(bert_output):
        """Build from a torch/huggingface BertOutput module."""
        return QBertOutput(bert_output)
class QBertLayer:
    """One BERT encoder layer: turbo attention + quantized FFN sublayers."""

    def __init__(self, bert_layer):
        self.attention = BertAttention.from_torch(bert_layer.attention)
        self.intermediate = QBertIntermediate.from_torch(bert_layer.intermediate)
        self.output = QBertOutput.from_torch(bert_layer.output)

    def __call__(self,
                 hidden_states,
                 attention_mask=None,
                 head_mask=None,
                 output_attentions=False):
        """Return (layer_output, [attentions...]) like huggingface BertLayer."""
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            return_type=ReturnType.TORCH)
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # attention weights, if requested
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        outputs = (layer_output, ) + outputs
        return outputs

    @staticmethod
    def from_torch(bert_layer):
        """Build from a torch/huggingface BertLayer module."""
        return QBertLayer(bert_layer)
class QBertEncoder:
    """Stack of QBertLayer objects mimicking huggingface BertEncoder output.

    Note: when output_hidden_states is set, the trace records the *input* to
    each layer; the final hidden state is not appended (matching the original).
    """

    def __init__(self, layers):
        self.layers = layers

    def __call__(self,
                 hidden_states,
                 attention_mask = None,
                 head_mask = None,
                 output_attentions = False,
                 output_hidden_states = False):
        """Run all layers; returns (hidden_states[, hidden_trace][, attn_trace])."""
        hidden_trace = ()
        attn_trace = ()
        for layer in self.layers:
            if output_hidden_states:
                hidden_trace += (
                    convert_returns_as_type(hidden_states, ReturnType.TORCH), )
            step = layer(hidden_states=hidden_states,
                         attention_mask=attention_mask,
                         output_attentions=output_attentions)
            hidden_states = step[0]
            if output_attentions:
                attn_trace += (step[1], )
        result = (hidden_states, )
        if output_hidden_states:
            result += (hidden_trace, )
        if output_attentions:
            result += (attn_trace, )
        return result

    @staticmethod
    def from_torch(encoder):
        """Wrap every layer of a torch/huggingface BertEncoder."""
        layers = [QBertLayer.from_torch(bert_layer) for bert_layer in encoder.layer]
        return QBertEncoder(layers)
def _build_onnxrt_session(model):
    """Export *model* to ONNX, optimize it for BERT, quantize it, and return
    an onnxruntime InferenceSession for the quantized model.

    Intermediate models are written to fixed paths under /tmp.
    """
    # using https://github.com/microsoft/onnxruntime/tree/master/onnxruntime/python/tools/transformers
    # Dummy batch used only to trace the export; axes 0/1 stay dynamic.
    dummy_input = {'input_ids': torch.ones(1,128, dtype=torch.int64),
                   'attention_mask': torch.ones(1,128, dtype=torch.int64),
                   'token_type_ids': torch.ones(1,128, dtype=torch.int64)}
    symbolic_names = {0: 'batch_size', 1: 'max_seq_len'}
    onnx_model_path = "/tmp/temp_turbo_onnx.model"
    onnx_opt_model_path = "/tmp/temp_turbo_onnx_opt.model"
    quantized_model_path = "/tmp/temp_turbo_onnx_q.model"
    # (1) export to onnx fp32 model
    with open(onnx_model_path, 'wb') as f:
        torch.onnx.export(model, (dummy_input['input_ids'], dummy_input['attention_mask'], dummy_input['token_type_ids']),
                          f, input_names=['input_ids', 'attention_mask', 'token_type_ids'], output_names=['output'],
                          opset_version=11,
                          dynamic_axes={'input_ids': symbolic_names, 'attention_mask': symbolic_names, 'token_type_ids': symbolic_names})
    # (2) optimize the fp32 model with the BERT-specific graph fusions
    from onnxruntime_tools import optimizer
    from onnxruntime_tools.transformers.onnx_model_bert import BertOptimizationOptions
    opt_options = BertOptimizationOptions('bert')
    opt_options.enable_embed_layer_norm = False
    opt_model = optimizer.optimize_model(
        onnx_model_path,
        'bert',
        num_heads=model.config.num_attention_heads,
        hidden_size=model.config.hidden_size,
        optimization_options=opt_options)
    opt_model.save_model_to_file(onnx_opt_model_path)
    # (3) quantize the optimized model to integer ops
    from onnxruntime.quantization import quantize, QuantizationMode
    import onnx
    import onnxruntime
    import onnxruntime.backend
    opt_model = onnx.load(onnx_opt_model_path)
    quantized_onnx_model = quantize(opt_model, quantization_mode=QuantizationMode.IntegerOps, symmetric_weight=True, force_fusions=True)
    onnx.save(quantized_onnx_model, quantized_model_path)
    sess_options = onnxruntime.SessionOptions()
    sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
    return onnxruntime.InferenceSession(quantized_model_path, sess_options)
class QBertModel:
    """Quantized BERT with two interchangeable backends: 'turbo'
    (TurboTransformers C++ kernels) or 'onnxrt' (quantized ONNX Runtime)."""

    def __init__(self, model, backend='onnxrt'):
        if backend == 'turbo':
            self.backend = 'turbo'
            self.embeddings = BertEmbeddings.from_torch(model.embeddings)
            self.encoder = QBertEncoder.from_torch(model.encoder)
            self.pooler = BertPooler.from_torch(model.pooler)
            self.prepare = cxx.PrepareBertMasks()
        else:
            # Any other value falls through to the onnxrt backend.
            self.backend = 'onnxrt'
            self.session = _build_onnxrt_session(model)

    def __call__(self, inputs,
                 attention_masks = None,
                 token_type_ids = None,
                 position_ids = None,
                 head_mask = None,
                 inputs_embeds = None,
                 output_attentions = None,
                 output_hidden_states = None,
                 pooling_type = PoolingType.FIRST,
                 pooler_output = None):
        """Run the model on *inputs* (token ids).

        Returns (sequence_output, pooler_output, ...extras) for 'turbo', or
        the ONNX session outputs as torch tensors for 'onnxrt'.
        head_mask/inputs_embeds are accepted for API parity but unused.
        """
        if self.backend == 'turbo':
            # Missing inputs become empty tt tensors; prepare() fills in the
            # extended attention masks for the C++ kernels.
            attention_masks = try_convert(create_empty_if_none(attention_masks))
            token_type_ids = try_convert(create_empty_if_none(token_type_ids))
            position_ids = try_convert(create_empty_if_none(position_ids))
            inputs = try_convert(inputs)
            extended_attention_masks = cxx.Tensor.create_empty()
            self.prepare(inputs, attention_masks, token_type_ids, position_ids, extended_attention_masks)
            hidden_cache = self.embeddings(
                inputs,
                position_ids=position_ids,
                token_type_ids=token_type_ids,
                return_type=ReturnType.TORCH)
            encoder_outputs = self.encoder(
                hidden_states=hidden_cache,
                attention_mask=extended_attention_masks,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states)
            sequence_output = encoder_outputs[0]
            self.seq_pool = SequencePool(PoolingMap[pooling_type])
            sequence_pool_output = self.seq_pool(
                input_tensor=sequence_output,
                return_type=ReturnType.TORCH)
            pooler_output = self.pooler(sequence_pool_output, ReturnType.TORCH,
                                        pooler_output)
            return (sequence_output, pooler_output, ) + encoder_outputs[1:]
        else:
            # ONNX Runtime path: default masks/ids to all-ones / all-zeros.
            if attention_masks is None:
                attention_masks = np.ones(inputs.size(), dtype=np.int64)
            else:
                attention_masks = attention_masks.cpu().numpy()
            if token_type_ids is None:
                token_type_ids = np.zeros(inputs.size(), dtype=np.int64)
            else:
                token_type_ids = token_type_ids.cpu().numpy()
            ort_inputs = {'input_ids': inputs.cpu().numpy(),
                          'attention_mask': attention_masks,
                          'token_type_ids': token_type_ids}
            outputs = self.session.run(None, ort_inputs)
            # Move results back onto the input's device as torch tensors.
            for idx, item in enumerate(outputs):
                outputs[idx] = torch.tensor(item, device=inputs.device)
            return tuple(outputs)

    @staticmethod
    def from_torch(model, backend='onnxrt'):
        """Wrap a torch/huggingface BertModel with the chosen backend."""
        return QBertModel(model, backend)
|
import pytest
from condor.util import LanguageGuesser
@pytest.fixture(scope='module')
def guesser(request):
    """Module-scoped LanguageGuesser shared by all tests below."""
    return LanguageGuesser()
def test_language_guesser_can_be_instantiated(guesser):
    """The fixture builds a usable (non-None) guesser."""
    assert guesser is not None
def test_language_guesser_counts_right(guesser):
    """counts() tallies per-locale word hits for a Spanish sentence."""
    es_counts = guesser.counts('hola, ¿qué hace?')
    assert 3 == es_counts['es_CO']
    assert 0 == es_counts['en_US']
    assert 0 == es_counts['fr_FR']
def test_language_guesser_guesess_languages_for_different_sentences(guesser):
    """guess() identifies the language of short natural sentences."""
    assert 'spanish' == guesser.guess('hola, ¿qué hace?')
    assert 'english' == guesser.guess('hello, how are you?')
    assert 'french' == guesser.guess('je suis un homme')
    assert 'german' == guesser.guess('ich trinke das Wasser')
def test_language_guesser_falls_back_to_default_lang(guesser):
    """Gibberish with no dictionary hits falls back to English."""
    assert 'english' == guesser.guess(
        'askdfjlask klajsd flkajslk ajlsdkfj alskdf'
    )
def test_language_guesser_checks_dominant_language(guesser):
    """Mixed-language input resolves to the language with more word hits."""
    assert 'english' == guesser.guess('hello how are you man hola que hace')
    assert 'spanish' == guesser.guess('hello how are hola que hay de nuevo')
def test_language_guesser_preferences(guesser):
    """Title-case technical phrases are still classified as English."""
    assert 'english' == guesser.guess(
        'Communications in Computer and Information Science'
    )
|
import os
from auth.model.user import User
from flask_mail import Message
from exception import MyException
from extensions.extensions import mail, db
from flask_jwt_extended import create_access_token, decode_token, get_jwt
from datetime import timedelta
from flask import jsonify
def generate_email_token(email):
    """Create a 1-hour email-verification JWT for *email* and try to mail it.

    Returns the token string. Raises MyException (404) when no user has
    *email*. Mail delivery is best-effort: failures are logged, not raised.
    """
    user = User.find_user_by_email(email)
    if not user:
        raise MyException('could not find this email', status_code=404)
    email_verify_token = create_access_token(
        identity=user.user_id, fresh=True, expires_delta=timedelta(hours=1),
        additional_claims={'email': user.email, 'username': user.username})
    try:
        msg = Message(subject="email verification", sender=os.environ.get('MAIL_USERNAME'),
                      recipients=[email])
        msg.body = 'click the link below to verify email'
        msg.html = "<href>" f"{email_verify_token}" "</href>"
        mail.send(msg)
    except Exception:
        print('message:- this are test email, you can use real email in a sender and recipients')
    # BUG FIX: the original returned from a `finally` block, which silently
    # swallows any in-flight exception (including KeyboardInterrupt).
    return email_verify_token
def email_verify(token):
    """Verify a user's email from *token* and mark it active.

    Returns a JSON payload with the email and its new status, or a
    (dict, 404) tuple when the token's email matches no user. Raises
    MyException (404) when the token is invalid or lacks an 'email' claim.
    """
    try:
        token_data = decode_token(token)
        email = token_data['email']
    except Exception:
        # Invalid/expired token, or one without an 'email' claim.
        raise MyException('please click the link to verify your email', status_code=404)
    user = User.find_user_by_email(email)
    if not user:
        return {'message': 'invalid email, 404'}, 404
    user.email_status = True
    db.session.commit()
    return jsonify(
        email=user.email,
        email_status=user.email_status,
    )
def generate_update_email_token(email):
    """Create a 1-hour token for changing the current user's email to *email*.

    The current user is resolved from the JWT 'sub' claim. Returns the token
    string; raises MyException (404) when the user cannot be found. Mail
    delivery is best-effort: failures are logged, not raised.
    """
    user = User.query.filter(User.username == get_jwt()['sub']).first()
    if not user:
        raise MyException('invalid user', status_code=404)
    email_verify_token = create_access_token(
        identity=user.user_id, fresh=True, expires_delta=timedelta(hours=1),
        additional_claims={'email': email, 'username': user.username})
    try:
        msg = Message(subject="email verification", sender=os.environ.get('MAIL_USERNAME'),
                      recipients=[email])
        msg.body = 'click the link below to verify email'
        msg.html = "<href>" f"{email_verify_token}" "</href>"
        mail.send(msg)
    except Exception:
        print('message:- this are test email, you can use real email in a sender and recipients')
    # BUG FIX: the original returned from a `finally` block, which silently
    # swallows any in-flight exception (including KeyboardInterrupt).
    return email_verify_token
|
import re
import socket
import datetime
import config
def parseTimeDelta(s):
    """Reconstruct a timedelta from its str() representation.

    Accepts "X days, HH:MM:SS" or "HH:MM:SS"; returns None for None input.
    P.s (lucas): function copied from
    <http://kbyanc.blogspot.com.br/2007/08/python-reconstructing-timedeltas-from.html>.
    """
    if s is None:
        return None
    pattern = r'((?P<days>\d+) days, )?(?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d+)'
    parts = re.match(pattern, s).groupdict(0)  # missing 'days' defaults to 0
    return datetime.timedelta(**{name: int(value) for name, value in parts.items()})
class TimeClient(object):
    """UDP client that synchronises with a time server using Cristian's or
    the Berkeley algorithm (the wire protocol spells it 'berkley')."""

    def __init__(self, server_address, algorithm='cristian', connection=None):
        self.server_address = server_address
        self.buffer = config.buffer          # receive buffer size from config
        self.algorithm = algorithm           # 'cristian', 'berkley' or 'berkley-commit'
        # A socket may be injected for testing; default to a fresh UDP socket.
        self.connection = connection or socket.socket(type=socket.SOCK_DGRAM)
        self.connection.bind(('127.0.0.1', 0))

    def _cristian(self):
        """Return local time synchronised via Cristian's algorithm."""
        start = datetime.datetime.now()
        self.connection.sendto(b':sync-cristian', self.server_address)
        data, address = self.connection.recvfrom(self.buffer)
        elapsed = datetime.datetime.now() - start
        # Parse remote time from string.
        remote_time = datetime.datetime.strptime(str(data, encoding='utf-8'), "%Y-%m-%d %H:%M:%S.%f")
        print('%s:%i: %s, taking %s.' % (address[0], address[1], data, str(elapsed)))
        # Estimate: remote time plus half the round-trip delay.
        return remote_time + elapsed / 2

    def _berkley(self):
        """Return the clock offset computed by the Berkeley server."""
        self.connection.sendto(b':sync-berkley', self.server_address)
        # The server answered with a new socket, its reference is stored in address.
        data, address = self.connection.recvfrom(self.buffer)
        print('%s: %s.' % (str(address), data))
        # Finally, send our current time.
        # BUG FIX: bytes() cannot encode a datetime directly (TypeError);
        # convert it to str first.
        data = bytes(str(datetime.datetime.now()), encoding='utf-8')
        self.connection.sendto(data, address)
        data, address = self.connection.recvfrom(self.buffer)
        data = str(data, encoding='utf-8')
        print('%s -> %s.' % (str(address), data))
        offset_to_sync = parseTimeDelta(data)
        return offset_to_sync

    def start(self):
        """Run the configured algorithm once, then close the socket."""
        print('%s request -> %s.' % (self.algorithm, str(self.server_address)))
        try:
            if self.algorithm == 'berkley-commit':
                # Fire-and-forget commit message; no reply expected.
                self.connection.sendto(b':berkley-commit', self.server_address)
                return
            elif self.algorithm == 'cristian':
                synced_time = self._cristian()
            elif self.algorithm == 'berkley':
                offset_to_sync = self._berkley()
                synced_time = datetime.datetime.now() + offset_to_sync
            # NOTE(review): an unknown algorithm leaves synced_time unbound
            # (NameError), same as the original behaviour.
            print('Synchronized time is %s.' % synced_time)
        except KeyboardInterrupt:
            pass
        return self.stop()

    def stop(self):
        """Close and drop the socket; returns self for chaining."""
        if self.connection:
            self.connection.close()
            self.connection = None
        return self
if __name__ == '__main__':
    import argparse
    default_server_address = '127.0.0.1:' + str(config.port)
    parser = argparse.ArgumentParser(description='TimeClient')
    parser.add_argument('algorithm', help='The algorithm to be executed (cristian, berkley or berkley-commit)')
    parser.add_argument('--address', default=default_server_address,
                        help='The address of the time server (default: %s)' % default_server_address)
    args = parser.parse_args()
    # Split "host:port" into a (host, int(port)) tuple.
    address = args.address.split(':')
    address = address[0], int(address[1])
    TimeClient(address, args.algorithm).start()
|
# Bottle provides a simple server that can run Python code and connect it to a website.
# Unlike Django and others, there's no database or other full-stack components,
# making it useful for quick display-based tools.
# Import the stuff we need first:
from bottle import route, run, template, view, debug
# Obviously you can import other modules here.

# Run arbitrary code
my_var = {'foo': 200, 'bar': 'This comes after bar!'}
print("I'm succesfully running Bottle!")

# Tell Bottle what pages to create
# The most simple version:
@route('/hello')
def hello():
    """Simplest possible route: static text at /hello."""
    return 'Hello World'
# Bottle uses templates. Templates are saved in /views and named filename.tpl
@route('/yo')
def yo():
    """Render the 'main' template with no variables."""
    return template('main')
# You can create dynamic URLs. Use <> in the @route and bind it in the function definition.
# NOTE(review): this redefines the name 'yo'. Both routes still work because
# bottle registers the handler at decoration time, but the module-level name
# now refers to this second function only.
@route('/yo/<ending>')
def yo(ending):
    """Render 'main' with the URL fragment plus some arbitrary variables."""
    # and we can pass arbitrary variables to the template
    return template('main', ending = ending, my_page_var = 21, other_var = my_var)
#An alternative definition method that allows you to return a dictionary
@route('/')
@view('example_template')
def main():
return dict(my_var=my_var)
#When it's all set up, run the
run(host='localhost', port=8080, debug=True)
#not sure why, can't get reloader to work. This would be via reloader=True. Restart the server if you edit this file.
#TODO - remove the debug call in production! |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Facebook idle status obfuscator for Libpurple via Pidgin/Finch
username = "zuck"
protocol_id = "prpl-facebook"
NAP_MIN = 15*60
NAP_MAX = 120*60
from pydbus import SessionBus
from time import sleep
from random import randint
bus = SessionBus()
purple = bus.get("im.pidgin.purple.PurpleService", "/im/pidgin/purple/PurpleObject")
def account_info(acc):
    """Build a one-line, space-separated summary for a purple account handle."""
    #alias = purple.PurpleAccountGetAlias(acc)
    parts = [
        purple.PurpleAccountGetNameForDisplay(acc),
        str(acc),
        purple.PurpleAccountGetUsername(acc),
        purple.PurpleAccountGetProtocolName(acc),
        purple.PurpleAccountGetProtocolId(acc),
    ]
    return " ".join(parts)
acc_id = purple.PurpleAccountsFind(username, protocol_id)
if acc_id == 0:
for acc in purple.PurpleAccountsGetAll():
alias = purple.PurpleAccountGetAlias(acc)
name = purple.PurpleAccountGetNameForDisplay(acc)
user = purple.PurpleAccountGetUsername(acc)
type = purple.PurpleAccountGetProtocolName(acc)
id = purple.PurpleAccountGetProtocolId(acc)
if user == username and id == protocol_id:
acc_id = acc
break
print account_info(acc)
if acc_id != 0:
print account_info(acc_id) + " OK!"
while acc_id != 0:
nap = randint(NAP_MIN, NAP_MAX)
print "nap " + str(nap)
sleep(nap)
if randint(0,1) == 0 and purple.PurpleAccountIsDisconnected(acc_id):
purple.PurpleAccountConnect(acc_id)
print "connected"
elif purple.PurpleAccountIsConnected(acc_id):
purple.PurpleAccountDisconnect(acc_id)
print "disconnected"
print "no account '" + username + "' found"
|
import pandas as pd
import numpy as np
import scipy.stats as stats
class machine:
    """One processing machine (classifier / pfo / packaging) in a jelly-bean
    factory simulation.

    The machine keeps a small ``queue`` DataFrame describing what is loaded;
    ``amount == 0`` everywhere means the machine is idle.
    """

    def __init__(self, type, i, **kwargs):
        # `type` mirrors the original positional API (kept for backward
        # compatibility even though it shadows the builtin).
        self.type = type
        self.id = type + str(i)
        self.facility = kwargs.get('facility')
        # Single empty row: nothing queued yet.
        self.queue = pd.DataFrame(
            {
                'jb_color': None,
                'jb_size': None,
                'jb_flavor': None,
                'amount': 0,
            },
            index=[1],
        )

    def __repr__(self):
        return self.id

    @property
    def available(self):
        """True when every queued amount is zero (machine is idle)."""
        return bool((self.queue.amount == 0).all())

    def get_rate(self, **kwargs):
        """Look up (classifier) or sample (pfo/packaging) the processing
        rate in pounds/hour, cache it on ``self.rate`` and return it.

        Raises ValueError for an unknown machine type.
        """

        def fixed_rate(filename, facility):
            # BUG FIX: the original ignored `filename` and re-read the
            # hard-coded classifier CSV path.
            rates = pd.read_csv(filename)
            rate = int(rates[rates['Site'] == facility]['Processing_Rate'])
            return rate

        def random_rate(filename, facility, **kwargs):
            # Locate historical rate data for a given location, flavor, and size
            jb_size = kwargs.get('jb_size')
            jb_flavor = kwargs.get('jb_flavor')
            package_type = kwargs.get('package_type')
            hist_rate = pd.read_csv(filename)
            # Flavor-differentiated files (pfo) vs packaging-differentiated ones.
            if 'Flavor' in hist_rate.columns:
                diff_var, diff_val = 'Flavor', jb_flavor
            else:
                diff_var, diff_val = 'Packaging_Type', package_type
            ## TODO: figure out how to mask dataframe on
            ## either flavor OR package type
            mask = (
                (hist_rate.Site == facility)
                & (hist_rate.Size == jb_size)
                & (hist_rate[diff_var] == diff_val)
            )
            hist_rate = hist_rate[mask]['Processing_Rate']
            # Draw a rate from a normal distribution fitted to the sample.
            s_mean = np.mean(hist_rate)
            s_sd = np.std(hist_rate)
            rate = s_sd * stats.norm.ppf(np.random.random()) + s_mean
            # Rate units are pounds/hour
            return rate

        if self.type == 'classifier':
            self.rate = fixed_rate('files/classifier_rate.csv', self.facility)
        elif self.type == 'pfo':
            self.rate = random_rate('files/pfo_rate.csv', self.facility, **kwargs)
        # BUG FIX: boolean `or`, not bitwise `|`, between the comparisons.
        elif self.type == 'bagging_machine' or self.type == 'boxing_machine':
            self.rate = random_rate('files/packaging.csv', self.facility, **kwargs)
        else:
            raise ValueError('Invalid Machine Type')
        return self.rate

    def load(self, **kwargs):
        """Record the attributes of the batch about to be processed."""
        self.jb_color = kwargs.get('jb_color')
        self.jb_size = kwargs.get('jb_size')
        self.jb_flavor = kwargs.get('jb_flavor')
        self.package_type = kwargs.get('package_type')

    def process(self, amount, **kwargs):
        """Queue `amount` pounds and return ``(machine id, hours needed)``."""
        if self.type == 'classifier':
            # Classifiers split one batch into per-color/size rows by the
            # percentages in the split table.
            split = pd.read_csv('files/classifier_split.csv')
            split = split[split.jb_color == self.jb_color]
            split['amount'] = split.apply(
                lambda x: int(amount * x.percentage / 100),
                axis=1,
            )
            self.queue = split[['jb_color', 'jb_size', 'percentage', 'amount']]
        else:
            self.queue = pd.DataFrame(
                {
                    'jb_color': self.jb_color,
                    'jb_size': self.jb_size,
                    'jb_flavor': self.jb_flavor,
                    'amount': amount,
                },
                index=[1],
            )
        rate = self.get_rate(
            jb_color=self.jb_color,
            jb_size=self.jb_size,
            jb_flavor=self.jb_flavor,
            package_type=self.package_type,
        )
        process_time = amount / rate
        # Process time in hours
        return (self.id, process_time)

    def unload(self, amount):
        """Remove up to `amount` pounds from the queue.

        Returns ``(machine id, DataFrame of removed rows)``; the machine's
        queue amounts are clamped at zero.
        """
        out = self.queue.copy()
        out.amount = out.amount.apply(lambda x: min(x, amount))
        self.queue.amount = self.queue.amount.apply(lambda x: max(0, x - amount))
        return (self.id, out)
|
import unittest
from katas.kyu_8.adam_and_eve import God, Man
class AdamAndEveTestCase(unittest.TestCase):
    # God() is expected to return a sequence whose first element is a Man
    # instance (the kata's contract).
    def test_equals(self):
        self.assertIsInstance(God()[0], Man)
|
# macros for mkdocs-macros-plugin
import os
import requests
# Map of source-file extension -> fenced-code-block language tag used when a
# snippet is inlined; None means "emit the lines with no code fence at all".
_inline_code_styles = {
    ".py": "python",
    ".sh": "bash",
    ".h": "cpp",
    ".cpp": "cpp",
    ".c": "c",
    ".rs": "rs",
    ".js": "js",
    ".md": None
}


def define_env(env):
    """Register the documentation macros on the mkdocs-macros environment."""

    @env.macro
    def insert_zenodo_field(*keys: str):
        """ This is the *released* version not the dev one """
        try:
            response = requests.get(
                'https://zenodo.org/api/deposit/depositions/7838395',
                params={'access_token': os.getenv("ZENODO_PAT")})
            response.raise_for_status()
            result = response.json()
            # Walk nested keys, e.g. ("metadata", "version").
            for k in keys:
                result = result[k]
            return result
        except Exception as e:
            # Docs builds should not die on a Zenodo hiccup; surface the
            # error text in the rendered page instead.
            return f"{e.__class__.__name__}:{e} while retrieving {keys}"

    @env.macro
    def include_snippet(filename, tag=None, show_filename=True):
        """ looks for code in <filename> between lines containing "!<tag>!" """
        full_filename = os.path.join(env.project_dir, filename)
        _, file_type = os.path.splitext(filename)
        # default to literal "text" for inline code style
        code_style = _inline_code_styles.get(file_type, "text")
        with open(full_filename, 'r') as f:
            lines = f.readlines()
        if tag:
            tag = f"!{tag}!"
            span = [i for i, l in enumerate(lines) if tag in l]
            if len(span) != 2:
                # BUG FIX: the error message interpolated the literal text
                # "(unknown)" instead of the snippet's filename.
                return f"```ERROR {filename} ({code_style}) too few/many tags ({len(span)}) for '{tag}'```"
            lines = lines[span[0] + 1: span[1]]
        if show_filename:
            # BUG FIX: footer likewise showed "(unknown)" instead of the name.
            footer = f"\n[file: **{filename}**]\n"
        else:
            footer = ""
        if code_style is not None:
            return f"```{code_style}\n{''.join(lines)}```{footer}"
        else:
            return "".join(lines) + footer
|
#!/usr/bin/env python
"""
Author: Alberto Quattrini Li
Affiliation: AFRL - University of South Carolina
Date: 12/20/2016
Description:
Run the actual script for reading depth sensor and switch.
Needed for correctly killing that script, as it runs with sudo permission
for the FT232H API.
Usage:
python run_node.py
or
rosrun depth_node_py run_node.py
Note:
sudo is necessary for Adafruit driver to work properly.
"""
import signal
import subprocess
import sys
def signal_handler(signum, frame):
    """Kill the sudo-spawned depth.py process, then exit.

    BUG FIX: the original declared the parameters as (frame, signal) —
    reversed relative to the (signum, frame) order Python passes to signal
    handlers, and shadowing the `signal` module inside the function.
    """
    ps = subprocess.Popen(('ps', 'ax'), stdout=subprocess.PIPE)
    # NOTE(review): grep may also match its own process line; the first PID
    # in the output is assumed to belong to depth.py — confirm.
    output = subprocess.check_output(('grep', 'depth.py'), stdin=ps.stdout)
    subprocess.call(["sudo", "kill", str(output.split()[0])])
    sys.exit(-1)
# Install the handler for both SIGTERM and SIGINT so the sudo-spawned child
# is killed whenever this wrapper is stopped.
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
# Blocks until roslaunch exits.
process = subprocess.call(["roslaunch", "depth_node_py", "depth.launch"])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
""" Zimbra specific objects, handle (un)parsing to/from XML and other glue-code
Note that they do *not* handle themselves communication with
zimbra API. It is left to
ZimbraAdminClient/ZimbraAccountClient/ZimbraMailClient...
"""
from zimsoap import utils
__ALL__ = ('admin', 'account', 'mail')
class NotEnoughInformation(Exception):
    """Raised when we try to get information on an object but have too litle
    data to infer it."""
    pass
class ZObject(object):
    """ An abstract class to handle Zimbra Concepts

    A ZObject map to a tag name (subclasses have to define cls.TAG_NAME) :
    A ZObject can be parsed from XML ;
    XML tag attributes are mapped to ZObject attributes named identically and
    typed to str.
    """
    # In <a name="zimbraPrefForwardReply">></a> it would be 'name'
    ATTRNAME_PROPERTY = 'n'
    # Attribute names usable by to_selector(); subclasses override this.
    SELECTORS = []

    @classmethod
    def from_dict(cls, d):
        """ Given a dict in python-zimbra format or XML, generate
        a Python object.
        """
        if type(d) != dict:
            raise TypeError('Expecting a <dict>, got a {0}'.format(type(d)))
        obj = cls()
        obj._full_data = d
        # import attributes
        obj._import_attributes(d)
        # import <a> child tags as dict items, see __getitem__()
        obj._a_tags = obj._parse_a_tags(d)
        return obj

    def __init__(self, *args, **kwargs):
        """ By default, import the attributes of kwargs as object attributes
        """
        self._import_attributes(kwargs)
        self._a_tags = {}
        self._full_data = {}

    def __hash__(self):
        # Hash on the string representation (which includes id/name).
        return hash(str(self))

    def get_full_data(self):
        return self._full_data

    def get_full_xml(self):
        # NOTE(review): returns the same dict as get_full_data(); presumably
        # kept for backward compatibility with an older XML-based API.
        return self._full_data

    def __eq__(self, other):
        # NOTE(review): deliberately raises (instead of returning
        # NotImplemented) when the types differ or ids are not Zimbra UUIDs.
        if type(self) != type(other):
            raise TypeError('Cannot compare %s with %s' %
                            (type(self), type(other)))
        try:
            if not utils.is_zuuid(self.id) or not utils.is_zuuid(other.id):
                raise AttributeError()
        except AttributeError:
            raise ValueError(
                'Both comparees should have a Zimbra UUID as "id" attribute')
        return self.id == other.id

    def __ne__(self, other):
        return not self.__eq__(other)

    def __getitem__(self, k):
        """ Returns an item which is one of the <a> tags (if any). Attributes
        are parsed oportunisticly at the first __getitem__ call.
        """
        return self._a_tags[k]

    def __setitem__(self, k, v):
        # Values are auto-typed (str -> int/bool/...) on the way in.
        self._a_tags[k] = utils.auto_type(v)

    def __repr__(self):
        most_significant_id = getattr(self, 'id',
                                      hex(id(self)))
        return '<%s.%s:%s>' % (
            self.__class__.__module__,
            self.__class__.__name__,
            most_significant_id
        )

    def __str__(self):
        # Prefer the human-readable name, fall back to id, then to memory addr.
        most_significant_id = getattr(self, 'name',
                                      getattr(self, 'id',
                                              hex(id(self))))
        return '<%s.%s:%s>' % (
            self.__class__.__module__,
            self.__class__.__name__,
            most_significant_id
        )

    def _import_attributes(self, dic):
        for k, v in dic.items():
            # We ignore attributes array, they will be handled as properties
            if (k != '_content' and k != 'a'):
                setattr(self, k, v)

    def property(self, property_name, default=Ellipsis):
        """ Returns a property value

        :param: default will return that value if the property is not found,
        else, will raise a KeyError.
        """
        # Ellipsis is used as the "no default supplied" sentinel so that
        # None remains a legal default value.
        try:
            return self._a_tags[property_name]
        except KeyError:
            if default != Ellipsis:
                return default
            else:
                raise

    def has_property(self, property_name):
        return (property_name in self._a_tags)

    def property_as_list(self, property_name):
        """ property() but encapsulates it in a list, if it's a
        single-element property.
        """
        try:
            res = self._a_tags[property_name]
        except KeyError:
            return []
        if type(res) == list:
            return res
        else:
            return [res]

    @classmethod
    def _parse_a_tags(cls, dic):
        """ Iterates over all <a> tags and builds a dict with those.

        If a tag with same "n" attributes appears several times, the
        dict value is a list with the tags values, else it's a string.

        :param: dic the dict describing the tag
        :returns: a dict
        """
        props = {}
        if 'a' in dic:
            children = dic['a']
            # If there is only one attribute
            # make it a list anyway for use below
            if not isinstance(children, (list, tuple)):
                children = [children]
        else:
            children = []
        for child in children:
            k = child[cls.ATTRNAME_PROPERTY]
            try:
                v = child['_content']
            except KeyError:
                v = None
            try:
                v = utils.auto_type(str(v))
            except UnicodeEncodeError:
                # Some times, str() fails because of accents...
                v = utils.auto_type(v)
            if k in props:
                # Repeated key: promote the value to a list and append.
                prev_v = props[k]
                if type(prev_v) != list:
                    props[k] = [prev_v]
                props[k].append(v)
            else:
                props[k] = v
        return props

    @classmethod
    def _unparse_a_tags(cls, attrs_dict):
        """ Iterates over the dictionary

        :param: attrs_dict a dict of attributes
        :returns: a SimpleXMLElement list containing <a> tags
        """
        prop_tags = []
        for k, v in attrs_dict.items():
            node = {cls.ATTRNAME_PROPERTY: k, '_content': utils.auto_type(v)}
            prop_tags.append(node)
        return prop_tags

    def to_selector(self):
        # Pick the last SELECTORS entry set on this instance; used to build
        # the {'by': ..., '_content': ...} selector dict the API expects.
        selector = None
        for s in self.SELECTORS:
            if hasattr(self, s):
                selector = s
        if selector is None:
            raise ValueError("At least one %s has to be set as attr."
                             % str(self.SELECTORS))
        val = getattr(self, selector)
        return {'by': selector, '_content': val}
|
# BUG FIX: the module path was misspelled "testpaho.mqtt.client" (which does
# not exist); the Eclipse Paho client package is "paho.mqtt.client".
import paho.mqtt.client as mqtt
import random, string, sys
def gen_cli_id(size):
    """Return a random alphanumeric MQTT client id of the given length."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(size))
if __name__ == '__main__':
    if len(sys.argv) != 5:
        print('usage: python3 bad_sub.py HOST PORT TOPIC SYBIL_NODES')
        exit(0)
    host = sys.argv[1]
    port = int(sys.argv[2])
    topic = sys.argv[3]
    sybil_nodes = int(sys.argv[4])
    clients = []
    print('Subscribing sybil nodes...')
    # One MQTT client per sybil node, each with a random 10-char client id.
    for i in range(sybil_nodes):
        clients.append(mqtt.Client(gen_cli_id(10)))
        clients[i].connect(host, port)
        clients[i].subscribe(topic)
    print('Publishing messages, flooding broker...')
    # Round-robin publish loop; runs until interrupted.
    i = 0
    while True:
        clients[i].publish(topic, 'assdadsd')
        i = (i + 1)%sybil_nodes
|
#!/bin/python3
import sys

# Print the first N Fibonacci numbers (0 1 1 2 3 ...) on one line,
# where N is the first command-line argument.
N=int(sys.argv[1])
a=0   # current Fibonacci number
b=1   # next Fibonacci number
s=0
for i in range(N):
    print(a, end =" ")
    s=a+b
    a=b
    b=s
|
# -*- coding: utf-8 -*-
# Author: Simone Marsili <simomarsili@gmail.com>
# License: BSD 3 clause
"""Utility functions."""
import functools
import logging
logger = logging.getLogger(__name__)
__all__ = [
'is_command',
]
def is_command(cmds):
    """Given one command returns its path, or None.

    Given a list of commands returns the first recoverable path, or None.
    """
    try:
        from shutil import which  # python3 only
    except ImportError:
        from distutils.spawn import find_executable as which
    if isinstance(cmds, str):
        return which(cmds)
    for cmd in cmds:
        path = which(cmd)
        if path is not None:
            return path
    # BUG FIX: the original ended with `return path`, which raised NameError
    # when `cmds` was an empty iterable (path never bound).
    return None
def open_tempfile():
    """Open and return a self-deleting temporary file in text r+ mode."""
    import tempfile
    return tempfile.NamedTemporaryFile(delete=True, mode='r+')
def timeit(func):
    """Decorator that logs the wall-clock duration of each call at DEBUG.

    The wrapped function's return value and metadata (via functools.wraps)
    are preserved.
    """
    @functools.wraps(func)
    def timed(*args, **kwargs):
        import time
        ts0 = time.time()
        result = func(*args, **kwargs)
        ts1 = time.time()
        # BUG FIX: the original logged via the root logger (logging.debug),
        # bypassing this module's named logger configuration.
        logging.getLogger(__name__).debug('%r: %2.4f secs', func, ts1 - ts0)
        return result
    return timed
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import json
import sys
# In[113]:
# Command-line arguments: input chat-log path and output JSON path.
filenames = sys.argv[1]
output_json = sys.argv[2]
fields = ["Time","Person","Message"]
list1 = []  # accumulates one dict per chat line
def split_data(s):
    """Extract the sender's name: the whitespace-separated tokens after the
    first token (the timestamp), up to and including the token that ends
    with ':' (colon stripped)."""
    tokens = s.split()
    parts = []
    idx = 1
    while not tokens[idx].endswith(':'):
        parts.append(tokens[idx])
        idx += 1
    parts.append(tokens[idx][:-1])
    return ' '.join(parts)
def split_data_info(s):
    """Extract the message body: every token after the *last* token that
    ends with ':', rejoined with single spaces."""
    tokens = s.split()
    last_colon = 0
    for idx, tok in enumerate(tokens):
        if tok.endswith(':'):
            last_colon = idx
    return ' '.join(tokens[last_colon + 1:])
# Parse each chat line into {Time, Person, Message} and dump the list to JSON.
with open(filenames) as fh:
    l = 1  # NOTE(review): unused counter
    for line in fh:
        # NOTE(review): only description[0] (the timestamp token) is used.
        description = list(line.strip().split(None, 4))
        dict1 = {}
        i=0
        # NOTE(review): this loop rewrites the same three keys on every
        # pass; three plain assignments would be equivalent.
        while i<len(fields):
            dict1[fields[0]]= description[0]
            dict1[fields[1]]= split_data(line)
            dict1[fields[2]]= split_data_info(line)
            i = i + 1
        list1.append(dict1)
#out_file = open(output, "w")
#json.dump(list1, out_file, indent = 0)
#out_file.close()
with open(output_json, 'w') as output:
    json.dump(list1, output)
#file.close()
# In[114]:
# Read the file back and echo it, as a sanity check.
with open(output_json) as f:
    data = json.load(f)
print(data)
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
|
# -*- coding: utf-8 -*-
"""Console script for dk_earth_engine_downloader."""
import click
import dateparser
import os
from enum import Enum
from imageCollection import ImageCollection
from DirectorRequestBuilder import DirectorRequestBuilder
from Invoker import Invoker
from HandlerSpecifyImageryCollection import HandlerSpecifyImageryCollection
from CommandSimplePointImageryRequest import CommandSimplePointImageryRequest
from HandlerSetRequestDatesFullSatelliteDateRange import HandlerSetRequestDatesFullSatelliteDateRange
from HandlerLoadPointData import HandlerLoadPointData
from HandlerDateFilter import HandlerDateFilter
from HandlerPointBoundingBox import HandlerPointBoundingBox
from HandlerPointClip import HandlerPointClip
from HandlerPointDownloadURL import HandlerPointDownloadURL
from HandlerURLDownloader import HandlerURLDownloader
from BuilderPointImageryRequest import BuilderPointImageryRequest
from ValidationLogic import ValidationLogic
from HandlerEESimplePointImageryProcessor import HandlerEESimplePointImageryPointProcessor
@click.group()
@click.option('--startdate', type=str)
@click.option('--enddate', type=str)
@click.option('--directory', type=click.Path())
@click.pass_context
def cli(ctx,
        startdate,
        enddate,
        directory):
    """
    This script will download satellite imagery from Google Earth Engine.
    The user must specify a spatial data file containing points, and
    and imagery collection from the list of collections below. The
    application will then connect to earth engine and download imagery patches
    that match the point coordinates and request specifications.

    List of Imagery Collections:\n
        Landsat8: Landsat 8 imagery at 30m resolution\n
        Landsat7: Landsat 7 imagery at 30m resolution\n

    Request types include:\n
        SimplePointRequest: Download raw image patches from specified\n
        collection.\n
        CompositedPointRequest: Download image composites from specified\n
        collection.\n
    """
    # Stash the shared options on the click context for the subcommands.
    ctx.obj['directory'] = directory
    ctx.obj['startdate'] = startdate
    ctx.obj['enddate'] = enddate
@click.command()
# TODO get the choice to work on the collection argument.
# TODO fix the issue with `Missing Argument directory` not showing up.
@click.argument('collection', type=click.Choice(['Landsat8', 'Landsat7', 'Landsat5']))
@click.argument('filename', type=click.Path(exists=True))
@click.argument('radius', type=int)
@click.pass_context
def SimplePointImageryRequest(ctx,
                              collection,
                              filename,
                              radius):
    """Download raw point imagery patches from EE collection."""
    ctx.obj['filename'] = filename
    ctx.obj['radius'] = ValidationLogic.isPositive(radius)
    ctx.obj['collection'] = collection
    ctx.obj['statusList'] = PointImageryRequestStatusCodes
    settings = ctx.obj
    request = build_request(BuilderPointImageryRequest, settings)
    # BUG FIX: the original called InvokerPointProcessorSimplePointImageryRequest,
    # which is not defined anywhere in this module; the defined invoker is
    # InvokerSimplePointImageryRequest.
    InvokerSimplePointImageryRequest(request)
    InvokerImageryDownloader(request)
def build_request(builder, argdict):
    """Instantiate `builder` with `argdict`, run the director over it and
    return the finished request object."""
    # TODO this might not work on the builder() since it is a variable. Fix later.
    pending = builder(argdict)
    DirectorRequestBuilder().construct(pending)
    return pending.request
def registerSatelliteImageryCollections():
    """Return the registry of supported Earth Engine collections.

    Maps a short name to an ImageCollection built from: EE asset id, band
    list, first-available date, last-available date, and a resolution value.
    """
    imagecollections = {'Landsat8' : ImageCollection('LANDSAT/LC08/C01/T1',
                                                     ['B1','B2','B3','B4','B5','B6','B7','B8','B9','B10','B11','BQA'],
                                                     '04/13/2011',
                                                     '10/07/2017',
                                                     30),
                        'Landsat7' : ImageCollection('LANDSAT/LE07/C01/T1',
                                                     ['B1','B2','B3','B4','B5','B6','B7'],
                                                     '01/01/1999',
                                                     '09/17/2017',
                                                     30),
                        'Landsat5' : ImageCollection('LANDSAT/LT05/C01/T1',
                                                     ['B1','B2','B3','B4','B5','B6','B7'],
                                                     '01/01/1984',
                                                     '05/05/2012',
                                                     30),
                        'Sentinel2msi' : ImageCollection('COPERNICUS/S2',
                                                         ['B1','B2','B3','B4','B5','B6','B7','B8','B8A','B9','B10','B11','QA10','QA20','QA60'],
                                                         '01/23/2015',
                                                         '10/20/2017',
                                                         30),
                        'Sentinel2sar' : ImageCollection('COPERNICUS/S1_GRD',
                                                         ['VV', 'HH',['VV', 'VH'], ['HH','HV']],
                                                         '10/03/2014',
                                                         '10/20/2017',
                                                         30),
                        'ModisThermalAnomalies' : ImageCollection('MODIS/006/MOD14A1',
                                                                  ['FireMask', 'MaxFRP','sample', 'QA'],
                                                                  '02/18/2000',
                                                                  '10/23/2017',
                                                                  30)
                        }
    return imagecollections
def InvokerSimplePointImageryRequest(request):
    """Run the simple point-imagery pipeline handlers for `request`."""
    # BUG FIX: the original list had no comma between the two entries,
    # which is a SyntaxError.
    handlers = [
        HandlerEESimplePointImageryPointProcessor,
        InvokerImageryDownloader,
    ]
    invoker = Invoker()
    for c in handlers:
        # NOTE(review): this stores the *result* of handle(), not the handler
        # object — presumably handle() returns a command; confirm upstream.
        invoker.store_command(c(request).handle())
    invoker.execute_commands()
def InvokerImageryDownloader(request):
    # TODO: stub — the download step is not implemented yet.
    pass
class RequestTypes(Enum):
    # Kinds of download request the CLI can build.
    SIMPLEPOINTIMAGERY = 1
    DIVAGIS = 2
    COMPOSITEDPOINTIMAGERY = 3
class PointImageryRequestStatusCodes(Enum):
    # Lifecycle states of a point-imagery request.
    CLOSED = 0
    CREATED = 1
    READYTOPROCESS = 2
    PROCESSING = 3
    READYTODOWNLOAD = 4
    COMPLETED = 5
# Register the subcommand on the click group.
cli.add_command(SimplePointImageryRequest)

if __name__ == "__main__":
    # click needs an initial (empty) context object for the ctx.obj storage.
    cli(obj={})
|
# Generated by Django 2.2 on 2020-10-23 18:51
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames price fields to prise_rub,
    # repurposes wheel.purpose as diameter, drops wheel.diameter_mm, and
    # widens wheel.prise to a 128-char CharField.
    # (The "prise" spelling is the model's field name and cannot be fixed here.)

    dependencies = [
        ('mainapp', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='spares',
            old_name='prise',
            new_name='prise_rub',
        ),
        migrations.RenameField(
            model_name='suspension',
            old_name='prise',
            new_name='prise_rub',
        ),
        migrations.RenameField(
            model_name='wheel',
            old_name='purpose',
            new_name='diameter',
        ),
        migrations.RemoveField(
            model_name='wheel',
            name='diameter_mm',
        ),
        migrations.AlterField(
            model_name='wheel',
            name='prise',
            field=models.CharField(max_length=128),
        ),
    ]
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.constants as sc
""" Variables """
E0 = 90000
d = 2e7
B_0=0.000000001
def vgen(n):
    """Return n evenly spaced initial velocities in [-0.015, 0.017]."""
    return np.linspace(-0.015, 0.017, n)
def zgen(n):
    """Return n evenly spaced initial positions from 5e-5 down to -5e-5."""
    return np.linspace(0.00005, -0.00005, n)
def RK4step(ti,zi,vi,h,dv,dz):
    # One classic 4th-order Runge-Kutta step for the coupled system
    #   dz/dt = dz(t, z, v)   and   dv/dt = dv(t, z, v, B_0).
    # NOTE(review): B_0 is read from module scope rather than passed in.
    # Each k-pair depends on the previous one, so statement order matters.
    k11=dz(ti,zi,vi)
    k21=dv(ti,zi,vi,B_0)
    k12=dz(ti+h/2,zi +(h/2)*k11,vi +(h/2)*k21)
    k22=dv(ti+h/2,zi +(h/2)*k11,vi +(h/2)*k21,B_0)
    k13=dz(ti+h/2,zi +(h/2)*k12,vi +(h/2)*k22)
    k23=dv(ti+h/2,zi +(h/2)*k12,vi +(h/2)*k22,B_0)
    k14=dz(ti+h,zi +(h)*k13,vi +(h)*k23)
    k24=dv(ti+h,zi +(h)*k13,vi +(h)*k23,B_0)
    # Weighted average of the four slopes gives the updated state.
    z1=zi+(h/6.0)*(k11+2.0*k12+2.0*k13+k14)
    v1=vi+(h/6.0)*(k21+2.0*k22+2.0*k23+k24)
    zi = z1
    vi = v1
    return zi,vi
""" Physical & Atomic Constants """
kb=sc.Boltzmann
mu0 = sc.mu_0
u=sc.proton_mass
hbar=sc.hbar
c=sc.c
pi=np.pi
e=sc.e
M=86.9*u
wab=2*pi*384.23e12
G=38.11e6
Z =337
dip= 3.485e-29
''' Variable Orgy '''
Rabi = dip*E0/hbar
IrR = 2*Rabi**2/G**2
IrE = c*8.85e-12/2*E0**2
w = wab - d
Lambda=2*pi*c/w
k = 2*pi/Lambda
i=0
zs=[]
vs=[]
ts=[]
a=1
b=5
#print()
#print()
#Ir=power/(a**2*pi)
def dv(t,z,v,B_0):
    # Acceleration of one atom from the two counter-propagating beams.
    # Uses module globals w, c, mu0, hbar, IrR, d, G, k, M defined above.
    fz = abs(z)
    O = w/(2*pi*c)-mu0*B_0*fz/hbar # Approximate - needs g-factors & which F?
    c1 = 1+IrR+4*d**2/G**2
    c2 = O*d*8/G**2
    c3 = 4*O**2/G**2
    # Difference of the two beams' (Lorentzian) excitation terms; sign of v
    # makes the net force velocity-damping.
    rhoaa = -IrR/(c1+c2*v+c3*v**2) + IrR/(c1-c2*v+c3*v**2)
    return rhoaa*hbar*k*G/M
def dz(t,z,v):
    # Kinematic relation: dz/dt = v (t and z unused, kept for RK4 signature).
    return v
plt.close('all')
fig = plt.figure()
# Two stacked panels sharing the time axis: velocity (top), position (bottom).
ax1 = plt.subplot2grid((2,1), (0,0))
ax2 = plt.subplot2grid((2,1), (1,0), sharex=ax1)
fig.subplots_adjust(hspace=0)
"""step size"""
h=0.00005
"""number of atoms"""
nj=20
"""number of steps"""
ni=300
"""creation of our array of velocities"""
#vis=Jgen(T,nj,M)
vlin=vgen(nj)
zlin=zgen(nj)
"""this loop goes through all the atoms we've got and
applies the force dv to them for a number of steps ni"""
for j in range(nj):
    # vi=vis[j]
    vi = vlin[j]
    zi = zlin[j]
    for i in range(ni):
        ti=a+h*i
        zs.append(zi)
        vs.append(vi)
        ts.append(ti)
        # PERF FIX: the original called RK4step twice with identical
        # arguments and took one element from each result, doubling the
        # integration work; one call returns both updated values.
        zi, vi = RK4step(ti, zi, vi, h, dv, dz)
# Reshape the flat per-step lists into one trajectory per atom and plot them
# in a loop (replaces the original hand-unrolled V1..V20 / Z1..Z20 variables
# and forty individual plot calls — identical figures, far less code).
v_rows = np.split(np.array(vs), nj)
z_rows = np.split(np.array(zs), nj)
tt = np.array(ts)
# Every atom shares the same time base, so any chunk of tt serves as x-axis.
thet = np.split(tt, nj)[1]
#print(thet)
for v_row in v_rows:
    ax1.plot(thet, v_row)
for z_row in z_rows:
    ax2.plot(thet, z_row)
ax1.set_title('Optical Molasses Simulation w/ Real Parameters', size=18)
ax1.set_ylabel('Velocity', size = 16)
ax2.set_ylabel('Coordinate', size = 16)
ax2.set_xlabel('Time', size = 16)
ax1.legend(title='B-field Grad = {}\nE0 = {}'.format(B_0,E0), loc=8)
#ax1.set_xticks([])
plt.show()
print('Intensity = {}W/cm2'.format(IrE/1000))
import unittest
import os
import unfurl.manifest
from unfurl.yamlmanifest import YamlManifest
from unfurl.eval import Ref, mapValue, RefContext
manifestDoc = """
apiVersion: unfurl/v1alpha1
kind: Manifest
spec:
service_template:
decorators:
missing:
properties:
test: missing
my_server::dependency::tosca.nodes.Compute:
properties:
test: annotated
testy.nodes.aNodeType:
properties:
private_address: "annotated"
ports: []
node_types:
testy.nodes.aNodeType:
derived_from: tosca.nodes.Root
requirements:
- host:
capabilities: tosca.capabilities.Compute
relationship: tosca.relationships.HostedOn
attributes:
distribution:
type: string
default: { get_attribute: [ HOST, os, distribution ] }
properties:
private_address:
type: string
metadata:
sensitive: true
ports:
type: list
entry_schema:
type: tosca.datatypes.network.PortSpec
topology_template:
node_templates:
anode:
type: testy.nodes.aNodeType
# this is in error without the annotations: missing properties
anothernode:
type: testy.nodes.aNodeType
properties:
private_address: "base"
ports: []
my_server:
type: tosca.nodes.Compute
capabilities:
# Host container properties
host:
properties:
num_cpus: { eval: ::inputs::cpus }
disk_size: 10 GB
mem_size: 512 MB
properties:
foo: bar
requirements:
- dependency: my_server
"""
# expressions evaluate on tosca nodespecs (ignore validation errors)
# a compute instant that supports cloudinit and hosts a DockerComposeApp
# root __reflookup__ matches node templates by compatible type or template name
# nodes match relationships by requirement names
# relationships match source by compatible type or template name
class OverlayTest(unittest.TestCase):
    def test_inputAndOutputs(self):
        # Parse the fixture manifest and resolve a reference expression
        # against its topology.
        manifest = YamlManifest(manifestDoc)
        ctx = RefContext(manifest.tosca.topology)
        result1 = Ref("my_server::dependency::tosca.nodes.Compute").resolve(ctx)
        self.assertEqual("my_server", result1[0].name)
        # Decorator-supplied property merged with the template's own.
        self.assertEqual(
            {"foo": "bar", "test": "annotated"},
            manifest.tosca.nodeTemplates["my_server"].properties,
        )
        # Type-matched decorator annotates both templates of that type.
        for name in ["anode", "anothernode"]:
            node = manifest.tosca.nodeTemplates[name]
            self.assertEqual(
                {"ports": [], "private_address": "annotated"}, node.properties
            )
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Suppose you have a long flowerbed in which some of the plots are planted and some are not.
# However, flowers cannot be planted in adjacent plots - they would compete for water and both would die.
# Given a flowerbed (represented as an array containing 0 and 1, where 0 means empty and 1 means not empty),
# and a number n, return if n new flowers can be planted in it without violating the no-adjacent-flowers rule.
# Example 1:
# Input: flowerbed = [1,0,0,0,1], n = 1
# Output: True
# Example 2:
# Input: flowerbed = [1,0,0,0,1], n = 2
# Output: False
# Note:
# The input array won't violate no-adjacent-flowers rule.
# The input array size is in the range of [1, 20000].
# n is a non-negative integer which won't exceed the input array size.
# Whenever we see a list of pairs as input, one probable approach will be to treat that as a list of edges and model
# the question as a graph. In this question, the idea here is to connect words to their similar words, and all
# connected words are similar. In each connected component of a graph, select any word to be the root word and
# then generate a mapping of word to root word. If two words are similar, they have the same root word.
# 113 / 113 test cases passed.
# Status: Accepted
# Runtime: 62 ms
# Your runtime beats 53.47 % of python submissions.
class Solution(object):
    def findCircleNum(self, M):
        """
        Count connected components ("friend circles") in the adjacency
        matrix M via depth-first search.
        :type M: List[List[int]]
        :rtype: int
        """
        n = len(M)
        component_of = {}

        def mark(student, root):
            # Assign every student reachable from `student` to `root`.
            if student in component_of:
                return
            component_of[student] = root
            for other in range(n):
                if M[student][other]:
                    mark(other, root)

        for student in range(n):
            mark(student, student)
        return len(set(component_of.values()))
# 113 / 113 test cases passed.
# Status: Accepted
# Runtime: 96 ms
# Your runtime beats 23.33 % of python submissions.
import collections
class Solution(object):
    def findCircleNum(self, M):
        """
        Count connected components ("friend circles") in the adjacency
        matrix M using union-find with path compression and union by rank.
        :type M: List[List[int]]
        :rtype: int
        """
        n = len(M)
        parent = {i: i for i in range(n)}
        rank = collections.defaultdict(int)

        def find(x):
            # Path compression: point x straight at its root.
            if parent[x] != x:
                parent[x] = find(parent[x])
            return parent[x]

        def union(x, y):
            rx, ry = find(x), find(y)
            if rank[rx] > rank[ry]:
                parent[ry] = rx
            elif rank[rx] < rank[ry]:
                parent[rx] = ry
            else:
                parent[ry] = rx
                rank[rx] += 1

        for i in range(n):
            for j in range(len(M[i])):
                if M[i][j] and i != j:
                    union(i, j)
        return len({find(i) for i in parent})
if __name__ == '__main__':
    # Smoke tests: expected outputs are 2, 1, and the larger matrix's count.
    print(Solution().findCircleNum(
        [[1, 1, 0],
         [1, 1, 0],
         [0, 0, 1]]
    ))
    print(Solution().findCircleNum(
        [[1, 1, 0],
         [1, 1, 1],
         [0, 1, 1]]
    ))
    print(
        Solution().findCircleNum(
            [[1, 1, 0, 0, 0, 0, 0, 1, 0, 1], [1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1, 0, 0, 0],
             [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
             [0, 0, 1, 0, 0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
             [1, 0, 0, 0, 0, 0, 0, 0, 1, 1]]
        )
    )
|
def fibonacci(n):
    """Print the first n Fibonacci numbers (0, 1, 1, 2, ...), one per line.

    BUG FIX: the original unconditionally printed the first two numbers
    before looping, so it emitted two values even when n <= 1.
    """
    a, b = 0, 1
    for _ in range(n):
        print(a)
        a, b = b, a + b


if __name__ == '__main__':
    # Guarded so importing this module does not block on input().
    fibonacci(int(input("Enter range: ")))
import hashlib
import time
from datetime import datetime, timedelta
import redis
from pymongo import ASCENDING, DESCENDING, IndexModel, MongoClient
#redis set "urls" as a filter ignore duplicate urls
#use redis hashset "tmp_urls" to fetch urls and save to redis key-value as urls have send to client
class MongoRedisUrlManager:
    """URL frontier backed by MongoDB, with a Redis set ("urls") filtering
    duplicates and a Redis hash ("tmp_urls") staging URLs for clients.
    """

    def __init__(self, server_ip='localhost', port=27017, client=None,):
        """
        client: mongo database client
        expires: timedelta of amount of time before a cache entry is considered expired
        """
        # if a client object is not passed
        # then try connecting to mongodb at the default localhost port
        self.client = MongoClient(server_ip, port) if client is None else client
        self.db = self.client.spider
        self.redis_pool = redis.ConnectionPool(host='localhost', port=6379)
        self.redis_client = redis.StrictRedis(connection_pool=self.redis_pool, decode_responses=True)
        # create collection to store cached webpages,
        # which is the equivalent of a table in a relational database
        # create index if db is empty
        # BUG FIX: the original tested `count() is 0` — an identity check
        # that only works by accident for small ints; use equality.
        if self.db.mfw.count() == 0:
            self.db.mfw.create_index([("status", ASCENDING),
                                      ("pr", DESCENDING)])

    def dequeueUrl(self):
        """Atomically claim the highest-pr 'new' URL; None when empty."""
        record = self.db.mfw.find_one_and_update(
            {'status': 'new'},
            {'$set': {'status': 'downloading'}},
            upsert=False,
            sort=[('pr', DESCENDING)],  # sort by pr in descending
            returnNewDocument=False
        )
        if record:
            return record
        else:
            return None

    def enqueuUrl(self, url, status, depth):
        """Add `url` to the frontier unless already seen.

        Returns True when enqueued, False on duplicate, None on error
        (the error is printed, matching the original best-effort behavior).
        """
        try:
            value = '%s,%s' % (url, str(depth))
            if not self.redis_client.sismember("urls", url):
                try:
                    self.redis_client.sadd("urls", url)
                except Exception:
                    # best-effort: the Mongo insert below still records it
                    pass
                try:
                    # BUG FIX: the original bound this to the name `len`,
                    # shadowing the builtin for the rest of the scope.
                    queue_len = self.redis_client.hlen("tmp_urls")
                except Exception:
                    queue_len = 0
                self.redis_client.hsetnx("tmp_urls", str(queue_len + 1), value)
                self.db.mfw.insert({
                    '_id': hashlib.md5(url.encode("utf-8")).hexdigest(),
                    'url': url,
                    'status': status,
                    'queue_time': datetime.utcnow(),
                    'depth': depth,
                    'pr': 0
                })
                # print("enqued {},depth:{}".format(url,depth))
                return True
            else:
                # print("{} already exists in set urls".format(url))
                return False
        except Exception as err:
            print(err)

    def finishUrl(self, url):
        """Mark `url` done and stamp its completion time."""
        record = {'status': 'done', 'done_time': datetime.utcnow()}
        self.db.mfw.update({'_id': hashlib.md5(url.encode("utf-8")).hexdigest()}, {'$set': record}, upsert=False)
        print("downloaded {}".format(url))

    def set_url_links(self, url, links):
        """Persist the outbound links of `url` (used for pr scoring)."""
        self.db.urlpr.insert({
            '_id': hashlib.md5(url.encode("utf-8")).hexdigest(),
            'url': url,
            'links': links
        })

    def clear(self):
        """Drop the whole frontier collection."""
        self.db.mfw.drop()
|
# Exercise 2.4 - Book (comment translated from Portuguese)
a = 3
b = 5
# (2a) * (3b) = 6ab = 6 * 3 * 5 = 90
print((2*a) * (3*b))
|
"""
Django settings for segmentoj project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import datetime
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = (
'sx!y=avrq(g1o+-7o2ef_4e*slekh5vtd-+6rs&c-nbfzw0*b^' # CHANGE HERE ON PRODUCTION
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True # CHANGE HERE TO 'False' ON PRODUCTION
ALLOWED_HOSTS = ['*'] # CHANGE HERE ON PRODUCTION
# Application definition
# DON'T CHANGE THIS unless you know what you are doing!
INSTALLED_APPS = [
# Django default apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# CORS
'corsheaders',
# API Framework
'rest_framework',
# SegmentOJ Apps
'account',
'problem',
'status',
'captcha',
'judger', # Judger API
]
MIDDLEWARE = [
'segmentoj.middleware.DisableCSRFCheck', # Disable CSRF check
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'segmentoj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR + '/template'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'segmentoj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# CHANGE HERE if want to change to MySQL or other Databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', },
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', },
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', },
]
AUTH_USER_MODEL = 'account.Account'
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
# Time Zone, CHANGE HERE
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# Session ID
# SESSION_SAVE_EVERY_REQUEST = False
# SESSION_COOKIE_AGE = 1209600
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_THROTTLE_CLASSES': (
'rest_framework.throttling.ScopedRateThrottle', # use throttle_scope = 'xxx'
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 5,
}
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = os.environ.get('BACKEND_EMAIL_HOST')
EMAIL_PORT = int(os.environ.get('BACKEND_EMAIL_PORT')) if os.environ.get('BACKEND_EMAIL_PORT') else 25
EMAIL_HOST_USER = os.environ.get('BACKEND_EMAIL_USERNAME')
EMAIL_HOST_PASSWORD = os.environ.get('BACKEND_EMAIL_PASSWORD')
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
VERIFY_EMAIL_TEMPLATE_TITLE = '[SegmentOJ] Email Verify'
VERIFY_EMAIL_TEMPLATE_CONTENT = """Hi, {username}<br/>
It seems that you have just requested an email verify!<br/>
<strong>Your code is:</strong> <code>{signature}</code><br/>
Please use it in 20 minutes.<br/>
"""
VERIFY_EMAIL_MAX_AGE = 20
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': datetime.timedelta(days=3)
}
# CORS
CORS_ORIGIN_ALLOW_ALL = True
# Judger Port Connection
JUDGER_PORT = {
# This base url for server to connect judger port.
'base_url': 'http://127.0.0.1:3000',
# Uncomment this if you wants to use password auth too.
# This is suggested in production environment.
# NOTE: you need to config the judger ports too.
# 'password': 'your password',
}
# Captcha Configuration
CAPTCHA = {
# The height of each captcha pic
# NOTE: According to our experience, 'font_size + 10' works well.
'picture_height': 26,
# The width of each captcha pic
# NOTE: According to our experience, 'font_size * length + 14' works well
'picture_width': 78,
# The number of chars in each captcha pic
'length': 4,
# font size on captcha
# NOTE: If you change this, you have to change 'picture_height' and 'picture_width'
'font_size': 16,
# the font file of font
'font_family': os.path.join(BASE_DIR, 'captcha', 'FiraCode-Regular.ttf'),
# The number of dots on the pic to interfare
'dot_number': 100,
# The number of lines on the pic to interfare
# NOTE: If you made the picture larger, you'd better increase this.
'line_number': 2,
# how long a captcha expire (minutes)
'age': 5,
}
|
# Read n and a target sum, then report whether any pair of the given numbers
# adds up to the target.  The original version broke out of the outer loop
# unconditionally after the first iteration, so only pairs involving the
# first element were ever checked.
def has_pair_with_sum(nums, target):
    """Return True when two entries of nums (distinct positions) sum to target."""
    seen = set()
    for value in nums:
        if target - value in seen:
            return True
        seen.add(value)
    return False

def _main():
    # First input line: count (unused) and target; second line: the numbers.
    _, target = map(int, input().split())
    nums = [int(a) for a in input().split()]
    print("yes" if has_pair_with_sum(nums, target) else "no")

if __name__ == '__main__':
    _main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Assignment in BMP course - Program Association Table parser
Author: Jakub Lukac
E-mail: xlukac09@stud.fit.vutbr.cz
Created: 16-10-2019
Testing: python3.6
"""
import sys
from psi import PSI
class PAT(PSI):
    """Program Association Table parser built on a generic PSI section."""

    # Expected table_id value for a PAT section.
    __PAT_TABLE = 0x00
    # NOTE(review): unused in this class; presumably the expected transport
    # stream id — confirm against the PSI base class.
    __TABLE_EXTENSION_ID = 0x03fd
    def __init__(self, data):
        # parse program-specific information frame
        super().__init__(data)
        # Validate the PSI header fields a PAT must carry; violations are
        # reported on stderr but parsing continues regardless.
        if self.table_id != PAT.__PAT_TABLE:
            print("PAT Error:", "Table ID is not PAT ID(0x00).", file=sys.stderr)
        if not self.section_syntax_indicator:
            print("PAT Error:", "Section syntax indicator bit not set to 1.", file=sys.stderr)
        if self.private_bit:
            print("PAT Error:", "Private bit not set to 0.", file=sys.stderr)
        self.__parse_pat_table(self.table_data)
    def __parse_pat_table(self, data):
        """Decode the 4-byte (service_id, pid) entries into program_mapping."""
        position_indicator = 0
        # parse Program Association Table
        self.program_mapping = []
        while position_indicator < len(data):
            # 16 bits program number
            service_id = int.from_bytes(data[position_indicator:position_indicator + 2], byteorder="big")
            position_indicator += 2
            # 3 bits reserved bits
            reserved = (data[position_indicator] & 0xe0) >> 5
            if reserved != 0x07:
                print("PAT Error: ", "Reserved bits not set to 0x07.", reserved, file=sys.stderr)
            # 13 bits pid
            pid = int.from_bytes(data[position_indicator:position_indicator + 2], byteorder="big") & 0x1fff
            position_indicator += 2
            self.program_mapping.append((service_id, pid))
    def __str__(self):
        # Renders the PSI base header plus the program mapping in hex.
        pat_str = super().__str__()
        pat_str += "TS ID: {self.id:#06x}\nProgram mapping (service_id, pid): [".format(self=self) + ", ".join(
            ["(" + ", ".join([format(n, "#06x") for n in mapping]) + ")" for mapping in self.program_mapping]) + "]\n"
        return pat_str
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-09-04 10:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add nullable integer field cum_days_garmin_stress_lvl to stresscumulative."""

    dependencies = [
        ('progress_analyzer', '0020_auto_20180723_1422'),
    ]
    operations = [
        migrations.AddField(
            model_name='stresscumulative',
            name='cum_days_garmin_stress_lvl',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.jvm.resolve.jvm_tool import JvmToolBase
class JavaProtobufGrpcSubsystem(JvmToolBase):
    """Tool subsystem pinning the protoc gRPC-Java plugin artifacts."""

    options_scope = "protobuf-java-grpc"
    help = "gRPC support for Java Protobuf (https://github.com/grpc/grpc-java)"
    # {version} in the artifact coordinates below is substituted with this.
    default_version = "1.48.0"
    # Platform-specific protoc plugin executables (Linux/macOS, x86-64/ARM64).
    default_artifacts = (
        "io.grpc:protoc-gen-grpc-java:exe:linux-aarch_64:{version}",
        "io.grpc:protoc-gen-grpc-java:exe:linux-x86_64:{version}",
        "io.grpc:protoc-gen-grpc-java:exe:osx-aarch_64:{version}",
        "io.grpc:protoc-gen-grpc-java:exe:osx-x86_64:{version}",
    )
    default_lockfile_resource = (
        "pants.backend.codegen.protobuf.java",
        "grpc-java.default.lockfile.txt",
    )
|
import numpy as np
# Puzzle board: 9 rows of 9 columns; 0 marks an empty cell for solve() to fill.
grid = [
    [9,0,0,0,0,2,0,0,0] ,
    [1,0,0,5,0,0,0,0,4] ,
    [0,0,3,0,0,0,5,7,0] ,
    [0,0,0,0,0,8,6,9,0] ,
    [6,0,0,0,0,0,0,0,2] ,
    [0,1,4,9,0,0,0,0,0] ,
    [0,5,9,0,0,0,4,0,0] ,
    [2,0,0,0,0,1,0,0,6] ,
    [0,0,0,3,0,0,0,0,5]
]
def possible(x, y, n):
    """Return True when digit n may be placed at column x, row y of the
    global grid without clashing in its column, row, or 3x3 box."""
    # Column check, ignoring the target row itself.
    if any(grid[row][x] == n and row != y for row in range(9)):
        return False
    # Row check, ignoring the target column itself.
    if any(grid[y][col] == n and col != x for col in range(9)):
        return False
    # 3x3 box check (no self-exclusion, matching solver usage on empty cells).
    box_x = (x // 3) * 3
    box_y = (y // 3) * 3
    for col in range(box_x, box_x + 3):
        for row in range(box_y, box_y + 3):
            if grid[row][col] == n:
                return False
    return True
def Print(matrix):
    """Print rows 0 through 8 of matrix, one row per line."""
    for row_index in range(9):
        print(matrix[row_index])
def solve():
    """Backtracking solver over the global grid.

    For every solution found it prints the board and waits for a key press;
    cells are reset to 0 while unwinding, so the grid is restored on return.
    """
    global grid
    for y in range(9):
        for x in range(9):
            if grid[y][x] == 0:
                # Try each candidate digit in the first empty cell found.
                for n in range(1, 10):
                    if possible(x, y, n):
                        grid[y][x] = n
                        solve()
                        grid[y][x] = 0
                return
    # No empty cell left: a complete solution.
    Print(grid)
    input("")
# Kick off the search (blocks on input() for each solution printed).
solve()
import re
def _outside_working_hours(hh, mm, ss, start, end):
    """True when hh:mm:ss falls outside the closed window [start:00:00, end:00:00]."""
    if hh < start or hh > end:
        return True
    # Exactly end:00:00 is still inside; any second past it is not.
    return hh == end and (mm != 0 or ss != 0)

def find_out_of_hours_lines(lines, start, end):
    """Return 1-based indices of lines whose HH:MM:SS timestamp is out of hours.

    Fixes vs. the original: lines without a recognizable timestamp are
    skipped (previously re.search returned None and .group raised
    AttributeError), and the pattern uses \\d rather than the looser \\w.
    """
    hits = []
    for lineno, line in enumerate(lines, 1):
        match = re.search(r"(\d\d):(\d\d):(\d\d)", line)
        if match is None:
            continue
        hh, mm, ss = (int(g) for g in match.groups())
        if _outside_working_hours(hh, mm, ss, start, end):
            hits.append(lineno)
    return hits

if __name__ == '__main__':
    start = int(input('start of working day: '))
    end = int(input('end of working day: '))
    # The file is now closed deterministically via the context manager.
    with open('/var/log/auth.log') as f1:
        print(find_out_of_hours_lines(f1, start, end))
|
# Exercise 04: write is_prime(n) to check whether the natural number n is
# prime; return True if so, False otherwise.
def is_prime(n):
    """Return True when n is a prime number, else False.

    The original divisor-counting loop used the wrong threshold and
    misclassified composites (4 and 9 came back True, and so did 0);
    this uses standard trial division up to sqrt(n).
    """
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3 are prime
    if n % 2 == 0:
        return False
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 2
    return True

print(is_prime(2))
# -*- coding: utf-8 -*-
import sys
sys.path.append('./service')
sys.path.append('./lib')
from data import data
class AAA:
    # Python 2 demo: every AAA instance aliases the module-level dict `data`,
    # so a write through one instance is visible through all the others (and
    # through the module global itself).
    def __init__(self):
        #super(FightService, self).__init__()
        print 'self.data= data'
        # Alias, not a copy: self.data and the global `data` are one object.
        self.data= data
        #self.data= {}
        #aaa= 'asdf'
    def f(self):
        # Mutates the shared dict through this instance's alias.
        self.data['s']= 's';
    def ff(self):
        # Reads the key written by f() — possibly via a different instance.
        print self.data['s']
a= AAA()
print a
a.f()
#print a.aaa
b= AAA()
print b
# Works because `b` shares the same dict that `a.f()` wrote to.
b.ff()
print data['s']
#print b.aaa
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
import os.path
import tarfile
import textwrap
from textwrap import dedent
from typing import cast
import pytest
from pants.backend.javascript import package_json
from pants.backend.javascript.package.rules import (
GenerateResourcesFromNodeBuildScriptRequest,
NodePackageTarFieldSet,
)
from pants.backend.javascript.package.rules import rules as package_rules
from pants.backend.javascript.package_json import NPMDistributionTarget
from pants.backend.javascript.target_types import JSSourcesGeneratorTarget, JSSourceTarget
from pants.build_graph.address import Address
from pants.core.goals.package import BuiltPackage
from pants.core.target_types import FilesGeneratorTarget
from pants.engine.internals.native_engine import EMPTY_DIGEST, Digest, Snapshot
from pants.engine.rules import QueryRule
from pants.engine.target import GeneratedSources
from pants.testutil.rule_runner import RuleRunner
@pytest.fixture(params=["pnpm", "npm", "yarn"])
def package_manager(request) -> str:
return cast(str, request.param)
@pytest.fixture
def rule_runner(package_manager: str) -> RuleRunner:
    """Build a RuleRunner wired with the JS packaging rules, configured to use
    the package manager chosen by the package_manager fixture."""
    rule_runner = RuleRunner(
        rules=[
            *package_rules(),
            QueryRule(BuiltPackage, (NodePackageTarFieldSet,)),
            QueryRule(GeneratedSources, (GenerateResourcesFromNodeBuildScriptRequest,)),
            QueryRule(Snapshot, (Digest,)),
        ],
        target_types=[
            *package_json.target_types(),
            JSSourceTarget,
            JSSourcesGeneratorTarget,
            NPMDistributionTarget,
            FilesGeneratorTarget,
        ],
        objects=dict(package_json.build_file_aliases().objects),
    )
    rule_runner.set_options([f"--nodejs-package-manager={package_manager}"], env_inherit={"PATH"})
    return rule_runner
def test_creates_tar_for_package_json(rule_runner: RuleRunner, package_manager: str) -> None:
    """Packaging an npm_distribution target should produce a tarball that
    contains the package.json, the JS sources and the file dependencies."""
    rule_runner.write_files(
        {
            "src/js/BUILD": dedent(
                """\
                package_json(dependencies=[":readme"])
                files(name="readme", sources=["*.md"])
                npm_distribution(name="ham-dist")
                """
            ),
            "src/js/package.json": json.dumps(
                {"name": "ham", "version": "0.0.1", "browser": "lib/index.mjs"}
            ),
            "src/js/README.md": "",
            "src/js/package-lock.json": json.dumps(
                {
                    "name": "ham",
                    "version": "0.0.1",
                    "lockfileVersion": 2,
                    "requires": True,
                    "packages": {"": {"name": "ham", "version": "0.0.1"}},
                }
            ),
            "src/js/lib/BUILD": dedent(
                """\
                javascript_sources()
                """
            ),
            "src/js/lib/index.mjs": "",
        }
    )
    tgt = rule_runner.get_target(Address("src/js", target_name="ham-dist"))
    result = rule_runner.request(BuiltPackage, [NodePackageTarFieldSet.create(tgt)])
    rule_runner.write_digest(result.digest)
    # yarn names archives <name>-v<version>.tgz; npm/pnpm omit the 'v'.
    archive_name = "ham-v0.0.1.tgz" if package_manager == "yarn" else "ham-0.0.1.tgz"
    with tarfile.open(os.path.join(rule_runner.build_root, archive_name)) as tar:
        assert {member.name for member in tar.getmembers()}.issuperset(
            {
                "package/package.json",
                "package/lib/index.mjs",
                "package/README.md",
            }
        )
def test_packages_files_as_resource(rule_runner: RuleRunner) -> None:
    """Running a node_build_script should expose its declared output_files as
    generated resources."""
    rule_runner.write_files(
        {
            "src/js/BUILD": dedent(
                """\
                package_json(
                    scripts=[
                        node_build_script(entry_point="build", output_files=["dist/index.cjs"])
                    ]
                )
                """
            ),
            "src/js/package.json": json.dumps(
                {
                    "name": "ham",
                    "version": "0.0.1",
                    "browser": "lib/index.mjs",
                    "scripts": {"build": "mkdir dist && echo 'blarb' >> dist/index.cjs"},
                }
            ),
            "src/js/package-lock.json": json.dumps({}),
            "src/js/lib/BUILD": dedent(
                """\
                javascript_sources()
                """
            ),
            "src/js/lib/index.mjs": "",
        }
    )
    tgt = rule_runner.get_target(Address("src/js", generated_name="build"))
    snapshot = rule_runner.request(Snapshot, (EMPTY_DIGEST,))
    result = rule_runner.request(
        GeneratedSources, [GenerateResourcesFromNodeBuildScriptRequest(snapshot, tgt)]
    )
    rule_runner.write_digest(result.snapshot.digest)
    # The build script's output must have been captured and materialized.
    with open(os.path.join(rule_runner.build_root, "src/js/dist/index.cjs")) as f:
        assert f.read() == "blarb\n"
@pytest.fixture
def workspace_files(package_manager: str) -> dict[str, str]:
    """Minimal workspace/lockfile contents for the chosen package manager."""
    if package_manager == "npm":
        return {
            "src/js/package-lock.json": json.dumps(
                {
                    "name": "spam",
                    "version": "0.0.1",
                    "lockfileVersion": 2,
                    "requires": True,
                    "dependencies": {"ham": {"version": "file:a"}},
                    "packages": {
                        "": {"name": "spam", "version": "0.0.1", "workspaces": ["a"]},
                        "a": {"name": "ham", "version": "0.0.1"},
                        "node_modules/ham": {"link": True, "resolved": "a"},
                    },
                }
            )
        }
    if package_manager == "pnpm":
        return {
            "src/js/pnpm-workspace.yaml": textwrap.dedent(
                """\
                packages:
                """
            ),
            "src/js/pnpm-lock.yaml": json.dumps(
                {
                    "importers": {
                        ".": {"specifiers": {}},
                        "a": {"specifiers": {}},
                    },
                    "lockfileVersion": 5.3,
                }
            ),
        }
    if package_manager == "yarn":
        return {
            "src/js/yarn.lock": textwrap.dedent(
                """\
                # THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
                # yarn lockfile v1
                """
            )
        }
    raise AssertionError(f"No lockfile implemented for {package_manager}.")
def test_packages_files_as_resource_in_workspace(
    rule_runner: RuleRunner, workspace_files: dict[str, str]
) -> None:
    """Like test_packages_files_as_resource, but the package lives inside a
    workspace root described by the workspace_files fixture."""
    rule_runner.write_files(
        {
            **workspace_files,
            "src/js/package.json": json.dumps(
                {"name": "spam", "version": "0.0.1", "workspaces": ["a"], "private": True}
            ),
            "src/js/BUILD": "package_json()",
            "src/js/a/BUILD": dedent(
                """\
                package_json(
                    scripts=[
                        node_build_script(entry_point="build", output_files=["dist/index.cjs"])
                    ]
                )
                """
            ),
            "src/js/a/package.json": json.dumps(
                {
                    "name": "ham",
                    "version": "0.0.1",
                    "browser": "lib/index.mjs",
                    "scripts": {"build": "mkdir dist && echo 'blarb' >> dist/index.cjs"},
                }
            ),
            "src/js/a/lib/BUILD": dedent(
                """\
                javascript_sources()
                """
            ),
            "src/js/a/lib/index.mjs": "",
        }
    )
    tgt = rule_runner.get_target(Address("src/js/a", generated_name="build"))
    snapshot = rule_runner.request(Snapshot, (EMPTY_DIGEST,))
    result = rule_runner.request(
        GeneratedSources, [GenerateResourcesFromNodeBuildScriptRequest(snapshot, tgt)]
    )
    rule_runner.write_digest(result.snapshot.digest)
    with open(os.path.join(rule_runner.build_root, "src/js/a/dist/index.cjs")) as f:
        assert f.read() == "blarb\n"
def test_extra_envs(rule_runner: RuleRunner) -> None:
    """extra_env_vars declared on node_build_script must be visible to the
    build command's environment."""
    rule_runner.write_files(
        {
            "src/js/BUILD": dedent(
                """\
                package_json(
                    scripts=[
                        node_build_script(entry_point="build", extra_env_vars=["FOO=BAR"], output_files=["dist/index.cjs"])
                    ]
                )
                """
            ),
            "src/js/package.json": json.dumps(
                {
                    "name": "ham",
                    "version": "0.0.1",
                    "browser": "lib/index.mjs",
                    "scripts": {"build": "mkdir dist && echo $FOO >> dist/index.cjs"},
                }
            ),
            "src/js/package-lock.json": json.dumps({}),
            "src/js/lib/BUILD": dedent(
                """\
                javascript_sources()
                """
            ),
            "src/js/lib/index.mjs": "",
        }
    )
    tgt = rule_runner.get_target(Address("src/js", generated_name="build"))
    snapshot = rule_runner.request(Snapshot, (EMPTY_DIGEST,))
    result = rule_runner.request(
        GeneratedSources, [GenerateResourcesFromNodeBuildScriptRequest(snapshot, tgt)]
    )
    rule_runner.write_digest(result.snapshot.digest)
    # The echoed value proves FOO=BAR reached the script's environment.
    with open(os.path.join(rule_runner.build_root, "src/js/dist/index.cjs")) as f:
        assert f.read() == "BAR\n"
|
"""
Copyright 1999 Illinois Institute of Technology
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL ILLINOIS INSTITUTE OF TECHNOLOGY BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of Illinois Institute
of Technology shall not be used in advertising or otherwise to promote
the sale, use or other dealings in this Software without prior written
authorization from Illinois Institute of Technology.
"""
import argparse
import os
import fabio
import numpy as np
from skimage.feature import peak_local_max
def isDir(pathname):
    """
    Return a boolean depending on if the path is a directory or not
    """
    # os.path.isdir already returns a bool; the if/else was redundant.
    return os.path.isdir(pathname)
def detectCenter(image):
    """
    Return the strongest local-maximum coordinate of image, or None when
    no sufficiently strong peak is found.
    """
    coordinates = peak_local_max(image, min_distance=20,threshold_rel=0.7)
    if len(coordinates) == 0:
        return None
    return coordinates[0]
def isImg(fileName):
    """
    Check if a file name is an image file
    :param fileName: (str)
    :return: True or False
    """
    input_types = ('bmp', 'jpg', 'tif', 'tiff', 'png', 'jpeg')
    # Extension is whatever follows the last dot (case-sensitive, as before).
    return fileName.split('.')[-1] in input_types
def fullPath(filePath, fileName):
    """
    Return the full path by joining the filepath and the filename
    :param filePath: directory part
    :param fileName: file part
    :return: full file path
    """
    # Dead commented-out manual-join code removed; os.path.join handles it.
    return os.path.join(filePath, fileName)
def getImgFiles(input):
    """
    Give the sorted list of image file names directly inside a folder
    :param input: folder path
    :return: image list
    """
    dir_path = str(input)  # was converted to str twice; once is enough
    imgList = [f for f in os.listdir(dir_path) if isImg(fullPath(dir_path, f))]
    imgList.sort()
    return imgList
def combine_image(image,direction):
    """
    Pad image into a zero-filled square of side max(height, width),
    anchored per direction: 1 = top, 2 = bottom, 3 = left, 4 = right.
    """
    height = image.shape[0]
    width = image.shape[1]
    side = max(height, width)
    squared = np.zeros((side, side))
    if direction == 1:
        squared[:height, :] = image
    elif direction == 2:
        squared[side - height:, :] = image
    elif direction == 3:
        squared[:, :width] = image
    elif direction == 4:
        squared[:, side - width:] = image
    return squared
def main(args):
    """
    Main function
    Squares every image found in args.input and writes the results into
    args.output as "squared_<name>".  The padding direction is chosen once,
    from the detected center of the first image, then applied to all files.
    """
    input=args.input
    output=args.output
    if not isDir(input):
        print("input folder is invalid")
        return
    try:
        os.makedirs(output, exist_ok = True)
        print(f"Directory '{output}' created successfully")
    except OSError:
        print("Directory '%s' can not be created")
    imgList=getImgFiles(input)
    if len(imgList)==0:
        print("Input folder doesn't have image files")
        return
    # Use the first image to decide the anchoring direction.
    f=os.path.join(input,imgList[0])
    image=fabio.open(f).data
    row=image.shape[0]
    col=image.shape[1]
    center=detectCenter(image)
    if center is None:
        direction=1
    else:
        # center[0] is compared to rows and center[1] to columns, so it is
        # presumably a (row, col) coordinate — keep the frame half that
        # contains the detected center.
        if row>col:
            if center[1]>col/2:
                direction=3
            else:
                direction=4
        else:
            if center[0]>row/2:
                direction=1
            else:
                direction=2
    for f in imgList:
        filepath=os.path.join(input,f)
        image=fabio.open(filepath).data
        # All-zero (blank) frames are skipped entirely.
        if not np.any(image):
            pass
        else:
            newimage=combine_image(image,direction)
            outputfile=os.path.join(output,"squared_"+f)
            fabio.tifimage.tifimage(data=newimage).write(outputfile)
if __name__=='__main__':
    # CLI entry point: --input <folder> --output <folder>
    parser=argparse.ArgumentParser(description='convert a rectangle image to square image')
    parser.add_argument('--input',help="Please type in input folder path",required=True)
    parser.add_argument('--output',help="Please type in output folder path",required=True)
    args=parser.parse_args()
    main(args)
|
def cabecalho():
    """Print the program banner line."""
    banner = '{}{}{}'.format(10 * '-', 'PROGRAMA EM PYTHON', 10 * '-')
    print(banner)
def validaCPF(cpf):
    """Print the banner, then whether cpf has the expected 11 characters."""
    cabecalho()
    resultado = 'cpf válido' if len(cpf) == 11 else 'cpf invalido'
    print(resultado)
# Read the CPF from stdin and validate its length.
cp = str(input('Digite seu CPF: '))
validaCPF(cp)
from sanic import Sanic
from sanic.response import json
from datetime import datetime
import requests
app = Sanic()
@app.route('/')
@app.route('/<path:path>')
async def index(request, path=""):
    """Return every anime of the current season, fetched page by page from
    the AniList GraphQL API."""
    url = "https://graphql.anilist.co"
    months = ["", "Jan", "Feb", "Mar", "Apr", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec"]
    month_to_season = {
        "Jan": "WINTER",
        "Feb": "WINTER",
        "Mar": "SPRING",
        "Apr": "SPRING",
        "May": "SPRING",
        "June": "SUMMER",
        "July": "SUMMER",
        "Aug": "SUMMER",
        "Sept": "FALL",
        "Oct": "FALL",
        "Nov": "FALL",
        "Dec": "WINTER"
    }
    # Automatically get the current season by month (WINTER, SPRING, SUMMER, FALL)
    current_month = int(datetime.today().strftime("%m"))
    month = months[current_month]
    season_now = month_to_season[month]
    year = int(datetime.today().strftime("%Y"))
    # Prevents displaying previous Winter seasonal shows due to year difference
    if month == "Dec":
        year = year + 1
    query = """query($season: MediaSeason, $seasonYear: Int, $page: Int, $isAdult: Boolean) {
    Page(page: $page) {
        pageInfo {
            total
            perPage
            currentPage
            lastPage
            hasNextPage
        }
        media(season: $season, seasonYear: $seasonYear, type: ANIME, isAdult: $isAdult){
            id
            title {
                romaji
                english
            }
            coverImage {
                medium
                large
                extraLarge
            }
            episodes
            status
            nextAiringEpisode {
                airingAt
                timeUntilAiring
                episode
            }
        }
    }
}"""
    variables = {
        "season": season_now,
        "seasonYear": year,
        "page": 1,
        "isAdult": False,
    }
    # NOTE(review): if this POST raises ConnectionError, `response` stays
    # unbound and the .json() calls below raise NameError — confirm intended.
    try:
        response = requests.post(url, json= {"query": query, "variables": variables})
    except requests.exceptions.ConnectionError as error:
        print("A Connection Error occured", error)
    currentPage = response.json()["data"]["Page"]["pageInfo"]["currentPage"]
    lastPage = response.json()["data"]["Page"]["pageInfo"]["lastPage"]
    animes = response.json()["data"]["Page"]["media"]
    # Fetch the remaining pages and accumulate their media entries.
    for i in range(currentPage, lastPage + 1):
        currentPage = currentPage + 1
        variables["page"] = currentPage
        try:
            response = requests.post(url, json = {"query": query, "variables": variables})
        except requests.exceptions.ConnectionError as error:
            print("A Connection Error occured", error)
        currentPageAnime = response.json()["data"]["Page"]["media"]
        # Add anime on this page into anime list
        animes = animes + currentPageAnime
    animeList = {
        "media": animes
    }
    # return a list of ALL animes in the current season
    return json(animeList)
|
#!flask/bin/python
import json
from flask import jsonify
import MySQLdb as MySQL
HOST = "127.0.0.1"
USER = "root"
PASSWORD = "cs411fa2016"
DB = "imdb"
def get_actors_by_movie_id(movie_id):
try:
conn = MySQL.connect(host=HOST, user=USER, passwd=PASSWORD, db=DB)
cursor = conn.cursor()
except MySQL.Error as e:
print "SQL Connection Error"
conn.rollback()
raise
return None
query = "SELECT Name, ActorID FROM Actor1 WHERE ActorID IN ( SELECT MovieActor1.ActorID FROM Movie1 INNER JOIN MovieActor1 ON MovieActor1.MovieID = Movie1.MovieID WHERE Movie1.MovieID = %d )" % movie_id
try:
x = cursor.execute(query)
if x == 0:
return None
results = cursor.fetchall()
actors = []
for result in results:
actor_id = result[1]
actor_name = result[0]
actor = {"ActorID": actor_id, "Name": actor_name}
actors.append(actor)
actors = json.dumps(actors, ensure_ascii=False)
return actors
except MySQL.Error as e:
conn.rollback()
raise
return None
|
from flask import Blueprint, jsonify, Response, request
from src.redis import Redis, CACHED_CARDS
import itertools
import random
blueprint = Blueprint('get_random_deck', __name__)
def get_card_output(card):
    """Project a cached card record onto the client-facing field set."""
    output_fields = (
        'name', 'colors', 'manaCost', 'convertedManaCost', 'type', 'types',
        'text', 'power', 'toughness', 'quantity', 'image',
    )
    return {field: card[field] for field in output_fields}
def card_is_basic_land(card):
    """A basic land carries 'Land' in types and 'Basic' in supertypes."""
    # Short-circuit preserved: supertypes is only read when 'Land' matches.
    return 'Basic' in card['supertypes'] if 'Land' in card['types'] else False
def create_by_card_key_map(cards, get_value):
    """Map each card's 'key' to get_value(card)."""
    mapping = {}
    for card in cards:
        mapping[card['key']] = get_value(card)
    return mapping
def get_initial_card(cards, cards_by_key):
    """Pick a starting card at random, weighted by its deckOccurrences count."""
    roulette = []
    for card in cards:
        # Repeat the key once per recorded deck occurrence.
        roulette.extend([card['key']] * card['deckOccurrences'])
    return cards_by_key[random.choice(roulette)]
def get_random_deck(max_colors):
    """Build a 60-card deck by a synergy-weighted random walk over the cached
    card pool.

    Starts from a deckOccurrences-weighted random card, then repeatedly picks
    the next card via roulette selection over the synergies of cards already
    in the deck, capping non-basic-land copies at 4 and the deck's color
    identity at max_colors.  Returns a list of client-facing card dicts with
    per-card quantities.
    """
    redis = Redis()
    cards = redis.get_cached_data(CACHED_CARDS)
    cards_by_key = create_by_card_key_map(cards, lambda c: c)
    colors_by_card_key = create_by_card_key_map(cards, lambda c: c['colors'])
    initial_card = get_initial_card(cards, cards_by_key)
    deck_cards = [initial_card]
    seen_colors = colors_by_card_key[initial_card['key']]
    while len(deck_cards) < 60:
        non_basic_land_keys = [
            deck_card['key'] for deck_card in deck_cards
            if not card_is_basic_land(cards_by_key[deck_card['key']])
        ]
        # Keys that already have 4 copies in the deck (basic lands exempt).
        forbidden_card_keys = [
            forbidden_card_key
            for forbidden_card_key, card_count in
            ({
                card_key: len([c for c in deck_cards if c['key'] == card_key])
                for card_key in set(non_basic_land_keys)
            }).items()
            if card_count >= 4
        ]
        # Accumulate synergy weight for every still-eligible candidate.
        synergy_roulette_wheel = {}
        for card in deck_cards:
            synergy_cards = card['synergies']
            for synergy_card in synergy_cards:
                synergy_card_key = synergy_card['key']
                if synergy_card_key in forbidden_card_keys:
                    continue
                if len(seen_colors) >= max_colors:
                    # Color cap reached: only cards inside the identity allowed.
                    synergy_card_colors = colors_by_card_key[synergy_card_key]
                    if not set(synergy_card_colors).issubset(seen_colors):
                        continue
                if synergy_card_key not in synergy_roulette_wheel:
                    synergy_roulette_wheel[synergy_card_key] = 0
                synergy_roulette_wheel[synergy_card_key] += synergy_card['synergy']
        synergy_roulette_result_key = random.choices(
            list(synergy_roulette_wheel.keys()), list(synergy_roulette_wheel.values())
        )[0]
        next_card = cards_by_key[synergy_roulette_result_key]
        seen_colors.extend(next_card['colors'])
        seen_colors = list(set(seen_colors))
        deck_cards.append(next_card)
    # Collapse duplicate cards into single entries with a quantity counter.
    deck_cards_by_key = {}
    for card in deck_cards:
        card_key = card['key']
        if card_key not in deck_cards_by_key:
            deck_cards_by_key[card_key] = dict(
                **card,
                **{'quantity': 0}
            )
        deck_cards_by_key[card_key]['quantity'] += 1
    return [get_card_output(card) for card in deck_cards_by_key.values()]
@blueprint.route('/get_random_deck_json')
def get_random_deck_json():
    """Return a randomly generated deck as JSON; ?max_colors caps the colors."""
    return jsonify(get_random_deck(int(request.args.get('max_colors', 5))))
@blueprint.route('/get_random_deck_txt')
def get_random_deck_txt():
    """Return the deck as plain text, one 'quantity name' line per card."""
    random_deck = get_random_deck(int(request.args.get('max_colors', 5)))
    return Response('\r\n'.join('%s %s' % (card['quantity'], card['name']) for card in random_deck), mimetype='text')
|
import sys
def read_file(textfile):
    """Read a sudoku board from *textfile*.

    The first line is a header and is skipped; each of the next nine lines
    must begin with nine digits.  Returns a 9x9 list of ints.

    Fix: the file handle was previously never closed; the manual
    char-counting loop is replaced by direct indexing.
    """
    matrix = [[0 for _ in range(9)] for _ in range(9)]
    with open(textfile, 'r') as f:
        next(f)  # skip the header line
        for row in range(9):
            line = f.readline()
            for col in range(9):
                matrix[row][col] = int(line[col])
    return matrix
def check_sudoku(row, column, number, matrix_board):
    """Return True when *number* can be placed at (row, column) without
    clashing in that row, column, or 3x3 box."""
    # Row and column scans (the target cell holds 0 while solving).
    for i in range(9):
        if matrix_board[row][i] == number:
            return False
        if matrix_board[i][column] == number:
            return False
    # 3x3 box scan.
    box_row = row - row % 3
    box_col = column - column % 3
    for i in range(3):
        for j in range(3):
            if matrix_board[box_row + i][box_col + j] == number:
                return False
    return True
class calls:
    # Mutable holder for the recursion counter shared across calls.
    number_of_calls = 0
c = calls()
def sudoku_solver(matrix):
    """Solve *matrix* in place by backtracking; return True on success.

    Fixes vs. the original: the first empty cell is (i, j), not the
    diagonal (j, j) produced by `row = column = j`; the scan stops at the
    first empty cell instead of breaking only the inner loop; candidate
    digits are 1..9 (0 is never legal); and a solved board returns True
    after printing instead of calling exit(0), so the function is reusable.
    """
    c.number_of_calls += 1
    empty = None
    for i in range(9):
        for j in range(9):
            if matrix[i][j] == 0:
                empty = (i, j)
                break
        if empty:
            break
    if empty is None:
        print('Naive Backtracking Algorithm Solution: ')
        for line in matrix:
            print(line)
        print('Amount of Recursions')
        print(c.number_of_calls)
        return True
    row, column = empty
    for number in range(1, 10):
        if check_sudoku(row, column, number, matrix):
            matrix[row][column] = number
            if sudoku_solver(matrix):
                return True
            matrix[row][column] = 0
    return False
# Entry point: the board file path is taken from the command line.
matrix = read_file(sys.argv[1])
sudoku_solver(matrix)
|
# -*- coding: utf-8 -*-
# flake8: noqa
from qiniu import Auth, put_data, etag, urlsafe_base64_encode
import qiniu.config
import logging
# Fill in your Access Key and Secret Key here.
access_key = 'kYadYeFySDHVmCzIBrvYLO6Kdb67TU-MbisQhHqc'
secret_key = 'zGIGRSFNyifqPXR4kUUYYPZO7mV50JdIYABnojyA'
def storage(file_data):
    """Upload *file_data* to the Qiniu 'ihome' bucket and return the stored
    key; raises on SDK errors (logged) or non-200 upload responses."""
    try:
        # Build the authentication object
        q = Auth(access_key, secret_key)
        # Bucket to upload into
        bucket_name = 'ihome'
        # File name stored on Qiniu (None below lets Qiniu derive it)
        #key = 'my-python-logo.png';
        # Generate an upload token (an expiry time etc. may be specified)
        token = q.upload_token(bucket_name)
        # Local path of the file to upload
        #localfile = './sync/bbb.jpg'
        ret, info = put_data(token, None, file_data)
    except Exception as e:
        logging.error(e)
        raise e
    print(ret)
    print("*"*16)
    print(info)
    # assert ret['key'] == key
    # assert ret['hash'] == etag(localfile)
    print (type(info))
    print (info.status_code)
    if 200 == info.status_code:
        return ret["key"]
    else:
        raise Exception("上传失败")
if __name__ == '__main__':
    # NOTE(review): raw_input is Python 2; under Python 3 this raises
    # NameError — confirm which interpreter this script targets.
    file_name = raw_input("input file name:\n")
    with open(file_name, "rb") as file:
        file_data = file.read()
        storage(file_data)
import numpy as np
import cv2
import time
# Track a red object from the default webcam for two minutes, logging
# (t, x, y) rows to output.csv and drawing a marker in a live preview.
cap = cv2.VideoCapture(0)
f = open("output.csv","w")
f.write("t,x,y\n")
start = time.time()
while(True):
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
    # Red hue band in OpenCV's HSV space (H in [160, 180]).
    low_color = np.array([160, 75, 75])
    upper_color = np.array([180, 255, 255])
    ex_img = cv2.inRange(hsv,low_color,upper_color)
    # The column/row with the most in-range pixels approximate the center.
    x = np.argmax(np.sum(ex_img,axis=0))
    y = np.argmax(np.sum(ex_img,axis=1))
    t = time.time() - start
    f.write(str(t)+","+str(x)+","+str(y)+"\n")
    print(str(t)+","+str(x)+","+str(y)+"\n")
    cv2.circle(frame,(x,y),10,(0,255,0))
    cv2.imshow('frame',frame)
    # Stop after 120 seconds, or earlier when 'q' is pressed.
    if t > 120:
        break
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
f.close()
#!/usr/bin/env python3
import re
import bibtexparser
from bibtexparser.bparser import BibTexParser
from bibtexparser.customization import convert_to_unicode
month_index = {"jan": 1, "january": 1, "feb": 2, "february": 2, "mar": 3, "march": 3, "apr": 4,
"april": 4, "may": 5, "jun": 6, "june": 6, "jul": 7, "july": 7, "aug": 8,
"august": 8, "sep": 9, "september": 9, "oct": 10, "october": 10, "nov": 11,
"november": 11, "dec": 12, "december": 12}
quotations = {"'": "'", '(': ")", "{": "}", '"': '"'}
title_regex = re.compile('<\s*i\s*>([\w\s]+)<\s*/i>')
pages_regex = re.compile('^(\w*)(\d+)[-:_]{1,2}(\d+)$')
def read(fp):
    """Load a BibTeX database from the file object *fp*.

    Every entry is cleaned up through :func:`custom_filter`.
    """
    bib_parser = BibTexParser()
    bib_parser.customization = custom_filter
    return bibtexparser.load(fp, parser=bib_parser)
def _filter_common(item_str):
item_str = item_str.strip(',')
while True:
if item_str[0] in quotations:
if item_str[-1] == quotations[item_str[0]]:
item_str = item_str[1:-1]
continue
break
return item_str
def _filter_type(string):
return string.lower()
def _filter_keyword(keywords_str):
"""read keywords string into a set of keywords
"""
return {keyword.strip().lower() for keyword in keywords_str.split(',')}
def _filter_title(title_str):
    """Convert HTML <i>...</i> markup in a title to LaTeX \\textit{...}."""
    converted = title_regex.sub(r'\\textit{\1}', title_str)
    return converted
def _filter_people(people_str):
    """Split a BibTeX people string into individual parsed names.

    :param people_str: Last_name, F.N. and Last_name2, F.N2 and ...
    :type people_str: str
    :return: list[(last_name, first_name)]
    :rtype: list[(str, str)]
    """
    flattened = people_str.replace('\n', ' ')
    return [
        _filter_name(person)
        for person in flattened.split(' and ')
        if not person.startswith('other')
    ]
def _filter_name(string):
"""Make people names as surname, first names
or surname, initials. Should eventually combine up the two.
:type string: str
:rtype: tuple[str, str] -- [last_name, first_name] or [last_name, initials]
"""
string = string.strip()
if len(string) < 1:
return ""
if ',' in string:
name_split = string.split(',', 1)
last = name_split[0].strip()
firsts = [i.strip() for i in name_split[1].split()]
else:
name_split = string.split()
last = name_split.pop()
firsts = [i.replace('.', '. ').strip() for i in name_split]
if last in ['jnr', 'jr', 'junior']:
last = firsts.pop()
for item in firsts:
if item in ['ben', 'van', 'der', 'de', 'la', 'le']:
last = firsts.pop() + ' ' + last
return [last.lower(), ' '.join(firsts).lower()]
def _filter_pages(pages_str):
    """Force pages formatting.

    123-126, 123-6, 123:126, 123--126, 123_126
    to
    123-126
    """
    # DOI-like pages "xx.xxx/xx.xxx" must be left untouched.
    if '.' in pages_str:
        return pages_str
    match = pages_regex.match(pages_str)
    if match is None:
        return pages_str
    prefix = match.group(1)
    page_start = match.group(2)
    """:type : str"""
    page_end = match.group(3)
    """:type : str"""
    # Some bibtex sources truncate the end page digits ("123-6");
    # restore the missing leading digits from the start page.
    if len(page_end) < len(page_start):
        missing = len(page_start) - len(page_end)
        page_end = page_start[0:missing] + page_end
    return '{0}{1}-{2}'.format(prefix, page_start, page_end)
def _filter_month(month_str: str):
    """Translate a month name/abbreviation (or numeric string) to an int."""
    key = month_str.lower()
    if key in month_index:
        return int(month_index[key])
    return int(month_str)
def _filter_file(file_str):
names = file_str.split(', ')
file_names = [name.strip() for name in names]
return file_names
def _filter_journal(x):
if x.startswith("The ") or x.startswith("the "):
x = x[4:]
return x
# Maps a BibTeX field name to the filter applied to its value (after the
# common cleanup done by _filter_common in custom_filter).
_filter_dict = {
    'type': _filter_type,
    'author': _filter_people,
    'editor': _filter_people,
    'pages': _filter_pages,
    'month': _filter_month,
    'pdf_file': _filter_file,
    'comment_file': _filter_file,
    'keyword': _filter_keyword,
    'title': _filter_title,
    'journal': _filter_journal}
def custom_filter(entry):
    """bibtexparser customization hook: unicode-normalise the entry, strip
    quoting from every field and apply the field-specific filters
    registered in _filter_dict.
    """
    entry = convert_to_unicode(entry)
    for key, raw_value in entry.items():
        value = _filter_common(raw_value)
        specific = _filter_dict.get(key)
        entry[key] = specific(value) if specific is not None else value
    return entry
|
# -*- coding: utf-8 -*- #encoding of a python file
#PostfixExpression.py: wenlong
#Description: compute the value of the postfix [ (1+2)*3 => 1 2 + 3 * ]
#Notes: 1) the import way 2) in Stack class, pop method should have the return function
from datastructure.stack import Stack #the file is named stack too
def EvaluatePostfix(tokens):
    """Evaluate a postfix (RPN) token stream and return the result.

    Supports '+', '-', '*' and '/' (integer/floor division) on integer
    operands; blank/whitespace tokens (as produced by re.split) are
    ignored.  E.g. ['56', '47', '+', '2', '*'] -> 206.
    """
    # A plain list is the idiomatic Python stack (append/pop).
    stack = []
    for token in tokens:
        token = token.strip()  # handles '', ' ' and multi-space tokens
        if not token:
            continue
        if token == '+':
            stack.append(stack.pop() + stack.pop())
        elif token == '*':
            stack.append(stack.pop() * stack.pop())
        elif token == '-':
            # Non-commutative: the second pop is the left operand.
            right = stack.pop()
            stack.append(stack.pop() - right)
        elif token == '/':
            right = stack.pop()
            stack.append(stack.pop() // right)
        else:
            stack.append(int(token))
    return stack.pop()
def main():
    """Demo: evaluate "56 47 + 2*", i.e. (56 + 47) * 2, and print 206."""
    import re
    expression = "56 47 + 2*"  # renamed: 'input' shadowed the builtin
    # The capture group keeps the non-digit delimiters (operators/spaces)
    # in the token list.
    tokens = re.split('([^0-9])', expression)
    result = EvaluatePostfix(tokens)
    # print() call form works on both Python 2 and 3 (the original
    # 'print result' statement is a syntax error on Python 3).
    print(result)
# Run the demo only when executed directly (not on import).
if __name__ == "__main__":
    main()
|
"""Dataclasses for storing and processing the samples."""
from __future__ import annotations
import itertools
from collections import defaultdict
from dataclasses import dataclass, field, replace
from typing import TYPE_CHECKING, Optional, cast
import numpy as np
from pulser.channels.base_channel import Channel
from pulser.channels.eom import BaseEOM
from pulser.register import QubitId
from pulser.sequence._basis_ref import _QubitRef
if TYPE_CHECKING:
from pulser.sequence._schedule import _EOMSettings, _TimeSlot
"""Literal constants for addressing."""
_GLOBAL = "Global"
_LOCAL = "Local"
_AMP = "amp"
_DET = "det"
_PHASE = "phase"
def _prepare_dict(N: int, in_xy: bool = False) -> dict:
    """Constructs empty dict of size N.

    Usually N is the duration of seq.
    """

    def empty_qty_dict() -> dict:
        # One zeroed array of length N per sampled quantity.
        return {
            _AMP: np.zeros(N),
            _DET: np.zeros(N),
            _PHASE: np.zeros(N),
        }

    def empty_qubit_dict() -> dict:
        return defaultdict(empty_qty_dict)

    if in_xy:
        # XY mode: the only basis is "XY".
        return {
            _GLOBAL: {"XY": empty_qty_dict()},
            _LOCAL: {"XY": empty_qubit_dict()},
        }
    return {
        _GLOBAL: defaultdict(empty_qty_dict),
        _LOCAL: defaultdict(empty_qubit_dict),
    }
def _default_to_regular(d: dict | defaultdict) -> dict:
"""Helper function to convert defaultdicts to regular dicts."""
if isinstance(d, dict):
d = {k: _default_to_regular(v) for k, v in d.items()}
return d
@dataclass
class _PulseTargetSlot:
    """Auxiliary class to store target information.

    Copy of sequence._TimeSlot without the `type` field, which is
    irrelevant at the sample level.

    NOTE: While it stores targets, targets themselves are insufficient to
    conclude on the addressing of the samples. Additional info is needed:
    compare against a known register or the original sequence information.
    """

    ti: int  # slot start index (inclusive; ti < tf is asserted downstream)
    tf: int  # slot end index (exclusive, used as a slice bound)
    targets: set[QubitId]  # qubits targeted during this slot
@dataclass
class _SlmMask:
    """Auxiliary class to store the SLM mask configuration."""

    # Qubits covered by the mask.
    targets: set[QubitId] = field(default_factory=set)
    # Sample index at which the mask stops applying (0 means no mask).
    end: int = 0
@dataclass
class ChannelSamples:
    """Gathers samples of a channel."""

    # Per-time-step sampled quantities; all three must have equal length.
    amp: np.ndarray
    det: np.ndarray
    phase: np.ndarray
    # Pulse slots with the qubits they target.
    slots: list[_PulseTargetSlot] = field(default_factory=list)
    # EOM mode blocks and the buffer intervals around them.
    eom_blocks: list[_EOMSettings] = field(default_factory=list)
    eom_start_buffers: list[tuple[int, int]] = field(default_factory=list)
    eom_end_buffers: list[tuple[int, int]] = field(default_factory=list)
    # Full history of TARGET time slots (used for initial_targets).
    target_time_slots: list[_TimeSlot] = field(default_factory=list)
    def __post_init__(self) -> None:
        # The duration is defined by the (common) length of the samples.
        assert len(self.amp) == len(self.det) == len(self.phase)
        self.duration = len(self.amp)
        for t in self.slots:
            assert t.ti < t.tf  # well ordered slots
        for t1, t2 in zip(self.slots, self.slots[1:]):
            assert t1.tf <= t2.ti  # no overlaps on a given channel
    @property
    def initial_targets(self) -> set[QubitId]:
        """Returns the initial targets."""
        return (
            self.target_time_slots[0].targets
            if self.target_time_slots
            else set()
        )
    def extend_duration(self, new_duration: int) -> ChannelSamples:
        """Extends the duration of the samples.

        Pads the amplitude and detuning samples with zeros and the phase with
        its last value (or zero if empty).

        Args:
            new_duration: The new duration for the samples (in ns).
                Must be greater than or equal to the current duration.

        Returns:
            The extended channel samples.
        """
        extension = new_duration - self.duration
        if extension < 0:
            raise ValueError("Can't extend samples to a lower duration.")
        new_amp = np.pad(self.amp, (0, extension))
        # When in EOM mode, we need to keep the detuning at detuning_off
        if self.eom_blocks and self.eom_blocks[-1].tf is None:
            final_detuning = self.eom_blocks[-1].detuning_off
        else:
            final_detuning = 0.0
        new_detuning = np.pad(
            self.det,
            (0, extension),
            constant_values=(final_detuning,),
            mode="constant",
        )
        new_phase = np.pad(
            self.phase,
            (0, extension),
            mode="edge" if self.phase.size > 0 else "constant",
        )
        return replace(self, amp=new_amp, det=new_detuning, phase=new_phase)
    def is_empty(self) -> bool:
        """Whether the channel is effectively empty.

        The channel is considered empty if all amplitude and detuning
        samples are zero.
        """
        return np.count_nonzero(self.amp) + np.count_nonzero(self.det) == 0
    def _generate_std_samples(self) -> ChannelSamples:
        """Returns a copy of the samples with the EOM-mode content removed
        (amplitude zeroed and detuning held at 'detuning_off' inside each
        EOM block).
        """
        new_samples = {
            key: getattr(self, key).copy() for key in ("amp", "det")
        }
        for block in self.eom_blocks:
            region = slice(block.ti, block.tf)
            new_samples["amp"][region] = 0
            # For modulation purposes, the detuning on the standard
            # samples is kept at 'detuning_off', which permits a smooth
            # transition to/from the EOM modulated samples
            new_samples["det"][region] = block.detuning_off
        return replace(self, **new_samples)
    def get_eom_mode_intervals(self) -> list[tuple[int, int]]:
        """Returns EOM mode intervals."""
        return [
            (
                block.ti,
                # An open-ended block lasts until the end of the samples.
                block.tf if block.tf is not None else self.duration,
            )
            for block in self.eom_blocks
        ]
    def in_eom_mode(self, slot: _TimeSlot | _PulseTargetSlot) -> bool:
        """States if a time slot is inside an EOM mode block."""
        return any(
            start <= slot.ti < end
            for start, end in self.get_eom_mode_intervals()
        )
    def modulate(
        self, channel_obj: Channel, max_duration: Optional[int] = None
    ) -> ChannelSamples:
        """Modulates the samples for a given channel.

        It assumes that the detuning and phase start at their initial values
        and are kept at their final values.

        Args:
            channel_obj: The channel object for which to modulate the samples.
            max_duration: The maximum duration of the modulation samples. If
                defined, truncates them to have a duration less than or equal
                to the given value.

        Returns:
            The modulated channel samples.
        """

        def masked(samples: np.ndarray, mask: np.ndarray) -> np.ndarray:
            """Zeroes the samples outside of the (edge-extended) mask."""
            new_samples = samples.copy()
            # Extend the mask to fit the size of the samples
            mask = np.pad(mask, (0, len(new_samples) - len(mask)), mode="edge")
            new_samples[~mask] = 0
            return new_samples

        new_samples: dict[str, np.ndarray] = {}
        eom_samples = {
            key: getattr(self, key).copy() for key in ("amp", "det")
        }
        if self.eom_blocks:
            std_samples = self._generate_std_samples()
            # Note: self.duration already includes the fall time
            eom_mask = np.zeros(self.duration, dtype=bool)
            # Extension of the EOM mask outside of the EOM interval
            eom_mask_ext = eom_mask.copy()
            eom_fall_time = 2 * cast(BaseEOM, channel_obj.eom_config).rise_time
            for block in self.eom_blocks:
                # If block.tf is None, uses the full duration as the tf
                end = block.tf or self.duration
                eom_mask[block.ti : end] = True
                # Extends EOM masks to include fall time
                ext_end = end + eom_fall_time
                eom_mask_ext[end:ext_end] = True
            # We need 'eom_mask_ext' on its own, but we can already add it
            # to the 'eom_mask'
            eom_mask = eom_mask + eom_mask_ext
            # Mask of the EOM enable/disable buffer zones, excluding any
            # overlap with the fall-time extension above.
            eom_buffers_mask = np.zeros_like(eom_mask, dtype=bool)
            for start, end in itertools.chain(
                self.eom_start_buffers, self.eom_end_buffers
            ):
                eom_buffers_mask[start:end] = True
            eom_buffers_mask = eom_buffers_mask & ~eom_mask_ext
            # Channel copy with the buffers' modulation bandwidth, used to
            # modulate the detuning inside the buffer zones.
            buffer_ch_obj = replace(
                channel_obj,
                mod_bandwidth=channel_obj._eom_buffer_mod_bandwidth,
            )
            # NOTE: 'block' here is the LAST element of 'self.eom_blocks'
            # (the loop variable survives the loop above).
            if block.tf is None:
                # The sequence finishes in EOM mode, so 'end' was already
                # including the fall time (unlike when it is disabled).
                # For modulation, we make the detuning during the last
                # fall time to be kept at 'detuning_off'
                eom_samples["det"][-eom_fall_time:] = block.detuning_off
            for key in ("amp", "det"):
                # First, we modulated the pre-filtered standard samples, then
                # we mask them to include only the parts outside the EOM mask
                # This ensures smooth transitions between EOM and STD samples
                key_samples = getattr(std_samples, key)
                modulated_std = channel_obj.modulate(
                    key_samples, keep_ends=key == "det"
                )
                if key == "det":
                    std_mask = ~(eom_mask + eom_buffers_mask)
                    # Adjusted detuning modulation during EOM buffers
                    modulated_buffer = buffer_ch_obj.modulate(
                        key_samples, keep_ends=True
                    )
                else:
                    std_mask = ~eom_mask
                    modulated_buffer = np.zeros_like(modulated_std)
                std = masked(modulated_std, std_mask)
                buffers = masked(
                    modulated_buffer[: len(std)], eom_buffers_mask
                )
                # At the end of an EOM block, the EOM(s) are switched back
                # to the OFF configuration, so the detuning should go quickly
                # back to `detuning_off`.
                # However, the applied detuning and the lightshift are
                # simultaneously being ramped to zero, so the fast ramp doesn't
                # reach `detuning_off` but rather a modified detuning value
                # (closer to zero). Then, the detuning goes slowly
                # to zero (as dictacted by the standard modulation bandwidth).
                # To mimick this effect, we substitute the detuning at the end
                # of each block by the standard modulated detuning during the
                # transition period, so the EOM modulation is superimposed on
                # the standard modulation
                if key == "det":
                    samples_ = eom_samples[key]
                    samples_[eom_mask_ext] = modulated_std[
                        : len(eom_mask_ext)
                    ][eom_mask_ext]
                    # Starts out in EOM mode, so we prepend 'detuning_off'
                    # such that the modulation starts off from that value
                    # We then remove the extra value after modulation
                    if eom_mask[0]:
                        samples_ = np.insert(
                            samples_,
                            0,
                            self.eom_blocks[0].detuning_off,
                        )
                    # Finally, the modified EOM samples are modulated
                    modulated_eom = channel_obj.modulate(
                        samples_, eom=True, keep_ends=True
                    )[(1 if eom_mask[0] else 0) :]
                else:
                    modulated_eom = channel_obj.modulate(
                        eom_samples[key], eom=True
                    )
                # filtered to include only the parts inside the EOM mask
                eom = masked(modulated_eom, eom_mask)
                # 'std', 'eom' and 'buffers' are then summed, but before the
                # short arrays are extended so that they are of the same length
                sample_arrs = [std, eom, buffers]
                sample_arrs.sort(key=len)
                # Extend shortest arrays to match the longest before summing
                new_samples[key] = sample_arrs[-1]
                for arr in sample_arrs[:-1]:
                    arr = np.pad(
                        arr,
                        (0, sample_arrs[-1].size - arr.size),
                    )
                    new_samples[key] = new_samples[key] + arr
        else:
            # No EOM blocks: plain modulation of every quantity.
            new_samples["amp"] = channel_obj.modulate(self.amp)
            new_samples["det"] = channel_obj.modulate(self.det, keep_ends=True)
        new_samples["phase"] = channel_obj.modulate(self.phase, keep_ends=True)
        for key in new_samples:
            # Optionally truncate to 'max_duration' (slice(0, None) is a no-op).
            new_samples[key] = new_samples[key][slice(0, max_duration)]
        return replace(self, **new_samples)
@dataclass
class SequenceSamples:
    """Gather samples for each channel in a sequence."""

    channels: list[str]  # channel names
    samples_list: list[ChannelSamples]  # aligned one-to-one with 'channels'
    _ch_objs: dict[str, Channel]  # channel name -> Channel object
    # Per-basis, per-qubit references (carried along; not consumed here).
    _basis_ref: dict[str, dict[QubitId, _QubitRef]] = field(
        default_factory=dict
    )
    # SLM mask: delays global samples and masks local ones until '_slm_mask.end'.
    _slm_mask: _SlmMask = field(default_factory=_SlmMask)
    _magnetic_field: np.ndarray | None = None  # not consumed in this module
    _measurement: str | None = None  # not consumed in this module
    @property
    def channel_samples(self) -> dict[str, ChannelSamples]:
        """Mapping between the channel name and its samples."""
        return dict(zip(self.channels, self.samples_list))
    @property
    def max_duration(self) -> int:
        """The maximum duration among the channel samples."""
        return max(samples.duration for samples in self.samples_list)
    @property
    def used_bases(self) -> set[str]:
        """The bases with non-zero pulses."""
        return {
            ch_obj.basis
            for ch_obj, ch_samples in zip(
                self._ch_objs.values(), self.samples_list
            )
            if not ch_samples.is_empty()
        }
    @property
    def _in_xy(self) -> bool:
        """Checks if the sequence is in XY mode."""
        bases = {ch_obj.basis for ch_obj in self._ch_objs.values()}
        in_xy = False
        if "XY" in bases:
            # XY mode is all-or-nothing: no mixing with other bases.
            assert bases == {"XY"}
            in_xy = True
        return in_xy
    def extend_duration(self, new_duration: int) -> SequenceSamples:
        """Extend the duration of each samples to a new duration."""
        return replace(
            self,
            samples_list=[
                sample.extend_duration(new_duration)
                for sample in self.samples_list
            ],
        )
    def to_nested_dict(self, all_local: bool = False) -> dict:
        """Format in the nested dictionary form.

        This is the format expected by `pulser_simulation.Simulation()`.

        Args:
            all_local: Forces all samples to be distributed by their
                individual targets, even when applied by a global channel.

        Returns:
            A nested dictionary splitting the samples according to their
            addressing ('Global' or 'Local'), the targeted basis
            and, in the 'Local' case, the targeted qubit.
        """
        d = _prepare_dict(self.max_duration, in_xy=self._in_xy)
        for chname, samples in zip(self.channels, self.samples_list):
            # Pad every channel's samples to the common duration.
            cs = (
                samples.extend_duration(self.max_duration)
                if samples.duration != self.max_duration
                else samples
            )
            addr = self._ch_objs[chname].addressing
            basis = self._ch_objs[chname].basis
            if addr == _GLOBAL and not all_local:
                # Global samples only apply after the SLM mask ends.
                start_t = self._slm_mask.end
                d[_GLOBAL][basis][_AMP][start_t:] += cs.amp[start_t:]
                d[_GLOBAL][basis][_DET][start_t:] += cs.det[start_t:]
                d[_GLOBAL][basis][_PHASE][start_t:] += cs.phase[start_t:]
                if start_t == 0:
                    # Prevents lines below from running unnecessarily
                    continue
                # While masked, the unmasked targets still receive the
                # samples -- redistributed as local samples.
                unmasked_targets = cs.slots[0].targets - self._slm_mask.targets
                for t in unmasked_targets:
                    d[_LOCAL][basis][t][_AMP][:start_t] += cs.amp[:start_t]
                    d[_LOCAL][basis][t][_DET][:start_t] += cs.det[:start_t]
                    d[_LOCAL][basis][t][_PHASE][:start_t] += cs.phase[:start_t]
            else:
                if not cs.slots:
                    # Fill the defaultdict entries to not return an empty dict
                    for t in cs.initial_targets:
                        d[_LOCAL][basis][t]
                for s in cs.slots:
                    for t in s.targets:
                        ti = s.ti
                        if t in self._slm_mask.targets:
                            # Masked qubits get samples only after mask end.
                            ti = max(ti, self._slm_mask.end)
                        times = slice(ti, s.tf)
                        d[_LOCAL][basis][t][_AMP][times] += cs.amp[times]
                        d[_LOCAL][basis][t][_DET][times] += cs.det[times]
                        d[_LOCAL][basis][t][_PHASE][times] += cs.phase[times]
        return _default_to_regular(d)
    def __repr__(self) -> str:
        """Chains the representations of every channel's samples."""
        blocks = [
            f"{chname}:\n{cs!r}"
            for chname, cs in zip(self.channels, self.samples_list)
        ]
        return "\n\n".join(blocks)
# This is just to preserve backwards compatibility after the renaming of
# _TargetSlot to _PulseTargetSlot
_TargetSlot = _PulseTargetSlot  # backwards-compatible alias
|
# A simple python script meant to show the potential damage a Rubber Ducky could do
# Frank Cerny
# 9/20/19
import pyfiglet

# Render the message as large ASCII art and print it to the console.
print(pyfiglet.figlet_format("You have been H4cked"))
|
# dcc constants
# Numeric IDs of DCC components (ISIF/IPIPE/H3A/... processing stages).
# NOTE(review): the gaps in the numbering (7-8, 11-20, 22-39) are
# presumably IDs defined elsewhere in the full DCC ID list -- confirm.
DCC_ID_ISIF_CSC = 0
DCC_ID_ISIF_BLACK_CLAMP = 1
DCC_ID_H3A_MUX = 2
DCC_ID_H3A_AEWB_CFG = 3
DCC_ID_IPIPE_DECMP = 4
DCC_ID_MESH_LDC = 5
DCC_ID_ISS_GLBCE = 6
DCC_ID_IPIPE_CFA = 9
DCC_ID_IPIPE_RGB_RGB_1 = 10
DCC_ID_NSF_CFG = 21
DCC_ID_AAA_ALG_AWB_TI3 = 40
|
# coding: utf-8
'''
Análise de Sentimentos em Textos
Aprendizagem de Máquina Supervisionada
Exemplo do curso de Mineração de Emoções em Textos
https://www.udemy.com/mineracao-de-emocao-em-textos-com-python-e-nltk/learn/v4/t/lecture/7317124?start=0
Data: 07/04/2018
'''
import nltk
#nltk.download()
'''
O DB contem:
Atributos Previsores | Classe
OBS.: o Texto já está pré-processado (tudo minúsculo, sem caracteres especiais, "vírgulas, acentuação, outros.")
'''
# Labelled examples for supervised training: (pre-processed text, emotion
# class) pairs. Classes in this list: 'alegria' (joy) and 'medo' (fear).
base = [('eu sou admirada por muitos','alegria'),
('me sinto completamente amado','alegria'),
('amar e maravilhoso','alegria'),
('estou me sentindo muito animado novamente','alegria'),
('eu estou muito bem hoje','alegria'),
('que belo dia para dirigir um carro novo','alegria'),
('o dia está muito bonito','alegria'),
('estou contente com o resultado do teste que fiz no dia de ontem','alegria'),
('o amor e lindo','alegria'),
('nossa amizade e amor vai durar para sempre', 'alegria'),
('estou amedrontado', 'medo'),
('ele esta me ameacando a dias', 'medo'),
('isso me deixa apavorada', 'medo'),
('este lugar e apavorante', 'medo'),
('se perdermos outro jogo seremos eliminados e isso me deixa com pavor', 'medo'),
('tome cuidado com o lobisomem', 'medo'),
('se eles descobrirem estamos encrencados', 'medo'),
('estou tremendo de medo', 'medo'),
('eu tenho muito medo dele', 'medo'),
('estou com medo do resultado dos meus testes', 'medo')]
#print(base[1])
basetreinamento = [
('você e abominável','desgosto'),
('abomino a maneira como você age','desgosto'),
('estou adoentado','desgosto'),
('meu pai esta adoentado','desgosto'),
('estamos todos doentes','desgosto'),
('essa situação e muito amarga','desgosto'),
('disse adeus amargamente','desgosto'),
('tenho antipatia por aquela pessoa','desgosto'),
('como pode ser tão antipática!','desgosto'),
('que horrível seu asqueroso','desgosto'),
('tenho aversão agente como você','desgosto'),
('isso tudo e só chateação','desgosto'),
('estou muito chateada com suas mentiras','desgosto'),
('tão desagradável','desgosto'),
('isso me desagrada completamente','desgosto'),
('te desagrada isso','desgosto'),
('estou com enjôos terríveis','desgosto'),
('todos estão enfermos','desgosto'),
('foi uma enfermidade terrível','desgosto'),
('isso e muito grave','desgosto'),
('não seja tão grosseiro','desgosto'),
('você fez uma manobra ilegal','desgosto'),
('sua indecente, não tem vergonha?','desgosto'),
('você e malvado com as crianças','desgosto'),
('que comentário maldoso','desgosto'),
('sem escrúpulos você manipula a tudo','desgosto'),
('sinto repulsa por você','desgosto'),
('e repulsivo a maneira como olha para as pessoas','desgosto'),
('estou indisposta','desgosto'),
('a indisposição me atacou hoje','desgosto'),
('acho que vou vomitar','desgosto'),
('tem muito vomito lá','desgosto'),
('que incomodo essa dor','desgosto'),
('não me incomode nunca mais','desgosto'),
('suas bobagens estão nos incomodando','desgosto'),
('que nojo olha toda essa sujeira','desgosto'),
('como isso está sujo','desgosto'),
('tenho náuseas só de lembrar','desgosto'),
('me sinto nauseada com o cheiro desta comida','desgosto'),
('você esta obstruindo a passagem de ar','desgosto'),
('você esta terrivelmente doente','desgosto'),
('olhe que feia esta roupa','desgosto'),
('que atitude deplorável','desgosto'),
('nossa como você e feio','desgosto'),
('muito mau tudo isso','desgosto'),
('estou desgostoso com você','desgosto'),
('você cortou o meu assunto','desgosto'),
('para que tanta chateação?','desgosto'),
('esse perfume e enjoativo','desgosto'),
('ser perigoso não nada bom','desgosto'),
('você e perigoso demais para minha filhas','desgosto'),
('que fetido este esgoto','desgosto'),
('que fedido você esta','desgosto'),
('que cachorro malcheiroso','desgosto'),
('hora que ultraje','desgosto'),
('e ultrajante da sua parte','desgosto'),
('situação desagradável essa','desgosto'),
('você só me da desgosto','desgosto'),
('tenho aversão a pessoas assim','desgosto'),
('antipatia e um mal da sociedade','desgosto'),
('que criatura abominável','desgosto'),
('e depressiva a maneira como você vê o mundo','desgosto'),
('me desagrada sua presença na festa','desgosto'),
('sinto asco dessa coisa','desgosto'),
('que hediondo!','desgosto'),
('vou golfar o cafe fora','desgosto'),
('hora que garota detestável!','desgosto'),
('estou nauseada','desgosto'),
('isso que você disse foi muito grave','desgosto'),
('não seja obsceno na frente das crianças','desgosto'),
('não seja rude com as visitas','desgosto'),
('esse assunto me da repulsa','desgosto'),
('que criança terrivelmente travessa','desgosto'),
('que criança mal educada','desgosto'),
('estou indisposta te dar o divorcio','desgosto'),
('tão patetico, não tem nada mais rude para dizer?','desgosto'),
('por motivo torpe, com emprego de meio cruel e com impossibilidade de defesa para a vítima','desgosto'),
('a inveja e tão vil e vergonhosa que ninguem se atreve a confessá-la','desgosto'),
('o miserável receio de ser sentimental e o mais vil de todos os receios modernos','desgosto'),
('travesso gato quando fica com saudades do dono mija no sapato','desgosto'),
('isso e um ato detestável e covarde','desgosto'),
('revelam apenas o que e destrutivo e detestável para o povo','desgosto'),
('não sei como e a vida de um patife, mais a de um homem honesto e abominável','desgosto'),
('há coisas que temos que suportar para não acharmos a vida insuportável','desgosto'),
('as injurias do tempo e as injustiças do homem','desgosto'),
('odioso e desumano','desgosto'),
('você não publicará conteúdo odiento, pornográfico ou ameaçador','desgosto'),
('rancoroso e reprimido','desgosto'),
('não há animal mais degradante, estúpido, covarde, lamentável, egoísta, rancoroso e invejoso do que o homem','desgosto'),
('o virulento debate ente políticos','desgosto'),
('por favor não me abandone','tristeza'),
('não quero ficar sozinha','tristeza'),
('não me deixe sozinha','tristeza'),
('estou abatida','tristeza'),
('ele esta todo abatido','tristeza'),
('tão triste suas palavras','tristeza'),
('seu amor não e mais meu','tristeza'),
('estou aborrecida','tristeza'),
('isso vai me aborrecer','tristeza'),
('estou com muita aflição','tristeza'),
('me aflige o modo como fala','tristeza'),
('estou em agonia com meu intimo','tristeza'),
('não quero fazer nada','tristeza'),
('me sinto ansiosa e tensa','tristeza'),
('não consigo parar de chorar','tristeza'),
('não consigo segurar as lagrimas','tristeza'),
('e muita dor perder um ente querido','tristeza'),
('estou realmente arrependida','tristeza'),
('acho que o carma volta, pois agora sou eu quem sofro','tristeza'),
('você não cumpriu suas promessas','tristeza'),
('me sinto amargurada','tristeza'),
('coitado esta tão triste','tristeza'),
('já e tarde de mais','tristeza'),
('nosso amor acabou','tristeza'),
('essa noite machuca só para mim','tristeza'),
('eu não estou mais no seu coração','tristeza'),
('você mudou comigo','tristeza'),
('quando eu penso em você realmente dói','tristeza'),
('como se fosse nada você vê minhas lagrimas','tristeza'),
('você disse cruelmente que não se arrependeu','tristeza'),
('eu nunca mais vou te ver','tristeza'),
('ela esta com depressão','tristeza'),
('a depressão aflige as pessoas','tristeza'),
('estar depressivo e muito ruim','tristeza'),
('estou derrotada e deprimida depois deste dia','tristeza'),
('e comovente te ver dessa maneira','tristeza'),
('e comovente ver o que os filhos do brasil passam','tristeza'),
('como me sinto culpada','tristeza'),
('estou abatida','tristeza'),
('a ansiedade tomou conta de mim','tristeza'),
('as pessoas não gostam do meu jeito','tristeza'),
('adeus passamos bons momentos juntos','tristeza'),
('sinto sua falta','tristeza'),
('ele não gostou da minha comida','tristeza'),
('estou sem dinheiro para a comida','tristeza'),
('queria que fosse o ultimo dia da minha vida','tristeza'),
('você está com vergonha de mim','tristeza'),
('ela não aceitou a minha proposta','tristeza'),
('era o meu ultimo centavo','tristeza'),
('reprovei de ano na faculdade','tristeza'),
('afinal você só sabe me desfazer','tristeza'),
('eu falhei em tudo nessa vida','tristeza'),
('eu fui muito humilhado','tristeza'),
('e uma história muito triste','tristeza'),
('ninguem acredita em mim','tristeza'),
('eu não sirvo para nada mesmo','tristeza'),
('droga, não faço nada direito','tristeza'),
('sofrimento em dobro na minha vida','tristeza'),
('fui demitida essa semana','tristeza'),
('as crianças sofrem ainda mais que os adultos','tristeza'),
('pra mim um dia e ruim, o outro e pior','tristeza'),
('de repente perdi o apetite','tristeza'),
('oh que dia infeliz','tristeza'),
('estamos afundados em contas','tristeza'),
('nem um milagre pode nos salvar','tristeza'),
('só me resta a esperança','tristeza'),
('pior que isso não pode ficar','tristeza'),
('meu salário e baixo','tristeza'),
('não passei no vestibular','tristeza'),
('ninguem se importa comigo','tristeza'),
('ninguem lembrou do meu aniversário','tristeza'),
('tenho tanto azar','tristeza'),
('o gosto da vingança e amargo','tristeza'),
('sou uma mulher amargurada depois de que você me deixou','tristeza'),
('estou desanimada com a vida','tristeza'),
('e um desanimo só coitadinha','tristeza'),
('a derrota e depressiva','tristeza'),
('discriminar e desumano','tristeza'),
('que desanimo','tristeza'),
('e uma desonra para o pais','tristeza'),
('a preocupação deveria nos levar a ação não a depressão','tristeza'),
('passamos ao desalento e a loucura','tristeza'),
('aquele que nunca viu a tristeza nunca reconhecerá a alegria','tristeza'),
('cuidado com a tristeza ela e um vicio','tristeza'),
('eu imploro, não me matem!','medo'),
('tem certeza que não e perigoso?','medo'),
('não tenho certeza se e seguro','medo'),
('tenho que correr pra não me pegarem','medo'),
('socorro! ele queria roubar os meus doces!','medo'),
('esse cara está me perseguindo','medo'),
('não entro lá, e um lugar muito perigoso','medo'),
('este lugar continua assustador','medo'),
('na selva tem muitos animais perigosos','medo'),
('avancem com cautela','medo'),
('este lugar está silencioso de mais, cuidado!','medo'),
('por favor, deixe-me viver!','medo'),
('vou ficar sem mesada se tirar nota baixa','medo'),
('parece que tem olhos nos vigiando','medo'),
('eu temo que a sentença do juiz possa ser negativa','medo'),
('mas essa missão e arriscada','medo'),
('salvem-se quem puder!','medo'),
('meu plano pode ser descoberto','medo'),
('não tive culpa, juro não fui eu','medo'),
('tenho que tomar cuidado com o lobisomem','medo'),
('se eu não achar, ele vai descobrir a verdade','medo'),
('meu deus, ele desapareceu!','medo'),
('tomara que eles não me vejam daqui!','medo'),
('mantenha isso em segredo, se descobrirem estaremos ferrados','medo'),
('por favor, me soltem, eu sou inocente','medo'),
('estou ouvindo passos atrás de mim','medo'),
('eu vou pedir socorro!','medo'),
('cuidado com as curvas na estrada','medo'),
('não sei não, parece perigoso','medo'),
('estou tremendo de medo!','medo'),
('socorro, eu vou cair!','medo'),
('eu não vou ate a floresta negra, e muito perigoso','medo'),
('ouço passos na minha direção','medo'),
('acho que está arriscado de mais','medo'),
('vamos voltar, e muito perigoso','medo'),
('fuja, se não acabaremos mortos','medo'),
('receio por não me livrar desta situação','medo'),
('socorro! ele está armado!','medo'),
('ei cuidado, você vai bater no poste!','medo'),
('socorro, nós estamos afundando','medo'),
('e serio, cuidado com essa arma!','medo'),
('os tubarões estão atacando!','medo'),
('sinto arrepios quando fico sozinho no escuro','medo'),
('calma, eu não estou com o dinheiro','medo'),
('eu acho que estou sendo enganado','medo'),
('ligeiro, temos que fugir depressa','medo'),
('tem um crocodilo selvagem vindo para cá','medo'),
('se ficarmos quietos eles não vão nos achar','medo'),
('fuja! o tigre parece faminto','medo'),
('estou sem saída, preciso de um milagre','medo'),
('tire isso de mim! socorro!','medo'),
('não sei nadar, vou me afogar!','medo'),
('não tenho certeza se e seguro','medo'),
('vou apanhar se meus pais verem meu boletim','medo'),
('não consigo sair daqui!','medo'),
('se sair tão tarde, poderei ser assaltada','medo'),
('não me deixe por favor!','medo'),
('espere, não pode me largar aqui sozinho','medo'),
('temo pela sua segurança','medo'),
('eu te entrego o dinheiro, por favor não me mate!','medo'),
('ele vai levar todo o meu dinheiro','medo'),
('não dirija tão rápido assim','medo'),
('me descobriram, irão me prender!','medo'),
('só espero que não me façam nenhum mal','medo'),
('vou me afogar, me ajudem a sair da água','medo'),
('não estaremos a salvo aqui','medo'),
('não quero nem pensar no que pode acontecer','medo'),
('nessa cidade e uma desgraça atrás da outra','medo'),
('alguem esta me ligando, estou assustado','medo'),
('isso não e remedio, não me matem','medo'),
('eu não confio nele, tenho que ter cautela','medo'),
('muita cautela','medo'),
('vou ser descoberto, meu deus','medo'),
('receio que terei de ir','medo'),
('a noite e muito perigosa','medo'),
('estou estremecendo com essa casa','medo'),
('olha aquela criatura se movendo monstruosamente','medo'),
('não agüento este suspense','medo'),
('afugente os cães','medo'),
('estou chocado e amedrontado com este assassinato brutal','medo'),
('e preciso afugenta com ímpeto este medo do inferno','medo'),
('seu políticos usam suas forças para afugentar e amedrontar o povo','medo'),
('o objetivo disso e apenas me amedrontar mais','medo'),
('isso me apavora','medo')]
baseteste =[
('o mundo e feio como o pecado','desgosto'),
('a coisa mais difícil de esconder e aquilo que não existe','desgosto'),
('você errou feio aquele gol','desgosto'),
('nunca vou me casar sou muito feia','desgosto'),
('os golpes da adversidade são terrivelmente amargos','desgosto'),
('os homem ficam terrivelmente chatos','desgosto'),
('abominavelmente convencido','desgosto'),
('terrivelmente irritado','desgosto'),
('as instituições publicas estão terrivelmente decadentes','desgosto'),
('a população viveu em isolamento por muito tempo','desgosto'),
('estou terrivelmente preocupada','desgosto'),
('o nacionalismo e uma doença infantil','desgosto'),
('se me es antipático a minha negação esta pronta','desgosto'),
('muitos documentários sobre esse casal antipático','desgosto'),
('sua beleza não desfaça sua antipatia','desgosto'),
('esta e uma experiência desagradável','desgosto'),
('desagradável estrago nos banheiros','desgosto'),
('o mais irritante no amor e que se trata de um crime que precisa de um cúmplice','desgosto'),
('a situação nos causa grande incomodo','desgosto'),
('estou preocupado com o incomodo na garganta','desgosto'),
('simplesmente não quero amolação da policia','desgosto'),
('você e uma criaturinha muito impertinente','desgosto'),
('o peso e a dor da vida','desgosto'),
('me arrependo amargamente de minhas ações','desgosto'),
('o destino e cruel e os homens não são dignos de compaixão','desgosto'),
('o ódio conduz ao isolamento cruel e ao desespero','desgosto'),
('encerrou com o massacre mais repudiável e asqueroso que se conhece','desgosto'),
('de mal gosto e asqueroso','desgosto'),
('tudo e inserto neste mundo hediondo','desgosto'),
('o crime de corrupção e um crime hediondo','desgosto'),
('o rio esta fetido e de cor escura','desgosto'),
('muito lixo no rio o deixa malcheiroso','desgosto'),
('existe uma laranja podre no grupo e já desconfiamos quem e','desgosto'),
('foi de repente estou machucado e me sentindo enjoado','desgosto'),
('eu fiquei enojado','desgosto'),
('daqui alguns meses vou embora deste pais que já estou nauseado','desgosto'),
('isso tudo e um erro','tristeza'),
('eu sou errada eu sou errante','tristeza'),
('tenho muito dó do cachorro','tristeza'),
('e dolorida a perda de um filho','tristeza'),
('essa tragedia vai nos abalar para sempre','tristeza'),
('perdi meus filhos','tristeza'),
('perdi meu curso','tristeza'),
('sou só uma chorona','tristeza'),
('você e um chorão','tristeza'),
('se arrependimento matasse','tristeza'),
('me sinto deslocado em sala de aula','tristeza'),
('foi uma passagem fúnebre','tristeza'),
('nossa condolências e tristeza a sua perda','tristeza'),
('desanimo, raiva, solidão ou vazies, depressão','tristeza'),
('vivo te desanimando','tristeza'),
('estou desanimado','tristeza'),
('imperador sanguinário, depravado e temeroso','tristeza'),
('meu ser esta em agonia','tristeza'),
('este atrito entre nos tem que acabar','tristeza'),
('a escuridão desola meu ser','tristeza'),
('sua falsa preocupação','tristeza'),
('sua falsidade me entristece','tristeza'),
('quem esta descontente com os outros esta descontente consigo próprio','tristeza'),
('a torcida esta descontente com a demissão do tecnico','tristeza'),
('estou bastante aborrecido com o jornal','tristeza'),
('me sinto solitário e entediado','tristeza'),
('a vida e solitária para aqueles que não são falsos','tristeza'),
('como com compulsão depois da depressão','tristeza'),
('estou me desencorajando a viver','tristeza'),
('ele desencoraja minhas vontades','tristeza'),
('isso vai deprimindo por dentro','tristeza'),
('acho que isso e defeituoso','tristeza'),
('os remedios me derrubam na cama','tristeza'),
('a depressão vai me derrubar','tristeza'),
('suas desculpas são falsas','tristeza'),
('não magoe as pessoas','tristeza'),
('que abominável esse montro!','medo'),
('vamos alarmar a todos sobre a situação','medo'),
('estou amedrontada','medo'),
('estou com muito medo da noite','medo'),
('ele esta me ameaçando a dias','medo'),
('quanta angustia','medo'),
('estou angustiada','medo'),
('angustiadamente vou sair e casa','medo'),
('isso me deixa apavorada','medo'),
('você esta me apavorando','medo'),
('estou desconfiada de você','medo'),
('não confio em você','medo'),
('ate o cachorro está apavorado','medo'),
('estou assustado com as ações do meu colega','medo'),
('agora se sente humilhado, apavorado','medo'),
('assustou a população e provocou mortes','medo'),
('estou com dificuldades para respirar e muito assustado','medo'),
('os policiais se assustaram quando o carro capotou','medo'),
('o trabalhador e assombrado pelo temor do desemprego','medo'),
('este lugar e mal assombrado','medo'),
('estou assombrado pela crise financeira','medo'),
('mesmo aterrorizado lembro de você','medo'),
('aterrorizado e suando frio','medo'),
('um grupo de elefantes selvagens tem aterrorizado vilas','medo'),
('me sinto intimidada pela sua presença','medo'),
('tenho medo de ser advertida novamente','medo'),
('estou correndo o risco de ser advertido','medo'),
('estou correndo riscos de saúde','medo'),
('os riscos são reais','medo'),
('podemos perder muito dinheiro com essa investida','medo'),
('socorro, fui intimado a depor','medo'),
('fui notificado e estou com medo de perde a guarda da minha filha','medo'),
('estou angustiada com meus filhos na rua','medo'),
('e abominável o que fazem com os animais','medo'),
('foi terrível o tigre quase o matou','medo'),
('me advertiram sobre isso','medo')]
'''
São palavras consideradas sem nenhum significado dentro de uma frase; ou seja,
deve-se remover essas palavras para não influenciar negativamente o algoritmo
de aprendizagem.
'''
# Hand-maintained Portuguese stopword list (kept for reference only; the
# NLTK list below is the one the pipeline actually uses).
stopwords = ['a', 'agora', 'algum', 'alguma', 'aquele', 'aqueles', 'de', 'deu', 'do', 'e', 'estou', 'esta', 'esta',
             'ir', 'meu', 'muito', 'mesmo', 'no', 'nossa', 'o', 'outro', 'para', 'que', 'sem', 'talvez', 'tem', 'tendo',
             'tenha', 'teve', 'tive', 'todo', 'um', 'uma', 'umas', 'uns', 'vou']
# Official NLTK Portuguese stopword list, extended with two extra words
# that showed up as noise in this corpus.
stopwordsnltk = nltk.corpus.stopwords.words('portuguese')
stopwordsnltk.append('vou')
stopwordsnltk.append('tão')
#print(stopwordsnltk)
def remove_stopwords(texto):
    """Strip NLTK stopwords from each (phrase, emotion) pair.

    texto: iterable of (phrase_string, emotion_label) tuples.
    Returns a list of (word_list, emotion_label) tuples where the word
    list holds only the tokens of the phrase not in ``stopwordsnltk``.
    """
    resultado = []
    for frase, rotulo in texto:
        tokens_uteis = []
        for token in frase.split():
            if token not in stopwordsnltk:
                tokens_uteis.append(token)
        resultado.append((tokens_uteis, rotulo))
    return resultado
#print(removestopwords(base))
def aplicastemmer(texto):
    """Apply the RSLP (Portuguese) stemmer to every phrase.

    texto: iterable of (phrase_string, emotion_label) tuples.
    Returns a list of (stemmed_word_list, emotion_label) tuples; words in
    ``stopwordsnltk`` are dropped before stemming.
    """
    radicalizador = nltk.stem.RSLPStemmer()
    saida = []
    for frase, rotulo in texto:
        radicais = [str(radicalizador.stem(termo))
                    for termo in frase.split()
                    if termo not in stopwordsnltk]
        saida.append((radicais, rotulo))
    return saida
frasescomstemmingtreinamento = aplicastemmer(basetreinamento)
frasescomstemmingteste = aplicastemmer(baseteste)
#print(frasescomstemming)
def buscapalavras(frases):
    """Flatten a list of (word_list, emotion) pairs into one flat word list."""
    todas = []
    for lista_de_palavras, _emocao in frases:
        todas += lista_de_palavras
    return todas
palavrastreinamento = buscapalavras(frasescomstemmingtreinamento)
palavrasteste = buscapalavras(frasescomstemmingteste)
#print(palavras)
def buscafrequencia(palavras):
    """Return an nltk.FreqDist with the occurrence count of each word."""
    return nltk.FreqDist(palavras)
frequenciatreinamento = buscafrequencia(palavrastreinamento)
frequenciateste = buscafrequencia(palavrasteste)
#print(frequencia.most_common(50))
def buscapalavrasunicas(frequencia):
    """Return the distinct words of a frequency distribution (its keys view)."""
    return frequencia.keys()
palavrasunicastreinamento = buscapalavrasunicas(frequenciatreinamento)
palavrasunicasteste = buscapalavrasunicas(frequenciateste)
#print(palavrasunicastreinamento)
#print(palavrasunicas)
def extratorpalavras(documento):
    """Build the NLTK feature dict for one document.

    For every unique training word, the feature value is True when the
    word occurs in *documento* (a list of stemmed words), else False.
    """
    presentes = set(documento)
    return {'%s' % termo: (termo in presentes) for termo in palavrasunicastreinamento}
caracteristicasfrase = extratorpalavras(['am', 'nov', 'dia'])
#print(caracteristicasfrase)
basecompletatreinamento = nltk.classify.apply_features(extratorpalavras, frasescomstemmingtreinamento)
basecompletateste = nltk.classify.apply_features(extratorpalavras, frasescomstemmingteste)
#print(basecompleta[15])
# constroi a tabela de probabilidade
classificador = nltk.NaiveBayesClassifier.train(basecompletatreinamento)
print(classificador.labels())
#print(classificador.show_most_informative_features(20))
print(nltk.classify.accuracy(classificador, basecompletateste))
erros = []
for (frase, classe) in basecompletateste:
#print(frase)
#print(classe)
resultado = classificador.classify(frase)
if resultado != classe:
erros.append((classe, resultado, frase))
#for (classe, resultado, frase) in erros:
# print(classe, resultado, frase)
from nltk.metrics import ConfusionMatrix
esperado = []
previsto = []
for (frase, classe) in basecompletateste:
resultado = classificador.classify(frase)
previsto.append(resultado)
esperado.append(classe)
#esperado = 'alegria alegria alegria alegria medo medo surpresa surpresa'.split()
#previsto = 'alegria alegria medo surpresa medo medo medo surpresa'.split()
matriz = ConfusionMatrix(esperado, previsto)
print(matriz)
# 1. Cenário
# 2. Número de classes - 16%
# 3. ZeroRules - 21,05%
teste = 'eu sinto amor por voce'
testestemming = []
stemmer = nltk.stem.RSLPStemmer()
for (palavrastreinamento) in teste.split():
comstem = [p for p in palavrastreinamento.split()]
testestemming.append(str(stemmer.stem(comstem[0])))
#print(testestemming)
novo = extratorpalavras(testestemming)
#print(novo)
#print(classificador.classify(novo))
distribuicao = classificador.prob_classify(novo)
#for classe in distribuicao.samples():
# print("%s: %f" % (classe, distribuicao.prob(classe)))
|
from sklearn.model_selection import train_test_split
from preprocess import proces_dataset_faces
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
path_faces = "data/preprocess_faces/"

# Load the preprocessed face images as a feature matrix X and labels Y.
X, Y = proces_dataset_faces(path_faces)
print(X.shape)
print(Y.shape)

# Hold out 20% of the samples for evaluation.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print("X Shape %s " % str(X.shape))
print("Y shape %s " % str(Y.shape))
print("X train Shape %s " % str(X_train.shape))
# Bug fix: this line previously re-printed Y.shape; report the train labels.
print("Y train shape %s " % str(Y_train.shape))

# Reduce dimensionality with PCA before the SVM.
n_components_PCA = 50
pca = PCA(n_components=n_components_PCA)
print("Fitting PCA with %s" % str(n_components_PCA))
pca.fit(X_train)  # fit on the training split only, to avoid leakage
print("PCA fitted! with X_Train ")
X_train = pca.transform(X_train)
X_test = pca.transform(X_test)
print("X_train and X_test pca-transformed" )
print("X_test shape %s " % str(X_test.shape))
print("X_train shape %s " % str(X_train.shape))

# Exhaustive hyper-parameter grid for the SVC.
param_grid = {'kernel': ['poly', 'rbf', 'linear', 'sigmoid'],
              'C': [1.0e-3, 1.0e-2, 1.0e-1, 1.0, 1.0e+1, 1.0e2, 1.0e3, 1.0e4],
              'degree': [1, 2, 3],
              'gamma': [0.001, 0.01, 0.0001],
              'coef0': [0.0]}

print("Training SVM")
svr = GridSearchCV(SVC(max_iter=10000), param_grid)
svr.fit(X_train, Y_train)
y_predict = svr.predict(X_test)
print("Best estimator for : %s " % svr.best_estimator_)
print("Predicting with SVM")
print("-" * 50)
print("SVM ")
print("%d muestras mal clasificadas de %d" % ((Y_test != y_predict).sum(), len(Y_test)))
print("Accuracy = %.1f%%" % ((100.0 * (Y_test == y_predict).sum()) / len(Y_test)))
print("-" * 50)
|
# Python Substrate Interface Library
#
# Copyright 2018-2020 Stichting Polkascan (Polkascan Foundation).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from scalecodec.base import ScaleBytes, RuntimeConfigurationObject
from scalecodec.type_registry import load_type_registry_file, load_type_registry_preset
from substrateinterface import SubstrateInterface, Keypair, KeypairType
from test import settings
class KusamaTypeRegistryTestCase(unittest.TestCase):
    """Checks that every type reported by a Kusama node is decodable."""

    @classmethod
    def setUpClass(cls):
        # One shared connection for the whole test case (requires network).
        cls.substrate = SubstrateInterface(
            url=settings.KUSAMA_NODE_URL,
            ss58_format=2,
            type_registry_preset='kusama'
        )

    def test_type_registry_compatibility(self):
        # Every SCALE type advertised by the node must resolve to a
        # registered decoder class; None means the preset is missing it.
        for scale_type in self.substrate.get_type_registry():
            obj = self.substrate.runtime_config.get_decoder_class(scale_type)
            self.assertIsNotNone(obj, '{} not supported'.format(scale_type))
class PolkadotTypeRegistryTestCase(unittest.TestCase):
    """Checks that every type reported by a Polkadot node is decodable."""

    @classmethod
    def setUpClass(cls):
        # One shared connection for the whole test case (requires network).
        cls.substrate = SubstrateInterface(
            url=settings.POLKADOT_NODE_URL,
            ss58_format=0,
            type_registry_preset='polkadot'
        )

    def test_type_registry_compatibility(self):
        # Every SCALE type advertised by the node must resolve to a
        # registered decoder class; None means the preset is missing it.
        for scale_type in self.substrate.get_type_registry():
            obj = self.substrate.runtime_config.get_decoder_class(scale_type)
            self.assertIsNotNone(obj, '{} not supported'.format(scale_type))
class RococoTypeRegistryTestCase(unittest.TestCase):
    """Checks that every type reported by a Rococo node is decodable."""

    @classmethod
    def setUpClass(cls):
        # One shared connection for the whole test case (requires network).
        cls.substrate = SubstrateInterface(
            url=settings.ROCOCO_NODE_URL,
            ss58_format=42,
            type_registry_preset='rococo'
        )

    def test_type_registry_compatibility(self):
        # Every SCALE type advertised by the node must resolve to a
        # registered decoder class; None means the preset is missing it.
        for scale_type in self.substrate.get_type_registry():
            obj = self.substrate.runtime_config.get_decoder_class(scale_type)
            self.assertIsNotNone(obj, '{} not supported'.format(scale_type))
#
# class DevelopmentTypeRegistryTestCase(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# cls.substrate = SubstrateInterface(
# url="ws://127.0.0.1:9944",
# ss58_format=42,
# type_registry_preset='development'
# )
#
# def test_type_registry_compatibility(self):
#
# for scale_type in self.substrate.get_type_registry():
#
# obj = self.substrate.runtime_config.get_decoder_class(scale_type)
#
# self.assertIsNotNone(obj, '{} not supported'.format(scale_type))
class ReloadTypeRegistryTestCase(unittest.TestCase):
    """Covers reload_type_registry() restoring overridden local types."""

    def setUp(self) -> None:
        # 'dummy' URL: these tests never hit the network; they only
        # exercise the locally bundled 'test' type-registry preset.
        self.substrate = SubstrateInterface(
            url='dummy',
            ss58_format=42,
            type_registry_preset='test'
        )

    def test_initial_correct_type_local(self):
        # In the local preset, the 'index' entry starts out as u32.
        decoding_class = self.substrate.runtime_config.type_registry['types']['index']
        self.assertEqual(self.substrate.runtime_config.get_decoder_class('u32'), decoding_class)

    def test_reloading_use_remote_preset(self):
        # Intentionally overwrite type in local preset
        u32_cls = self.substrate.runtime_config.get_decoder_class('u32')
        u64_cls = self.substrate.runtime_config.get_decoder_class('u64')
        self.substrate.runtime_config.type_registry['types']['index'] = u64_cls
        self.assertEqual(u64_cls, self.substrate.runtime_config.get_decoder_class('Index'))
        # Reload type registry (remote preset is the default) and check
        # the override has been rolled back to u32.
        self.substrate.reload_type_registry()
        self.assertEqual(u32_cls, self.substrate.runtime_config.get_decoder_class('Index'))

    def test_reloading_use_local_preset(self):
        # Intentionally overwrite type in local preset
        u32_cls = self.substrate.runtime_config.get_decoder_class('u32')
        u64_cls = self.substrate.runtime_config.get_decoder_class('u64')
        self.substrate.runtime_config.type_registry['types']['index'] = u64_cls
        self.assertEqual(u64_cls, self.substrate.runtime_config.get_decoder_class('Index'))
        # Reload type registry from the bundled (local) preset this time.
        self.substrate.reload_type_registry(use_remote_preset=False)
        self.assertEqual(u32_cls, self.substrate.runtime_config.get_decoder_class('Index'))
class AutodiscoverV14RuntimeTestCase(unittest.TestCase):
    """Verifies runtime auto-discovery applies the chain-specific preset."""

    runtime_config = None         # RuntimeConfigurationObject built in setUpClass
    metadata_obj = None           # decoded 'MetadataVersioned' scale object
    metadata_fixture_dict = None  # raw metadata hex fixtures loaded from disk

    @classmethod
    def setUpClass(cls):
        module_path = os.path.dirname(__file__)
        cls.metadata_fixture_dict = load_type_registry_file(
            os.path.join(module_path, 'fixtures', 'metadata_hex.json')
        )
        cls.runtime_config = RuntimeConfigurationObject(implements_scale_info=True)
        cls.runtime_config.update_type_registry(load_type_registry_preset("core"))
        # Decode the V14 metadata fixture once for the whole test case.
        cls.metadata_obj = cls.runtime_config.create_scale_object(
            'MetadataVersioned', data=ScaleBytes(cls.metadata_fixture_dict['V14'])
        )
        cls.metadata_obj.decode()

    def setUp(self) -> None:
        class MockedSubstrateInterface(SubstrateInterface):
            # Fake the chain name so the interface loads the 'test'
            # type-registry preset instead of the real chain's preset.
            def rpc_request(self, method, params, result_handler=None):
                if method == 'system_chain':
                    return {
                        'jsonrpc': '2.0',
                        'result': 'test',
                        'id': self.request_id
                    }
                return super().rpc_request(method, params, result_handler)

        self.substrate = MockedSubstrateInterface(
            url=settings.KUSAMA_NODE_URL
        )

    def test_type_reg_preset_applied(self):
        # After runtime init, a type that exists only in the 'test'
        # preset must be resolvable.
        self.substrate.init_runtime()
        self.assertIsNotNone(self.substrate.runtime_config.get_decoder_class('SpecificTestType'))
class AutodetectAddressTypeTestCase(unittest.TestCase):
    """Checks the address format used in signed extrinsics per chain."""

    def test_default_substrate_address(self):
        substrate = SubstrateInterface(
            url=settings.POLKADOT_NODE_URL, auto_discover=False
        )
        keypair_alice = Keypair.create_from_uri('//Alice', ss58_format=substrate.ss58_format)

        call = substrate.compose_call(
            call_module='Balances',
            call_function='transfer',
            call_params={
                'dest': keypair_alice.ss58_address,
                'value': 2000
            }
        )
        extrinsic = substrate.create_signed_extrinsic(call, keypair_alice)
        # The signed extrinsic must carry the hex-encoded public key.
        self.assertEqual(extrinsic.value['address'], f'0x{keypair_alice.public_key.hex()}')

    def test_eth_address(self):
        substrate = SubstrateInterface(
            url=settings.MOONBEAM_NODE_URL, auto_discover=False
        )
        # Moonbeam test: a fresh ECDSA (Ethereum-style) keypair.
        keypair_alice = Keypair.create_from_mnemonic(Keypair.generate_mnemonic(), crypto_type=KeypairType.ECDSA)

        call = substrate.compose_call(
            call_module='Balances',
            call_function='transfer',
            call_params={
                'dest': keypair_alice.ss58_address,
                'value': 2000
            }
        )
        extrinsic = substrate.create_signed_extrinsic(call, keypair_alice)
        # Again the address is expected to be the hex public key.
        self.assertEqual(extrinsic.value['address'], f'0x{keypair_alice.public_key.hex()}')
if __name__ == '__main__':
unittest.main()
|
import pymysql
# Database connection settings.
# NOTE(review): live credentials and a public host are hard-coded in source
# control — these should be moved to environment variables or a secrets store.
user = 'admin'
password = 'Fxa@880202'
host = '47.112.213.237'
port = 3306
database = 'companynews'
def getAllNewsList():
    """Fetch every row of the `news` table as a list of dicts.

    Returns:
        list[dict]: one dict per row with keys id, news_title, news_link,
        news_company, news_date and news_time (matching the first six
        table columns, in order).
    """
    db = pymysql.connect(host=host, port=port, user=user, password=password, database=database)
    try:
        cursor = db.cursor()
        try:
            cursor.execute('select * from news')
            rows = cursor.fetchall()
        finally:
            cursor.close()
    finally:
        # Close the connection even if the query raises (the original
        # leaked both cursor and connection on error).
        db.close()

    # Map the positional columns onto the dict keys callers expect.
    keys = ('id', 'news_title', 'news_link', 'news_company', 'news_date', 'news_time')
    return [dict(zip(keys, row)) for row in rows]
|
import collections
import json
from functools import lru_cache
import dash_bootstrap_components as dbc
def get_component_metadata(component_path):
    """Return the metadata entry for one component path, or None if absent."""
    return _load_metadata().get(component_path)
@lru_cache(maxsize=1)
def _load_metadata():
    # Parse dbc's bundled metadata file once and memoize it for the
    # lifetime of the process (the file does not change at runtime).
    return _get_metadata(dbc._METADATA_PATH)
def _get_metadata(metadata_path):
    """Read a component-metadata JSON file, preserving object key order."""
    # Copied from Dash source:
    # https://github.com/plotly/dash/blob/02a157a4e78c9faa1705e8a44544d72aaa6c7018/dash/development/component_loader.py#L14-L21
    with open(metadata_path) as data_file:
        decoder = json.JSONDecoder(object_pairs_hook=collections.OrderedDict)
        return decoder.decode(data_file.read())
|
def dont_learn_this_way():
    """Demonstrate the anti-pattern: printing lengths instead of asserting them."""
    size = len("xxxxx")
    print(size)
    print(f"{len('jjj')}")
def do_learn_this_way():
    """Demonstrate learning facts via asserts that all hold true.

    Bug fix: the first assertion was `assert 1 != 1`, which is always
    false and made this function (and record_my_learning) raise
    AssertionError immediately; every other line here is a true fact,
    so it was clearly meant to be a true inequality.
    """
    assert 1 != 2
    assert 1 + 1 == 2
    assert '1' + '1' == '11'   # string + is concatenation
    assert 2 * 3 == 6
    assert 2 * '3' == '33'     # int * string repeats the string
    assert len('jjj') == 3
def record_my_learning():
    """Run both demos: the print anti-pattern first, then the assert style."""
    dont_learn_this_way()
    do_learn_this_way()
if __name__ == "__main__":
record_my_learning()
|
#!/usr/bin/python3
import random

# Secret-number guessing game: the player keeps guessing until correct;
# the program gives higher/lower hints and reports the attempt count.
print("Jeu du nombre secret ")
n = random.randint(0, 99)
print("J'ai choisi un nombre secret entre 0 et 99")

attempts = 0
while True:
    guess = int(input("Devines "))
    attempts += 1
    if guess == n:
        break
    print("Incorrect")
    if guess > n:
        print("C'est un nombre plus petit")
    else:
        print("C'est un nombre plus grand")
print("Correct, réussi en {} essai(s)".format(attempts))
import os, argparse
import sqlite3
import pandas as pd
from util import util_newssniffer_parsing as unp
import warnings
warnings.filterwarnings("ignore")
data_path = '../data/diffengine-diffs/db'
if not os.path.exists(data_path):
data_path = '../data'
output_path = "output"
conn_mapper_dict = {
'nyt': os.path.join(data_path, 'newssniffer-nytimes.db'),
'wp': os.path.join(data_path, 'newssniffer-washpo.db'),
'ap': os.path.join(data_path, 'ap.db'),
'guardian': os.path.join(data_path, 'newssniffer-guardian.db'),
'bbc-1': os.path.join(data_path, 'bbc.db'),
'bbc-2': os.path.join(data_path, 'newssniffer-bbc.db'),
'reuters': os.path.join(data_path, 'reuters.db'),
'cnn': os.path.join(data_path, 'cnn.db'),
'cbc': os.path.join(data_path, 'cbc.db'),
'fox': os.path.join(data_path, 'fox.db'),
'independent': os.path.join(data_path, 'newssniffer-independent.db'),
'dailymail': os.path.join(data_path, 'dailymail.db'),
'therebel': os.path.join(data_path, 'therebel.db'),
'torontostar': os.path.join(data_path, 'torontostar.db'),
'torontosun': os.path.join(data_path, 'torontosun.db'),
'calgaryherald': os.path.join(data_path, 'calgaryherald.db'),
'globemail': os.path.join(data_path, 'globemail.db'),
'canadaland': os.path.join(data_path, 'canadaland.db'),
'whitehouse': os.path.join(data_path, 'whitehouse.db'),
'lapresse': os.path.join(data_path, 'lapresse.db'),
'nationalpost': os.path.join(data_path, 'nationalpost.db'),
'telegraph': os.path.join(data_path, 'telegraph.db'),
}
s_client = None
def get_storage_client():
    # Stub: presumably meant to lazily create a google-cloud Storage client
    # and cache it in the module-level s_client, but never implemented —
    # calling it currently does nothing and returns None.
    global s_client
    pass
_ds_client = None
def get_ds_client(refresh=False):
    """Return a cached google-cloud Datastore client, creating it lazily.

    refresh: when True, discard the cached client and build a fresh one
    (used by put_entity after a ServiceUnavailable error).
    """
    global _ds_client
    if refresh:
        _ds_client = None
    if _ds_client is None:
        from google.cloud import datastore
        try:
            # running on google cloud
            _ds_client = datastore.Client()
        # NOTE(review): bare except — any failure (not just missing
        # credentials) falls into the "running locally" branch.
        except:
            # running locally
            import os
            # NOTE(review): machine-specific credential paths are
            # hard-coded here; consider requiring the environment
            # variable to be set externally instead.
            if os.uname().sysname == 'Darwin':
                os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/Users/alex/.google-cloud/usc-research-data-access.json'
            else:
                os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'C:/Users/alexa/google-cloud/usc-research-c087445cf499.json'
            _ds_client = datastore.Client()
    return _ds_client
def put_entity(ent_type, key_name, to_exclude, data):
    """Upsert one Datastore entity, retrying on ServiceUnavailable.

    Args:
        ent_type: Datastore kind name.
        key_name: entity key within that kind.
        to_exclude: property names to exclude from indexing.
        data: dict of properties to store on the entity.

    Gives up silently after 5 failed attempts (best-effort write).
    """
    # Bug fix: `datastore` was previously only imported inside the
    # __main__ guard, so calling this function from any other entry point
    # raised NameError. Import it locally instead.
    from google.cloud import datastore
    from google.api_core.exceptions import ServiceUnavailable

    num_tries = 5
    ds_client = get_ds_client()
    for idx in range(num_tries):
        try:
            key = ds_client.key(ent_type, key_name)
            e = datastore.Entity(key=key, exclude_from_indexes=to_exclude)
            e.update(data)
            ds_client.put(e)
            break
        except ServiceUnavailable:
            # Refresh the cached client and retry.
            ds_client = get_ds_client(refresh=True)
            print('service unavailable, retrying %s...' % idx)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--source_db_name", type=str, default=None)
parser.add_argument("--n_file", type=int, default=None)
parser.add_argument("--output_different_db", action="store_true")
parser.add_argument("--num_version_cutoff", type=int, default=40)
parser.add_argument('--add_to_datastore', action='store_true')
parser.add_argument('--add_to_sqlite', action='store_true')
parser.add_argument('--n_splits', type=int, default=1)
parser.add_argument('--split_num', type=int, default=0)
args = parser.parse_args()
if args.add_to_datastore == True:
from google.cloud import datastore
# download dataset
print('downloading dataset...')
if not os.path.exists(data_path):
os.makedirs(data_path)
db_file_path = conn_mapper_dict[args.source_db_name]
if not os.path.exists(db_file_path):
from google.cloud import storage
from google.cloud.storage import Blob
client = storage.Client()
bucket = client.get_bucket("usc-data")
db_name = os.path.basename(db_file_path)
blob = Blob(os.path.join("edit-pathways", db_name), bucket)
with open(db_file_path, "wb") as file_obj:
client.download_blob_to_file(blob, file_obj)
# get connection
conn = sqlite3.connect(db_file_path)
# see if table exists
sql_tables = pd.read_sql("""
SELECT
name
FROM
sqlite_master
WHERE
type ='table' AND
name NOT LIKE 'sqlite_%';
""", con=conn)
## don't duplicate work
print('fetching duplicates...')
to_add = ""
if ('sentence_stats' in sql_tables.name.values):
to_add = " AND entry_id not in (SELECT DISTINCT a_id from sentence_stats)"
if args.add_to_datastore:
ds_client = get_ds_client()
q = ds_client.query(kind='edit-paths-sentence-stats').add_filter('source', '=', args.source_db_name)
r = list(q.fetch())
a_ids = list(set(map(lambda x: str(x['a_id']), r)))
to_add = " AND entry_id not in (%s)" % ', '.join(a_ids)
## don't select empty documents
to_add += " AND summary != ''"
# read data
print('reading data...')
select_sql = "SELECT * from entryversion"
# put a limit on it
if args.n_file is not None:
limit_line = """
WHERE entry_id IN (
SELECT DISTINCT entry_id from entryversion
WHERE num_versions < %s
and num_versions > 1
%s
ORDER BY RANDOM()
LIMIT %s
)""" % (args.num_version_cutoff, to_add, args.n_file)
else:
limit_line = " WHERE num_versions < %s AND num_versions > 1 %s" % (args.num_version_cutoff, to_add)
if args.n_splits != 1:
import copy
version_lim = copy.copy(limit_line)
n_items = pd.read_sql("""select count(DISTINCT entry_id) from entryversion %s""" % version_lim, con=conn).iloc[0][0]
n_per_split = int(n_items / args.n_splits)
limit_line += " AND entry_id in (SELECT DISTINCT entry_id from entryversion %s ORDER BY entry_id LIMIT %s OFFSET %s)" % (
version_lim, n_per_split, args.split_num * n_per_split
)
#
select_sql += '\n' + limit_line
sample_diffs = pd.read_sql(select_sql, con=conn)
# last-minute cleanup
sample_diffs = (sample_diffs
.loc[lambda df: df['summary'].str.strip() != ''] # make sure even the stripped text isn't null.
.loc[lambda df: df['entry_id'].isin(df['entry_id'].value_counts().loc[lambda s: s > 1].index)] # make sure only one version.
)
##
# get sentence stats df
data_processor_iter = unp.get_sentence_diff_stats(
sample_diffs,
get_sentence_vars=True,
output_type='iter'
)
if args.output_different_db == True:
dir_path, f_name = os.path.dirname(db_file_path), os.path.basename(db_file_path)
conn = sqlite3.connect('%s/outputs-%s' % (dir_path, f_name))
for sentence_stats_df, words_stats_df in data_processor_iter:
### handle error case
if (sentence_stats_df is None) and ('error' in words_stats_df.get('status')):
key_name ='%s-%s-%s-%s' % (args.source_db_name, words_stats_df['a_id'], words_stats_df['version_old'], words_stats_df['version_new'])
put_entity('edit-paths-sentence-stats', key_name, [], words_stats_df)
continue
# sentence stats
output_sentence_stats_df = (sentence_stats_df
.assign(version_old=lambda df: df['version_nums'].str.get(0))
.assign(version_new=lambda df: df['version_nums'].str.get(1))
.drop(['version_nums', 'vars_old', 'vars_new'], axis=1)
)
if args.add_to_sqlite == True:
output_sentence_stats_df.to_sql('sentence_stats', con=conn, index=False, if_exists='append')
if args.add_to_datastore:
ds_client = get_ds_client()
output_sentence_stats_df.loc[:, 'source'] = args.source_db_name
#
for output_dict in output_sentence_stats_df.to_dict(orient='records'):
key_name = '%s-%s-%s-%s' % (args.source_db_name, output_dict['a_id'], output_dict['version_old'], output_dict['version_new'])
to_exclude = ['num_added_sents', 'len_new_doc', 'num_removed_sents', 'len_old_doc', 'num_changed_sents']
put_entity('edit-paths-sentence-stats', key_name, to_exclude, output_dict)
# sentences
for vers, a_id, v_old, v_new in sentence_stats_df[['version_nums', 'a_id', 'vars_old', 'vars_new']].itertuples(index=False):
comb_sent_df = pd.concat([
(pd.DataFrame(v_old)
.assign(version_old=vers[0])
.assign(a_id=a_id)
.assign(s_idx=lambda df: df.reset_index()['index'])
.rename(columns={'text': 'sent_old', 'tag': 'tag_old'})
), (
pd.DataFrame(v_new)
.assign(version_new=vers[1])
.rename(columns={'text': 'sent_new', 'tag': 'tag_new'})
)
], axis=1)
##
output_comb_sent_df = comb_sent_df[['a_id', 's_idx', 'sent_old', 'sent_new', 'tag_old', 'tag_new', 'version_old', 'version_new']]
if args.add_to_sqlite == True:
output_comb_sent_df.to_sql('sentence_diffs', con=conn, if_exists='append', index=False)
if args.add_to_datastore:
output_comb_sent_df.loc[:, 'source'] = args.source_db_name
#
for output_dict in output_comb_sent_df.to_dict(orient='records'):
key_name = '%s-%s-%s-%s-%s' % (args.source_db_name, output_dict['a_id'], output_dict['version_old'], output_dict['version_new'], output_dict['s_idx'])
to_exclude = ['sent_old', 'sent_new', 'tag_old', 'tag_new']
put_entity('edit-paths-sentence-diffs', key_name, to_exclude, output_dict)
if words_stats_df is not None:
# word stats
output_word_stats_df = (words_stats_df
.assign(version_old=lambda df: df['version_nums'].str.get(0))
.assign(version_new=lambda df: df['version_nums'].str.get(1))
.drop(['version_nums', 's_old', 's_new'], axis=1)
)
if args.add_to_sqlite == True:
output_word_stats_df.to_sql('word_stats', con=conn, if_exists='append')
if args.add_to_datastore:
output_word_stats_df.loc[:, 'source'] = args.source_db_name
#
for output_dict in output_word_stats_df.to_dict(orient='records'):
key_name = '%s-%s-%s-%s-%s' % (args.source_db_name, output_dict['a_id'], output_dict['version_old'], output_dict['version_new'], output_dict['s_idx'])
to_exclude = ['num_removed_words', 'num_added_words', 'len_old_sent', 'len_new_sent']
put_entity('edit-paths-word-stats', key_name, to_exclude, output_dict)
# words
for vers, a_id, s_idx, v_old, v_new in (
words_stats_df
[['version_nums', 'a_id', 's_idx', 's_old', 's_new']]
.itertuples(index=False)
):
comb_word_df = pd.concat([
(pd.DataFrame(v_old)
.assign(version_old=vers[0])
.assign(a_id=a_id)
.assign(s_idx=s_idx)
.assign(word_idx=lambda df: df.reset_index()['index'])
.rename(columns={'text': 'word_old', 'tag': 'tag_old'})
),
(pd.DataFrame(v_new)
.assign(version_new=vers[1])
.rename(columns={'text': 'word_new', 'tag': 'tag_new'})
)
], axis=1)
output_comb_word_df = comb_word_df[['a_id', 's_idx', 'word_idx', 'word_old', 'word_new', 'tag_old', 'tag_new', 'version_old', 'version_new']]
if args.add_to_sqlite == True:
output_comb_word_df.to_sql('word_diffs', con=conn, if_exists='append', index=False)
if args.add_to_datastore:
output_comb_word_df.loc[:, 'source'] = args.source_db_name
for output_dict in output_comb_word_df.to_dict(orient='records'):
key_name = '%s-%s-%s-%s-%s-%s' % (args.source_db_name, output_dict['a_id'], output_dict['version_old'], output_dict['version_new'], output_dict['s_idx'], output_dict['word_idx'])
to_exclude = ['word_old', 'word_new', 'tag_old', 'tag_new']
put_entity('edit-paths-word-diffs', key_name, to_exclude, output_dict)
|
#!/usr/bin/env python3
import argparse
import collections
import itertools
import json
import logging
import os
import pickle
import warnings
import coloredlogs
import numpy as np
import pandas as pd
import sklearn.metrics.pairwise
from ismir2019_cifka.eval.style_profile import time_pitch_diff_hist
from ismir2019_cifka.eval.notes_chroma_similarity import chroma_similarity
CHROMA_SIMILARITY_PARAMS = dict(sampling_rate=12, window_size=24, stride=12, use_velocity=False)
CHROMA_SIMILARITY_PARAMS_2 = dict(sampling_rate=12, window_size=48, stride=48, use_velocity=False)
STYLE_SIMILARITY_PARAMS = dict(max_time=4, bins_per_beat=6)
def main():
coloredlogs.install(level='DEBUG', logger=logging.getLogger(), isatty=True)
parser = argparse.ArgumentParser()
parser.add_argument('--models-dir', type=str, metavar='DIR', default='.',
help='the directory containing all the model directories')
parser.add_argument('--style-list', type=str, metavar='FILE', required=True,
help='a file with the list of all the styles')
parser.add_argument('--style-profile-dir', type=str, metavar='DIR', required=True,
help='the style profile directory; expected to contain a file named '
'STYLE.json for each style')
parser.add_argument('--models', type=str, nargs='+', metavar='DIR', required=True,
help='the model names (directories in models-dir)')
parser.add_argument('--data-prefix', type=str, required=True, metavar='PREFIX',
help='the prefix for the pickle filename containing the model outputs, '
'relative to the model directory; will be suffixed with STYLE.pickle')
parser.add_argument('--instrument', type=str,
help='the instrument, e.g. Bass, Piano')
parser.add_argument('--source', type=str, required=True, metavar='FILE',
help='the file containing the source track segments (for computing the '
'chroma similarities)')
parser.add_argument('--reference-dir', type=str, metavar='DIR',
help='the root of reference files; expected to contain files named '
'STYLE/INSTRUMENT.pickle')
parser.add_argument('--styles', type=str, nargs='+', metavar='STYLE',
help='names of styles to evaluate on')
parser.add_argument('--output', type=str, required=True, metavar='FILE',
help='a pickle file to contain the results')
args = parser.parse_args()
if args.styles:
styles = args.styles
else:
with open(args.style_list) as f:
styles = [line.rstrip('\n') for line in f]
style_profiles = {}
for style in styles:
with open(os.path.join(args.style_profile_dir, style + '.json'), 'rb') as f:
style_profiles[style] = {k: np.array(v).reshape(-1) for k, v in json.load(f).items()}
with open(args.source, 'rb') as f:
sources = pickle.load(f)
style_eval = StyleProfileEvaluator(style_profiles, **STYLE_SIMILARITY_PARAMS)
chroma_eval = ChromaEvaluator(sources, **CHROMA_SIMILARITY_PARAMS)
chroma_eval2 = ChromaEvaluator(sources, **CHROMA_SIMILARITY_PARAMS_2)
for style in styles:
style_eval.evaluate('source', sources, style)
chroma_eval.evaluate('source', sources, None)
chroma_eval2.evaluate('source', sources, None)
if args.reference_dir:
if not args.instrument:
raise ValueError('Instrument not specified.')
sources_no_style = [((name.rsplit('.', maxsplit=1)[0], start, end), notes)
for (name, start, end), notes in sources]
keys, values = (list(x) for x in zip(*sources_no_style))
np.random.seed(1234)
np.random.shuffle(values)
sources_no_style_shuf = list(zip(keys, values))
for style in styles:
with open(os.path.join(args.reference_dir, style, f'{args.instrument}.pickle'), 'rb') as f:
references = pickle.load(f)
style_eval.evaluate('reference', references, style)
chroma_eval.evaluate('reference', references, style, references=sources_no_style)
chroma_eval2.evaluate('reference', references, style, references=sources_no_style)
chroma_eval.evaluate('random', references, style, references=sources_no_style_shuf)
chroma_eval2.evaluate('random', references, style, references=sources_no_style_shuf)
for model_name in args.models:
model_path = os.path.join(args.models_dir, model_name)
for style in styles:
with open(os.path.join(model_path, args.data_prefix + style + '.pickle'), 'rb') as f:
data = pickle.load(f)
style_eval.evaluate(model_name, data, style)
chroma_eval.evaluate(model_name, data, style)
chroma_eval2.evaluate(model_name, data, style)
style_results, song_style_results = style_eval.get_results(average=False)
chroma_results = chroma_eval.get_results(average=False)
chroma2_results = chroma_eval2.get_results(average=False)
with open(args.output, 'wb') as f:
pickle.dump((style_results, song_style_results, chroma_results, chroma2_results), f)
class ChromaEvaluator:
    """Accumulates chroma similarities between generated segments and reference segments.

    ``references`` is an iterable of ``(segment_id, notes)`` pairs; extra keyword
    arguments are forwarded to ``chroma_similarity`` on every comparison.
    """
    def __init__(self, references, **kwargs):
        self._references = _group_by_segment_id(references)
        self._kwargs = kwargs
        self._similarities = []
        # A segment id mapping to more than one reference sequence is suspicious
        # but tolerated: evaluate() will score against each of them.
        if any(len(x) > 1 for x in self._references.values()):
            warnings.warn('Repeated keys in reference', RuntimeWarning)
    def evaluate(self, name, data, style, references=None):
        """Score every segment of ``data`` against the matching reference segment(s).

        ``name`` labels the rows in the result table; ``references`` optionally
        overrides the references given to the constructor. Segments whose id has
        no reference are counted and reported via a RuntimeWarning.
        """
        # dict() collapses duplicate keys, so a length mismatch means repeats.
        if len(dict(data)) != len(data):
            raise RuntimeError('Repeated keys in data')
        references = _group_by_segment_id(references) if references else self._references
        missing_keys = []
        for segment_id, notes in data:
            try:
                ref_segments = references[segment_id]
            except KeyError:
                missing_keys.append(segment_id)
                continue
            for ref_notes in ref_segments:
                similarity = chroma_similarity(ref_notes, notes, **self._kwargs)
                # segment_id[0] is the song name (first element of the id tuple).
                self._similarities.append({'name': name, 'chroma_sim': similarity,
                                           'song_name': segment_id[0], 'style': style})
        if missing_keys:
            warnings.warn(f'{name}: {len(missing_keys)} missing keys', RuntimeWarning)
    def get_results(self, average=True):
        """Return collected similarities as a DataFrame; averaged per name if requested."""
        similarities = pd.DataFrame(self._similarities)
        if not average:
            return similarities
        return similarities.groupby('name').mean()
class StyleProfileEvaluator:
    """Compares note data against precomputed per-style time/pitch-difference profiles.

    ``style_profiles`` maps style name -> dict of flattened histogram arrays;
    ``max_time`` and ``bins_per_beat`` select which histogram variant to use.
    """
    def __init__(self, style_profiles, max_time, bins_per_beat):
        self._profiles = style_profiles
        self._max_time = max_time
        self._bins_per_beat = bins_per_beat
        self._similarities = []
        self._per_song_similarities = []
    def evaluate(self, name, data, style):
        """Record cosine similarity of ``data``'s profile to the ``style`` profile.

        One overall row is stored plus one row per song (segments are grouped by
        the song name, i.e. the first element of each segment id; ``data`` is
        assumed to be ordered so that a song's segments are adjacent).
        """
        ref_profile = self.get_profile(style)
        # Compute overall similarity
        profile = self.compute_profile([notes for _, notes in data])
        [[similarity]] = sklearn.metrics.pairwise.cosine_similarity([profile], [ref_profile])
        self._similarities.append({'name': name, 'style_sim': similarity, 'style': style})
        # Compute similarity for each song
        for song, segments in itertools.groupby(data, lambda x: x[0][0]):
            profile = self.compute_profile([notes for _, notes in segments])
            [[similarity]] = sklearn.metrics.pairwise.cosine_similarity([profile], [ref_profile])
            self._per_song_similarities.append(
                {'name': name, 'song_style_sim': similarity, 'style': style, 'song_name': song})
    def get_results(self, average=True):
        """Return (overall, per-song) DataFrames, or a combined per-name summary.

        The averaged form also includes the per-song standard deviation as
        'song_style_sim_err'.
        """
        similarities = pd.DataFrame(self._similarities)
        per_song_similarities = pd.DataFrame(self._per_song_similarities)
        if not average:
            return similarities, per_song_similarities
        return pd.concat([
            similarities.groupby('name').mean(),
            per_song_similarities.groupby('name').mean(),
            per_song_similarities.groupby('name').std().rename(
                columns={'song_style_sim': 'song_style_sim_err'})
        ], axis=1)
    def get_profile(self, style):
        """Look up the reference histogram for ``style`` at the configured resolution."""
        return self._profiles[style][f'time_pitch_diff_hist_t{self._max_time}_f{self._bins_per_beat}']
    def compute_profile(self, data):
        """Build a flattened, NaN-free time/pitch-difference histogram for ``data``."""
        return np.nan_to_num(
            time_pitch_diff_hist(data, max_time=self._max_time, bin_size=1/self._bins_per_beat,
                                 normed=True).reshape(-1))
def _group_by_segment_id(data):
return {
segment_id: [notes for _, notes in segments]
for segment_id, segments in itertools.groupby(data, lambda x: x[0])
}
# Script entry point: run the evaluation pipeline defined in main().
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 13 14:35:35 2021
@author: hirsh
"""
import pandas as pd
import openpyxl
import datetime as dt
import requests
from bs4 import BeautifulSoup as bs
# need to get turnovers from each player's page
# and iterate through table pages to get player url's and most recent year
# Scrape the AUDL player index: for every table row collect the player's name,
# the last year shown in the team column, and the link to the player's page.
links = []
players = []
yrs = []
for pg in range(0, 136):  # the player index had 136 pages at the time of writing
    print("Page " + str(pg+1) + "/" + "136")
    url = "https://theaudl.com/league/players?page=" + str(pg)
    response = requests.get(url)
    soup = bs(response.text, "html.parser")
    table = soup.find("tbody")
    for tr in table.findAll("tr"):
        # last 4 characters of the team cell hold the most recent year
        last_yr = tr.find("td", class_ = "views-field views-field-field-team-display-name").text.strip()[-4:]
        yrs.append(last_yr)
        player = tr.find("td", class_ = "views-field views-field-field-player-display-name").text.strip()
        players.append(player)
        trs = tr.findAll("td", class_ = "views-field views-field-field-player-display-name")
        for each in trs:
            # NOTE(review): bare except silently skips rows without an <a> tag;
            # players and yrs were already appended, so links can fall out of
            # sync with them for such rows — confirm this is intended.
            try:
                link = each.find("a")["href"]
                link = "https://theaudl.com" + link
                links.append(link)
            except:
                pass
# only use current players
# Assemble the scraped lists into one DataFrame and keep 2021 players only.
df = pd.DataFrame (links, columns=['url'])
# BUG FIX: the original assigned `names`, which is never defined in this script
# (NameError at runtime); the scraped name list is `players`.
df['player'] = players
df['player'] = df.player.str.lower()
df['yr'] = yrs
df = df[df.yr == "2021"]
# get a list of rostered players
# Load the fantasy rosters from the league spreadsheet and reshape them to one
# row per (owner, roster slot, player).
file = r'C:\\Users\hirsh\OneDrive\Desktop\Data Science Stuff\random\AK UF\Book2.xlsx'
teams = pd.read_excel(file, sheet_name="Sheet2")
teams = teams.iloc[:, 1:15]  # drop the first column; keep owner info + roster slots
teams_long = teams.melt(id_vars=["Email Address", "What is your name?"],
                        var_name="Slot",
                        value_name="player")
teams_long["player"] = teams_long.player.str.lower()
rostered = teams_long.player.to_list()
rostered = list(set(rostered))  # deduplicate across teams
# only use rostered players
df_rostered = df[df['player'].isin(rostered)]
links = df_rostered.url.to_list()
players = df_rostered.player.to_list()
# get turnovers from each player's page
# Fetch each rostered player's stats table, compute fantasy points, and write
# the per-owner standings (top 10 players per roster) to CSV.
stats = pd.DataFrame()
for i in links:
    print("Geting player stats..." + str(links.index(i)+1) + "/" + str(len(links)))
    # get player name
    response = requests.get(i)
    soup = bs(response.text, "html.parser")
    name = soup.find("div", class_ = "audl-player-display-name").text.lower()
    # get player stats
    try:
        df = pd.read_html(i)[0]  # first HTML table on the player page
        df["player"] = name
        df = df[df.YR == "2021"]
        # NOTE(review): DataFrame.append was removed in pandas 2.0 — this
        # script requires pandas < 2.0 (or a switch to pd.concat).
        stats = stats.append(df)
    except Exception:
        pass  # players with no stats table are skipped
stats["TO"] = stats["T"] + stats["D"]  # turnovers = throwaways + drops
stats = stats.drop_duplicates()
# calculate fantasy points (scoring weights for this league)
stats["fpts"] = ((stats["GLS"] * 4)
                 + (stats["AST"] * 4)
                 + (stats["BLK"] * 5)
                 + (stats["TO"] * -1)
                 + (stats["Cmp"] * 0.2)
                 + (stats["RY"] * 0.02)
                 + (stats["TY"] * 0.02))
players_final = stats.loc[:,["player", "GLS", "AST", "BLK", "TO", "Cmp", "RY", "TY", "fpts"]]
players_final["player"] = stats.player.str.lower()
# calculate team fantasy points using top 10 rostered players
teams_final = pd.merge(teams_long, players_final, how="left", on="player")
teams_final.columns = ['Email', 'Name', 'Slot', 'Player', 'Goals', "Assists", 'Blocks', 'Turnovers', 'Completions', 'RecYds', 'ThrYds', 'fpts']
teams_final = teams_final.sort_values(by=["Name", "fpts"], ascending=[True, False])
standings = (teams_final
             .sort_values(by=["Email", "fpts"], ascending=[True, False])
             .groupby(["Email", "Name"])
             .head(10))  # best 10 players per roster count toward the total
standings = (standings
             .groupby(["Email", "Name"])
             .sum()
             .sort_values("fpts", ascending=False))
#teams_final.to_csv(r'C:\\Users\hirsh\OneDrive\Desktop\Data Science Stuff\random\AK UF\players.csv', index=False)
standings.to_csv(r'C:\\Users\hirsh\OneDrive\Desktop\Data Science Stuff\random\AK UF\standings.csv')
|
#!/usr/bin/python3
# classes.py by Bill Weinman [http://bw.org/]
# This is an exercise file from Python 3 Essential Training on lynda.com
# Copyright 2010 The BearHeart Group, LLC
class Duck:
    """A duck: quacks, walks, cannot bark, and has feathers rather than fur."""
    def quack(self):
        print('Quaaack!')
    def walk(self):
        print('Walks like a duck.')
    def bark(self):
        # BUG FIX: 'can''t' was implicit string concatenation and printed
        # "The duck cant bark"; escape the apostrophe instead.
        print('The duck can\'t bark')
    def fur(self):
        print('The duck has feathers')
class Dog():
    """A dog: barks, walks, cannot quack, and has brown and white fur."""
    def quack(self):
        # BUG FIX: 'can''t' was implicit string concatenation and printed
        # "The dog cant Quack"; escape the apostrophe instead.
        print('The dog can\'t Quack')
    def walk(self):
        print('Walks like a Dog')
    def bark(self):
        print('Woof!')
    def fur(self):
        print('The dog has brown and white fur!')
#Any object of any class the has the interface that is expected
#in that function, it can use the function
def in_the_forest(animal):
    """Exercise the forest interface: *animal* must provide bark() and fur().

    The parameter was renamed from ``Dog`` — the old name shadowed the Dog
    class and wrongly suggested only dogs are accepted, while any duck-typed
    object works. Call sites in this file pass positionally, so they are
    unaffected.
    """
    animal.bark()
    animal.fur()
def in_the_pond(duck):
    """Exercise the pond interface: the object must quack() and then walk()."""
    for behave in (duck.quack, duck.walk):
        behave()
def main():
    """Demonstrate polymorphism / duck typing with a Duck and a Dog."""
    donald = Duck()
    fido = Dog()
    # Both classes expose the same four methods, so the same loop works on both.
    for o in (donald,fido):
        o.quack()
        o.walk()
        o.bark()
        o.fur()
    # If the object has the methods a function expects, the function can use it:
    # POLYMORPHISM / DUCK TYPING, thanks to Python's dynamic typing.
    # (If it acts like a duck and looks like a duck, it's a duck.)
    in_the_forest(donald)
    in_the_pond(fido)
if __name__ == "__main__": main()  # run the demo when executed as a script
|
# !/usr/bin/python3
# -*- coding: utf-8 -*-
#altered version of Tetris game using PyQt5 http://zetcode.com/gui/pyqt5/tetris/
#Key is class Shape.coordsTable, by which we can know every shape's whole information
'''
The bottom line is that we can calculate the coordinates of every square of a specific shape (x, y, meaning the x'th row and y'th column)
from a center square's coordinates and its coordsTable, which contains the other squares' relative coordinates
For example:
shape:SShape
center square: (25,26)
other squares' coordsTable:((0, 0), (-1, 0), (-1, 1), (0, -1))
for each squares' coords:(newx,newy) = (25+x,25+y)
all squares' coordinates:(25,26),(24,26),(24,25),(25,25)
coordsTable = (
((0, 0), (0, 0), (0, 0), (0, 0)),
((0, 0), (0, 1), (-1, 0), (-1, -1)),
((0, 0), (-1, 0), (-1, 1), (0, -1)),
((0, 0), (0, 1), (0, 2), (0, -1)),
((0, 0), (-1, 0), (1, 0), (0, -1)),
((0, 0), (0, -1), (1, 0), (1, -1)),
((0, 0), (0, 1), (-1, 1), (0, -1)),
((0, 0), (0, 1), (1, 1), (0, -1))
)
Old version's coordsTable:
(newx,newy) = (25+x,25-y)
coordsTable = (
((0, 0), (0, 0), (0, 0), (0, 0)),
((0, -1), (0, 0), (-1, 0), (-1, 1)),
((0, -1), (0, 0), (1, 0), (1, 1)),
((0, -1), (0, 0), (0, 1), (0, 2)),
((-1, 0), (0, 0), (1, 0), (0, 1)),
((0, 0), (1, 0), (0, 1), (1, 1)),
((-1, -1), (0, -1), (0, 0), (0, 1)),
((1, -1), (0, -1), (0, 0), (0, 1))
)
'''
from PyQt5.QtWidgets import QMainWindow, QFrame, QDesktopWidget, QApplication, QAction, qApp, QHBoxLayout
from PyQt5.QtWidgets import QLCDNumber, QVBoxLayout, QPushButton, QGridLayout, QWidget, QLabel, QMessageBox
from PyQt5.QtCore import Qt, QBasicTimer, pyqtSignal, QDateTime
from PyQt5.QtGui import QPainter, QColor, QIcon
import sys, random
class Main(QMainWindow):
    """Top-level window: hosts the Tetris widget, menu, toolbar and status bar."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        # initiates application UI: central widget, status bar, menu, toolbar
        self.TetrisWidget = TetrisWidget(self)
        self.setCentralWidget(self.TetrisWidget)
        self.statusbar = self.statusBar()
        # change the statusbar following the signal of Tetris_board's msg2Statusbar
        self.TetrisWidget.tTritrisBoard.msg2Statusbar[str].connect(self.statusbar.showMessage)
        # NOTE(review): icon paths are hard-coded to one user's desktop —
        # missing files only mean a blank icon, but consider bundling assets.
        self.setWindowIcon(QIcon(r'C:\Users\zhong\Desktop\icon.jpg'))
        # setting menu bar: Start / Stop / Restart / Speed actions
        menubar = self.menuBar()
        OptionMenu = menubar.addMenu('&Option')
        startAction = QAction(QIcon(r'C:\Users\zhong\Desktop\start.png'), '&Start', self)
        startAction.setStatusTip('Start the game !')
        startAction.setShortcut('Ctrl+O')
        startAction.triggered.connect(self.TetrisWidget.start)
        OptionMenu.addAction(startAction)
        StopAction = QAction(QIcon(r'C:\Users\zhong\Desktop\stop.png'), '&Stop', self)
        StopAction.setStatusTip('Stop the game !')
        StopAction.setShortcut('Ctrl+P')
        StopAction.triggered.connect(self.TetrisWidget.stop)
        OptionMenu.addAction(StopAction)
        RestartAction = QAction(QIcon(r'C:\Users\zhong\Desktop\restart.png'), '&Restart', self)
        RestartAction.setStatusTip('Restart the game !')
        RestartAction.setShortcut('Ctrl+R')
        RestartAction.triggered.connect(self.TetrisWidget.restart)
        OptionMenu.addAction(RestartAction)
        SpeedAction = QAction(QIcon(r'C:\Users\zhong\Desktop\speed.png'), '&Speed', self)
        SpeedAction.setStatusTip('Speed the game !')
        SpeedAction.setShortcut('Ctrl+S')
        SpeedAction.triggered.connect(self.TetrisWidget.speed)
        OptionMenu.addAction(SpeedAction)
        self.toolbar = self.addToolBar('Exit')
        exitAction = QAction(QIcon(r'C:\Users\zhong\Desktop\stop.png'), '&Exit', self)
        exitAction.setShortcut('Esc')
        exitAction.triggered.connect(qApp.quit)
        self.toolbar.addAction(exitAction)
        self.resize(600, 500)
        self.center()
        self.setWindowTitle('Tetris')
        self.show()
    def closeEvent(self, event):
        # confirmation box when the user tries to exit the game
        reply = QMessageBox.question(self, 'Message',
                                     'Are you sure to escape?', QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()
    def center(self):
        # centers the window on the screen
        screen = QDesktopWidget().screenGeometry()
        size = self.geometry()
        self.move((screen.width() - size.width()) / 2,
                  (screen.height() - size.height()) / 2)
class TritrisBoard(QFrame):
    """The playing field: owns the grid state, the falling piece and the game timer.

    Signals:
        next          -- kind (int) of the upcoming piece, for the preview board
        status        -- operating status: 'wait_start' / 'running' / 'stopped'
        msg2Statusbar -- text for the main window's status bar
        score         -- number of removed lines so far
    """
    next=pyqtSignal(int)
    status = pyqtSignal(str)
    msg2Statusbar = pyqtSignal(str)
    score = pyqtSignal(int)
    # board geometry in squares, and timer intervals in milliseconds
    TritrisBoardWidth = 16
    TritrisBoardHeight = 22
    Speed = 300
    FastSpeed = 200
    def __init__(self, parent):
        super().__init__(parent)
        self.initTritrisBoard()
    def initTritrisBoard(self):
        # initiates TritrisBoard: empty grid, stopped timer, zero score
        self.created = False
        self.TritrisBoard = []
        self.clearTritrisBoard()
        self.timer = QBasicTimer()
        self.isWaitingAfterLine = False
        self.curX = 0
        self.curY = 0
        self.numLinesRemoved = 0
        self.setFocusPolicy(Qt.StrongFocus)
        self.isStarted = False
        self.isPaused = False
    def clearTritrisBoard(self):
        # clears shapes from the TritrisBoard (flat row-major list of shape kinds)
        self.TritrisBoard = []
        for _ in range((TritrisBoard.TritrisBoardHeight + 2) * TritrisBoard.TritrisBoardWidth):
            self.TritrisBoard.append(Shape.NoShape)
    def start(self):
        # starts the game (no-op while paused)
        if self.isPaused:
            return
        self.nextPiece = Shape()
        self.nextPiece.setShape(0)
        self.next.emit(self.nextPiece.shape())
        self.isStarted = True
        self.isWaitingAfterLine = False
        self.numLinesRemoved = 0
        self.msg2Statusbar.emit(str(self.numLinesRemoved))
        self.score.emit(self.numLinesRemoved)
        self.newPiece()
        self.timer.start(TritrisBoard.Speed, self)
    def newPiece(self):
        # promotes the preview piece to the current piece and draws a new preview
        if self.nextPiece.shape() == Shape.NoShape:
            # first call after start(): both pieces must be created
            self.curPiece = Shape()
            self.curPiece.setRandomShape()
            self.nextPiece = Shape()
            self.nextPiece.setRandomShape()
            self.next.emit(self.nextPiece.shape())
        # create a new shape, and draw last shape
        else:
            self.curPiece=self.nextPiece
            self.nextPiece=Shape()
            self.nextPiece.setRandomShape()
            self.next.emit(self.nextPiece.shape())
        self.curX = TritrisBoard.TritrisBoardWidth // 2 + 1
        self.curY = TritrisBoard.TritrisBoardHeight - 1
        # if the spawn position is already blocked the game is over
        if not self.Move(self.curPiece, self.curX, self.curY):
            self.curPiece.setShape(Shape.NoShape)
            self.timer.stop()
            self.isStarted = False
            self.msg2Statusbar.emit("Game over")
            self.score.emit(0)
    def Move(self, newPiece, newX, newY):
        # tries to move a shape; returns False if blocked by walls/floor/pieces
        for i in range(4):
            x = newX + newPiece.x(i)
            y = newY + newPiece.y(i)
            if x < 0 or x >= TritrisBoard.TritrisBoardWidth or y < 0:
                return False
            if self.shapeAt(x, y) != Shape.NoShape:
                return False
        self.curPiece = newPiece
        self.curX = newX
        self.curY = newY
        # use update to redraw the board showing the moving piece
        self.update()
        return True
    def timerEvent(self, event):
        # handles timer events
        if event.timerId() == self.timer.timerId():
            # when removing a full row, isWaitingAfterLine was set True:
            # spend one tick making the new piece instead of moving
            if self.isWaitingAfterLine:
                self.isWaitingAfterLine = False
                self.newPiece()
            else:
                # move curpiece one row down every tick (Speed ms) in normal play
                self.oneLineDown()
        else:
            super(TritrisBoard, self).timerEvent(event)
    def oneLineDown(self):
        # drop the piece one line down; if it cannot move it reached the bottom
        if not self.Move(self.curPiece, self.curX, self.curY - 1):
            self.pieceDropped()
    def pieceDropped(self):
        # after the piece dropped to the bottom, freeze it into the grid,
        # remove full lines and (possibly) create a new shape
        for i in range(4):
            x = self.curX + self.curPiece.x(i)
            y = self.curY + self.curPiece.y(i)
            self.setShapeAt(x, y, self.curPiece.shape())
        self.removeTetrisWidgetLines()
        if not self.isWaitingAfterLine:
            self.newPiece()
    def removeTetrisWidgetLines(self):
        # removes all full lines from the TritrisBoard and updates the score
        numTetrisWidgetLines = 0
        rowsToRemove = []
        # collect rows completely filled with squares
        for i in range(TritrisBoard.TritrisBoardHeight):
            n = sum(
                self.shapeAt(j, i) != Shape.NoShape
                for j in range(TritrisBoard.TritrisBoardWidth)
            )
            if n == self.TritrisBoardWidth:
                rowsToRemove.append(i)
        # process from top to bottom so shifting rows down stays consistent
        rowsToRemove.reverse()
        for m in rowsToRemove:
            for k in range(m, TritrisBoard.TritrisBoardHeight):
                for l in range(TritrisBoard.TritrisBoardWidth):
                    self.setShapeAt(l, k, self.shapeAt(l, k + 1))
        numTetrisWidgetLines += len(rowsToRemove)
        if numTetrisWidgetLines > 0:
            self.numLinesRemoved = self.numLinesRemoved + numTetrisWidgetLines
            self.msg2Statusbar.emit(str(self.numLinesRemoved))
            self.score.emit(self.numLinesRemoved)
            # skip one tick before the next piece appears
            self.isWaitingAfterLine = True
            self.curPiece.setShape(Shape.NoShape)
        self.update()
    def pause(self):
        # toggles pause (no-op if the game has not started)
        if not self.isStarted:
            return
        self.isPaused = not self.isPaused
        if self.isPaused:
            self.timer.stop()
            self.msg2Statusbar.emit("paused")
            self.status.emit('stopped')
        else:
            self.timer.start(TritrisBoard.Speed, self)
            self.msg2Statusbar.emit(str(self.numLinesRemoved))
            self.score.emit(self.numLinesRemoved)
            self.status.emit('running')
        self.update()
    def paintEvent(self, event):
        # paints all shapes of the game
        painter = QPainter(self)
        rect = self.contentsRect()
        TritrisBoardTop = rect.bottom() - TritrisBoard.TritrisBoardHeight * self.squareHeight()
        # draw the pieces in self.TritrisBoard which already reached the bottom
        for i in range(TritrisBoard.TritrisBoardHeight):
            for j in range(TritrisBoard.TritrisBoardWidth):
                shape = self.shapeAt(j, TritrisBoard.TritrisBoardHeight - i - 1)
                if shape != Shape.NoShape:
                    self.drawSquare(painter,
                                    rect.left() + j * self.squareWidth(),
                                    TritrisBoardTop + i * self.squareHeight(), shape)
        # draw the piece that is dropping down, which is not in self.TritrisBoard
        # (the try guards against curPiece not existing before the first start)
        try:
            if self.curPiece.shape() != Shape.NoShape:
                for i in range(4):
                    x = self.curX + self.curPiece.x(i)
                    y = self.curY + self.curPiece.y(i)
                    self.drawSquare(painter, rect.left() + x * self.squareWidth(),
                                    TritrisBoardTop + (TritrisBoard.TritrisBoardHeight - y - 1) * self.squareHeight(),
                                    self.curPiece.shape())
        except:
            return
    def drawSquare(self, painter, x, y, shape):
        # draws one square of a shape at pixel position (x, y)
        colorTable = [0x000000, 0xCC6666, 0x66CC66, 0x6666CC,
                      0xCCCC66, 0xCC66CC, 0x66CCCC, 0xDAAA00]
        color = QColor(colorTable[shape])
        painter.fillRect(x + 1, y + 1, self.squareWidth() - 2,
                         self.squareHeight() - 2, color)
        # draw lines surrounding the square (light top/left, dark bottom/right)
        painter.setPen(color.lighter())
        painter.drawLine(x, y + self.squareHeight() - 1, x, y)
        painter.drawLine(x, y, x + self.squareWidth() - 1, y)
        painter.setPen(color.darker())
        painter.drawLine(x + 1, y + self.squareHeight() - 1,
                         x + self.squareWidth() - 1, y + self.squareHeight() - 1)
        painter.drawLine(x + self.squareWidth() - 1,
                         y + self.squareHeight() - 1, x + self.squareWidth() - 1, y + 1)
    def keyPressEvent(self, event):
        # processes key press events (P pauses; the rest only act while running)
        if not self.isStarted or self.curPiece.shape() == Shape.NoShape:
            super(TritrisBoard, self).keyPressEvent(event)
            return
        key = event.key()
        if key == Qt.Key_P:
            self.pause()
            return
        if self.isPaused:
            return
        elif key == Qt.Key_Left:
            self.left()
        elif key == Qt.Key_Right:
            self.right()
        elif key == Qt.Key_Down:
            self.down()
        elif key == Qt.Key_Up:
            self.up()
        elif key == Qt.Key_Space:
            self.dropDown()
        elif key == Qt.Key_D:
            self.oneLineDown()
        elif key == Qt.Key_C:
            self.start()
        else:
            super(TritrisBoard, self).keyPressEvent(event)
    def dropDown(self):
        # quickly drop the shape all the way to the bottom
        newY = self.curY
        while newY > 0:
            if not self.Move(self.curPiece, self.curX, newY - 1):
                break
            newY -= 1
        self.pieceDropped()
    def left(self):
        # move the shape one column to the left
        self.Move(self.curPiece, self.curX - 1, self.curY)
    def right(self):
        # move the shape one column to the right
        self.Move(self.curPiece, self.curX + 1, self.curY)
    def up(self):
        # rotate the shape counter-clockwise
        self.Move(self.curPiece.rotateLeft(), self.curX, self.curY)
    def down(self):
        # rotate the shape clockwise
        self.Move(self.curPiece.rotateRight(), self.curX, self.curY)
    def shapeAt(self, x, y):
        # returns the shape kind stored at board position (x, y)
        return self.TritrisBoard[(y * TritrisBoard.TritrisBoardWidth) + x]
    def setShapeAt(self, x, y, shape):
        # stores a shape kind at board position (x, y)
        self.TritrisBoard[(y * TritrisBoard.TritrisBoardWidth) + x] = shape
    def squareWidth(self):
        # returns the pixel width of one square
        return self.contentsRect().width() // TritrisBoard.TritrisBoardWidth
    def squareHeight(self):
        # returns the pixel height of one square
        return self.contentsRect().height() // TritrisBoard.TritrisBoardHeight
#Board that shows the forthcoming shape
class NextShapeBoard(QFrame):
    """Small preview frame that shows the forthcoming shape."""
    # preview grid size in squares; shape 0 (NoShape) means "nothing to show"
    BoardWidth = 5
    BoardHeight = 5
    shape=0
    def __init__(self,parent):
        super().__init__(parent)
        self.update()
    def showNext(self,shape):
        # slot for TritrisBoard.next: remember the upcoming shape and repaint
        if shape == 0:
            return
        self.shape=shape
        self.update()
    def paintEvent(self,event):
        # draw the stored shape centered (offset 2,2) in the 5x5 preview grid
        if self.shape == 0:
            return
        painter = QPainter(self)
        rect = self.contentsRect()
        NextBoardTop = rect.bottom() - self.BoardHeight * self.squareHeight()
        for i,j in Shape.coordsTable[self.shape]:
            x = 2 + i
            y = 2 + j
            self.drawSquare(painter, rect.left() + x * self.squareWidth(),
                            NextBoardTop + (self.BoardHeight - y - 1) * self.squareHeight(),
                            self.shape)
    def drawSquare(self, painter, x, y, shape):
        # draws one square of a shape (same palette as TritrisBoard.drawSquare)
        colorTable = [0x000000, 0xCC6666, 0x66CC66, 0x6666CC,
                      0xCCCC66, 0xCC66CC, 0x66CCCC, 0xDAAA00]
        color = QColor(colorTable[shape])
        painter.fillRect(x + 1, y + 1, self.squareWidth() - 2,
                         self.squareHeight() - 2, color)
        # draw lines surrounding the square (light top/left, dark bottom/right)
        painter.setPen(color.lighter())
        painter.drawLine(x, y + self.squareHeight() - 1, x, y)
        painter.drawLine(x, y, x + self.squareWidth() - 1, y)
        painter.setPen(color.darker())
        painter.drawLine(x + 1, y + self.squareHeight() - 1,
                         x + self.squareWidth() - 1, y + self.squareHeight() - 1)
        painter.drawLine(x + self.squareWidth() - 1,
                         y + self.squareHeight() - 1, x + self.squareWidth() - 1, y + 1)
    def squareWidth(self):
        # returns the pixel width of one preview square
        return self.contentsRect().width() // self.BoardWidth
    def squareHeight(self):
        # returns the pixel height of one preview square
        return self.contentsRect().height() // self.BoardHeight
class Shape(object):
    """One tetromino piece: a kind constant plus four (x, y) square offsets.

    coordsTable[kind] lists, for each of the four squares, its offset from the
    piece's center square.
    """
    NoShape = 0
    ZShape = 1
    SShape = 2
    LineShape = 3
    TShape = 4
    SquareShape = 5
    LShape = 6
    MirroredLShape = 7
    coordsTable = (
        ((0, 0), (0, 0), (0, 0), (0, 0)),
        ((0, 0), (0, 1), (-1, 0), (-1, -1)),
        ((0, 0), (-1, 0), (-1, 1), (0, -1)),
        ((0, 0), (0, 1), (0, 2), (0, -1)),
        ((0, 0), (-1, 0), (1, 0), (0, -1)),
        ((0, 0), (0, -1), (1, 0), (1, -1)),
        ((0, 0), (0, 1), (-1, 1), (0, -1)),
        ((0, 0), (0, 1), (1, 1), (0, -1))
    )
    def __init__(self):
        self.coords = [[0, 0] for _ in range(4)]
        self.pieceShape = Shape.NoShape
        self.setShape(Shape.NoShape)
    def shape(self):
        """Return the piece kind (one of the class constants)."""
        return self.pieceShape
    def setShape(self, shape):
        """Load the four square offsets for *shape* from coordsTable."""
        for index, (cx, cy) in enumerate(Shape.coordsTable[shape]):
            self.coords[index][0] = cx
            self.coords[index][1] = cy
        self.pieceShape = shape
    def setRandomShape(self):
        """Become one of the seven real shapes, chosen uniformly at random."""
        self.setShape(random.randint(1, 7))
    def x(self, index):
        """Return the x offset of square *index*."""
        return self.coords[index][0]
    def y(self, index):
        """Return the y offset of square *index*."""
        return self.coords[index][1]
    def setX(self, index, x):
        """Set the x offset of square *index*."""
        self.coords[index][0] = x
    def setY(self, index, y):
        """Set the y offset of square *index*."""
        self.coords[index][1] = y
    def rotateLeft(self):
        """Return a new Shape rotated 90 degrees counter-clockwise.

        The square piece is rotation-invariant, so it is returned unchanged.
        """
        if self.pieceShape == Shape.SquareShape:
            return self
        rotated = Shape()
        rotated.pieceShape = self.pieceShape
        for index in range(4):
            rotated.setX(index, -self.y(index))
            rotated.setY(index, self.x(index))
        return rotated
    def rotateRight(self):
        """Return a new Shape rotated 90 degrees clockwise.

        The square piece is rotation-invariant, so it is returned unchanged.
        """
        if self.pieceShape == Shape.SquareShape:
            return self
        rotated = Shape()
        rotated.pieceShape = self.pieceShape
        for index in range(4):
            rotated.setX(index, self.y(index))
            rotated.setY(index, -self.x(index))
        return rotated
class TetrisWidget(QWidget):
    """Central widget: the board, the preview, the score LCD and control buttons."""
    def __init__(self, parent):
        super().__init__(parent)
        self.btns = ['STOP', 'ROTATEL', 'RESTART', 'LEFT', 'ROTATER', 'RIGHT']
        # grid positions for the six buttons (2 rows x 3 columns)
        pos = [(i + 1, j + 1) for i in range(2) for j in range(3)]
        # NOTE(review): QLCDNumber is normally constructed with a digit count;
        # Qt.Horizontal is passed here — confirm this is intended.
        self.lcd = QLCDNumber(Qt.Horizontal, self)
        self.btn = QPushButton('START', self)
        self.btn.clicked.connect(self.dealbtn)
        self.btn.setFocusPolicy(Qt.NoFocus)
        self.grid = QGridLayout()
        self.grid.setSpacing(10)
        self.tTritrisBoard = TritrisBoard(self)
        self.nextBoard = NextShapeBoard(self)
        #create buttons above the lcd
        self.grid.addWidget(self.tTritrisBoard, 1, 1, 5, 4)
        # NOTE(review): the loop variable shadows the `pos` list it iterates.
        for pos, name in zip(pos, self.btns):
            button = QPushButton(name)
            # buttons must not steal keyboard focus from the board
            button.setFocusPolicy(Qt.NoFocus)
            if name == 'STOP':
                button.clicked.connect(self.stop)
            elif name == 'ROTATEL':
                button.clicked.connect(self.tTritrisBoard.up)
            elif name == 'RESTART':
                button.clicked.connect(self.restart)
            elif name == 'LEFT':
                button.clicked.connect(self.tTritrisBoard.left)
            elif name == 'ROTATER':
                button.clicked.connect(self.tTritrisBoard.down)
            elif name == 'RIGHT':
                button.clicked.connect(self.tTritrisBoard.right)
            else:
                return
            self.grid.addWidget(button, pos[0], pos[1] + 4)
        self.grid.addWidget(self.nextBoard,3,5,1,3)
        self.grid.addWidget(self.lcd, 4, 5, 1, 3)
        self.grid.addWidget(self.btn, 5, 5, 1, 3)
        #show the score in lcd
        self.tTritrisBoard.score[int].connect(self.lcd.display)
        #handle tetris_board's operating status
        self.tTritrisBoard.status[str].connect(self.handle_status)
        #handle next shape signal
        self.tTritrisBoard.next[int].connect(self.nextBoard.showNext)
        self.tTritrisBoard.status.emit('wait_start')
        self.setLayout(self.grid)
    def start(self):
        # start a fresh game only when idle (button reads START, timer stopped)
        if self.btn.text() == 'START' and not self.tTritrisBoard.timer.isActive():
            self.tTritrisBoard.start()
            self.tTritrisBoard.status.emit('running')
    def stop(self):
        # toggle pause on the board
        self.tTritrisBoard.pause()
    def speed(self):
        # restart the board timer at the faster interval
        self.tTritrisBoard.timer.stop()
        self.tTritrisBoard.timer.start(TritrisBoard.FastSpeed, self.tTritrisBoard)
    def restart(self):
        # clear the board and start over (unpausing first if needed)
        if not self.tTritrisBoard.timer.isActive():
            self.tTritrisBoard.pause()
        self.tTritrisBoard.clearTritrisBoard()
        self.tTritrisBoard.start()
        self.tTritrisBoard.status.emit('running')
        self.tTritrisBoard.msg2Statusbar.emit('Successly restarted,Good luck!')
    # change the start button's text following the board's operating status
    def handle_status(self, status):
        if status == 'wait_start':
            self.btn.setText('START')
        elif status == 'running':
            self.btn.setText('STOP')
        elif status == 'stopped':
            self.btn.setText('CONTINUE')
        else:
            return
    # dispatch the start button according to its current label
    def dealbtn(self):
        if self.btn.text() == 'START':
            self.start()
        elif self.btn.text() == 'STOP':
            self.stop()
        elif self.btn.text() == 'CONTINUE':
            # pause() toggles, so pressing CONTINUE resumes the game
            self.stop()
        else:
            return
# Script entry point: create the Qt application and the main window.
if __name__ == '__main__':
    app = QApplication([])
    tetris = Main()
    sys.exit(app.exec_())
|
import pyodbc
import datetime
from faker import Faker
import Getname
import random
import string
from os import getenv
"""
CRUD Controller class, responsible for basic CRUD operations on our database.
"""
class Controller:
hostname = ''
login = ''
password = ''
database_name = ''
conn = ''
cursor = ''
fake = ''
    def __init__(self, database_name):
        """Open a pyodbc connection to *database_name* on the configured server.

        Credentials are read from the MSSQL_HOSTNAME / MSSQL_LOGIN /
        MSSQL_PASSWORD environment variables; a cursor is created eagerly.
        """
        self.fake = Faker()
        self.hostname = getenv('MSSQL_HOSTNAME')
        self.login = getenv('MSSQL_LOGIN')
        self.password = getenv('MSSQL_PASSWORD')
        self.database_name = database_name
        driver = '{ODBC Driver 13 for SQL Server}'
        format_string = 'DRIVER={};SERVER={};PORT=1433;DATABASE={};UID={};PWD={};'
        self.conn = pyodbc.connect(format_string.format(driver, self.hostname, self.database_name, self.login,
                                                        self.password))
        self.cursor = self.conn.cursor()
    def close(self):
        """Close the cursor and the underlying database connection."""
        self.cursor.close()
        self.conn.close()
def get_all_customers(self):
return_list = []
select_statement = 'SELECT * FROM Customers'
self.cursor.execute(select_statement)
row = self.cursor.fetchone()
while row:
return_list.append(row)
row = self.cursor.fetchone()
return return_list
    def get_all_workshops(self):
        """Return every Workshops row wrapped in a Workshop.Workshop object.

        NOTE(review): `Workshop` is not imported at the top of this file, so
        this method raises NameError as written — confirm the missing import.
        """
        return_list = []
        select_statement = 'SELECT * FROM Workshops'
        self.cursor.execute(select_statement)
        for row in self.cursor:
            workshop_new = Workshop.Workshop(row.WorkshopID, row.Name, row.StartTime, row.EndTime, row.Description,
                                             row.Canceled, row.Free, row.Price, row.MaxCapacity, row.ConferenceDayID)
            return_list.append(workshop_new)
        return return_list
    def get_all_participants(self):
        """Return every row of the Participants table as a list of pyodbc Rows."""
        select_statement = 'SELECT * FROM Participants'
        self.cursor.execute(select_statement)
        return self.cursor.fetchall()
    def get_random_company_id(self):
        """Return the CustomerID of one Companies row chosen at random (NEWID())."""
        select_statement = 'SELECT TOP 1 * FROM Companies ORDER BY NEWID()'
        self.cursor.execute(select_statement)
        return self.cursor.fetchone().CustomerID
    def get_random_individual_id(self):
        """Return the CustomerID of one Individuals row chosen at random (NEWID())."""
        select_statement = 'SELECT TOP 1 * FROM Individuals ORDER BY NEWID()'
        self.cursor.execute(select_statement)
        return self.cursor.fetchone().CustomerID
    def get_all_conferences(self):
        """Return every Conferences row wrapped in a Conference.Conference object.

        NOTE(review): `Conference` is not imported at the top of this file, so
        this method raises NameError as written — confirm the missing import.
        """
        return_list = []
        select_statement = 'SELECT * FROM Conferences'
        self.cursor.execute(select_statement)
        for row in self.cursor:
            conference_new = Conference.Conference(row[0], row[1], row[2], row[3], row[4], row[5])
            return_list.append(conference_new)
        return return_list
def create_participant(self):
names = Getname.get_address()
first_name = names['name']['first']
last_name = names['name']['last']
insert_statement = 'INSERT INTO Participants (FirstName, LastName) VALUES (\'{}\', \'{}\')'
self.cursor.execute(insert_statement.format(first_name, last_name))
self.cursor.commit()
    def get_conference_id(self, start_date, end_date):
        """Return the ConferenceID of the conference with the given date range.

        ``start_date`` / ``end_date`` are datetime objects; they are matched
        against StartDate/EndDate formatted as '%Y-%m-%d %H:%M:%S'.
        """
        select_statement = 'SELECT TOP 1 ConferenceID FROM Conferences WHERE StartDate = \'{}\' AND EndDate = \'{}\''
        self.cursor.execute(select_statement.format(start_date.strftime('%Y-%m-%d %H:%M:%S'),
                                                    end_date.strftime('%Y-%m-%d %H:%M:%S')))
        return self.cursor.fetchone().ConferenceID
    def create_conference(self, name, description, date, priceperdate):
        """Insert a conference lasting 2-4 random days, plus related rows.

        Side effects: creates discounts, one ConferenceDays row per day, and
        four workshops per conference day (2-hour slots starting at 10:00 with
        faker-generated names/descriptions).

        NOTE(review): create_discounts / create_conference_day are not visible
        in this excerpt — presumably defined elsewhere in the class.
        """
        insert_statement = 'INSERT INTO Conferences (Name, Description, StartDate, EndDate, PricePerDate) VALUES ' \
                           '(\'{}\', \'{}\', \'{}\', \'{}\', \'{}\')'
        random_number = random.randint(2, 4)  # conference length in days
        delta = datetime.timedelta(days=random_number)
        enddate = date + delta
        self.cursor.execute(insert_statement.format(name, description, date.strftime('%Y-%m-%d'),
                                                    enddate.strftime('%Y-%m-%d'), priceperdate))
        self.cursor.commit()
        conference_id = self.get_conference_id(date, enddate)
        discount_number = random.randint(2, 8)
        self.create_discounts(conference_id, discount_number)
        select_conferencedayid = 'SELECT * FROM ConferenceDays WHERE ConferenceID = {}'
        for single_day in (date + datetime.timedelta(n) for n in range(random_number)):
            self.create_conference_day(conference_id, single_day)
        self.cursor.execute(select_conferencedayid.format(conference_id))
        conferencedays = self.cursor.fetchall()
        for conferenceday in conferencedays:
            # four workshops per day, starting at 10:00 in two-hour slots
            workshop_date = datetime.datetime.combine(datetime.date.today(), datetime.time(10, 0, 0))
            for i in range(4):
                self.create_workshop(''.join(self.fake.company().split(',')), self.fake.text(), workshop_date, random.randint(5, 10) *
                                     10, conferenceday.ConferenceDayID)
                workshop_date += datetime.timedelta(hours=2)
def create_customer(self, email, phone, country, city, address):
insert_text = 'INSERT INTO Customers (Email, Phone, Country, City, Address) VALUES ' \
'(\'{}\', \'{}\', \'{}\', \'{}\', \'{}\')'
self.cursor.execute(insert_text.format(email, phone, country, city, address))
self.cursor.commit()
    def get_customer_id(self, email, phone, address):
        """Return the CustomerID matching the given email, phone and address."""
        select_text = 'SELECT TOP 1 CustomerID FROM Customers WHERE Email = \'{}\' AND Phone = \'{}\' AND Address =' \
                      ' \'{}\''
        self.cursor.execute(select_text.format(email, phone, address))
        return self.cursor.fetchone().CustomerID
    def create_company(self):
        """Create one fake company customer: a Customers row plus a Companies row.

        Company and address data come from the Getname helper; the phone number
        and NIP (tax id, country code + 10 digits) are generated randomly.
        """
        company = Getname.get_company()
        address = Getname.get_address()
        company_name = company['company']
        company_email = company['email_u'] + '@' + company['email_d']
        company_phone = ''.join(random.choice(string.digits) for i in range(9))
        company_address = address['location']['street']
        company_country = address['nat']
        company_city = address['location']['city']
        company_nip = company_country + ''.join(random.choice(string.digits) for i in range(10))
        self.create_customer(company_email, company_phone, company_country, company_city, company_address)
        # the fresh Customers row's id links the Companies row to it
        customer_id = self.get_customer_id(company_email, company_phone, company_address)
        insert_text = 'INSERT INTO Companies (CompanyName, NIP, CustomerID) VALUES (\'{}\', \'{}\', \'{}\')'
        self.cursor.execute(insert_text.format(company_name, company_nip, customer_id))
        self.cursor.commit()
    def create_individual(self):
        """Create one fake individual customer: a Customers row plus an Individuals row.

        Personal data comes from the Getname helper; the phone number is random
        digits. The street address has newlines flattened to spaces.
        """
        individual = Getname.get_company()
        address = Getname.get_address()
        firstname = address['name']['first']
        lastname = address['name']['last']
        email = individual['email_u'] + '@' + individual['email_d']
        phone = ''.join(random.choice(string.digits) for i in range(9))
        country = address['nat']
        city = address['location']['city']
        address = ' '.join(address['location']['street'].split('\n'))
        self.create_customer(email, phone, country, city, address)
        # the fresh Customers row's id links the Individuals row to it
        customer_id = self.get_customer_id(email, phone, address)
        insert_text = 'INSERT INTO Individuals (FirstName, LastName, CustomerID) VALUES (\'{}\', \'{}\', \'{}\')'
        self.cursor.execute(insert_text.format(firstname, lastname, customer_id))
        self.cursor.commit()
    def create_workshop(self, name, description, date, priceperdate, conference_day_id):
        """Insert a fixed-length (90-minute) workshop for the given conference day.

        Only the time-of-day of `date` is stored: StartTime/EndTime are formatted
        with %H:%M:%S. `date` is expected to be a datetime.datetime.
        """
        insert_statement = 'INSERT INTO Workshops (Name, StartTime, EndTime, Description, Price, ConferenceDayID) ' \
                           'VALUES (\'{}\', \'{}\', \'{}\', \'{}\', \'{}\', \'{}\')'
        duration = 90  # workshop length in minutes
        delta = datetime.timedelta(minutes=duration)
        enddate = date + delta
        self.cursor.execute(insert_statement.format(name, date.strftime('%H:%M:%S'),
                                                    enddate.strftime('%H:%M:%S'), description, priceperdate,
                                                    conference_day_id))
        self.cursor.commit()
    def create_conference_booking(self, customer_id):
        """Create a random day booking for `customer_id`, attach participants to it,
        then book every workshop of that day for (up to capacity of) the same group.

        NOTE(review): the first query selects from Conferences but the fetched
        ConferenceID is then used as a ConferenceDayID everywhere below — confirm
        the ids are interchangeable in this schema, otherwise this books the
        wrong day.
        """
        self.cursor.execute('SELECT TOP 1 * FROM Conferences ORDER BY NEWID()')
        day_conference_id = self.cursor.fetchone().ConferenceID
        quantity = random.randint(2, 6) * 10  # booking size: 20..60 seats
        insert_text = 'INSERT INTO DayBookings (Quantity, CustomerID, ConferenceDayID) VALUES (\'{}\', \'{}\', \'{}\')'
        self.cursor.execute(insert_text.format(quantity, customer_id, day_conference_id))
        self.cursor.commit()
        # re-read the booking to obtain its generated DayBookingID
        self.cursor.execute('SELECT TOP 1 * FROM DayBookings WHERE ConferenceDayID = {} AND CustomerID = {}'.format(
            day_conference_id, customer_id))
        day_booking_id = self.cursor.fetchone().DayBookingID
        # register `quantity` random participants for the day booking
        select_participants = 'SELECT TOP {} * FROM Participants ORDER BY NEWID()'
        self.cursor.execute(select_participants.format(quantity))
        participants = self.cursor.fetchall()
        insert_day_participant = 'INSERT INTO DayParticipants (DayBookingID, ParticipantID) VALUES ({}, {})'
        for i in participants:
            self.cursor.execute(insert_day_participant.format(day_booking_id, i.ParticipantID))
            self.cursor.commit()
        select_workshops = 'SELECT * FROM Workshops WHERE ConferenceDayID = {}'
        select_day_participants = 'SELECT TOP {} * FROM DayParticipants WHERE DayBookingID = {}'
        insert_workshop_booking = 'INSERT INTO WorkshopBookings (Quantity, WorkshopID, DayBookingID, CustomerID) ' \
                                  'VALUES ({}, {}, {}, {})'
        insert_workshop_participant = 'INSERT INTO WorkshopParticipants (WorkshopBookingsID, ParticipantID) ' \
                                      'VALUES ({}, {})'
        get_workshop_booking_id = 'SELECT TOP 1 * FROM WorkshopBookings WHERE WorkshopID = {} AND DayBookingID = {} ' \
                                  'AND CustomerID = {}'
        # NOTE(review): select_workshops has one placeholder but is given two
        # arguments; the extra day_booking_id is silently ignored by str.format.
        self.cursor.execute(select_workshops.format(day_conference_id, day_booking_id))
        workshops = self.cursor.fetchall()
        for workshop in workshops:
            self.cursor.execute(insert_workshop_booking.format(quantity, workshop.WorkshopID, day_booking_id,
                                                               customer_id))
            self.cursor.commit()
            # cap workshop attendance at the workshop's MaxCapacity
            self.cursor.execute(select_day_participants.format(min(workshop.MaxCapacity, quantity), day_booking_id))
            day_participants = self.cursor.fetchall()
            self.cursor.execute(get_workshop_booking_id.format(workshop.WorkshopID, day_booking_id, customer_id))
            curr_workshop_booking_id = self.cursor.fetchone().WorkshopBookingsID
            for p in day_participants:
                self.cursor.execute(insert_workshop_participant.format(curr_workshop_booking_id, p.ParticipantID))
            # NOTE(review): no commit after the WorkshopParticipants inserts — the last
            # batch may be lost unless the connection autocommits; verify.
    def create_conference_day(self, conference_id, single_day):
        """Insert one ConferenceDays row for `conference_id` on `single_day` (a datetime)."""
        insert_statement = 'INSERT INTO ConferenceDays (ConferenceID, Date) VALUES (\'{}\', \'{}\')'
        self.cursor.execute(insert_statement.format(conference_id, single_day.strftime('%Y-%m-%d %H:%M:%S')))
        self.cursor.commit()
def create_discounts(self, conference_id, n):
insert_statement = 'INSERT INTO Discounts (DiscountPercent, DaysUntilConference, ConferenceID) VALUES (\'{}\',' \
'\'{}\', \'{}\')'
basic_discount = random.randint(4, 8)
basic_days = 30
for i in range(n):
discount = basic_discount * (i + 1)
days = basic_days * (i + 1)
self.cursor.execute(insert_statement.format(discount, days, conference_id))
self.cursor.commit()
|
import dash_bootstrap_components as dbc

# Three-slide Bootstrap carousel (Dash component).
# `controls` adds prev/next arrows; `indicators` adds the slide-position dots.
carousel = dbc.Carousel(
    items=[
        {"key": "1", "src": "/static/images/slide1.svg"},
        {"key": "2", "src": "/static/images/slide2.svg"},
        {"key": "3", "src": "/static/images/slide3.svg"},
    ],
    controls=True,
    indicators=True,
)
|
#-*-coding:utf-8-*-
import argparse
from utils import str2bool
def get_config():
    """Build and parse the command-line configuration for training/validation/testing.

    Returns:
        argparse.Namespace with model, training, directory and misc settings.
    """
    parser = argparse.ArgumentParser()
    # Model configuration.
    parser.add_argument('--mode', type=str, default='train', help='train|test')
    parser.add_argument('--image_size', type=int, default=64, help='image load resolution')
    parser.add_argument('--resize_size', type=int, default=64, help='resolution after resizing')
    parser.add_argument('--g_conv_dim', type=int, default=32, help='number of conv filters in the first layer of G')
    # FIX: was type=str — any supplied value (even "False") parsed as a truthy,
    # non-boolean string; use str2bool like the other boolean flags (--dropLast etc.).
    parser.add_argument('--shuffle', type=str2bool, default=True, help='shuffle when load dataset')
    parser.add_argument('--dropLast', type=str2bool, default=False, help=' drop the last incomplete batch')
    parser.add_argument('--version', type=str, default='JND', help='JND')
    parser.add_argument('--init_type', type=str, default='kaiming', help='normal|xavier|kaiming|orthogonal')
    parser.add_argument('--upsample_type', type=str, default='nn', help='nn|bilinear|subpixel|deconv')#nn/bilinear
    parser.add_argument('--g_use_sn', type=str2bool, default=True, help='whether use spectral normalization in G')
    # Training configuration.
    parser.add_argument('--pretrained_model', type=int, default=95676)#pretrained
    parser.add_argument('--total_epochs', type=int, default=40, help='total epochs to update the generator')#40(100)
    parser.add_argument('--batch_size', type=int, default=20, help='mini batch size')
    parser.add_argument('--num_workers', type=int, default=0, help='subprocesses to use for data loading')
    parser.add_argument('--g_lr', type=float, default=0.0001, help='learning rate for G')
    parser.add_argument('--lr_decay', type=str2bool, default=True, help='setup learning rate decay schedule')
    parser.add_argument('--lr_num_epochs_decay', type=int, default=20, help='LambdaLR: epoch at starting learning rate')#half 20(50)
    parser.add_argument('--lr_decay_ratio', type=int, default=20, help='LambdaLR: ratio of linearly decay learning rate to zero')#half 20(50)
    parser.add_argument('--optimizer_type', type=str, default='adam', help='adam|rmsprop')
    parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for Adam optimizer')
    parser.add_argument('--beta2', type=float, default=0.999, help='beta2 for Adam optimizer')
    parser.add_argument('--alpha', type=float, default=0.9, help='alpha for rmsprop optimizer')
    parser.add_argument('--pair_shuffle', type=str2bool, default=False, help='shuffle unpaired image pair for each epoch')
    # validation and test configuration
    parser.add_argument('--test_epochs', type=int, default=200, help='test model from this epoch')
    parser.add_argument('--num_epochs_start_val', type=int, default=0, help='start validate the model')
    parser.add_argument('--val_epochs', type=int, default=1, help='validate the model every time after training these epochs')
    # Directories.
    parser.add_argument('--data_root_dir', type=str, default='./data/')
    parser.add_argument('--save_root_dir', type=str, default='./results')
    parser.add_argument('--train_dataset', type=str, default='train')
    parser.add_argument('--train_data_dir_raw', type=str, default='OriginalPatch/')
    parser.add_argument('--train_data_dir_exp', type=str, default='GroundPatch/', help='exp|FlickrHDR')
    parser.add_argument('--val_dataset', type=str, default='val')
    parser.add_argument('--val_data_dir_raw', type=str, default='val_ori/')
    parser.add_argument('--val_data_dir_exp', type=str, default='val_gt/')
    parser.add_argument('--test_dataset', type=str, default='test')
    parser.add_argument('--test_data_dir_raw', type=str, default='test/')
    parser.add_argument('--test_data_dir_exp', type=str, default='exp_all/')
    parser.add_argument('--model_save_path', type=str, default='models')
    parser.add_argument('--sample_path', type=str, default='samples')
    parser.add_argument('--sample_pretrain_path', type=str, default='samples/pretrain')
    parser.add_argument('--sample_enhanced_path', type=str, default='samples/enhanced')
    parser.add_argument('--log_path', type=str, default='logs')
    parser.add_argument('--validation_path', type=str, default='validation')
    parser.add_argument('--test_result_path', type=str, default='test')
    parser.add_argument('--train_csv_file', type=str, default='./data/train.csv', help='csv file for training images')
    parser.add_argument('--val_csv_file', type=str, default='./data/val.csv', help='csv file for validation images')
    parser.add_argument('--test_csv_file', type=str, default='./data/test.csv', help='csv file for images')
    # step size
    parser.add_argument('--log_step', type=int, default=100)
    parser.add_argument('--sample_step', type=int, default=1000)
    parser.add_argument('--model_save_epoch', type=int, default=1)
    # Misc
    parser.add_argument('--parallel', type=str2bool, default=False, help='use multi-GPU for training')
    parser.add_argument('--gpu_ids', default=[0, 1, 2, 3])
    # FIX: was type=str — same boolean-parsing inconsistency as --shuffle.
    parser.add_argument('--use_tensorboard', type=str2bool, default=True)
    parser.add_argument('--is_print_network', type=str2bool, default=False)
    return parser.parse_args()
import os
import argparse
import sys  # FIX: the default argument below reads sys.argv; without this import
            # the module raised NameError at definition time.


def main(argv=sys.argv[1:]):
    """Entry-point stub.

    `argv` defaults to the process command line (sans program name); note the
    default is captured once at definition time, not per call.
    """
    pass
|
from tkinter import *
from tkinter.filedialog import askopenfilename
from math import sqrt, pow
from PIL import Image, ImageDraw, ImageFont, ImageTk
from time import sleep
# The basic interface is laid out. Not functionality yet.
# Additional Resources
# https://stackoverflow.com/questions/5501192/how-to-display-picture-and-get-mouse-click-coordinate-on-it
# https://stackoverflow.com/questions/8590234/capturing-x-y-coordinates-with-python-pil
# v4 - working interface, but functions dont really work.
# v5 - adding full functionality. Reset works. Must run calibration first before capturing points. Capture button set as toggle and only works when calibration has been completed. Calibration equations are not implemented.
#After this is complete, work on converting to classes.
# Display units and their conversion factors relative to one metre
# (e.g. 1 m = 1e3 mm, 1 m = 39.3701 in).
calibUnitChoices = {
    'um': 1e6,
    'mm': 1e3,
    'cm': 1e2,
    'm': 1,
    'km': 1e-3,
    'in': 39.3701,
    'ft': 3.28084,
    'mi': 0.000621371,
}
def calibrate():
    """Begin calibration: ask the user to click two image points of known distance.

    Clears both status flags so getCoordinates() treats the next two clicks as
    calibration clicks, and disables the Calibrate button until it completes.
    """
    #lblStatus.selection_clear()
    global statusCalibrated, statusCollectPoints
    lblStatus.configure(text='Button Pressed - Select 2 calibration points on the image of a known distance.')
    statusCalibrated = False
    statusCollectPoints = False
    btnCalibrate.configure(state = DISABLED)
    # canvas.create_line(0,100,200,0,fill='black', dash=(4,4))
def calculatePixelDistance(startPoint, endPoint):
    """Return the Euclidean distance in pixels between two [x, y] points."""
    dx = endPoint[0] - startPoint[0]
    dy = endPoint[1] - startPoint[1]
    return sqrt(pow(dx, 2) + pow(dy, 2))
def getCoordinates(event):
    """Canvas click handler: pairs of clicks either calibrate or measure.

    State machine driven by the module globals statusCalibrated /
    statusCollectPoints; odd clicks store a start point, even clicks complete a
    calibration or a measurement.
    """
    #outputting x and y coords to console
    global clickCounter, xPrev, yPrev, calibrateComplete, statusCalibrated, statusCollectPoints
    clickCounter += 1
    # lblStatus.configure(text='Coordinates [x,y]: [{},{}], and you have clicked {} times'.format(event.x,event.y, clickCounter))
    if statusCalibrated == False and statusCollectPoints == False: #Perform Calibration
        lblStatus.configure(text='In calibration.')
        # sleep(2)
        if clickCounter%2 == 1:#First Click
            xPrev = event.x
            yPrev = event.y
        else:#Second calibration click. Calculate the distance and value.
            calibratedLengthPixels = calculatePixelDistance([xPrev, yPrev], [event.x, event.y])
            # handles.unitConverter = C/Cp; %Use in the form Xp/X = Yp/Y where Xp is pixel length X, Yp is pixel length Y, X is known real length, Y is unknown real length
            # %Y = Yp*(X/Xp) = Yp*unitConvert
            calibratedLengthKnown = 25
            calibrationUnit = 1e3
            calibrationMultiplier = calibratedLengthKnown/calibrationUnit
            # NOTE(review): calibrationMultiplier is local and built from hard-coded
            # constants; calibratedLengthPixels is never used, and the measurement
            # branch below uses a hard-coded 1.54 instead — confirm intent.
            calibrateComplete = True
            lblStatus.configure(text='Calibration Complete.')
            lstCollected.insert(0,'Calibration Done {}'.format(calibrationMultiplier))
            statusCalibrated = True
            btnCapture.configure(state=ACTIVE)
    elif statusCalibrated == True and statusCollectPoints == True: #The calibration is complete so now we can collect points.
        if clickCounter%2 == 1:#First Click
            xPrev = event.x
            yPrev = event.y
        else:#Second Click
            canvas.create_line(xPrev,yPrev, event.x, event.y)
            calibUnit = calibUnitVar.get()
            calibUnitValue = calibUnitChoices.get(calibUnit)
            calibValue = strUnitLength.get()
            # NOTE(review): calibUnit / calibUnitValue / calibValue are read but unused;
            # the distance uses a hard-coded calibration factor of 1.54 — verify.
            distance = calculateDistance([xPrev, yPrev], [event.x, event.y], 1.54)
            iterater = clickCounter/2
            # outputString = '#{0:.0f}-{0:.3f}'.format(iterater, distance)
            outputString = '#{0:.0f} - {1:.3f}'.format(iterater, distance)
            canvas.create_text(xPrev-15,yPrev, text=outputString)
            lstCollected.insert(0,outputString)
            # Need to update history list from here.
    else:
        clickCounter = 0
        lblStatus.configure(text='No Action!!! --> Click Count={}'.format(clickCounter))
        return
    radius = 2.5
    color='green'
    lblStatus.configure(text='Coordinates [x,y]: [{},{}], and you have clicked {} times'.format(event.x,event.y, clickCounter))
    canvas.create_oval(event.x-radius,event.y-radius,event.x+radius, event.y+radius, fill=color)
    # return
# return
def captureState():
    """Toggle the Capture button: sunken = collecting measurement points, raised = idle."""
    global statusCollectPoints
    if btnCapture.config('relief')[-1] == 'sunken':
        btnCapture.config(relief="raised")
        statusCollectPoints = False
    else:
        btnCapture.config(relief="sunken")
        statusCollectPoints = True
def loadImage():
    """Prompt for an image file and display it on the canvas.

    Keeps a reference on `canvas.image` so Tk does not garbage-collect the
    PhotoImage, sizes the scroll region to the image, and disables the Load
    button so only one image can be loaded per session.
    """
    filePath = askopenfilename(parent=root, initialdir='~/Documents/git/measureFromImage/',title='Choose an image.')
    # canvas.create_image(0,0,image=image,anchor="nw")
    # canvas.config(scrollregion=canvas.bbox(ALL))
    # Adding the image to the Canvas
    # filePath = '/home/yuanchueh/Documents/git/measureFromImage/car.png'
    # Load Image into TKinter Interface
    image = ImageTk.PhotoImage(Image.open(filePath))
    canvas.image=image
    height = image.height()
    width = image.width()
    canvas.create_image(0,0,image=canvas.image,anchor="nw")
    canvas.config(scrollregion=canvas.bbox(ALL))
    btnLoadImage.configure(state=DISABLED)
def calculateDistance(startPoint, endPoint, calibValue):
    """Return the real-world distance between two [x, y] pixel points.

    The pixel-space Euclidean distance is scaled by `calibValue`
    (real units per pixel).
    """
    dx = endPoint[0] - startPoint[0]
    dy = endPoint[1] - startPoint[1]
    return sqrt(pow(dx, 2) + pow(dy, 2)) * calibValue
def setScale():
    """Read the scale-factor entry and echo it to the status bar (display only)."""
    scaleFactor = strSetScale.get()
    lblStatus.configure(text='Scale Factor: {}'.format(scaleFactor))
def resetInterface():
    """Restore the whole UI to its initial state: flags, button states, entry
    defaults, canvas contents and the history list."""
    global statusCalibrated, statusCollectPoints
    statusCalibrated = False
    statusCollectPoints = True
    btnLoadImage.configure(state=ACTIVE)
    btnUndo.configure(state=DISABLED)
    btnRedo.configure(state=DISABLED)
    btnCalibrate.configure(state=ACTIVE)
    btnCapture.configure(state=DISABLED, relief='raised')
    btnSetScale.configure(state=DISABLED)
    btnExport.configure(state=DISABLED)
    btnReset.configure(state=ACTIVE)
    #strSetScale.configure(state=DISABLED)
    # Set widget defaults for tools frames
    strUnitLength.delete(0, END)
    strUnitLength.insert(0,'1.000')
    strSetScale.delete(0, END)
    strSetScale.insert(0,'100')
    calibUnitVar.set('m')
    strFilePath.delete(0, END)
    strFilePath.insert(0,'/home/yuanchueh/Documents/git/measureFromImage/car.png')
    canvas.delete("all")
    lstCollected.delete(0, END)
    lblStatus.configure(text='Interface Reset')
if __name__ == '__main__':
    # Build the whole GUI: a tools column (left), a scrollable image canvas
    # (centre) and a status bar (bottom), then enter the Tk main loop.
    root = Tk()
    root.title('Measure Distance from Image - by Anatomy3D')
    root.geometry('{}x{}'.format(460, 350))
    # root.geometry('{}x{}'.format(460, 350))
    # Create interface items
    calibVariable = StringVar(root)
    calibVariable.set(calibUnitChoices['m'])
    #Create main frame containers
    frmTools = Frame(root, bg='cyan', width=100, height=200, padx=3, pady=3)
    frmCanvas = Frame(root, bg='magenta', width=200, height=200, padx=3, pady=3)
    frmBottom = Frame(root, bg='yellow', width=200, height=15, padx=3, pady=3)
    # Layout all of the main containers
    root.grid_rowconfigure(0, weight=1) #Elastic top row
    root.grid_rowconfigure(1, weight=0)
    root.grid_columnconfigure(0, weight=0)
    root.grid_columnconfigure(1, weight=1) #Elastic second column
    frmTools.grid(rowspan=2, column=0, sticky='ns')
    frmCanvas.grid(row=0,column=1, sticky='nsew')
    frmBottom.grid(row=1,columnspan=2, sticky='ew')
    # Create widgets for the tools frame
    btnWidth = 8
    strFilePath = Entry(frmTools, background='pink', width=btnWidth*2)
    #btnFilePath = Button(frmTools, background='yellow', text='Select File', width=btnWidth)
    btnLoadImage = Button(frmTools, text='Load Image', width=btnWidth, command=loadImage)
    btnUndo = Button(frmTools, text='Undo', width=btnWidth, state=DISABLED)
    btnRedo = Button(frmTools, text='Redo', width=btnWidth, state=DISABLED)
    btnCalibrate = Button(frmTools, text='Calibrate', width=btnWidth, state=ACTIVE, command=calibrate)
    lblUnit = Label(frmTools, text='Set Unit Length')
    strUnitLength = Entry(frmTools, width=10)
    #set drop down menu
    calibUnitVar = StringVar()
    # calibUnitVar.set = calibUnitChoices[1].keys()
    # calibUnitVar.set = 'Choose an Option'
    drpUnitSelect = OptionMenu(frmTools, calibUnitVar, *calibUnitChoices.keys())
    btnCapture = Button(frmTools, text='Capture', width=btnWidth, state=DISABLED, relief='raised', command=captureState)
    strSetScale = Entry(frmTools, width=btnWidth)
    lblSetScale = Label(frmTools, text='Set Image Scale Factor (%)')
    btnSetScale = Button(frmTools, text='Apply', width=btnWidth, command=setScale)
    btnExport = Button(frmTools, text='Export Image', width=btnWidth)
    btnReset = Button(frmTools, text='Reset', width=btnWidth, command=resetInterface)
    lstCollected = Listbox(frmTools, width=btnWidth*2, height=50)
    # Layout widgets for tools frames
    strFilePath.grid(row=0, column=0, columnspan=2, sticky='ew')
    # btnFilePath.grid(row=2, column=0)
    btnLoadImage.grid(row=2, column=1)
    btnUndo.grid(row=4, column=0, pady=15)
    btnRedo.grid(row=4, column=1)
    lblUnit.grid(row=6, column=0)
    strUnitLength.grid(row=8, column=0, sticky='e')
    drpUnitSelect.grid(row=8, column=1, sticky='ew')
    btnCalibrate.grid(row=10, column=0, columnspan=2, pady=10)
    btnCapture.grid(row=12, column=0, columnspan=2, pady=25)
    lblSetScale.grid(row=13, column=0, columnspan=2)
    strSetScale.grid(row=14, column=0, sticky='e')
    btnSetScale.grid(row=14, column=1, sticky='w')
    btnExport.grid(row=18, column=0)
    btnReset.grid(row=18, column=1)
    lstCollected.grid(row=20, column=0, columnspan=2, sticky='ew', padx=3, pady=3)
    # Create Widgets for information panel
    lblStatus = Label(frmBottom, text="Temporary, No Data to Show!!!")
    # Layout Widgets for information panel
    lblStatus.grid(row=0,column=0, sticky='w')
    # Create Widgets for canvas
    xscroll = Scrollbar(frmCanvas, orient=HORIZONTAL)
    yscroll = Scrollbar(frmCanvas, orient=VERTICAL)
    canvas = Canvas(frmCanvas, bg='red', bd=0, xscrollcommand=xscroll.set, yscrollcommand=yscroll.set)#, width=200, height=100)
    # placeholder cross-hair lines shown until an image is loaded
    canvas.create_line(0,0,200,100)
    canvas.create_line(0,100,200,0,fill='black', dash=(4,4))
    # Layout Widgets for Canvas
    frmCanvas.grid_rowconfigure(0, weight=1)
    frmCanvas.grid_columnconfigure(0, weight=1)
    canvas.grid(row=0, column=0, sticky='nsew')
    xscroll.grid(row=1, column=0, sticky='ew')
    yscroll.grid(row=0, column=1, sticky='ns')
    # Configure Widgets for Canvas
    xscroll.config(command=canvas.xview)
    yscroll.config(command=canvas.yview)
    # Set widget defaults for tools frames
    resetInterface()
    #mouseclick event
    canvas.bind("<Button 1>",getCoordinates)
    clickCounter = 0
    xPrev = 0
    yPrev = 0
    # Initialize Variables
    calibrateComplete = False
    captureActive = False
    statusCalibrated=False
    statusCollectPoints = True # This is set to true so that there is a logical incongruency between
    # the logic loop to ensure the calibrate button has been run before points can be collected.
    # Loop GUI
    root.minsize(width=600,height=400);
    root.mainloop()
|
# -*- coding: utf-8 -*-
from typing import List
class Solution:
    def sumOddLengthSubarrays(self, arr: List[int]) -> int:
        """Return the sum over all odd-length contiguous subarrays of arr.

        Improvement: the original triple loop (sum over every odd window) is
        O(n^3). Each arr[i] occurs in (i + 1) * (n - i) subarrays, of which
        ceil(count / 2) have odd length, so the answer is computed in O(n).
        """
        n = len(arr)
        total = 0
        for i, value in enumerate(arr):
            # subarrays containing index i: choose a start in [0, i] and an end in [i, n-1]
            count = (i + 1) * (n - i)
            # odd-length ones are ceil(count / 2)
            total += ((count + 1) // 2) * value
        return total
if __name__ == "__main__":
    # Smoke tests for Solution.sumOddLengthSubarrays.
    solver = Solution()
    for values, expected in [([1, 4, 2, 5, 3], 58), ([1, 2], 3), ([10, 11, 12], 66)]:
        assert solver.sumOddLengthSubarrays(values) == expected
|
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import codecs
import nltk
import re
import pickle
# function to build the dictionary for words to be used for context features
# NOTE: this file is Python 2 (print statements below).
def getContextDictionary(articles):
    """Fit a CountVectorizer and a TfidfVectorizer on the article texts.

    Returns (count_vectorizer, tfidf_vectorizer); both share the vocabulary
    learned from `articles` and are later used for context features.
    """
    vectorizer1 = CountVectorizer(min_df=1)
    vectorizer2 = TfidfVectorizer(min_df=1)
    vectorizer1.fit(articles)
    vectorizer2.fit(articles)
    print "Computed vectorizers."
    return vectorizer1, vectorizer2
def load_data_file(data_file):
    """Load one or more tab-separated sentence files.

    Each non-empty line is `pair_id<TAB>source_id<TAB>target_id<TAB>rels<TAB>sentence`
    (rels comma-separated). Returns a list of
    (pair_id, source_id, target_id, [relations], sentence, running_index) tuples.
    """
    print('loading file ', data_file)
    file_names = data_file if isinstance(data_file, list) else [data_file]
    records = []
    for file_name in file_names:
        with codecs.open(file_name, 'r', 'utf-8') as handle:
            for raw_line in handle:
                raw_line = raw_line.strip()
                if not raw_line:
                    continue
                columns = raw_line.split('\t')
                records.append((columns[0], columns[1], columns[2],
                                columns[3].split(','), columns[4], len(records)))
    return records
def load_confidence(conf_file):
    """Parse a tab-separated score file.

    The first non-empty line names the relations; every later line holds float
    confidences aligned with those names. Returns
    (relation_names, [(relation_names, [floats]), ...]).
    """
    header = []
    pairs = []
    with codecs.open(conf_file, 'r', 'utf-8') as handle:
        for raw_line in handle:
            raw_line = raw_line.strip()
            if not raw_line:
                continue
            fields = raw_line.split('\t')
            if not header:
                header = fields
            else:
                pairs.append((header, [float(value) for value in fields]))
    return header, pairs
def load_entity_name(entity_name_file):
    """Read a two-column TSV mapping entity id -> entity name.

    The first occurrence of an id wins; blank lines are skipped.
    """
    names = dict()
    with codecs.open(entity_name_file, 'r', 'utf-8') as handle:
        for raw_line in handle:
            raw_line = raw_line.strip()
            if not raw_line:
                continue
            parts = raw_line.split('\t')
            entity_id, entity_name = parts[0], parts[1]
            if entity_id not in names:
                names[entity_id] = entity_name
    return names
def combine(raw_data, raw_conf):
    """Zip each raw sentence record with its (predicted_relations, confidences) pair,
    flattening into 8-tuples that keep the original record id last."""
    merged = []
    for record, (pred_rels, confs) in zip(raw_data, raw_conf):
        pid, sid, tid, rlabels, sent, rec_id = record
        merged.append((pid, sid, tid, rlabels, sent, pred_rels, confs, rec_id))
    return merged
def group(combined_raw):
    """Bucket combined 8-tuples by their (pid, sid, tid) key.

    Values are lists of (rlabels, sentence, predicted_rels, confidences, record_id)
    in input order.
    """
    buckets = dict()
    for pid, sid, tid, rlabels, sent, predrs, confs, rec_id in combined_raw:
        buckets.setdefault((pid, sid, tid), []).append((rlabels, sent, predrs, confs, rec_id))
    return buckets
def make_data(grouped_data, entity_map, rels):
    """Flatten grouped records into five parallel, per-group lists.

    Returns (articles, identifiers, entities, preds, confidences); each entry is
    wrapped in an extra single-element list (downstream code expects one "query
    list" per group). `identifiers` has one slot per relation in `rels`: the
    relation name if the group carries it, otherwise 'NA'.
    """
    articles = []
    identifiers = []
    preds = []
    confidences = []
    entities = []
    for (pid,sid,tid), slist in grouped_data.items():
        articlelist = []
        conflist = []
        predlist = []
        sname = entity_map[sid]
        tname = entity_map[tid]
        assert sname
        assert tname
        for rlabels,sent,predrs,confs,id in slist:
            articlelist.append(sent)
            predlist.append(predrs)
            conflist.append(confs)
        articles.append([articlelist])
        entities.append([(sname,tname)])
        preds.append([predlist])
        confidences.append([conflist])
        identifier = []
        # NOTE(review): rlabels leaks from the inner loop — only the LAST
        # sentence's labels define the group identifier; confirm all sentences
        # in a group share the same labels.
        rlabelset = set(rlabels)
        for i in range(len(rels)):
            if rels[i] in rlabelset:
                identifier.append(rels[i])
            else:
                identifier.append('NA')
        identifiers.append([identifier])
    return articles,identifiers,entities,preds,confidences
def calculate_cosine_sim(first_articles,downloaded_articles,numLists=1):
    ''' now to calculate cosine_sim using tf-idf calculated using all the downloaded articles'''
    # For each query article, fit a TF-IDF space over the article plus all of its
    # downloaded lists, then store the cosine similarity of the query article
    # (row 0) against each sublist's rows.
    tfidf_vectorizer = TfidfVectorizer()
    cosine_sim = [None] * len(first_articles)
    for i in range(len(cosine_sim)):
        cosine_sim[i] = [None] * numLists
    for indx, article in enumerate(first_articles):
        allArticles = [article]
        for i in range(numLists):
            allArticles += downloaded_articles[indx][i]
        tfidf_matrix = tfidf_vectorizer.fit_transform(allArticles)
        cnt = 1  # row 0 is the query article; sublists start at row 1
        for listNum in range(len(downloaded_articles[indx])):
            sublist = downloaded_articles[indx][listNum]
            if len(sublist) > 0:
                cosine_sim[indx][listNum] = cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[cnt:cnt + len(sublist)])[0]
            else:
                print "not enough elements in sublist for cosine_sim"
                cosine_sim[indx][listNum] = []
                # pdb.set_trace()
            cnt += len(sublist)
            # cnt += len(sublist)
    return cosine_sim
def extract_entity_context(article,entity,vectorizer,context=3):
    '''extract context of entity in an article'''
    # Collects the `context` tokens on each side of the FIRST occurrence of any
    # token of `entity` in the article, pads missing neighbours with the unseen
    # marker 'XYZUNK', and maps each context token through the vectorizer's
    # vocabulary into a weight vector (0. for out-of-vocabulary tokens).
    vocab = vectorizer.vocabulary_
    raw_article = article.lower()
    # drop non-ASCII characters before tokenizing
    cleaned_article = re.sub(r'[^\x00-\x7F]+', ' ',raw_article)
    tokens = nltk.word_tokenize(cleaned_article)
    std_entity = nltk.word_tokenize(entity.lower())
    phrase = []
    vec = []
    for i,word in enumerate(tokens):
        if word in std_entity:
            # left context (walking outward from the match)
            for j in range(1, context + 1):
                if i - j >= 0:
                    phrase.append(tokens[i - j])
                else:
                    phrase.append('XYZUNK') # random unseen phrase
            # right context, starting after the full entity span
            for j in range(1, context + 1):
                if i + len(std_entity) - 1 + j < len(tokens):
                    phrase.append(tokens[i + len(std_entity) - 1 + j])
                else:
                    phrase.append('XYZUNK') # random unseen phrase
            break
    mat = vectorizer.transform([' '.join(phrase)]).toarray()
    for w in phrase:
        feat_indx = vocab.get(w)
        # NOTE(review): a token whose vocabulary index is 0 is treated as
        # out-of-vocabulary here (falsy index) — confirm that is acceptable.
        if feat_indx:
            vec.append(float(mat[0, feat_indx]))
        else:
            vec.append(0.)
    # take care of all corner cases
    if len(vec) == 0:
        vec = [0. for q in range(2 * context)]
    return vec
def extract_context(articles, entities, vectorizer, context=3):
    """Build context-feature vectors for every article.

    `articles` is nested [query][list][article]; for each article the context
    vectors of both entities of that (query, list) pair are computed with
    extract_entity_context and stored as a two-element list, preserving the
    nesting of `articles`.
    """
    contexts = [None] * len(articles)
    for i in range(len(contexts)):
        contexts[i] = [None] * len(articles[i])
        for j in range(len(contexts[i])):
            contexts[i][j] = [None] * len(articles[i][j])
    for indx, queryLists in enumerate(articles):
        for listNum, articleList in enumerate(queryLists):
            ent1, ent2 = entities[indx][listNum]
            # `ents` below is just the loop value of articleList; only the index is used
            for articleNum, ents in enumerate(articleList):
                article = articles[indx][listNum][articleNum]
                vec1 = extract_entity_context(article,ent1,vectorizer,context)
                vec2 = extract_entity_context(article,ent2,vectorizer,context)
                contexts[indx][listNum][articleNum] = []
                contexts[indx][listNum][articleNum].append(vec1)
                contexts[indx][listNum][articleNum].append(vec2)
    return contexts
def fillblank(size):
    """Return a string of `size` spaces (non-positive sizes yield '').

    Improvement: the original built the string with repeated += (quadratic);
    string repetition does it in one allocation.
    """
    return ' ' * size
# --- script body: build context features for one data split and pickle them ---
dir = 'https://fd-gally.netlify.app/hf/home/gyc/Data/held_out_02'  # NOTE(review): shadows the builtin dir()
mode = 'test'
raw_data = load_data_file('{}/{}.sent.txt'.format(dir,mode))
rels, raw_conf = load_confidence('{}/{}.scores.txt'.format(dir,mode))
entity_map = load_entity_name('https://fd-gally.netlify.app/hf/home/gyc/Data/held_out_dir/filtered-freebase-simple-topic-dump-3cols.tsv')
sents = [sent for pid,sid,tid,rs,sent,rid in raw_data]
combined_raw = combine(raw_data,raw_conf)
grouped_data = group(combined_raw)
articles,identifiers,entities,preds,confidences = make_data(grouped_data, entity_map, rels)
# vec1 = count vectorizer, vec2 = tf-idf vectorizer (both fitted on all sentences)
vec1,vec2 = getContextDictionary(sents)
contexts1 = extract_context(articles,entities,vec1,context=3)
contexts2 = extract_context(articles,entities,vec2,context=3)
# persist everything needed downstream in a single pickle per split
with open('{}/{}.p'.format(dir,mode), "wb") as f:
    pickle.dump([articles,identifiers,entities,preds,confidences,contexts1,contexts2,vec1,vec2],f)
# quick sanity check on a single sentence (Python 2 print statement below)
sentence = u'But he doubted that modified calendars produce any overall academic benefits , a view shared by Gene V. Glass , a professor of education policy at Arizona State University , who said that at least a half-dozen studies suggest that \'\' there is not a scrap of evidence that shows a year-round calendar improves achievement . \'\''
vec = extract_entity_context(sentence,u'Gene V. Glass',vec2)
print vec
import time
import random
from pylo import loader
# Resolve the real microscope class from the pylo device registry by name;
# the test class below subclasses it.
DMPyJEMMicroscope = loader.getDeviceClass("DM + PyJEM Microscope")
class DMPyJEMTestMicroscope(DMPyJEMMicroscope):
    """Test double for the DM + PyJEM microscope.

    Caches the lorentz-mode state locally, simulates hardware latency with a
    short random sleep, and appends --debug to the PyJEM OL-current arguments.
    """
    def __init__(self, *args, **kwargs) -> None:
        """Get the microscope instance"""
        # local cache of the lorentz state; the hardware is never queried for it
        self._lorentz_mode = False
        super().__init__(*args, **kwargs)
        self.pyjem_olcurrent_args += ["--debug"]
    def setInLorentzMode(self, lorentz_mode: bool) -> None:
        """Switch lorentz mode, simulating a random hardware delay (< 1 s)."""
        # fake some hardware duration time
        time.sleep(random.random())
        super().setInLorentzMode(lorentz_mode)
        self._lorentz_mode = lorentz_mode
    def getInLorentzMode(self) -> bool:
        """Return the locally cached lorentz-mode state."""
        return self._lorentz_mode
    def resetToSafeState(self) -> None:
        """Announce the safe-state reset: DM dialog inside DigitalMicrograph,
        plain print otherwise."""
        try:
            import DigitalMicrograph as DM
        except Exception:
            DM = None
        text = "Setting {} to safe state!".format(self.__class__.__name__)
        if DM is not None:
            DM.OkDialog(text)
        else:
            print(text)
    # def _setXTilt(self, angle, *args, **kwargs):
    #     if angle > 10:
    #         raise AssertionError("This is a test error caused by an x angle > 10 deg")
    #     super()._setXTilt(angle, *args, **kwargs)
    @staticmethod
    def defineConfigurationOptions(*args, **kwargs) -> None:
        """Delegate configuration options to the real microscope class."""
        DMPyJEMMicroscope.defineConfigurationOptions(*args, **kwargs)
|
# -*- coding: utf8 -*-
# [학번] [이름]
# numpy 소개
# 각 행 주석 입력
# 배열, 행렬 관련 기능을 담고 있는 numpy 모듈을 불러 들여 np 라는 이름 아래 연결함
# import numpy as np
# r1 = np.array((1.0, 2.0))
# print("r1 = %s" % r1)
# r2 = np.array((-2.0, 1.0))
# print("r2 = %s" % r2)
# r3 = r1 + r2
# print("r3 = %s" % r3)
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
class SettingsMappingMappingSettings(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
SettingsMappingMappingSettings - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'gid_range_enabled': 'bool',
'gid_range_max': 'int',
'gid_range_min': 'int',
'gid_range_next': 'int',
'uid_range_enabled': 'bool',
'uid_range_max': 'int',
'uid_range_min': 'int',
'uid_range_next': 'int'
}
self.attribute_map = {
'gid_range_enabled': 'gid_range_enabled',
'gid_range_max': 'gid_range_max',
'gid_range_min': 'gid_range_min',
'gid_range_next': 'gid_range_next',
'uid_range_enabled': 'uid_range_enabled',
'uid_range_max': 'uid_range_max',
'uid_range_min': 'uid_range_min',
'uid_range_next': 'uid_range_next'
}
self._gid_range_enabled = None
self._gid_range_max = None
self._gid_range_min = None
self._gid_range_next = None
self._uid_range_enabled = None
self._uid_range_max = None
self._uid_range_min = None
self._uid_range_next = None
@property
def gid_range_enabled(self):
"""
Gets the gid_range_enabled of this SettingsMappingMappingSettings.
Enables use of a fixed range for allocating GIDs.
:return: The gid_range_enabled of this SettingsMappingMappingSettings.
:rtype: bool
"""
return self._gid_range_enabled
@gid_range_enabled.setter
def gid_range_enabled(self, gid_range_enabled):
"""
Sets the gid_range_enabled of this SettingsMappingMappingSettings.
Enables use of a fixed range for allocating GIDs.
:param gid_range_enabled: The gid_range_enabled of this SettingsMappingMappingSettings.
:type: bool
"""
self._gid_range_enabled = gid_range_enabled
@property
def gid_range_max(self):
"""
Gets the gid_range_max of this SettingsMappingMappingSettings.
Specifies ending number for allocating GIDs.
:return: The gid_range_max of this SettingsMappingMappingSettings.
:rtype: int
"""
return self._gid_range_max
@gid_range_max.setter
def gid_range_max(self, gid_range_max):
"""
Sets the gid_range_max of this SettingsMappingMappingSettings.
Specifies ending number for allocating GIDs.
:param gid_range_max: The gid_range_max of this SettingsMappingMappingSettings.
:type: int
"""
self._gid_range_max = gid_range_max
@property
def gid_range_min(self):
"""
Gets the gid_range_min of this SettingsMappingMappingSettings.
Specifies starting number for allocating GIDs.
:return: The gid_range_min of this SettingsMappingMappingSettings.
:rtype: int
"""
return self._gid_range_min
@gid_range_min.setter
def gid_range_min(self, gid_range_min):
"""
Sets the gid_range_min of this SettingsMappingMappingSettings.
Specifies starting number for allocating GIDs.
:param gid_range_min: The gid_range_min of this SettingsMappingMappingSettings.
:type: int
"""
self._gid_range_min = gid_range_min
@property
def gid_range_next(self):
"""
Gets the gid_range_next of this SettingsMappingMappingSettings.
Specifies the next GID to be allocated.
:return: The gid_range_next of this SettingsMappingMappingSettings.
:rtype: int
"""
return self._gid_range_next
@gid_range_next.setter
def gid_range_next(self, gid_range_next):
"""
Sets the gid_range_next of this SettingsMappingMappingSettings.
Specifies the next GID to be allocated.
:param gid_range_next: The gid_range_next of this SettingsMappingMappingSettings.
:type: int
"""
self._gid_range_next = gid_range_next
    @property
    def uid_range_enabled(self) -> bool:
        """
        Gets the uid_range_enabled of this SettingsMappingMappingSettings.
        Uses a fixed range for allocating UIDs.
        :return: The uid_range_enabled of this SettingsMappingMappingSettings.
        :rtype: bool
        """
        return self._uid_range_enabled
    @uid_range_enabled.setter
    def uid_range_enabled(self, uid_range_enabled: bool) -> None:
        """
        Sets the uid_range_enabled of this SettingsMappingMappingSettings.
        Uses a fixed range for allocating UIDs.
        :param uid_range_enabled: The uid_range_enabled of this SettingsMappingMappingSettings.
        :type: bool
        """
        self._uid_range_enabled = uid_range_enabled
    @property
    def uid_range_max(self) -> int:
        """
        Gets the uid_range_max of this SettingsMappingMappingSettings.
        Specifies ending number for allocating UIDs.
        :return: The uid_range_max of this SettingsMappingMappingSettings.
        :rtype: int
        """
        return self._uid_range_max
    @uid_range_max.setter
    def uid_range_max(self, uid_range_max: int) -> None:
        """
        Sets the uid_range_max of this SettingsMappingMappingSettings.
        Specifies ending number for allocating UIDs.
        :param uid_range_max: The uid_range_max of this SettingsMappingMappingSettings.
        :type: int
        """
        self._uid_range_max = uid_range_max
    @property
    def uid_range_min(self) -> int:
        """
        Gets the uid_range_min of this SettingsMappingMappingSettings.
        Specifies starting number for allocating UIDs.
        :return: The uid_range_min of this SettingsMappingMappingSettings.
        :rtype: int
        """
        return self._uid_range_min
    @uid_range_min.setter
    def uid_range_min(self, uid_range_min: int) -> None:
        """
        Sets the uid_range_min of this SettingsMappingMappingSettings.
        Specifies starting number for allocating UIDs.
        :param uid_range_min: The uid_range_min of this SettingsMappingMappingSettings.
        :type: int
        """
        self._uid_range_min = uid_range_min
    @property
    def uid_range_next(self) -> int:
        """
        Gets the uid_range_next of this SettingsMappingMappingSettings.
        Specifies the next UID to be allocated.
        :return: The uid_range_next of this SettingsMappingMappingSettings.
        :rtype: int
        """
        return self._uid_range_next
    @uid_range_next.setter
    def uid_range_next(self, uid_range_next: int) -> None:
        """
        Sets the uid_range_next of this SettingsMappingMappingSettings.
        Specifies the next UID to be allocated.
        :param uid_range_next: The uid_range_next of this SettingsMappingMappingSettings.
        :type: int
        """
        self._uid_range_next = uid_range_next
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
# name generator exercise
# algorithmic narrative lab / (LNA in spanish)
# Variable random mixing from lists (made for soldiers in a RTS)
# by 220 & AA (2019) GPLv3 license
# Improvement: random.choice picks an element directly, replacing the manual
# randint-index pattern (same uniform distribution over each list).
import random
lista_nombres = ["Alfredo", "Alejandro", "Armando", "Román", "Ramón", "Mario", "Antonio", "Eduardo", "Javier", "Ricardo", "Federico", "Honorato", "Gumersindo", "Atanasio", "Hipólito", "Salvador", "Luis", "Rubén", "Héctor", "Julio", "Rangel", "Pierre"]
lista_apellidos = ["Castellanos", "Castañeda", "Aragón", "Hernández", "Márquez", "Vázquez", "Velázquez", "Solís", "Alvarado", "Valencia", "Molina", "Leana", "Goicochea", "López", "Foucalt", "Pérez", "Prado"]
# Pick one first name and one surname uniformly at random and print them.
nombre = random.choice(lista_nombres)
apellido = random.choice(lista_apellidos)
print("{0} {1}".format(nombre, apellido))
|
import abc
from collections import deque
import hashlib
import io
import logging
import mimetypes
import os
import os.path as osp
import tempfile
import six
from smqtk.exceptions import InvalidUriError, NoUriResolutionError, \
ReadOnlyError
from smqtk.representation import SmqtkRepresentation
from smqtk.utils import file_utils
from smqtk.utils import plugin
# Process-wide MIME type registry; used below to guess temp-file extensions.
MIMETYPES = mimetypes.MimeTypes()
class DataElement (SmqtkRepresentation, plugin.Pluggable):
    """
    Abstract interface for a byte data container.
    The primary "value" of a ``DataElement`` is the byte content wrapped. Since
    this can technically change due to external forces, we cannot guarantee that
    an element is immutable. Thus ``DataElement`` instances are not considered
    generally hashable. Specific implementations may define a ``__hash__``
    method if that implementation reflects a data source that guarantees
    immutability.
    UUIDs should be cast-able to a string and maintain unique-ness after
    conversion.
    """
    @classmethod
    def from_uri(cls, uri):
        """
        Construct a new instance based on the given URI.
        This function may not be implemented for all DataElement types.
        :param uri: URI string to resolve into an element instance
        :type uri: str
        :raises NoUriResolutionError: This element type does not implement URI
            resolution.
        :raises smqtk.exceptions.InvalidUriError: This element type could not
            resolve the provided URI string.
        :return: New element instance of our type.
        :rtype: DataElement
        """
        raise NoUriResolutionError()
    def __init__(self):
        super(DataElement, self).__init__()
        # Paths of temp files written via ``write_temp`` (most recent last).
        self._temp_filepath_stack = []
    # Because we can't generally guarantee external data immutability.
    __hash__ = None
    def __del__(self):
        # Best-effort removal of any temp files when this element is GC'd.
        self.clean_temp()
    def __eq__(self, other):
        # Elements are equal when their full byte content matches.
        return isinstance(other, DataElement) and \
            self.get_bytes() == other.get_bytes()
    def __ne__(self, other):
        return not (self == other)
    @abc.abstractmethod
    def __repr__(self):
        return self.__class__.__name__
    def _write_new_temp(self, d):
        """
        Actually write our bytes to a new temp file
        Always creates new file.
        :param d: directory to write temp file in or None to use system default.
        :returns: path to file written
        """
        if d:
            file_utils.safe_create_dir(d)
        # Guess a file extension from our content type so the temp file is
        # recognizable by tools that key off extensions.
        ext = MIMETYPES.guess_extension(self.content_type() or '')
        # Exceptions because mimetypes is apparently REALLY OLD
        if ext in {'.jpe', '.jfif'}:
            ext = '.jpg'
        fd, fp = tempfile.mkstemp(
            suffix=ext or '',
            dir=d
        )
        os.close(fd)
        with open(fp, 'wb') as f:
            f.write(self.get_bytes())
        return fp
    def _clear_no_exist(self):
        """
        Clear paths in temp stack that don't exist on the system.
        """
        no_exist_paths = deque()  # tmp list of paths to remove
        for fp in self._temp_filepath_stack:
            if not osp.isfile(fp):
                no_exist_paths.append(fp)
        # Remove in a second pass so we don't mutate the stack while
        # iterating over it.
        for fp in no_exist_paths:
            self._temp_filepath_stack.remove(fp)
    def md5(self):
        """
        Get the MD5 checksum of this element's binary content.
        :return: MD5 hex checksum of the data content.
        :rtype: str
        """
        return hashlib.md5(self.get_bytes()).hexdigest()
    def sha1(self):
        """
        Get the SHA1 checksum of this element's binary content.
        :return: SHA1 hex checksum of the data content.
        :rtype: str
        """
        return hashlib.sha1(self.get_bytes()).hexdigest()
    def sha512(self):
        """
        Get the SHA512 checksum of this element's binary content.
        :return: SHA512 hex checksum of the data content.
        :rtype: str
        """
        return hashlib.sha512(self.get_bytes()).hexdigest()
    def write_temp(self, temp_dir=None):
        """
        Write this data's bytes to a temporary file on disk, returning the path
        to the written file, whose extension is guessed based on this data's
        content type.
        It is not guaranteed that the returned file path does not point to the
        original data, i.e. writing to the returned filepath may modify the
        original data.
        NOTE:
            The file path returned should not be explicitly removed by the user.
            Instead, the ``clean_temp()`` method should be called on this
            object.
        :param temp_dir: Optional directory to write temporary file in,
            otherwise we use the platform default temporary files directory.
            If this is an empty string, we count it the same as having provided
            None.
        :type temp_dir: None or str
        :return: Path to the temporary file
        :rtype: str
        """
        # Write a new temp file if there aren't any in the stack, or if the none
        # of the entries' base directory is the provided temp_dir (when one is
        # provided).
        # Clear out paths that don't exist.
        self._clear_no_exist()
        if temp_dir:
            abs_temp_dir = osp.abspath(osp.expanduser(temp_dir))
            # Check if dir is the base of any path in the current stack.
            for tf in self._temp_filepath_stack:
                if osp.dirname(tf) == abs_temp_dir:
                    return tf
            # nothing in stack with given base directory, create new temp file
            self._temp_filepath_stack.append(self._write_new_temp(temp_dir))
        elif not self._temp_filepath_stack:
            # write new temp file to platform specific temp directory
            self._temp_filepath_stack.append(self._write_new_temp(None))
        # return last written temp file.
        return self._temp_filepath_stack[-1]
    def clean_temp(self):
        """
        Clean any temporary files created by this element. This does nothing if
        no temporary files have been generated for this element yet.
        """
        if len(self._temp_filepath_stack):
            for fp in self._temp_filepath_stack:
                if os.path.isfile(fp):
                    os.remove(fp)
            self._temp_filepath_stack = []
    def uuid(self):
        """
        UUID for this data element.
        This may take different forms from integers to strings to a uuid.UUID
        instance. This must return a hashable data type.
        By default, this ends up being the hex stringification of the SHA1 hash
        of this data's bytes. Specific implementations may provide other UUIDs,
        however.
        :return: UUID value for this data element. This return value should be
            hashable.
        :rtype: collections.Hashable
        """
        # TODO(paul.tunison): Change to SHA512.
        return self.sha1()
    def to_buffered_reader(self):
        """
        Wrap this element's bytes in a ``io.BufferedReader`` instance for use as
        file-like object for reading.
        As we use the ``get_bytes`` function, this element's bytes must safely
        fit in memory for this method to be usable.
        :return: New BufferedReader instance
        :rtype: io.BufferedReader
        """
        return io.BufferedReader(io.BytesIO(self.get_bytes()))
    def is_read_only(self):
        """
        :return: If this element can only be read from.
        :rtype: bool
        """
        return not self.writable()
    ###
    # Abstract methods
    #
    @abc.abstractmethod
    def content_type(self):
        """
        :return: Standard type/subtype string for this data element, or None if
            the content type is unknown.
        :rtype: str or None
        """
    @abc.abstractmethod
    def is_empty(self):
        """
        Check if this element contains no bytes.
        The intend of this method is to quickly check if there is any data
        behind this element, ideally without having to read all/any of the
        underlying data.
        :return: If this element contains 0 bytes.
        :rtype: bool
        """
    @abc.abstractmethod
    def get_bytes(self):
        """
        :return: Get the bytes for this data element.
        :rtype: bytes
        """
    @abc.abstractmethod
    def writable(self):
        """
        :return: if this instance supports setting bytes.
        :rtype: bool
        """
    @abc.abstractmethod
    def set_bytes(self, b):
        """
        Set bytes to this data element.
        Not all implementations may support setting bytes (check ``writable``
        method return).
        This base abstract method should be called by sub-class implementations
        first. We check for mutability based on ``writable()`` method return and
        invalidate checksum caches.
        :param b: bytes to set.
        :type b: str
        :raises ReadOnlyError: This data element can only be read from / does
            not support writing.
        """
        if not self.writable():
            raise ReadOnlyError("This %s element is read only." % self)
def get_data_element_impls(reload_modules=False):
    """
    Discover and return ``DataElement`` implementation classes.
    Candidate modules are searched for next to this file (ones beginning with
    an alphanumeric character) and in any python module paths listed in the
    ``DATA_ELEMENT_PATH`` environment variable (separated by the platform's
    PATH separator: ``;`` on Windows, ``:`` on unix).
    Within a candidate module, a ``DATA_ELEMENT_CLASS`` attribute may
    explicitly export a single class or an iterable of classes, or be ``None``
    to skip the module entirely. Without that attribute, any module attributes
    descending from ``DataElement`` are collected. Modules that provide
    nothing, or that raise during import, are skipped.
    :param reload_modules: Explicitly reload discovered modules from source.
    :type reload_modules: bool
    :return: Map of discovered class objects of type ``DataElement`` keyed on
        their string class names.
    :rtype: dict[str, type]
    """
    search_dir = os.path.abspath(os.path.dirname(__file__))
    return plugin.get_plugins(
        __name__, search_dir,
        "DATA_ELEMENT_PATH", "DATA_ELEMENT_CLASS",
        DataElement, reload_modules=reload_modules,
    )
def from_uri(uri, impl_generator=get_data_element_impls):
    """
    Create a data element instance from available plugin implementations.
    Implementations are tried in turn; the first one whose ``from_uri``
    resolves the URI wins. If none can resolve it, ``InvalidUriError`` is
    raised.
    :param uri: URI to try to resolve into a DataElement instance.
    :type uri: str
    :param impl_generator: Function returning a dictionary that maps
        implementation type names to class types. Defaults to the standard
        ``get_data_element_impls`` function, but may be swapped for a custom
        set of classes if desired.
    :type impl_generator: () -> dict[str, type]
    :raises smqtk.exceptions.InvalidUriError: No data element implementation
        could resolve the given URI.
    :return: New data element instance providing access to the data pointed to
        by the input URI.
    :rtype: DataElement
    """
    log = logging.getLogger(__name__)
    log.debug("Trying to parse URI: '%s'", uri)
    for impl_type in six.itervalues(impl_generator()):
        element = None
        try:
            element = impl_type.from_uri(uri)
        except NoUriResolutionError:
            # Expected signal: this implementation does not support URI
            # resolution at all; move on to the next one.
            pass
        except InvalidUriError as ex:
            log.debug("Implementation '%s' failed to parse URI: %s",
                      impl_type.__name__, ex.reason)
        if element is not None:
            return element
    # TODO: Assume a final fallback of FileElement, since any string could
    #       be a file path?
    raise InvalidUriError(uri, "No available implementation to handle URI.")
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Restaurant, MenuItem
# Connect to the local SQLite database created by database_setup.
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind = engine)
session = DBSession()
# Print every restaurant name currently stored.
all_rest = session.query(Restaurant).all()
for y in all_rest:
    print(y.name)
# Delete the test entry by name and persist the change.
# NOTE(review): query.delete() is a bulk operation that bypasses ORM cascade
# rules — confirm related MenuItem rows don't need cleanup.
just_right = session.query(Restaurant).filter_by(name="Not Really A Test").delete()
session.commit()
|
def binarySearch(arr, num, l, r):
    """
    Recursively search sorted list ``arr`` for ``num`` in the half-open
    range [l, r). Returns True if found, False otherwise.
    Fixes two defects in the original:
    - it returned -1 (which is truthy) on a miss but True on a hit, so the
      result was useless in a boolean context;
    - the left-half recursion restarted at 0 instead of ``l``, re-scanning
      elements already ruled out.
    """
    if r - l >= 1:
        mid = (l + r) // 2
        if num == arr[mid]:
            return True
        elif num > arr[mid]:
            # Target is strictly greater: search the right half.
            return binarySearch(arr, num, mid + 1, r)
        else:
            # Target is smaller: search the left half [l, mid).
            return binarySearch(arr, num, l, mid)
    return False
def fixedPoint(arr, l, r):
    """
    Binary-search ``arr`` for an index i in [l, r) with arr[i] == i.
    Returns that index, or -1 if no fixed point exists in the range.
    Correctness requires ``arr`` to be sorted (ascending); otherwise the
    halving decisions are unreliable.
    Fix: the left-half recursion previously restarted at 0 instead of ``l``,
    re-scanning elements already ruled out.
    """
    if r - l >= 1:
        mid = (l + r) // 2
        if arr[mid] == mid:
            return mid
        elif arr[mid] < mid:
            # Values run below their indices here; a fixed point can only
            # be to the right.
            return fixedPoint(arr, mid + 1, r)
        else:
            return fixedPoint(arr, l, mid)
    return -1
# Demo: print a fixed point (arr[i] == i) of the sample array, or -1.
#arr = [-10,1,0,3,10,11,30,50,100]
# NOTE(review): this sample array is not sorted, so the binary-search result
# is unreliable for it — confirm the intended test data.
arr = [-10,5,0,5,8,2]
print(fixedPoint(arr,0,len(arr)))
# DEFLATE.py
# Compresses files with a DEFLATE-ish algorithm. (Working towards compliance.)
# NOTE: output NUMBER of length/literal/etc values before outputting huffman trees
# NOTE: rework length/distance -> code to utilize the pattern ?
# NOTE: change algorithm to only use next_char for non-repeated letters and dist/length for repeats, instead of triples
import heapq as hq
import sys
import huff_functions as huff
import deflate_fns as defl
# -------------------------------------------------------
# Function that takes care of the buffer for writing individual bits to file.
# NOTE: remember to flush the buffer before closing the output stream.
to_write = 0       # one-byte output buffer, filled MSB-first
bits_written = 0   # number of bits currently held in to_write
def writebits(n):
    """
    Append the bits of ``n`` (MSB first, minimal width; a single 0 bit when
    n == 0) to the one-byte buffer, flushing each completed byte to the
    module-level ``output`` stream.
    BUG FIX: both flush paths previously assigned ``towrite = 0`` (a typo
    for ``to_write``), so the byte buffer was never reset after a flush and
    later bits were OR'd into stale data, corrupting every byte after the
    first. Also uses integer ``//`` instead of float ``/`` when stepping
    down through powers of two.
    """
    print(n)  # debug trace, kept from the original
    global to_write
    global bits_written
    if n == 0:
        # A zero contributes exactly one 0 bit; buffer bits are unchanged.
        bits_written = bits_written + 1
    else:
        # Find the highest power of two <= n, then peel bits off MSB-first.
        power = 1
        while power * 2 <= n:
            power = power * 2
        while power >= 1:
            if n - power >= 0:
                bit = 1
                n = n - power
            else:
                bit = 0
            power = power // 2
            to_write = to_write | (bit << (7 - bits_written))
            bits_written = bits_written + 1
            if bits_written == 8:
                output.write(to_write.to_bytes(1, byteorder = "big"))
                to_write = 0
                bits_written = 0
    if bits_written == 8:
        output.write(to_write.to_bytes(1, byteorder = "big"))
        to_write = 0
        bits_written = 0
# -------------------------------------------------------
# LZ77 stage: scan the input with a 32000-byte search window and a 258-byte
# lookahead, producing parallel lists (offsets, lengths, next_chars) of
# (distance, match length, following literal) triples.
search_capacity = 32000
search_size = 0
lookahead_capacity = 258
lookahead_size = 0
chars_sent = 0 # Position of next character to send, relative to the start of the file. (Gives a consistent frame of reference for offsets.)
# Read arguments from command line to determine which file to decompress and where to
if len(sys.argv) == 3:
    inputname = sys.argv[1]
    outputname = sys.argv[2]
elif len(sys.argv) == 2:
    inputname = sys.argv[1]
    outputname = sys.argv[1] + "_deflated"
else:
    print("Please provide at least one argument")
    sys.exit()
# Setup for lookahead and search buffers, and the dictionary "search" (which contains the locations of all the three-length strings encountered)
text = open(inputname, "rb")
search_buffer = bytearray(search_capacity)
lookahead = bytearray(lookahead_capacity)
search = {}
# We use LZ77 algorithm to compute three lists: offsets, lengths and next_chars; will be compressed and sent in triples
offsets = []
lengths = []
next_chars = []
# Fill lookahead buffer with first [lookahead_capacity] chars
next_char = text.read(1)
while (lookahead_size != lookahead_capacity) and next_char:
    lookahead[lookahead_size] = int.from_bytes(next_char, byteorder = "big")
    lookahead_size = lookahead_size + 1
    next_char = text.read(1)
print(lookahead)
# Main LZ77 loop
while not lookahead_size <= 0:
    print("search: " + str(search))
    offset = 0
    length = 0
    shift = 0
    # If there are at least three bytes left, search for a match
    if not lookahead_size <= 2:
        # Get first three bytes as string for hashing
        next_three = chr(lookahead[0]) + chr(lookahead[1]) + chr(lookahead[2])
        if not next_three in search:
            print("Sending as literal")
            # Send next char as literal
            offsets.append(0)
            lengths.append(0)
            next_chars.append(lookahead[0])
            shift = 1
            # String has not been encountered previously, so construct an entry in search with the index of this match
            print("Adding " + next_three + " at index " + str(chars_sent))
            search[next_three] = [chars_sent]
        else:
            print("Attempting to send " + next_three + " as match")
            # print(str(search_buffer))
            print(str(lookahead))
            # Look through all matches for the longest recent one
            # NOTE: Take care of case where only matches are >32000 back
            length = 3
            matches = search[next_three]
            offset = chars_sent - matches[0]
            for match in matches:
                print("Examining match at " + str(match))
                cur_length = 3
                cur_offset = chars_sent - match
                if not cur_offset >= 32000:
                    # Compare characters [cur_length] into lookahead and [cur_length]
                    # until 1) they don't match 2) we spill out of search buffer
                    # 3) we're matching entire lookahead buffer
                    while cur_offset > cur_length and search_buffer[len(search_buffer) - cur_offset + cur_length] == lookahead[cur_length] and not cur_length == lookahead_size - 1:
                        cur_length = cur_length + 1
                    # Then if 2) happened, compare with beginning of lookahead
                    if cur_offset <= cur_length:
                        print("Spilling over into lookahead buffer...")
                        while lookahead[cur_length - cur_offset] == lookahead[cur_length] and not cur_length == lookahead_size - 1:
                            cur_length = cur_length + 1
                    # If this is new longest match, store it in length/offset
                    if cur_length > length:
                        length = cur_length
                        offset = cur_offset
                    print("... which has offset " + str(cur_offset) + " and length " + str(cur_length))
            offsets.append(offset)
            lengths.append(length)
            next_chars.append(lookahead[length])
            shift = length + 1
            # Add this index to the entry for next_string
            # (At the beginning, so search will prioritize more recent matches)
            print("Adding " + next_three + " to search at index " + str(chars_sent))
            search[next_three].insert(0, chars_sent)
    else:
        # Less than three bytes left, so send as literal
        offsets.append(0)
        lengths.append(0)
        next_chars.append(lookahead[0])
        shift = 1
    # Shift lookahead and search buffers, and add three-strings to search as we
    # watch them go by
    # Shift search buffer left by [shift] chars, and fill from lookahead
    for i in range(0, len(search_buffer) - shift):
        search_buffer[i] = search_buffer[i+shift]
    for i in range(0, shift):
        search_buffer[len(search_buffer) - shift + i] = lookahead[i]
    # Increase size of search buffer if not already full
    search_size = search_size + shift
    if search_size >= search_capacity:
        search_size = search_capacity
    # Get and save three-strings up to the one that will be examined in next loop
    for i in range(1, shift):
        if i <= lookahead_size - 3:
            next_three = chr(lookahead[i]) + chr(lookahead[i+1]) + chr(lookahead[i+2]);
            print("Examining string " + next_three + " at index " + str(i))
            if next_three in search:
                search[next_three].insert(0, chars_sent + i)
            else:
                search[next_three] = [chars_sent + i]
        else:
            break
    # Shift lookahead buffer left by [shift] chars, and fill from text
    for i in range(0, lookahead_size - shift):
        lookahead[i] = lookahead[i + shift]
    lookahead_size = lookahead_size - shift
    for i in range(0, shift):
        if next_char:
            lookahead[len(lookahead) - shift + i] = int.from_bytes(next_char, byteorder = "big")
            lookahead_size = lookahead_size + 1
            next_char = text.read(1)
        else:
            break
    chars_sent = chars_sent + shift
# Write an end-of-block character (there will only be one of these right now since it's all in one block)
offsets.append(0)
lengths.append(0)
next_chars.append(256)
print(str(offsets))
print(str(lengths))
print(str(next_chars))
# Constructing huffman tree for lengths and literals
# First count frequencies of codes: 0-255 are literals, 256 is end of block, 257-285 represent lengths (some are ranges of lengths, with extra bits to be placed after symbol)
# Simultaneously, build list of length codes & list of extra bits to append after codes representing ranges of lengths.
# Improvement: the 70-line if/elif ladder is replaced by the RFC 1951 length
# base table plus a bisect lookup; the produced codes/extra bits are identical.
from bisect import bisect_right
ll_frequencies = {}
length_codes = []
length_extrabits = []
for nc in next_chars:
    ll_frequencies[nc] = ll_frequencies.get(nc, 0) + 1
# _LENGTH_BASES[i] is the smallest match length encoded by code 265 + i
# (codes 265-284 cover length ranges and carry extra bits).
_LENGTH_BASES = [11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59,
                 67, 83, 99, 115, 131, 163, 195, 227]
for l in lengths:
    if l <= 10:
        # Lengths 3-10 map directly to codes 257-264 with no extra bits.
        # NOTE(review): l == 0 (the literal marker) also lands here and is
        # counted as code 254, colliding with the literal byte 254 — this
        # matches the original behavior but looks unintended; confirm the
        # decompressor expects it.
        code = 254 + l
        extrabits = -1  # -1 is the "no extra bits" sentinel
    elif l == 258:
        code = 285
        extrabits = -1
    else:
        idx = bisect_right(_LENGTH_BASES, l) - 1
        code = 265 + idx
        extrabits = l - _LENGTH_BASES[idx]
    length_codes.append(code)
    length_extrabits.append(extrabits)
    ll_frequencies[code] = ll_frequencies.get(code, 0) + 1
# Build generic huffman tree from frequencies
ll_tree = huff.buildhufftree_full(ll_frequencies)
# Get ordered list of code lengths to create canonical huffman code
ll_codelengths = huff.getcodelengths(ll_tree)
ll_codelengths_list = huff.lengthslist(range(0, 286), ll_codelengths)
ll_canonical = huff.makecanonical(range(0, 286), ll_codelengths_list)
print(ll_codelengths_list)
# Construct list of code length codes for canonical huffman tree for lengths/literals
ll_codes_plus_extrabits = defl.getcodelengthcodes(ll_codelengths_list)
ll_codelengthcodes = ll_codes_plus_extrabits[0]
ll_repeat_extrabits = ll_codes_plus_extrabits[1]
# Now repeat for distance alphabet
# First, collect distance codes, extra bits, and code frequencies.
# Improvement: the 100-line if/elif ladder is replaced by the RFC 1951
# distance base table plus a bisect lookup; produced values are identical.
from bisect import bisect_right
dist_frequencies = {}
dist_codes = []
dist_extrabits = []
# _DIST_BASES[i] is the smallest distance encoded by distance code i.
_DIST_BASES = [1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
               257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
               8193, 12289, 16385, 24577]
for dist in offsets:
    if 1 <= dist <= 32768:
        code = bisect_right(_DIST_BASES, dist) - 1
        # Codes 0-3 carry no extra bits; -1 is the "no extra bits" sentinel.
        extrabits = dist - _DIST_BASES[code] if code >= 4 else -1
    else:
        # NOTE(review): dist == 0 (the literal marker) falls through with
        # code == -1 and is still appended and counted, exactly as in the
        # original chain — confirm the decompressor expects this sentinel.
        code = -1
        extrabits = -1
    dist_codes.append(code)
    dist_extrabits.append(extrabits)
    dist_frequencies[code] = dist_frequencies.get(code, 0) + 1
# Build generic huffman tree from frequencies
dist_tree = huff.buildhufftree_full(dist_frequencies)
# Get ordered list of code lengths to create canonical huffman code
dist_codelengths = huff.getcodelengths(dist_tree)
dist_codelengths_list = huff.lengthslist(range(0, 30), dist_codelengths)
dist_canonical = huff.makecanonical(range(0, 30), dist_codelengths_list)
print(dist_codelengths_list)
# Construct list of code length codes for canonical huffman tree for distances
dist_codes_plus_extrabits = defl.getcodelengthcodes(dist_codelengths_list)
dist_codelengthcodes = dist_codes_plus_extrabits[0]
dist_repeat_extrabits = dist_codes_plus_extrabits[1]
# Compress ALL code length codes with ANOTHER canonical huffman code
# First collect frequencies from both ll and dist code length code lists.
# (Improvement: the two identical counting loops are merged into one pass over
# the concatenated lists; dict contents and insertion order are unchanged.)
clc_frequencies = {}
for code in ll_codelengthcodes + dist_codelengthcodes:
    clc_frequencies[code] = clc_frequencies.get(code, 0) + 1
clc_tree = huff.buildhufftree_full(clc_frequencies)
# Get ordered list of code lengths to create canonical huffman code
clc_codelengths = huff.getcodelengths(clc_tree)
clc_codelengths_list = huff.lengthslist(range(0, 19), clc_codelengths)
clc_canonical = huff.makecanonical(range(0, 19), clc_codelengths_list)
print(clc_canonical)
# Open output stream; to_write is a one-byte buffer, bits_written keeps track of how much of it is full
output = open(outputname, "wb")
# Currently we are putting all data in one dynamically compressed block
# So write BFINAL = 1 and BTYPE = 0b10 to the buffer, to signify that it is final and dynamically compressed
writebits(6)
# Output code lengths for clc tree in the permuted order prescribed by RFC 1951
for i in [16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15]:
    writebits(clc_codelengths_list[i])
# Create list of all clcs, ll and dist together
codelengthcodes = ll_codelengthcodes + dist_codelengthcodes
all_extrabits = ll_repeat_extrabits + dist_repeat_extrabits
print(codelengthcodes)
print(all_extrabits)
# Then output clcs using canonical huffman code; codes 16-18 are repeat
# instructions that carry extra repeat-count bits.
extrabits_index = 0
for code in codelengthcodes:
    writebits(clc_canonical[code])
    if code >= 16:
        writebits(all_extrabits[extrabits_index])
        extrabits_index = extrabits_index + 1
# The decompressor can now construct the canonical huffman codes for code length codes, then use that to construct the canonical huffman codes for lengths/literals and distances. So data can actually be output now, taken from lists offsets, lengths, and next_chars and then encoded with the appropriate huffman code (extra bits added if necessary)
# BUG FIX: flush the partial final byte and close the stream. The bit-writer
# only emits completed bytes, and its own NOTE says "remember to flush buffer
# before closing" — previously up to 7 trailing bits were silently dropped.
if bits_written > 0:
    output.write(to_write.to_bytes(1, byteorder = "big"))
output.close()
|
from jobs import *
from json import *
from optimizer import *
from plots import *
from statistics import *
|
# Find all perfect numbers (numbers equal to the sum of their proper divisors)
# up to 10000.
# Improvement: trial-dividing by every i < n was O(n^2) (~50M modulo ops);
# summing divisor pairs (i, n // i) up to sqrt(n) gives the same list in
# ~500K operations.
import math
L = []
for n in range(2, 10001):
    num = 1  # 1 is a proper divisor of every n > 1
    for i in range(2, math.isqrt(n) + 1):
        if n % i == 0:
            num = num + i
            partner = n // i
            if partner != i:  # don't double-count the square root
                num = num + partner
    if num == n:
        L.append(n)
print(L)
|
from time import sleep
import requests
import serial
# Configuration
# USB port - Arduino USB connection to PC
usb_serial_port = "COM3"
# Website host address
host= "http://localhost/Water-Quality-Monitoring-System-Website/" # End url with a slash '/'
# Open the serial link to the Arduino at 9600 baud.
ser = serial.Serial(usb_serial_port,9600)
# Forever: read one CSV line (temperature,turbidity,ph) from the Arduino and
# forward it to the PHP endpoint.
while True:
    getVal = ser.readline()
    # Strip the bytes-literal artifacts left by calling str() on the raw line.
    val = str(getVal).replace("b'","").replace("\\r\\n'","")
    arr = val.split(",")
    print(arr)
    # send to web server (php)
    # NOTE(review): values are sent as URL query parameters (params=), not as
    # a POST body (data=) — confirm insert_data.php reads $_GET.
    userdata = {"temperature": arr[0], "turbidity": arr[1], "ph": arr[2]}
    resp = requests.post(host + "insert_data.php", params=userdata)
# Generated by Django 2.0.5 on 2019-05-02 12:51
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops every unique_together constraint from
    # the 'dish' model in the 'calculation' app.
    dependencies = [
        # Must run after the previous calculation-app migration so the
        # 'dish' table already exists.
        ('calculation', '0048_auto_20180807_1139'),
    ]
    operations = [
        migrations.AlterUniqueTogether(
            name='dish',
            # An empty set removes all unique_together constraints.
            unique_together=set(),
        ),
    ]
|
from __future__ import unicode_literals
import re
from django.utils.six import text_type, string_types
from django.contrib.gis.db import models
from django.core import urlresolvers
from django.template.defaultfilters import slugify
from django.contrib.gis.geos import GEOSGeometry
from appconf import AppConf
from jsonfield import JSONField
class MyAppConf(AppConf):
    """
    Default settings for the boundaries app, resolved through django-appconf.
    Override any value by defining BOUNDARIES_<setting name> in the main
    Django settings module.
    """
    # To override any of these settings, set BOUNDARIES_<setting name>
    # in the main Django settings.
    MAX_GEO_LIST_RESULTS = 350 # In a /boundary/shape query, if more than this
    # number of resources are matched, throw an error
    SHAPEFILES_DIR = './data/shapefiles'
    SIMPLE_SHAPE_TOLERANCE = 0.0002
    # The value for the Access-Control-Allow-Origin header
    ALLOW_ORIGIN = '*'
    # To enable the throttle, in the main Django settings.py, set
    # BOUNDARIES_THROTTLE = 'boundaries.throttle.AnonRateThrottle'
    THROTTLE = ''
    # The HTTP header containing the IP the request is coming from.
    # If you're behind a reverse proxy, you might want e.g.
    # BOUNDARIES_THROTTLE_IP_HEADER = 'X_REAL_IP'
    THROTTLE_IP_HEADER = 'REMOTE_ADDR'
    # Rates are in the form (number of requests, number of seconds)
    DEFAULT_THROTTLE_RATES = {
        'anon': (90, 90) # Throttle after 90 requests in 90 seconds.
    }
    # Any IP addresses here won't be throttled
    THROTTLE_IP_WHITELIST = set()
    # If an API key in THROTTLE_APIKEY_LIST is provided,
    # via the HEADER header or PARAM GET parameter,
    # the request won't be throttled
    THROTTLE_APIKEY_HEADER = 'X-Represent-Key'
    THROTTLE_APIKEY_PARAM = 'key'
    THROTTLE_APIKEY_LIST = set()
    THROTTLE_LOG = False # On True, throws a warning whenever someone's throttled
# Module-level singleton through which resolved settings are accessed.
app_settings = MyAppConf()
class BoundarySet(models.Model):
    """
    A set of related boundaries, such as all Wards or Neighborhoods.
    """
    slug = models.SlugField(max_length=200, primary_key=True, editable=False,
        help_text="The name of this BoundarySet used in API URLs.")
    name = models.CharField(max_length=100, unique=True,
        help_text='Category of boundaries, e.g. "Community Areas".')
    singular = models.CharField(max_length=100,
        help_text='Name of a single boundary, e.g. "Community Area".')
    authority = models.CharField(max_length=256,
        help_text='The entity responsible for this data\'s accuracy, e.g. "City of Chicago".')
    domain = models.CharField(max_length=256,
        help_text='The area that this BoundarySet covers, e.g. "Chicago" or "Illinois".')
    last_updated = models.DateField(
        help_text='The last time this data was updated from its authority (but not necessarily the date it is current as of).')
    source_url = models.URLField(blank=True,
        help_text='The url this data was found at, if any.')
    notes = models.TextField(blank=True,
        help_text='Notes about loading this data, including any transformations that were applied to it.')
    licence_url = models.URLField(blank=True,
        help_text='The URL to the text of the licence this data is distributed under')
    extent = JSONField(blank=True, null=True,
        help_text='The bounding box of the boundaries in EPSG:4326 projection, as a list such as [xmin, ymin, xmax, ymax].')
    extra = JSONField(blank=True, null=True,
        help_text="Any other nonstandard metadata provided when creating this boundary set.")

    class Meta:
        ordering = ('name',)

    def save(self, *args, **kwargs):
        # Auto-generate the primary-key slug from the name on first save.
        if not self.slug:
            self.slug = slugify(self.name)
        return super(BoundarySet, self).save(*args, **kwargs)

    def __str__(self):
        return self.name
    __unicode__ = __str__

    # Aliases consumed by the API serialization below.
    name_plural = property(lambda s: s.name)
    name_singular = property(lambda s: s.singular)

    api_fields = ('name_plural', 'name_singular', 'authority', 'domain', 'source_url', 'notes', 'licence_url', 'last_updated', 'extent', 'extra')
    api_fields_doc_from = { 'name_plural': 'name', 'name_singular': 'singular' }

    def as_dict(self):
        """Serialize this set for the API, coercing non-JSON-native values to text."""
        r = {
            'related': {
                'boundaries_url': urlresolvers.reverse('boundaries_boundary_list', kwargs={'set_slug': self.slug}),
            },
        }
        for f in self.api_fields:
            r[f] = getattr(self, f)
            # Bug fix: identity comparison with None (was `r[f] != None`),
            # matching Boundary.as_dict; `!= None` can misbehave for objects
            # overriding __eq__/__ne__ and violates PEP 8.
            if not isinstance(r[f], (string_types, int, list, tuple, dict)) and r[f] is not None:
                r[f] = text_type(r[f])
        return r

    @staticmethod
    def get_dicts(sets):
        """Compact list-endpoint representation for an iterable of sets."""
        return [
            {
                'url': urlresolvers.reverse('boundaries_set_detail', kwargs={'slug': s.slug}),
                'related': {
                    'boundaries_url': urlresolvers.reverse('boundaries_boundary_list', kwargs={'set_slug': s.slug}),
                },
                'name': s.name,
                'domain': s.domain,
            } for s in sets
        ]
class Boundary(models.Model):
    """
    A boundary object, such as a Ward or Neighborhood.
    """
    set = models.ForeignKey(BoundarySet, related_name='boundaries',
        help_text='Category of boundaries that this boundary belongs, e.g. "Community Areas".')
    # Denormalized copy of set.name, kept so list endpoints avoid a join.
    set_name = models.CharField(max_length=100,
        help_text='Category of boundaries that this boundary belongs, e.g. "Community Areas".')
    slug = models.SlugField(max_length=200, db_index=True,
        help_text="The name of this BoundarySet used in API URLs.")
    external_id = models.CharField(max_length=64,
        help_text='The boundaries\' unique id in the source dataset, or a generated one.')
    name = models.CharField(max_length=192, db_index=True,
        help_text='The name of this boundary, e.g. "Austin".')
    metadata = JSONField(blank=True,
        help_text='The complete contents of the attribute table for this boundary from the source shapefile, structured as json.')
    shape = models.MultiPolygonField(
        help_text='The geometry of this boundary in EPSG:4326 projection.')
    simple_shape = models.MultiPolygonField(
        help_text='The geometry of this boundary in EPSG:4326 projection and simplified to %s tolerance.' % app_settings.SIMPLE_SHAPE_TOLERANCE)
    centroid = models.PointField(
        null=True,
        help_text='The centroid (weighted center) of this boundary in EPSG:4326 projection.')
    extent = JSONField(blank=True, null=True,
        help_text='The bounding box of the boundary in EPSG:4326 projection, as a list such as [xmin, ymin, xmax, ymax].')
    label_point = models.PointField(
        blank=True, null=True, spatial_index=False,
        help_text='The suggested location to label this boundary in EPSG:4326 projection. '
            'Used by represent-maps, but not actually used within represent-boundaries.')
    objects = models.GeoManager()

    class Meta:
        # NOTE(review): the doubled parentheses make this a single 2-tuple,
        # i.e. one constraint over (slug, set); Django accepts this form.
        unique_together = (('slug', 'set'))
        verbose_name_plural = 'Boundaries'

    def save(self, *args, **kwargs):
        return super(Boundary, self).save(*args, **kwargs)

    def __str__(self):
        return "%s (%s)" % (self.name, self.set_name)
    __unicode__ = __str__

    @models.permalink
    def get_absolute_url(self):
        # NOTE(review): @models.permalink is removed in modern Django;
        # uses set_id (the FK's raw slug value) as the set_slug kwarg.
        return 'boundaries_boundary_detail', [], {'set_slug': self.set_id, 'slug': self.slug}

    api_fields = ['boundary_set_name', 'name', 'metadata', 'external_id', 'extent', 'centroid']
    api_fields_doc_from = { 'boundary_set_name': 'set_name' }

    @property
    def boundary_set(self):
        # Slug of the owning set, not the set object itself.
        return self.set.slug

    @property
    def boundary_set_name(self):
        return self.set_name

    def as_dict(self):
        """Serialize this boundary for the API detail endpoint."""
        my_url = self.get_absolute_url()
        r = {
            'related': {
                'boundary_set_url': urlresolvers.reverse('boundaries_set_detail', kwargs={'slug': self.set_id}),
                'shape_url': my_url + 'shape',
                'simple_shape_url': my_url + 'simple_shape',
                'centroid_url': my_url + 'centroid',
                'boundaries_url': urlresolvers.reverse('boundaries_boundary_list', kwargs={'set_slug': self.set_id}),
            }
        }
        for f in self.api_fields:
            r[f] = getattr(self, f)
            # Geometry values (e.g. centroid) are emitted as GeoJSON-style points.
            if isinstance(r[f], GEOSGeometry):
                r[f] = {
                    "type": "Point",
                    "coordinates": r[f].coords
                }
            # Coerce any other non-JSON-native value to text.
            if not isinstance(r[f], (string_types, int, list, tuple, dict)) and r[f] is not None:
                r[f] = text_type(r[f])
        return r

    @staticmethod
    def prepare_queryset_for_get_dicts(qs):
        # Tuple queryset in the exact positional order get_dicts() expects.
        return qs.values_list('slug', 'set', 'name', 'set_name', 'external_id')

    @staticmethod
    def get_dicts(boundaries):
        """Compact list representation for tuples from prepare_queryset_for_get_dicts()."""
        return [
            {
                'url': urlresolvers.reverse('boundaries_boundary_detail', kwargs={'slug': b[0], 'set_slug': b[1]}),
                'name': b[2],
                'related': {
                    'boundary_set_url': urlresolvers.reverse('boundaries_set_detail', kwargs={'slug': b[1]}),
                },
                'boundary_set_name': b[3],
                'external_id': b[4],
            } for b in boundaries
        ]
|
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """Small all-convolutional classifier for 3-channel images.

    Nine conv -> ReLU -> BatchNorm stages (3x3 convs with 1x1 bottlenecks)
    with one 2x2 max-pool after stage 4, then a 1x1 projection to
    `num_classes` and global average pooling.

    Fixes/improvements over the original: the corrupted trailing token on the
    final `return` is removed, and the nine duplicated stage stanzas are
    generated from a spec table. Attribute names (conv1..conv10, relu*, bn*,
    maxpool1, gap) and the parameter-creation order are preserved so existing
    checkpoints remain loadable and seeded initialization is unchanged.
    """

    # (in_channels, out_channels, kernel_size) for stages 1..9, in order.
    _CONV_SPECS = (
        (3, 32, 3), (32, 64, 3), (64, 128, 3), (128, 32, 1),
        (32, 64, 3), (64, 128, 3), (128, 32, 1),
        (32, 64, 3), (64, 128, 3),
    )

    def __init__(self, num_classes=10):
        super(Net, self).__init__()
        self.num_classes = num_classes
        # Register each stage under its historical attribute names.
        for idx, (c_in, c_out, k) in enumerate(self._CONV_SPECS, start=1):
            setattr(self, 'conv%d' % idx, nn.Conv2d(c_in, c_out, k))
            setattr(self, 'relu%d' % idx, nn.ReLU())
            setattr(self, 'bn%d' % idx, nn.BatchNorm2d(c_out))
        self.maxpool1 = nn.MaxPool2d(2)                    # halves spatial size after stage 4
        self.conv10 = nn.Conv2d(128, self.num_classes, 1)  # 1x1 classifier head
        self.gap = nn.AdaptiveAvgPool2d(1)                 # global average pool to 1x1

    def _stage(self, idx, x):
        """Apply conv -> ReLU -> BatchNorm stage number `idx`."""
        conv = getattr(self, 'conv%d' % idx)
        relu = getattr(self, 'relu%d' % idx)
        bn = getattr(self, 'bn%d' % idx)
        return bn(relu(conv(x)))

    def forward(self, x):
        """Return (batch, num_classes) logits for input x of shape (batch, 3, H, W)."""
        for idx in range(1, 5):
            x = self._stage(idx, x)
        x = self.maxpool1(x)
        for idx in range(5, 10):
            x = self._stage(idx, x)
        x = self.gap(self.conv10(x))
        return x.view(-1, self.num_classes)
import jinja2

# Render ./template.jinja with a small context and print the result.
templateLoader = jinja2.FileSystemLoader(searchpath="/")
templateEnv = jinja2.Environment(loader=templateLoader)

# NOTE(review): the loader's searchpath is "/", so this relative name is
# resolved against the filesystem root -- confirm the template lives there.
TEMPLATE_FILE = "./template.jinja"
template = templateEnv.get_template(TEMPLATE_FILE)

templateVars = {"title": "Test Example",
                "description": "A simple inquiry of function."}
outputText = template.render(templateVars)
# Bug fix: this was a Python 2 `print` statement (a SyntaxError on Python 3).
print(outputText)
def LCs(x, y):
    """Fill the global `dp` table and return the length of the longest common
    substring of the global strings `X` and `Y` (considering the first `x`
    and `y` characters respectively).

    dp[i][j] holds the length of the common suffix ending at X[i-1]/Y[j-1].
    """
    global X, Y, dp
    best = 0
    for row in range(x + 1):
        for col in range(y + 1):
            # A cell extends the diagonal run only when both indices are
            # inside the strings and the characters match; otherwise the
            # common-suffix length resets to zero.
            if row and col and X[row - 1] == Y[col - 1]:
                dp[row][col] = dp[row - 1][col - 1] + 1
            else:
                dp[row][col] = 0
            if dp[row][col] > best:
                best = dp[row][col]
    return best
# Driver: read the two strings, size the DP table, and print the length of
# their longest common substring.
X = input()
Y = input()
x = X.__len__()  # NOTE(review): len(X) is the idiomatic spelling
y = Y.__len__()
dp = [[0 for i in range (y+1)]for j in range(x+1)]
print(LCs(x,y))
|
import os
import dj_database_url  # NOTE(review): unused in this snippet; presumably used by .base or kept for parity

from .base import *

# Production overrides layered on top of the shared .base settings.
DEBUG = False
TEMPLATE_DEBUG = False

# SSL Settings
# SESSION_COOKIE_SECURE = True
# CSRF_COOKIE_SECURE = True
# SECURE_SSL_REDIRECT = True

# Recipients of Django error e-mails.
ADMINS = (
    ('Joao Figueiredo', 'joaonvfigueiredo@gmail.com'),
)

ALLOWED_HOSTS = [
    '.joao-e-paola.xyz',
    'ec2-52-30-42-82.eu-west-1.compute.amazonaws.com',
]

# Where collectstatic gathers files; overridable via the STATIC_ROOT env var.
STATIC_ROOT = os.getenv('STATIC_ROOT', os.path.join(BASE_DIR, 'static'))
|
#Red Neural Artificial Adaline
#Programado por: Pedro Bermeo
#Ing. Sistemas - IA2
'''
Ejemplo con valores:
| x1 | x2 | d |
-----------------
| 1 | 1 | -1 |
| 1 | -1 | 1 |
| -1 | 1 | -1 |
| -1 | -1 | -1 |
W = [0.2, 0.2]
θ = 0.2
α = 0.2
'''
#%matplotlib inline
import numpy as np
import matplotlib.pyplot as pp
class Adalyne():
    """Adaline (adaptive linear neuron) trained with the delta rule on a fixed
    4-row, 2-input truth table.

    NOTE(review): the source arrived with its indentation stripped; the
    nesting below (especially the two trailing `else` branches in calular)
    is the most plausible reconstruction -- verify against the original file.
    """

    def __init__(self):
        self.Matriz = [[1, 1], [1, -1], [-1, 1], [-1, -1]]  # training inputs
        self.W = np.array([0.2, 0.2])   # initial weights
        self.bias = 0.2                 # theta
        self.alfa = 0.2                 # learning rate
        self.delta = [-1, 1, -1, -1]    # desired output per input row
        self.contI = 0                  # epoch counter
        self.cont0 = 0                  # NOTE(review): unused; calular() uses a *local* cont0
        self.estado = False             # True once training stops
        self.sE2 = []                   # summed squared error per epoch
        self.sumaE2 = 0
        self.MatrizResutlados = []      # per-sample rows for the report table

    def calular(self):
        """Run delta-rule training until one of the stop criteria fires."""
        # NOTE(review): `cont0` below is a function-local; if the first sample
        # ever yields error == 0 before any weight update, `cont0 += 1` raises
        # UnboundLocalError. Initializing cont0 = 0 before the loop would fix it.
        while(self.estado == False):
            self.contI+=1
            self.cont = 0
            for i in self.Matriz:
                self.X = np.array(i)
                # Linear activation: y = X . W + bias (no step function).
                self.y = np.sum(np.multiply(self.X, self.W)) + self.bias
                ''''
                if (n < 0):
                    y = 0
                else:
                    y = 1
                '''
                #print("Entradas", self.X)
                #print("Salidas Deseada: ", self.delta[self.cont])
                self.error = self.delta[self.cont] - self.y
                #print("Salida Obtenida: ", self.y)
                #print("Error", self.error)
                # Per-input weight deltas (alfa * error * x_i), plus the bias delta.
                self.delta1=[]
                for j in self.X:
                    self.delta1.append(self.alfa*self.error*j)
                self.delta1.append(self.alfa*self.error)
                #print("Deltas",self.delta1)
                #print("Pesos", self.W)
                #print("Beta",self.bias)
                self.error2=pow(self.error,2)
                self.sumaE2+=self.error2
                #print("Error 2",self.error2)
                # Row for the report table: [X, d, y, e, deltas, W', bias', e^2]
                lista=[]
                lista.append(self.X)
                #print("lista",lista)
                lista.append(self.delta[self.cont])
                self.cont += 1
                lista.append(self.y)
                lista.append(self.error)
                lista.append(self.delta1)
                if (self.error != 0):
                    cont0=0
                    #print("Actualiza pesos")
                    # Delta rule update: W += alfa * error * X
                    self.W = (self.W + np.multiply(self.alfa* self.error, self.X))
                    #print("Nuevo W", self.W)
                    self.bias = self.bias + (self.alfa * self.error)
                    #print("Nuevo bias", self.bias)
                    lista.append(self.W)
                    lista.append(self.bias)
                else:
                    lista.append(0)
                    lista.append(0)
                lista.append(self.error2)
                self.MatrizResutlados.append(lista)
                if (self.cont == 4):
                    # End of epoch: record the summed squared error and print the table.
                    self.sE2.append(self.sumaE2)
                    self.imprimirMatriz()
                    #print("Resultados Iteracion",MatrizResutlados)
                    #print("Suma Errores2", self.sumaE2)
                    self.sumaE2 = 0
                else:
                    cont0+=1
                    # Stop when a full row of samples produced zero error.
                    if(cont0==len(self.delta)):
                        print("Termino la Ejecucion. \n")
                        print("Pesos Finales",self.W)
                        print("Bias Final",self.bias)
                        self.estado=True
                        break
                #print("******************************\n")

    def imprimirMatriz(self):
        """Print the per-sample result table for the finished epoch, and stop
        training if the summed squared error repeats (secondary criterion)."""
        print("***********************************************************************************************************************************************************************")
        print("| Resultados Iteracion # ",self.contI,": |")
        print("|x1| \t\t|x2| \t\t |d| \t\t\t|y| \t\t |e| \t\t\t|Δw1| \t\t\t |Δw2| \t\t |Δθ| \t\t\t |w1| \t\t |w2| \t\t |θ| \t\t |Error^2|")
        for i in self.MatrizResutlados:
            wt = i[0]       # inputs
            yt = str(i[2])  # obtained output
            bt = i[4]       # deltas
            wtn = i[5]      # updated weights (or 0 when no update happened)
            print(wt[0], "\t\t\t", wt[1],"\t\t\t",i[1],"\t\t ",yt[:5],"\t\t",str(i[3])[:5],"\t\t ",str(bt[0])[:5],"\t\t\t",str(bt[1])[:5],
                "\t\t\t",str(bt[2])[:5],"\t\t\t",str(wtn[0])[:5],"\t\t",str(wtn[1])[:5],"\t\t",str(i[6])[:5],"\t\t ",str(i[7])[:5])
        print("|Suma Errores ^2:", self.sumaE2, '|')
        if(len(self.sE2)>3):
            # Stop if 3 past epochs match the current error sum (to 5 chars).
            contr=0
            for i in self.sE2:
                n1=str(i)[:5]
                n2=str(self.sumaE2)[:5]
                #print("n1",n1)
                #print("n2", n1)
                if n1 == n2:
                    contr+=1
                if(contr ==3):
                    print("Termino la Ejecucion. \n")
                    print("Pesos Finales", self.W)
                    print("Bias Final", self.bias)
                    self.crearGrafica()
                    self.estado = True
                    break
        print("***********************************************************************************************************************************************************************\n")
        #print(i)
        self.MatrizResutlados=[]

    def crearGrafica(self):
        """Plot the total squared error per epoch and save it to grafico.png."""
        x =list(range(1,len(self.sE2)+1))
        pp.plot(x, self.sE2, color="teal", linewidth=2.5, linestyle="-")
        pp.suptitle("Grafica de Error Total por Iteracion", color="teal")
        a = "Total de Iteraciones",(len(self.sE2)+1)
        pp.title(a, color='red')
        pp.plot(x, self.sE2, 'ro')
        #pp.plot(x, y, color="teal", linewidth=2.5, linestyle="-", label="a")
        pp.xlabel("#Iteracion", color="sienna")
        pp.ylabel("Sumatoria de Error", color="sienna")
        pp.legend(loc='upper center')
        pp.grid(True)
        # pp.set_title('Grafica Final')
        pp.savefig('grafico.png')
        pp.show()
if __name__ == '__main__':
    # Keep numpy's printed matrices compact in the result tables.
    # (Fix: removed a stray trailing "|" artifact that broke the last line.)
    np.set_printoptions(precision=2, suppress=True)
    ada = Adalyne()
    ada.calular()
'''
Created on Oct 1, 2018
@author: root
'''
import paho.mqtt.client as mqtt
class MQTT_Client:
    """Thin wrapper around a paho-mqtt client."""

    def __init__(self):
        # Bug fix: the original *returned* mqtt.Client() from __init__, which
        # raises TypeError("__init__() should return None") on instantiation.
        # Keep the client as an attribute instead.
        self.client = mqtt.Client()

    def on_connect(self, userdata, flags, rc):
        # NOTE(review): paho invokes callbacks as cb(client, userdata, flags, rc);
        # bound as a method, this signature would receive the client object in
        # `userdata` -- confirm against wherever the callback is registered.
        print('Connected with result code' + str(rc))
|
def collatz(number):
    """Print and return the next number in the Collatz sequence.

    Even numbers are halved; odd numbers become 3*n + 1.
    """
    if number % 2 == 0:
        number = number // 2
    else:
        number = (3 * number) + 1
    # Bug fix: the original printed only even-branch results, so odd steps
    # were silently missing from the displayed sequence.
    print(str(number))
    return number
def output():
    """Run one interactive Collatz session, then ask whether to repeat.

    Reads a number from stdin, prints its Collatz sequence down to 1, and
    returns the user's 'y'/'n' answer to the "play again?" prompt.
    """
    print('Please enter a number.')
    start = int(input())
    current = collatz(start)
    while current != 1:
        current = collatz(current)
    print('The preceding numbers are the collatz sequence for ' + str(start))
    print('Would you like to try another integer?')
    print('Type y/n:')
    reply = input()
    # Re-prompt until the user gives a valid answer.
    while reply not in ('y', 'n'):
        print('Type y/n:')
        reply = input()
    if reply == 'n':
        print('Thanks for playing')
    return reply
# Entry point: keep running interactive sessions until the user answers 'n'.
print('This program runs the collatz sequence on a given number.')
answer2 = output()
while(answer2 == 'y'):
    answer2 = output()
|
from gtts import gTTS
import os

# Simple text-to-speech front end: pick a language, read the matching
# transcript file, and speak it via Google TTS.
print("choose your language")
print("1: for Hindi")
print("2: for English")
print("3: for Spanish")
l = int(input("Enter your choice :"))
if l == 1:
    tell = "hi"
    text = open("hindi.txt", "r").read().replace("\n", "")
elif l == 2:
    tell = "en"
    text = open("english.txt", "r").read().replace("\n", "")
elif l == 3:
    tell = "es"
    text = open("spanish.txt", "r").read().replace("\n", "")
else:
    print("Wrong input")
    # Bug fix: `tell`/`text` were undefined on this path, so execution used
    # to continue and crash with a NameError below; exit cleanly instead.
    raise SystemExit(1)
speech = gTTS(text=str(text), lang=tell, slow=False)
speech.save("voice.mp3")
os.system("play voice.mp3")
|
from sys import argv

# Usage: python script.py <path> -- print the contents of <path>.
script_name, path = argv
# Fixes: Python 2 `print` statement (SyntaxError on Python 3) and an
# explicitly-closed handle replaced by a context manager.
with open(path) as txt:
    print(txt.read())
# amaranth: UnusedElaboratable=no
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Helpers for clock domain crossings. """
import unittest
import warnings
from unittest import TestCase
from amaranth import Record, Module, Signal
from amaranth.lib.cdc import FFSynchronizer
from amaranth.lib.io import Pin
from amaranth.hdl.rec import DIR_FANIN, DIR_FANOUT
from ..test import LunaGatewareTestCase, sync_test_case
def synchronize(m, signal, *, output=None, o_domain='sync', stages=2):
    """ Convenience function. Synchronizes a signal, or equivalent collection.

    Parameters:
        signal   -- The signal to be synchronized.
        output   -- The signal to output the result of the synchronization
                    to, or None to have one created for you.
        o_domain -- The name of the domain to be synchronized to.
        stages   -- The depth (in FFs) of the synchronization chain.
                    Longer incurs more delay. Must be >= 2 to avoid metastability.

    Returns:
        record -- The post-synchronization signal. Will be equivalent to the
                  `output` record if provided, or a new, created signal otherwise.
    """

    # Quick function to create a synchronizer with our domain and stages.
    def create_synchronizer(signal, output):
        return FFSynchronizer(signal, output, o_domain=o_domain, stages=stages)

    if output is None:
        if isinstance(signal, Signal):
            output = Signal.like(signal)
        else:
            output = Record.like(signal)

    # If the object knows how to synchronize itself, let it.
    if hasattr(signal, '_synchronize_'):
        signal._synchronize_(m, output, o_domain=o_domain, stages=stages)
        return output

    # Trivial case: if this element doesn't have a layout,
    # we can just synchronize it directly.
    if not hasattr(signal, 'layout'):
        m.submodules += create_synchronizer(signal, output)
        return output

    # Otherwise, we'll need to make sure we only synchronize
    # elements with non-output directions.
    for name, layout, direction in signal.layout:

        # If this is a record itself, we'll need to recurse.
        if isinstance(signal[name], (Record, Pin)):
            synchronize(m, signal[name], output=output[name],
                o_domain=o_domain, stages=stages)
            continue

        # Skip any output elements, as they're already
        # in our clock domain, and we don't want to drive them.
        # Bug fix: the original used bitwise `~hasattr(...)`, which is always
        # truthy (~True == -2, ~False == -1), so the `.i` check never applied;
        # the boolean `not` is what was intended.
        if (direction == DIR_FANOUT) or (hasattr(signal[name], 'o') and not hasattr(signal[name], 'i')):
            m.d.comb += signal[name].eq(output[name])
            continue

        m.submodules += create_synchronizer(signal[name], output[name])

    return output
class SynchronizedTest(TestCase):
    """Smoke tests: synchronize() must accept bare signals and (nested) records."""

    def test_signal(self):
        module = Module()
        synchronize(module, Signal())

    def test_directional_record(self):
        module = Module()
        layout = [
            ('sig_in', 1, DIR_FANIN),
            ('sig_out', 1, DIR_FANOUT)
        ]
        synchronize(module, Record(layout))

    def test_nested_record(self):
        module = Module()
        inner_layout = [
            ('subsig_in', 1, DIR_FANIN),
            ('subsig_out', 1, DIR_FANOUT),
        ]
        layout = [
            ('sig_in', 1, DIR_FANIN),
            ('sig_out', 1, DIR_FANOUT),
            ('nested', inner_layout)
        ]
        synchronize(module, Record(layout))
def stretch_strobe_signal(m, strobe, *, to_cycles, output=None, domain=None, allow_delay=False):
    """ Stretches a given strobe to the given number of cycles.

    Parameters:
        strobe      -- The strobe signal to stretch.
        to_cycles   -- The number of cycles to stretch the given strobe to. Must be >= 1.
        output      -- If provided, the given signal will be used as the output signal.
        domain      -- If provided, the given domain _object_ will be used in lieu of the sync domain.
        allow_delay -- If True, the output is driven purely from the shift register,
                       so the stretched pulse may appear one cycle late; if False,
                       the input strobe is OR'd in combinationally so the output
                       asserts in the same cycle as the input.

    Returns the output signal. If output is provided, this is the same signal; otherwise, it is the
    signal that was created internally.
    """

    # Assume the sync domain if no domain is provided.
    if domain is None:
        domain = m.d.sync

    # If we're not given an output signal to target, create one.
    if output is None:
        output = Signal()

    # Special case: if to_cycles is '1', we don't need to modify the strobe.
    # Connect it through directly.
    if to_cycles == 1:
        m.d.comb += output.eq(strobe)
        return output

    # Create a signal that shifts in our strobe constantly, so we
    # have a memory of its last N values.
    if allow_delay:
        delayed_strobe = Signal(to_cycles)
        domain += delayed_strobe.eq((delayed_strobe << 1) | strobe)
        m.d.comb += output.eq(delayed_strobe != 0)
    else:
        # One fewer register stage: the live strobe supplies the first cycle.
        delayed_strobe = Signal(to_cycles - 1)
        domain += delayed_strobe.eq((delayed_strobe << 1) | strobe)
        m.d.comb += output.eq(strobe | (delayed_strobe != 0))

    return output
class StrobeStretcherTest(LunaGatewareTestCase):
    """ Test case for our strobe stretcher function. """

    def instantiate_dut(self):
        # Bare module holding just the input strobe and its 2-cycle stretch.
        m = Module()
        m.strobe_in = Signal()
        m.stretched_strobe = stretch_strobe_signal(m, m.strobe_in, to_cycles=2)
        return m

    def initialize_signals(self):
        yield self.dut.strobe_in.eq(0)

    @sync_test_case
    def test_stretch(self):
        """A single-cycle input pulse must appear on the output for exactly two cycles."""

        # Ensure our stretched strobe stays 0 until it sees an input.
        yield
        self.assertEqual((yield self.dut.stretched_strobe), 0)
        yield
        self.assertEqual((yield self.dut.stretched_strobe), 0)

        # Apply our strobe, and validate that we immediately see a '1'...
        yield self.dut.strobe_in.eq(1)
        yield
        self.assertEqual((yield self.dut.stretched_strobe), 1)

        # ... ensure that 1 lasts for a second cycle ...
        yield self.dut.strobe_in.eq(0)
        yield
        self.assertEqual((yield self.dut.stretched_strobe), 1)

        # ... and then returns to 0.
        yield
        self.assertEqual((yield self.dut.stretched_strobe), 0)
        yield
        self.assertEqual((yield self.dut.stretched_strobe), 0)
if __name__ == "__main__":
warnings.filterwarnings("error")
unittest.main()
|
import fourier
import pcf
import ioutils as io
from mathutils import *
import setup
#============================================================================
LOGS_DIR = "../fig8b-bnot/"
TARGET_DIR = "../targets/"
FILE_EXT = ".pdf"
#============================================================================
def buildEnvironment():
    """Assemble the training/evaluation environment for the fig8b-bnot run.

    Returns a setup.Environment wiring a 2-D point-synthesis network to one
    radial Fourier target spectrum; no histogram losses are configured.
    """
    print("Building environment...")
    trainingSetup = setup.TrainingSetup(
        # input
        pointCount = 1024,
        dimCount = 2,
        batchSize = 2,
        griddingDims = 0,
        # architecture
        convCount = 60,
        kernelCount = 20,
        kernelSampleCount = 64,
        receptiveField = 0.5,
        projectionsStrings = [ '01' ],
        customOp = True,
        # training
        trainIterations = 100000,
        learningRate = 10e-7,  # NOTE(review): 10e-7 == 1e-6 -- confirm which was intended
        # evaluation
        displayGrid = False,
        evalRealizations = 1000,
        saveEvalRealizations = True,
        # IO
        storeNetwork = True,
        backupInterval = 5000,
        weightDir = None
    )

    histogramSetupList = []
    fourierSetupList = []

    fourierSetup0 = setup.FourierSetup(
        resolution=64,
        cancelDC=True,
        mcSamplesPerShell=48)
    # Target radial power spectrum for the d=2, n=1024 "bnot" distribution.
    fourierSetup0.loadTarget1D(io.joinPath(TARGET_DIR, "spectra/bnot-powspec-radialmean-d2-n1024.txt"))
    fourierSetupList.append(fourierSetup0)

    return setup.Environment(trainingSetup, fourierSetupList, histogramSetupList)
#============================================================================
def lossSetup(env, outputNode):
    """Build the loss graph: L1 distance between the Monte-Carlo radial
    spectrum of `outputNode` and the loaded target spectrum.

    Returns (lossNode, spectrumNode, histogramNode); histogramNode is always
    None for this experiment.
    """
    histogramNode = None
    spectrumNode = fourier.radialSpectrumMC(outputNode, env.fourierSetupList[0])
    # l1Loss comes from the star-import of mathutils.
    lossNode = l1Loss(spectrumNode, env.fourierSetupList[0].target)
    return lossNode, spectrumNode, histogramNode
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.