text stringlengths 8 6.05M |
|---|
# prime number checker
def is_prime(n):
    """Return True if n is prime by trial division up to n // 2.

    BUG FIX: the original printed "Prime" for any n < 2 (0, 1, negatives)
    because the trial-division range was empty for those inputs.
    """
    if n < 2:
        return False
    for i in range(2, n // 2 + 1):
        if n % i == 0:
            return False
    return True

if __name__ == "__main__":
    num = int(input("enter number:"))
    print("Prime" if is_prime(num) else "Not prime")
|
#!/usr/bin/env python
import os
import sys
import Queue
import socket
import threading
import dnslib
from time import sleep
from abstractbackend import abstract_backend
# DNS TYPE/QTYPE mnemonics mapped to their numeric wire codes.
# Codes 252-255 are query-only (QTYPE) values that can appear in questions
# but not in resource records.
QRTYPE = {
    'A': 1,  # a host address
    'NS': 2,  # an authoritative name server
    'MD': 3,  # a mail destination (Obsolete - use MX)
    'MF': 4,  # a mail forwarder (Obsolete - use MX)
    'CNAME': 5,  # the canonical name for an alias
    'SOA': 6,  # marks the start of a zone of authority
    'MB': 7,  # a mailbox domain name (EXPERIMENTAL)
    'MG': 8,  # a mail group member (EXPERIMENTAL)
    'MR': 9,  # a mail rename domain name (EXPERIMENTAL)
    'NULL': 10,  # a null RR (EXPERIMENTAL)
    'WKS': 11,  # a well known service description
    'PTR': 12,  # a domain name pointer
    'HINFO': 13,  # host information
    'MINFO': 14,  # mailbox or mail list information
    'MX': 15,  # mail exchange
    'TXT': 16,  # text strings
    'AXFR': 252,  # A request for a transfer of an entire zone
    'MAILB': 253,  # A request for mailbox-related records (MB, MG or MR)
    'MAILA': 254,  # A request for mail agent RRs (Obsolete - see MX)
    '*': 255,  # A request for all records
}
class udp_worker(threading.Thread):
def __init__(self, dispatcher, queue, s, group=None, target=None, name=None, args=(), kwargs={}):
threading.Thread.__init__(self, group=None, target=None, name=None, args=(), kwargs={})
self.d = dispatcher
self.q = queue
self.s = s
def run(self):
while not self.d.shutting_down:
try:
data, addr = self.q.get(True, 1)
except Queue.Empty:
# This is to ensure we won't block forever
continue
try:
self.process(data, addr)
except:
# some error handling would be great
raise
finally:
# remove task anyway
self.q.task_done()
def process(self, data, addr):
try:
d = dnslib.DNSRecord.parse(data)
except:
# could not properly parse, so just fail
print "could not parse request"
return
questions = list()
for q in d.questions:
questions.append( (q.qtype, q.qname, q.qclass) )
try:
err, aa, answer, authority, additional = self.d.backend.get_result(questions)
except:
err = 2
aa = 0
answer = list()
authority = list()
additional = list()
print "error getting result from backend"
a = self.build_answer(d, err, aa, answer, authority, additional)
self.respond(addr, a)
def build_answer(self, d, err, aa, answer, authority, additional):
# Construct answer
reply = dnslib.DNSRecord(dnslib.DNSHeader(qr=1, aa=aa, rd=0, rcode=err, id=d.header.id))
reply.questions = d.questions
# answer section
for rr in answer:
print rr
reply.add_answer(dnslib.RR(
rtype=rr['type'],
rclass=rr['class'],
ttl=rr['ttl'],
rname=rr['name'],
rdata=dnslib.RDMAP[dnslib.QTYPE[rr['type']]](rr['rdata'])
))
# authority section
for rr in authority:
reply.add_answer(dnslib.RR(
rtype=rr['type'],
rclass=rr['class'],
ttl=rr['ttl'],
rdata=rr['rdata']
))
# additional section
for rr in additional:
reply.add_answer(dnslib.RR(
rtype=QRTYPE[rr['type']],
rclass=QRCLASS[rr['class']],
ttl=rr['ttl'],
rdata=rr['rdata']
))
return reply.pack()
def respond(self, addr, data):
self.s.sendto(data, addr)
class udp_dispatcher(threading.Thread):
    # Owns the worker pool: spawns `start_threads` udp_worker threads that all
    # consume from the shared queue, then idles in run() until shutdown().
    def __init__(self, queue, s, backend, min_threads=1, max_threads=0, start_threads=5, group=None, target=None, name=None, args=(), kwargs={}):
        threading.Thread.__init__(self, group=None, target=None, name=None, args=(), kwargs={})
        self.daemon = True  # don't keep the process alive if the main thread dies
        self.q = queue  # shared request queue, filled by the main recv loop
        self.max_threads = max_threads  # NOTE(review): pool never grows/shrinks; min/max are unused
        self.min_threads = min_threads
        self.start_threads = start_threads
        self.workers = list()
        self.worker_class = udp_worker
        self.backend = backend  # resolution backend, shared by all workers
        self.shutting_down = False  # polled by workers so they can exit
        self.down = False  # set once all workers have joined
        self.s = s  # UDP socket the workers reply on
        for i in range(start_threads): self.start_worker()
    def start_worker(self):
        # NOTE(review): as originally written, udp_worker.__init__ declared only
        # (dispatcher, queue, s, group=None, ...), so this 4th positional
        # argument was absorbed by `group`; workers use self.d.backend anyway.
        t = self.worker_class(self, self.q, self.s, self.backend)
        self.workers.append(t)
        t.start()
    def shutdown(self):
        # Signal the workers, wait for each to finish its 1s poll cycle and exit.
        self.shutting_down = True
        for w in self.workers:
            w.join()
        self.down = True
    def run(self):
        # Nothing to do but wait for shutdown() to flip the flag.
        while not self.down: sleep(1)
if __name__=="__main__":
    # Entry point: bind a UDP socket on port 53 (requires privileges), start
    # the dispatcher/worker pool, and feed datagrams from the main thread.
    q = Queue.Queue()
    host, port = '', 53
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind((host, port))
    ud = udp_dispatcher(q, s, abstract_backend())
    ud.start()
    while True:
        try:
            # we'll read datagram right in the main loop,
            # hopefully it's fast enough, but have to test later
            # DNS Datagram can not be over 512 bytes
            data, addr = s.recvfrom(512)
            # put datagram to queue
            q.put((data, addr))
        except KeyboardInterrupt:
            # Ctrl-C: stop workers, wait for queued tasks to drain, then exit.
            print "Calling dispatcher shutdown"
            ud.shutdown()
            print "Waiting for queue to be processed"
            q.join()
            print "Bye-bye!"
            sys.exit(0)
|
from tkinter import Toplevel, Text, Button, Label, N, S, E, W
def confirm_pb (menu, timer):
    """Modal dialog asking whether a freshly achieved personal best should
    overwrite the stored one.  Closing the window counts as 'No'."""
    # Allow only one layer-1 dialog at a time; beep if one is already open.
    if menu._layer1:
        menu._root.bell()
        return
    menu._layer1 = True

    def close_dialog():
        # Release the dialog "lock" before tearing the window down.
        menu._layer1 = False
        window.destroy()

    def on_yes():
        close_dialog()
        # Saving the PB implies saving bests too; menu is known non-None here.
        timer.finish_reset(menu)

    def on_no():
        # Discard the PB flag, then continue the reset without saving it.
        timer._pbed = False
        close_dialog()
        timer.mid_reset(menu)

    window = Toplevel(menu._root)
    window.title('Congratulations!')
    window.protocol('WM_DELETE_WINDOW', on_no)
    prompt = Label(window)
    prompt.configure(text = "You've achieved a PB!\nWould you like to save these times over your previous PB?")
    yes_button = Button(window, text = 'Yes', command = on_yes)
    no_button = Button(window, text = 'No', command = on_no)
    prompt.grid(row = 0, column = 0, columnspan = 2)
    no_button.grid(row = 1, column = 0, sticky = E)
    yes_button.grid(row = 1, column = 1, sticky = W)
    for col in (0, 1):
        window.columnconfigure(col, weight = 1)
    for row in (0, 1):
        window.rowconfigure(row, weight = 1)
def confirm_best (menu, timer):
    """Modal dialog asking whether freshly achieved best splits should replace
    the stored ones.  Closing the window counts as 'No'."""
    # Allow only one layer-1 dialog at a time; beep if one is already open.
    if menu._layer1:
        menu._root.bell()
        return
    menu._layer1 = True

    def close_dialog():
        # Release the dialog "lock" before tearing the window down.
        menu._layer1 = False
        window.destroy()

    def on_yes():
        close_dialog()
        timer.finish_reset(menu)  # definitely want to save

    def on_no():
        # Drop the new bests, then finish the reset without saving anything.
        timer._exist_bests = False
        close_dialog()
        timer.finish_reset()

    window = Toplevel(menu._root)
    window.title('Congratulations!')
    window.protocol('WM_DELETE_WINDOW', on_no)
    prompt = Label(window)
    prompt.configure(text = "You've achieved one or more best splits!\nWould you like to save these times over your previous best times?")
    yes_button = Button(window, text = 'Yes', command = on_yes)
    no_button = Button(window, text = 'No', command = on_no)
    prompt.grid(row = 0, column = 0, columnspan = 2)
    no_button.grid(row = 1, column = 0, sticky = E)
    yes_button.grid(row = 1, column = 1, sticky = W)
    for col in (0, 1):
        window.columnconfigure(col, weight = 1)
    for row in (0, 1):
        window.rowconfigure(row, weight = 1)
#!/bin/env python2
#import multiprocessing
# Gunicorn server configuration (load with `gunicorn -c thisfile app:wsgi`).
bind = "127.0.0.1:8000"  # listen address
#workers = multiprocessing.cpu_count() * 2 + 1
workers = 30  # fixed worker count (CPU-based rule of thumb kept above for reference)
worker_class = 'egg:gunicorn#gevent'  # async gevent workers
graceful_timeout = 3000  # seconds to let workers finish during graceful restart
user = "admin"  # run workers as this user/group
group = "admin"
daemon = True  # detach from the controlling terminal
timeout = 30  # kill workers silent for longer than this (seconds)
keepalive = 5  # seconds to hold idle keep-alive connections
limit_request_line = 4094  # max size of the HTTP request line, in bytes
max_requests = 102400  # recycle a worker after this many requests
worker_connections = 2000  # max simultaneous clients per gevent worker
syslog = True  # also log to syslog
preload_app = True  # import the application before forking workers
def post_fork(server, worker):
    # Gunicorn hook: runs right after a worker process has been forked.
    server.log.info("Worker spawned (pid: %s)", worker.pid)
def pre_fork(server, worker):
    # Gunicorn hook: runs just before a worker is forked; intentionally a no-op.
    pass
def pre_exec(server):
    # Gunicorn hook: runs before a new master process is exec'd (binary upgrade).
    server.log.info("Forked child, re-executing.")
def when_ready(server):
    """Gunicorn hook: runs once the arbiter is initialized and listening.

    BUG FIX: corrected the 'Spwawning' typo in the log message.
    """
    server.log.info("Server is ready. Spawning workers")
def on_reload(server):
    # Gunicorn hook: runs on SIGHUP-triggered reload; intentionally a no-op.
    pass
|
import io
import os
import time

# fork() race demo: both processes try to create 'my_data' exclusively ('x');
# the winner ("master") writes a message, the loser ("slave") waits and reads it.
pid = os.fork()
try:
    f = io.open('my_data', 'x')
except IOError:
    print("Failed to create file; I'm the slave")
    with io.open('my_data', 'r') as f:
        while True:
            s = f.read()
            if s:
                print("Received", s)
                break
            # BUG FIX: back off instead of busy-spinning at 100% CPU
            time.sleep(0.05)
else:
    print("Created file; I'm the master!")
    f.write('hello')
    # BUG FIX: the original never flushed or closed the file, so the write
    # could stay buffered and the slave would loop forever.
    f.close()
|
from django.contrib import messages
from django.db.models import Q
from django.shortcuts import get_object_or_404, render
from django.views.generic import ListView, DetailView
from django.views.generic.edit import FormMixin
from books.models import Book, Category
from cart.forms import CartAddForm
class BookListView(ListView):
    """Paginated list of all books (translated from the Persian docstring:
    'this view shows the list of all books')."""
    paginate_by = 5
    model = Book
    template_name = 'books/book_list.html'
    # BUG FIX: the previous get_context_data override replaced the paginated
    # 'book_list' context entry with Book.objects.all(), defeating
    # paginate_by = 5, and contained a leftover debug print.  ListView already
    # exposes the current page of Book objects as 'book_list', so the
    # override is removed entirely.
class CategoryListView(ListView):
    """Home page view listing every category."""
    model = Category
    template_name = 'books/home.html'

    def get_context_data(self, **kwargs):
        # Expose all categories under 'cat_list' next to the default context.
        ctx = super(CategoryListView, self).get_context_data(**kwargs)
        ctx['cat_list'] = Category.objects.all()
        return ctx
class SearchResultsListView(ListView):
    """Search view for books by title or author (translated from the Persian
    docstring: 'this class handles searching for books and authors')."""
    model = Book
    context_object_name = 'book_list'
    template_name = 'books/search_results.html'

    def get_queryset(self):
        """Return books whose title or author contains ?q=.

        BUG FIX: the original returned the result of messages.info() (None)
        when nothing matched, which is not a valid queryset for ListView; it
        also crashed with title__icontains=None when 'q' was missing.  Now a
        (possibly empty) queryset is always returned and the info message is
        only flashed as a side effect.
        """
        query = self.request.GET.get('q') or ''
        results = Book.objects.filter(
            Q(title__icontains=query) | Q(author__icontains=query)
        )
        if not results:
            messages.info(self.request, 'نتیجه یافت نشد')
        return results
class BookDetailView(FormMixin, DetailView):
    """Detail page for a single book, with an add-to-cart form (translated
    from the Persian docstring: 'shows the details of each book')."""
    model = Book
    template_name = 'books/book_detail.html'
    form_class = CartAddForm

    def get_context_data(self, **kwargs):
        # Pre-select the displayed book in the cart form.
        ctx = super(BookDetailView, self).get_context_data(**kwargs)
        ctx['form'] = CartAddForm(initial={'book': self.object})
        return ctx
def category_menu(request):
    """Render the home page: top-6 best sellers by default, or the books of
    the category given in ?category=."""
    selected = request.GET.get('category')
    if selected is None:
        shown_books = Book.objects.all().order_by('-sold')[:6]
    else:
        shown_books = Book.objects.filter(category__title=selected)
    return render(request, 'books/home.html', {
        'books': shown_books,
        'categories': Category.objects.all(),
    })
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 11 11:34:57 2014
@author: atproofer - mbocamazo
"""
# you do not have to use these particular modules, but they may help
from random import randint
from math import * ## SS: I added this line - don't forget to add the math library dependency!
import Image
#need to include prod(a,b)=ab, cos_pi(a)=cos(pi*a), sin '', x(a,b)=a, y '' + 2 others
#avg, diff/2, sigmoid appropriate
# Building blocks for the random image functions, grouped by arity.
full_list = ['x','y','prod','cos_pi','sin_pi','avg','half_diff','sigmoid']  # everything
unary_list = ['cos_pi','sin_pi','sigmoid']  # operators taking one sub-expression
binary_list = ['prod','avg','half_diff']  # operators taking two sub-expressions
func_list = unary_list+binary_list  # all non-terminal operators
terminal_list = ['x','y']  # leaves of the expression tree
def rand_selection(x):
    """Return a uniformly random element of the non-empty sequence *x*."""
    last_index = len(x) - 1
    return x[randint(0, last_index)]
#The following two functions were explorations/visualizations of how to implement recursion
# and were not used
def unlimited_build():
    # Exploration only (unused -- see the comment above): sketches unbounded
    # random tree construction.
    # NOTE(review): it appends the *function object* `unlimited_build` instead
    # of calling it, so the result contains callables rather than sub-trees.
    z = rand_selection(full_list)
    if z in terminal_list:
        return [z]
    if z in unary_list:
        return [z,unlimited_build]
    if z in binary_list:
        return [z,unlimited_build,unlimited_build]
def get_arg(case):
    """Pick a random element from the pool selected by *case*, wrapped in a
    list: 0 -> terminals, 1 -> operators, 2 -> the full list.  Any other
    case yields None (matching the original fall-through)."""
    pools = {0: terminal_list, 1: func_list, 2: full_list}
    pool = pools.get(case)
    if pool is None:
        return None
    return [rand_selection(pool)]
def build_random_function(min_depth, max_depth):
    """Grow a random nested-list expression whose recursion depth lies between
    min_depth and max_depth (depth numbering as in the assignment text)."""
    # Out of budget: forced to emit a leaf.
    if max_depth == 1:
        return [rand_selection(terminal_list)]
    if min_depth > 1:
        # Still below the minimum depth: must emit an operator node.
        op = rand_selection(func_list)
    else:
        # Between min and max: leaves and operators are both allowed.
        op = rand_selection(full_list)
        if op in terminal_list:
            return [op]
    # Recurse once per operand of the chosen operator.
    arity = 1 if op in unary_list else 2
    return [op] + [build_random_function(min_depth - 1, max_depth - 1)
                   for _ in range(arity)]
## SS: Passed my tests :)
## SS: for this function, you might make use of 'elif' statements, even though the functionailty
## is the same, stylistically, it's preferable
def evaluate_random_function(f, x, y):
    """Recursively evaluate the nested-list expression *f* at the point (x, y).

    Leaves 'x'/'y' return the coordinates; operators combine the recursively
    evaluated sub-expressions.

    BUG FIX: an unknown operator previously just printed a message (Python 2
    `print` statements) and returned None, silently poisoning the recursion;
    it now raises ValueError.  Divisions use 2.0 so the averaging operators
    stay in float arithmetic regardless of input types.
    """
    op = f[0]
    if op == 'x':
        return x
    elif op == 'y':
        return y
    elif op == 'prod':
        return evaluate_random_function(f[1], x, y) * evaluate_random_function(f[2], x, y)
    elif op == 'cos_pi':
        return cos(pi * evaluate_random_function(f[1], x, y))
    elif op == 'sin_pi':
        return sin(pi * evaluate_random_function(f[1], x, y))
    elif op == 'avg':
        return (evaluate_random_function(f[1], x, y) + evaluate_random_function(f[2], x, y)) / 2.0
    elif op == 'half_diff':
        return (evaluate_random_function(f[1], x, y) - evaluate_random_function(f[2], x, y)) / 2.0
    elif op == 'sigmoid':
        return 1.0 / (1.0 + exp(-evaluate_random_function(f[1], x, y)))
    raise ValueError('fail to identify function element: %r' % (op,))
#unit test: ['prod', ['prod', ['x'], ['y']], ['prod', ['x'], ['y']]] should return (xy)^2
#unit test: ['prod', ['prod', ['x'], ['y']], ['prod', ['x'], ['y']]] should return (xy)^2
## SS: Passed my tests :)
## SS: maybe use some more descriptive variable names here
def remap_interval(val, input_interval_start, input_interval_end, output_interval_start, output_interval_end):
    """Affinely map *val* from [input_interval_start, input_interval_end]
    onto [output_interval_start, output_interval_end].

    output = (val - in_start) * scale + out_start, where scale is the ratio
    of the two interval lengths; float() forces real division even for
    integer endpoints.
    """
    in_span = input_interval_end - input_interval_start
    out_span = output_interval_end - output_interval_start
    scale = float(out_span) / in_span
    return scale * (val - input_interval_start) + output_interval_start
def generate_image(count=10, xsize=350, ysize=350, min_args=12, max_args=18):
    """Generate `count` random-art images and save them as ex4<k>.png.

    BUG FIX: this function contained unresolved git merge conflict markers
    (<<<<<<< / ======= / >>>>>>>), which made the whole file a syntax error.
    Resolved in favour of the looped HEAD version and generalized: the other
    branch's behaviour is generate_image(count=1, min_args=4, max_args=5).
    Each colour channel is driven by its own random function evaluated over
    [-1, 1] x [-1, 1] and remapped to 0..255.
    """
    for k in range(count):
        im = Image.new("RGB", (xsize, ysize))
        pix = im.load()
        funcR = build_random_function(min_args, max_args)
        funcG = build_random_function(min_args, max_args)
        funcB = build_random_function(min_args, max_args)
        for i in range(xsize - 1):
            xcoord = remap_interval(i, 0, xsize, -1, 1)
            for j in range(ysize - 1):
                ycoord = remap_interval(j, 0, ysize, -1, 1)
                r = evaluate_random_function(funcR, xcoord, ycoord)
                g = evaluate_random_function(funcG, xcoord, ycoord)
                b = evaluate_random_function(funcB, xcoord, ycoord)
                pix[i, j] = (int(remap_interval(r, -1, 1, 0, 255)),
                             int(remap_interval(g, -1, 1, 0, 255)),
                             int(remap_interval(b, -1, 1, 0, 255)))
        im.save('ex4' + str(k) + '.png')
generate_image()
## SS: Hey, great job! This is flawless in functionality (or so I can tell), and you have good
## documentation, which I really appreciate.
|
from django.shortcuts import render
from django.http import HttpResponse
from django.http import JsonResponse
from django.http import FileResponse
import sys
sys.path.append('./DB')
from login import *
from profile import *
def audio(request):
    """Serve the static test WAV file as an audio/mpeg response.

    BUG FIX: the file handle was opened and handed to HttpResponse without
    ever being closed; read the bytes inside a `with` block instead.
    """
    with open('./Sound/test.wav', 'rb') as audio_file:
        data = audio_file.read()
    return HttpResponse(data, content_type = 'audio/mpeg')
def image(request):
    """Serve the static test image; FileResponse takes ownership of the
    open handle and closes it after streaming."""
    handle = open('./trips/test.png', 'rb')
    return FileResponse(handle)
def username(request):
    """Return the hard-coded demo user payload."""
    payload = {"title": "isLoginSuccess", "user_id": "Allen"}
    return JsonResponse(payload)
def getusername(request):
    """Look up a user's display name; the user id is the third path segment
    of the request URL."""
    segments = request.META['PATH_INFO'].strip().split('/')
    uid = segments[2]
    print (uid)
    name = getUserName(uid)
    print (name)
    return JsonResponse({"title":"getUserName","name":name})
def setuservoice(request):
    # Unfinished stub: parses the user id from the URL but stores nothing and
    # implicitly returns None (not a valid Django response yet).
    # NOTE(review): shadowed by the later setuservoice() definition further
    # down this module, so this version is dead code.
    info = request.META['PATH_INFO'].strip().split('/')
    user_id = info[2]
    #undone
def getuservoice(request):
    """Serve a user's stored voice clip as audio/mpeg; the user id is the
    third path segment of the request URL.

    BUG FIX: the file handle was opened and handed to HttpResponse without
    ever being closed; read the bytes inside a `with` block instead.
    """
    info = request.META['PATH_INFO'].strip().split('/')
    user_id = info[2]
    print (user_id)
    filename = getUserVoice(user_id)
    print (filename)
    with open(filename, 'rb') as voice_file:
        data = voice_file.read()
    return HttpResponse(data, content_type = 'audio/mpeg')
def setuserphoto(request):
    # Unfinished stub: parses the user id from the URL but stores nothing and
    # implicitly returns None (not a valid Django response yet).
    # NOTE(review): shadowed by the later setuserphoto() definition further
    # down this module, so this version is dead code.
    info = request.META['PATH_INFO'].strip().split('/')
    user_id = info[2]
    #undone
def testp():
    # Ad-hoc manual test helper: dumps the stored photo path for user '1'.
    print (getUserPhoto('1'))
def getuserphoto(request):
    """Serve a user's stored profile photo; the user id is the third path
    segment of the request URL.  FileResponse closes the handle itself."""
    segments = request.META['PATH_INFO'].strip().split('/')
    uid = segments[2]
    print (segments)
    photo_path = getUserPhoto(uid)
    print (photo_path)
    return FileResponse(open(photo_path, 'rb'))
def isloginsuccess(request):
    """Check credentials; URL path carries /<...>/<user_id>/<password>.
    Responds with the stringified boolean the client expects."""
    segments = request.META['PATH_INFO'].strip().split('/')
    uid, pwd = segments[2], segments[3]
    print (uid, pwd)
    ok = isLoginSuccess(uid, pwd)
    print (ok)
    verdict = "True" if ok == True else "False"
    return JsonResponse({"title": "isLoginSuccess", "result": verdict})
# Create your views here.
def setuseraccount(request):
    """Create an account; URL path carries /<...>/<user_id>/<password>.
    Responds with the stringified boolean the client expects."""
    segments = request.META['PATH_INFO'].strip().split('/')
    uid, pwd = segments[2], segments[3]
    print (uid, pwd)
    created = setUserAccount(uid, pwd)
    print (created)
    verdict = "True" if created == True else "False"
    return JsonResponse({"title": "setUserAccount", "result": verdict})
def changepassword(request):
    """Change a password; URL path carries /<...>/<user_id>/<new_password>.
    Responds with the stringified boolean the client expects."""
    segments = request.META['PATH_INFO'].strip().split('/')
    uid, new_pwd = segments[2], segments[3]
    print (uid, new_pwd)
    changed = changePassword(uid, new_pwd)
    print (changed)
    verdict = "True" if changed == True else "False"
    return JsonResponse({"title": "changePassword", "result": verdict})
# def getuserinformation(request):
# info = request.META['PATH_INFO'].strip().split('/')
# print (info)
# user_id = info[0]
# name,photo,voice = getUserInformation(user_id)
# return JsonResponse({"title": "getUserInformation","name":name,"photo":photo,"voice":voice})
def setuserphoto(request):
    # NOTE(review): this redefines the setuserphoto() stub earlier in the
    # module, replacing it.  It also references the undefined name
    # `test_photo`, so calling it raises NameError -- the source of the photo
    # payload still needs to be wired in.
    info = request.META['PATH_INFO'].strip().split('/')
    user_id = info[2]
    print (user_id)
    result = setUserPhoto(user_id,test_photo)
    print (result)
    # The backend appears to signal success with the *string* "True" here,
    # unlike isLoginSuccess above -- TODO confirm.
    if result == "True":
        return JsonResponse({"title":'setUserPhoto',"result":"True"})
    else:
        return JsonResponse({"title":'setUserPhoto',"result":"False"})
def test_setuserphoto(request):
    """Manual test endpoint: store ./trips/test.png as user 'a''s photo.

    BUG FIX: the original seeded `f` with the str "" and then concatenated
    bytes lines onto it, raising TypeError for any file with more than one
    line; it also never closed the handle.  Reading the whole file at once
    inside a `with` block fixes both.
    """
    with open('./trips/test.png', 'rb') as photo_file:
        blob = photo_file.read()
    setUserPhoto('a', blob)
    return JsonResponse({"title":'setUserPhoto',"result":"True"})
def test_setuservoice(request):
    """Manual test endpoint: store ./trips/test.wav as user 'a''s voice.

    BUG FIX: same str/bytes concatenation bug as test_setuserphoto -- `f`
    started as the str "" and bytes lines were appended to it; the handle was
    also never closed.  Read the whole file at once inside a `with` block.
    """
    with open('./trips/test.wav', 'rb') as voice_file:
        blob = voice_file.read()
    setUserVoice('a', blob)
    return JsonResponse({"title":'setUserVoice',"result":"True"})
def testing():
    # Ad-hoc manual check of setUserPhoto with throwaway values.
    print (setUserPhoto("b","c"))
def setuservoice(request):
    """Store a user's voice sample; the URL path carries /<user_id>/<sound>.

    BUG FIX: the original called setUserVoice(user_id, photo) with the
    undefined name `photo`, raising NameError on every request; the parsed
    `sound` value is what was meant.  (This definition replaces the earlier
    setuservoice() stub in this module.)
    """
    info = request.META['PATH_INFO'].split('/')
    user_id = info[0]
    sound = info[1]
    print (user_id,sound)
    result = setUserVoice(user_id,sound)
    print (result)
    if result == "True":
        return JsonResponse({"title":'setUserVoice',"result":"True"})
    else:
        return JsonResponse({"title":'setUserVoice',"result":"False"})
# name (string), photo (Binary string), sound (Binary string)
# True/False (Bool)
# True/False (Bool)
def getNearbyPin(request):
    # Unimplemented stub (see the route sketch in the comments below): parses
    # coordinates from the URL path but performs no lookup and returns None.
    info = request.META['PATH_INFO'].split('/')
    latitude = info[0]
    longitude = info[1]
def getNearbyPinByTag(request):
    # Unimplemented stub: parses coordinates and a tag from the URL path but
    # performs no lookup and returns None.
    info = request.META['PATH_INFO'].split('/')
    latitude = info[0]
    longitude = info[1]
    tag = info[2]
def getSoundBysound_id(request):
    # Unimplemented stub: parses the sound id from the URL path but performs
    # no lookup and returns None.
    info = request.META['PATH_INFO'].split('/')
    sound_id = info[0]
def storeSound(request):
    # Unimplemented stub: parses the pin metadata from the URL path (see the
    # "storeSound latitude longitude user_id sound title description date tag"
    # route sketch below) but stores nothing and returns None.
    info = request.META['PATH_INFO'].split('/')
    latitude = info[0]
    longitude = info[1]
    user_id = info[2]
    title = info[3]
    description = info[4]
    date = info[5]
    tag = info[6]
def deleteSound(request):
    # Unimplemented stub: parses the sound id from the URL path but deletes
    # nothing and returns None.
    info = request.META['PATH_INFO'].split('/')
    sound_id = info[0]
def changeSoundInformation(request):
    # Unimplemented stub: parses the new metadata from the URL path but
    # updates nothing and returns None.
    info = request.META['PATH_INFO'].split('/')
    title = info[0]
    description = info[1]
    tag = info[2]
# getNearbyPin latitude longitude
# getNearbyPinByTag latitude longitude tag
# getSoundBysound_id sound_id
# storeSound latitude longitude user_id sound title description date tag
# deleteSound sound_id
# changeSoundInformation title description tag
|
"""Some useful operations with strings (comments translated from Portuguese)."""
A = "lucas "
B = "LIMA"
############################
""" concatenating (+) strings """
juntar = A + B
print(juntar, "\n")
""" len() -> number of items in the (object) """
# BUG FIX: the original called len(R) on the undefined name R (NameError);
# the surrounding example is about the concatenated string, so measure it.
tamanho = len(juntar)
print(tamanho, "\n")
""" showing the character at a given position of the string """
# works much like an array/vector
print(A[0], B[2])
print(juntar[0:5], "\n")  # the colon means "up to"
""" lower() -> convert everything to lowercase """
""" upper() -> convert everything to uppercase """
A = A.upper()
B = B.lower()
print(A, B)
# these can also be used directly inside print without changing the variable
""" strip() -> removes special characters AT THE START and AT THE END """
C = "\nLucas "
print(len(C), C.strip(), len(C.strip()))
""" split() -> converts into a list """
# splitting defaults to the " " (space) character
D = A + B
print(D.split())
# the separator can be something else
# by passing it as an argument to the function
E = "O rato roeu a roupa do rei de roma"
print(E.split("r"), "\n")
" find() -> locate a letter/term inside a string"
print(E.find("rei"))
# example of a practical use
busca = E.find("rei")
print(E[busca: ], "\n")
# if find() does not locate the argument, it returns -1
""" replace() -> searches for a value and swaps it for another """
# only works on STRING values
F = E.replace("o rei", "a rainha")  # (what to replace, replacement value)
print(F)
print(E.replace("O rato", "A ratasana"), "\n")
from onegov.ballot import ComplexVote
from onegov.ballot import Election
from onegov.ballot import ElectionCompound
from onegov.ballot import ElectionCompoundPart
from onegov.ballot import ProporzElection
from onegov.ballot import Vote
from onegov.core.utils import Bunch
from onegov.election_day.layouts.detail import DetailLayout
from onegov.election_day.models import Principal
from tests.onegov.election_day.common import DummyRequest
from textwrap import dedent
def test_hidden_tabs_mixin():
    """DetailLayout.hide_tab must honour the principal's hidden_elements
    configuration for every supported model type."""
    principal = Principal.from_yaml(dedent("""
        name: Kanton St. Gallen
        canton: sg
        hidden_elements:
          tabs:
            elections:
              - party-panachage
            elections-part:
              - party-strengths
            election:
              - statistics
            vote:
              - districts
    """))
    request = DummyRequest()
    request.app.principal = principal

    cases = [
        (Vote(), 'districts'),
        (ComplexVote(), 'districts'),
        (Election(), 'statistics'),
        (ProporzElection(), 'statistics'),
        (ElectionCompound(), 'party-panachage'),
        (ElectionCompoundPart(Bunch(id='1', date=None), 'x', 'y'),
         'party-strengths'),
    ]
    for model, hidden_tab in cases:
        assert DetailLayout(model, request).hide_tab(hidden_tab) is True
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 30 21:17:26 2021
@author: arthur
"""
# Key idea: backtracking (solved here by iterative subset doubling)
class Solution(object):
    """LeetCode 78 'Subsets': enumerate all subsets by iterative doubling
    (a breadth-first formulation of backtracking)."""

    def subsets(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]

        For each value, every existing subset is kept and also extended by
        that value, doubling the collection each round.

        BUG FIX: removed the leftover debug print inside the inner loop.
        Also simplified the redundant if/else: `subset + [value]` already
        equals `[value]` when subset is the empty list.
        """
        collected = [[]]
        for value in nums:
            doubled = []
            for subset in collected:
                doubled.append(subset)
                doubled.append(subset + [value])
            collected = doubled
        return collected
from psycopg2 import Error, connect
def create_connection():
    """Create a PostgreSQL connection (psycopg2) from the connection string
    stored in the local file ``connection_string``.

    :return: an open connection object, or None on failure.

    NOTE(review): the original docstring wrongly described an SQLite db_file.
    Also note that `except Error` only catches psycopg2 errors -- an IOError
    from the open() below would propagate uncaught; confirm whether that is
    intended before changing it.
    """
    try:
        with open('connection_string', 'rt') as f:
            connection_string = f.read()
        conn = connect(connection_string)
        print("Conn : ", conn)
        return conn
    except Error as e:
        print(e)
        return None
def login(umail):
    """Fetch the user row matching *umail* for login checking.

    :param umail: mail address typed into the login form.
    :return: dict with user_id/lastname/firstname/mail/password, or None when
        the query fails or no user matches.

    BUG FIX: removed the stray no-op ``print`` expression, and replaced
    ``fetchall()[0]`` (IndexError on an unknown mail) with fetchone() plus an
    explicit None check.
    """
    with create_connection() as conn:
        cur = conn.cursor()
        try:
            cur.execute("SELECT * FROM utilisateur WHERE umail = %s", (umail,))
        except Error as e:
            print("login_query : ", e)
            return None
        data = cur.fetchone()
        if data is None:
            return None
        user = {'user_id': data[0], 'lastname': data[1], 'firstname': data[2],
                'mail': data[3], 'password': data[4]}
        print(user)
        return user
def get_user_by_id(user_id):
    """Fetch a user row by primary key.

    :param user_id: id of the user to load.
    :return: dict with user_id/lastname/firstname/mail/password, or None when
        the query fails or no user matches.

    BUG FIX: removed the stray no-op ``print`` expression, replaced
    ``fetchall()[0]`` (IndexError on a missing id) with fetchone() plus a
    None check, and corrected the copy-pasted "login_query" error label.
    """
    with create_connection() as conn:
        cur = conn.cursor()
        try:
            cur.execute(
                "SELECT * FROM utilisateur WHERE user_id = %s", (user_id,))
        except Error as e:
            print("get_user_by_id : ", e)
            return None
        data = cur.fetchone()
        if data is None:
            return None
        user = {'user_id': data[0], 'lastname': data[1], 'firstname': data[2],
                'mail': data[3], 'password': data[4]}
        print(user)
        return user
def get_client_by_id(client_id):
    """Fetch a client row by primary key and shape it into a dict.

    :param client_id: id of the client to load.
    :return: client dict, or None when the query fails or no client matches.

    BUG FIX: removed the stray no-op ``print`` expression, replaced
    ``fetchall()[0]`` (IndexError on a missing id) with fetchone() plus a
    None check, and corrected the copy-pasted "get_user_by_id" error label.
    """
    with create_connection() as conn:
        cur = conn.cursor()
        try:
            cur.execute(
                "SELECT * FROM client WHERE client_id = %s", (client_id,))
        except Error as e:
            print("get_client_by_id : ", e)
            return None
        data = cur.fetchone()
        if data is None:
            return None
        client = {'client_id': data[0], 'name': data[1], 'address': data[2],
                  'cp': data[3], 'city': data[4], 'country': data[5],
                  'phone': data[6], 'mail': data[7], 'id_user': data[8]}
        print(client)
        return client
def get_id_client_by_client_name(client_name):
    """Return the client_id for the client named *client_name*.

    :return: the id, or None when the query fails or no client matches.

    BUG FIX: removed the stray no-op ``print`` expression, replaced
    ``fetchall()[0]`` (IndexError on an unknown name) with fetchone() plus a
    None check, and corrected the copy-pasted "get_user_by_id" error label.
    """
    with create_connection() as conn:
        cur = conn.cursor()
        try:
            cur.execute(
                "SELECT client_id FROM client WHERE c_name = %s", (client_name,))
        except Error as e:
            print("get_id_client_by_client_name : ", e)
            return None
        row = cur.fetchone()
        if row is None:
            return None
        print(row[0])
        return row[0]
def get_needs_from_client(client_id):
    """Return the id of the highest-priority active need of a client
    (first row of the status/date ordering below).

    :return: {'need_id': ...}, or None when the query fails or the client
        has no active need.

    BUG FIX: after a failed query the original fell through and crashed with
    NameError on the undefined ``data``; it also raised IndexError via
    ``fetchall()[0]`` when no rows matched.
    """
    with create_connection() as conn:
        cur = conn.cursor()
        try:
            cur.execute("SELECT need_id FROM need "
                        "JOIN client ON (client.client_id = need.client_id) "
                        "WHERE need.client_id = %s "
                        "AND active = TRUE "
                        "ORDER BY need.status_id ASC, latest_date DESC", (client_id,))
        except Error as e:
            print("get_all_needs_query : ", e)
            return None
        row = cur.fetchone()
        if row is None:
            return None
        need = {'need_id': row[0]}
        print(need)
        return need
def get_needs_from_user(id_user, args=None):
    """Return all active needs of a user, optionally filtered, sorted by
    status (OPEN first) and latest date.

    :param id_user: owner of the needs.
    :param args: optional filter dict passed to get_filters().
    :return needs: list of dicts (possibly empty), or None on query failure.

    SECURITY NOTE(review): ``filters`` is a raw SQL fragment concatenated
    into the statement below -- it must only ever come from get_filters(),
    which is responsible for sanitizing the values.
    """
    filters = get_filters(args)
    with create_connection() as conn:
        cur = conn.cursor()
        try:
            cur.execute("SELECT c_name, title, latest_date, label_st, need_id, creation_date FROM need "
                        "JOIN status ON (need.status_id = status.status_id) "
                        "JOIN client ON (client.client_id = need.client_id) "
                        "JOIN utilisateur ON (utilisateur.user_id = need.user_id) "
                        "WHERE need.user_id = %s" + filters + " AND active = TRUE "
                        "ORDER BY need.status_id ASC, latest_date DESC", (id_user,))
            datalist = cur.fetchall()
        except Error as e:
            print("get_all_needs_query : ", e)
            return None
        finally:
            # always release the cursor, even on the error path
            cur.close()
        # Shape each row into the dict the callers/templates expect.
        needs = [{
            'client_name': row[0],
            'title': row[1],
            'latest_date': row[2],
            'label_st': row[3],
            'need_id': row[4],
            'creation_date': row[5]} for row in datalist]
        return needs
def get_filters(args):
    """Build an SQL WHERE-clause fragment from the request's filter dict.

    :param args: None, or a dict with a required 'states' key (list of status
        ids) and optional 'min_date'/'max_date'/'client_name'/'title' keys.
    :return: a string starting with " AND ..." for each active filter, or "".

    SECURITY FIX: the caller concatenates this fragment straight into SQL, so
    the interpolated values are now sanitized -- state ids must be purely
    numeric, and single quotes in the free-text values are doubled so they
    cannot terminate the SQL string literal.  (A parameterized query would be
    the proper fix but would change this function's interface.)
    """
    def esc(value):
        # double any single quote so it cannot break out of the SQL literal
        return str(value).replace("'", "''")

    filters = ""
    if args is None:
        return filters
    if args['states']:
        # keep only well-formed integer ids to block SQL injection
        states = [str(arg) for arg in args['states']
                  if arg is not None and str(arg).isdigit()]
        if len(states) != 0:
            filters += " AND need.status_id IN ({})".format(", ".join(states))
    if args.get('min_date'):
        filters += " AND creation_date >= '{}'".format(esc(args['min_date']))
    if args.get('max_date'):
        filters += " AND latest_date <= '{}'".format(esc(args['max_date']))
    if args.get('client_name'):
        filters += " AND c_name = '{}'".format(esc(args['client_name']))
    if args.get('title'):
        filters += " AND title ILIKE '%{}%'".format(esc(args['title']))
    return filters
def get_need_by_id(need_id):
    """Fetch one need row by id and shape it into the display dict.

    :return need: the need dict, or None when the query fails or no such
        need exists.

    BUG FIX: after a failed query the original fell through instead of
    returning, and ``fetchall()[0]`` raised IndexError for a missing id;
    fetchone() plus explicit None returns fix both.
    """
    with create_connection() as conn:
        cur = conn.cursor()
        try:
            cur.execute("SELECT * FROM need WHERE need_id = %s", (need_id,))
        except Error as e:
            print("select_need_query : ", e)
            return None
        data = cur.fetchone()
        if data is None:
            return None
        need = {'need_id': data[0], 'title': data[1], 'description': data[2],
                'creation_date': data[3], 'latest_date': data[4], 'month_duration': data[5],
                'day_duration': data[6], 'price_ht': data[7],
                'consultant_name': data[8], 'client_id': data[9],
                'status_id': data[10], 'active': data[11], 'user_id': data[12], 'key_factors': data[13]}
        print(need)
        return need
def get_clients():
    """Return every (client_id, c_name) pair, or [] when the query fails."""
    with create_connection() as conn:
        cursor = conn.cursor()
        try:
            cursor.execute("SELECT client_id, c_name FROM client")
            rows = cursor.fetchall()
        except Error as err:
            print("select_clients : ", err)
            return []
        return rows
def insert_need(new_need):
    """Insert *new_need* (a dict of column name -> value) into `need`.

    Side effects: mutates the passed dict, setting new_need['need_id'] to the
    allocated id and new_need['active'] to True.
    :return: the new id, or None on failure.
    """
    with create_connection() as conn:
        cur = conn.cursor()
        try:
            # NOTE(review): "current max + 1" id allocation is racy under
            # concurrent inserts; a SERIAL/sequence column would be safer.
            cur.execute(
                "SELECT need_id FROM need ORDER BY need_id DESC LIMIT 1")
            max_id = [int(record[0]) for record in cur.fetchall()][0] + 1
            new_need['need_id'] = max_id
            new_need['active'] = True
            print("MAX : ", max_id)
            # Build "INSERT INTO need (<cols>) VALUES (%s, ...)"; the values
            # go through placeholders, but the column names themselves are
            # interpolated -- callers must only pass trusted dict keys.
            inserter = [(str(k), '%s', str(v)) for k, v in new_need.items()]
            fields, place_holders, values = zip(*inserter)
            cur.execute("INSERT INTO need (" +
                        ", ".join(fields) +
                        ") VALUES (" +
                        ", ".join(place_holders) +
                        ")", tuple(values))
        except Error as e:
            print("insert_need_query : ", e)
            return None
    return max_id
def update_need(need_id, description, latest_date, month_duration,
                day_duration, price_ht, consultant_name, status_id, key_factors):
    """Overwrite the editable columns of an existing need.

    Returns None on failure (and implicitly None on success as well)."""
    with create_connection() as conn:
        cursor = conn.cursor()
        print("PASSE PAR LE UPDATE")
        statement = ("UPDATE need SET "
                     "description = %s ,"
                     "latest_date = %s,"
                     "month_duration = %s, "
                     "day_duration = %s, "
                     "price_ht = %s, "
                     "consultant_name = %s, "
                     "status_id = %s, "
                     "key_factors = %s "
                     "WHERE need_id = %s")
        params = (description, latest_date, month_duration, day_duration,
                  price_ht, consultant_name, status_id, key_factors, need_id)
        try:
            cursor.execute(statement, params)
        except Error as err:
            print("update_need : ", err)
            return None
def delete_need(need_id):
    """Soft-delete a need: flip its `active` flag to FALSE instead of
    removing the row.

    :param need_id: id of the need to deactivate.
    """
    print("PASSE PAR DELETE NEED")
    statement = "UPDATE need SET active = FALSE where need_id = %s"
    with create_connection() as conn:
        cursor = conn.cursor()
        try:
            cursor.execute(statement, (need_id,))
        except Error as err:
            print("delete_need_query : ", err)
            return None
if __name__ == "__main__":
pass
|
from cgi import FieldStorage
from io import BytesIO
from wtforms import EmailField, TextAreaField
from onegov.agency import _
from onegov.agency.collections import ExtendedAgencyCollection
from onegov.agency.models import ExtendedAgency
from onegov.agency.utils import handle_empty_p_tags
from onegov.core.security import Private
from onegov.core.utils import linkify, ensure_scheme
from onegov.form import Form
from onegov.form.fields import ChosenSelectField, HtmlField
from onegov.form.fields import MultiCheckboxField
from onegov.form.fields import UploadField
from onegov.form.validators import FileSizeLimit
from onegov.form.validators import WhitelistedMimeType
from onegov.gis import CoordinatesField
from sqlalchemy import func
from wtforms.fields import StringField
from wtforms.validators import InputRequired
class ExtendedAgencyForm(Form):
    """ Form to edit agencies. """

    # Only the title is mandatory; all other attributes are optional.
    title = StringField(
        label=_("Title"),
        validators=[
            InputRequired()
        ],
    )
    portrait = HtmlField(
        label=_("Portrait"),
        render_kw={'rows': 10}
    )
    location_address = TextAreaField(
        label=_("Location address"),
        render_kw={'rows': 2},
    )
    location_code_city = StringField(
        label=_("Location Code and City")
    )
    postal_address = TextAreaField(
        label=_("Postal address"),
        render_kw={'rows': 2},
    )
    postal_code_city = StringField(label=_("Postal Code and City"))
    phone = StringField(label=_("Phone"))
    phone_direct = StringField(label=_("Alternate Phone Number / Fax"))
    email = EmailField(label=_("E-Mail"))
    website = StringField(label=_("Website"), filters=(ensure_scheme, ))
    opening_hours = TextAreaField(
        label=_("Opening hours"),
        render_kw={'rows': 5},
    )
    # Organigram uploads are limited to JPEG/PNG of at most 1 MB.
    organigram = UploadField(
        label=_("Organigram"),
        validators=[
            WhitelistedMimeType({
                'image/jpeg',
                'image/png',
            }),
            FileSizeLimit(1 * 1024 * 1024)
        ]
    )
    coordinates = CoordinatesField(
        label=_('Location'),
        description=_(
            'Search for the exact address to set a marker. The zoom of '
            'the map will be saved as well.'
        ),
        fieldset=_("Map"),
        render_kw={'data-map-type': 'marker'},
    )
    # Which membership/person attributes end up in the PDF export;
    # the selection is reorderable in the UI (sortable-multi-checkbox).
    export_fields = MultiCheckboxField(
        label=_("Fields to include for each membership"),
        choices=[
            ('membership.title', _("Membership: Title")),
            ('membership.since', _("Membership: Since")),
            ('membership.addition', _("Membership: Addition")),
            ('person.title', _("Person: Title")),
            ('person.function', _("Person: Function")),
            ('person.last_name', _("Person: Last Name")),
            ('person.first_name', _("Person: First Name")),
            ('person.born', _("Person: Born")),
            ('person.academic_title', _("Person: Academic Title")),
            ('person.profession', _("Person: Profession")),
            ('person.location_address', _("Person: Location Address")),
            ('person.location_code_city', _("Person: Location Code and City")),
            ('person.postal_address', _("Person: Postal Address")),
            ('person.postal_code_city', _("Person: Postal Code and City")),
            ('person.political_party', _("Person: Political Party")),
            ('person.parliamentary_group', _("Person: Parliamentary Group")),
            ('person.phone', _("Person: Phone")),
            ('person.phone_direct', _("Person: Direct Phone")),
        ],
        default=['membership.title', 'person.title'],
        fieldset=_("PDF Export"),
        render_kw={'class_': 'sortable-multi-checkbox'}
    )

    def on_request(self):
        """Include the JS asset used to reorder the export fields."""
        self.request.include('sortable-multi-checkbox')

    def get_useful_data(self):
        """Return form data as a dict; the organigram upload and the
        linkified portrait replace their raw form values."""
        exclude = {'csrf_token', 'organigram'}
        result = super(ExtendedAgencyForm, self).get_useful_data(exclude)
        if self.organigram.data:
            result['organigram_file'] = self.organigram.file
        if self.portrait.data:
            result['portrait'] = linkify(self.portrait.data, escape=False)
        return result

    def update_model(self, model):
        """Write the form values onto the given agency model."""
        model.title = self.title.data
        model.portrait = handle_empty_p_tags(
            linkify(self.portrait.data, escape=False)
        )
        model.location_address = self.location_address.data
        model.location_code_city = self.location_code_city.data
        model.postal_address = self.postal_address.data
        model.postal_code_city = self.postal_code_city.data
        model.phone = self.phone.data
        model.phone_direct = self.phone_direct.data
        model.email = self.email.data
        model.website = self.website.data
        model.opening_hours = self.opening_hours.data
        model.export_fields = self.export_fields.data
        # The upload widget reports the user's intent via ``action``.
        if self.organigram.action == 'delete':
            del model.organigram
        if self.organigram.action == 'replace':
            if self.organigram.data:
                model.organigram_file = self.organigram.file
        model.coordinates = self.coordinates.data
        # These fields only exist when a subclass/extension added them.
        if hasattr(self, 'access'):
            model.access = self.access.data
        if hasattr(self, 'publication_start'):
            model.publication_start = self.publication_start.data
        if hasattr(self, 'publication_end'):
            model.publication_end = self.publication_end.data

    def reorder_export_fields(self):
        """Move the selected export fields to the top of the choices,
        keeping the order in which they were selected."""
        titles = dict(self.export_fields.choices)
        self.export_fields.choices = [
            (choice, titles[choice]) for choice in self.export_fields.data
        ] + [
            choice for choice in self.export_fields.choices
            if choice[0] not in self.export_fields.data
        ]

    def apply_model(self, model):
        """Populate the form from the given agency model."""
        self.title.data = model.title
        self.portrait.data = model.portrait
        self.location_address.data = model.location_address
        self.location_code_city.data = model.location_code_city
        self.postal_address.data = model.postal_address
        self.postal_code_city.data = model.postal_code_city
        self.phone.data = model.phone
        self.phone_direct.data = model.phone_direct
        self.email.data = model.email
        self.website.data = model.website
        self.opening_hours.data = model.opening_hours
        self.export_fields.data = model.export_fields
        if model.organigram_file:
            # Re-wrap the stored organigram as a FieldStorage so the
            # upload widget can process it like a fresh upload.
            fs = FieldStorage()
            fs.file = BytesIO(model.organigram_file.read())
            fs.type = model.organigram_file.content_type
            fs.filename = model.organigram_file.filename
            self.organigram.data = self.organigram.process_fieldstorage(fs)
        self.coordinates.data = model.coordinates
        if hasattr(self, 'access'):
            self.access.data = model.access
        if hasattr(self, 'publication_start'):
            self.publication_start.process_data(model.publication_start)
        if hasattr(self, 'publication_end'):
            self.publication_end.process_data(model.publication_end)
        self.reorder_export_fields()
class MoveAgencyForm(Form):
    """ Form to move an agency. """

    parent_id = ChosenSelectField(
        label=_("Destination"),
        choices=[],
        validators=[
            InputRequired()
        ]
    )

    def on_request(self):
        """Fill the destination dropdown with all agencies the user may
        edit, sorted by unaccented title, plus a root entry if allowed."""
        self.request.include('common')
        self.request.include('chosen')
        agencies = ExtendedAgencyCollection(self.request.session)
        self.parent_id.choices = [
            (str(agency.id), agency.title)
            for agency in agencies.query().order_by(None).order_by(
                func.unaccent(ExtendedAgency.title)
            )
            if self.request.has_permission(agency, Private)
        ]
        if self.request.has_permission(agencies, Private):
            self.parent_id.choices.insert(
                0, ('root', self.request.translate(_("- Root -")))
            )

    def ensure_valid_parent(self):
        """
        As a new destination (parent page) every menu item is valid except
        yourself. You cannot assign yourself as the new destination
        :return: bool
        """
        if self.parent_id.data and self.parent_id.data.isdigit():
            new_parent_id = int(self.parent_id.data)
            # prevent selecting yourself as new parent
            if self.model.id == new_parent_id:
                self.parent_id.errors.append(
                    _("Invalid destination selected"))
                return False
        return True

    def update_model(self, model):
        """Re-parent the agency; a non-numeric choice ('root') moves it
        to the top level. The name is made unique among new siblings."""
        session = self.request.session
        agencies = ExtendedAgencyCollection(session)
        new_parent_id = None
        new_parent = None
        if self.parent_id.data and self.parent_id.data.isdigit():
            new_parent_id = int(self.parent_id.data)
            new_parent = agencies.by_id(new_parent_id)
        model.name = agencies.get_unique_child_name(model.title, new_parent)
        model.parent_id = new_parent_id

    def apply_model(self, model):
        """Drop the current parent (or the root entry) and the whole
        subtree of ``model`` from the destination choices."""
        def remove(item):
            item = (str(item.id), item.title)
            if item in self.parent_id.choices:
                self.parent_id.choices.remove(item)

        def remove_with_children(item):
            remove(item)
            for child in item.children:
                remove_with_children(child)

        if model.parent:
            remove(model.parent)
        else:
            # Top-level agency: the '- Root -' entry is its "parent".
            self.parent_id.choices.pop(0)
        remove_with_children(model)
|
import os
from typing import Tuple
import numpy as np
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import MaxPool2D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
# NOTE(review): hard-coded, user-specific path — consider an env var.
LOGS_DIR = os.path.abspath("C:/Users/Jan/Dropbox/_Coding/UdemyTF/logs")
# makedirs(exist_ok=True) also creates missing parents and avoids the
# check-then-create race of `if not exists: os.mkdir(...)`.
os.makedirs(LOGS_DIR, exist_ok=True)
MODEL_LOG_DIR = os.path.join(LOGS_DIR, "mnist_cnn4")
def prepare_dataset(num_classes: int) -> tuple:
    """Load MNIST and return ((x_train, y_train), (x_test, y_test)).

    Images become float32 with a trailing channel axis; labels become
    one-hot float32 vectors of length ``num_classes``.
    """
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = np.expand_dims(x_train.astype(np.float32), axis=-1)
    x_test = np.expand_dims(x_test.astype(np.float32), axis=-1)
    y_train = to_categorical(y_train, num_classes=num_classes, dtype=np.float32)
    y_test = to_categorical(y_test, num_classes=num_classes, dtype=np.float32)
    return (x_train, y_train), (x_test, y_test)
def build_model(img_shape: Tuple[int, int, int], num_classes: int) -> Model:
    """Build a small VGG-style CNN: two (Conv-ReLU-Conv-ReLU-Pool)
    blocks with 32 then 64 filters, followed by a softmax head."""
    input_img = Input(shape=img_shape)
    x = input_img
    for n_filters in (32, 64):
        for _ in range(2):
            x = Conv2D(filters=n_filters, kernel_size=3, padding="same")(x)
            x = Activation("relu")(x)
        x = MaxPool2D()(x)
    x = Flatten()(x)
    x = Dense(units=num_classes)(x)
    y_pred = Activation("softmax")(x)
    model = Model(inputs=[input_img], outputs=[y_pred])
    model.summary()
    return model
if __name__ == "__main__":
    img_shape = (28, 28, 1)
    num_classes = 10
    (x_train, y_train), (x_test, y_test) = prepare_dataset(num_classes)
    model = build_model(img_shape, num_classes)
    model.compile(
        loss="categorical_crossentropy",
        optimizer=Adam(learning_rate=0.0005),
        metrics=["accuracy"]
    )
    # Logs metrics, weight histograms and the graph for TensorBoard.
    tb_callback = TensorBoard(
        log_dir=MODEL_LOG_DIR,
        histogram_freq=1,
        write_graph=True
    )
    # Training/evaluation intentionally left disabled; uncomment to run.
    # model.fit(
    #     x=x_train,
    #     y=y_train,
    #     epochs=40,
    #     batch_size=128,
    #     verbose=1,
    #     validation_data=(x_test, y_test),
    #     callbacks=[tb_callback]
    # )
    # scores = model.evaluate(
    #     x=x_test,
    #     y=y_test,
    #     verbose=0
    # )
    # print(scores)
|
# -*- coding: utf-8 -*-
#rnn from scratch by Yijun D.
import numpy as np
# Character-level corpus: the vocabulary is every distinct character
# appearing in kafka.txt.
data = open('kafka.txt','r').read()
chars = list(set(data))
data_size,vocab_size = len(data), len(chars)
print ('data has %d chars, %d unique' % (data_size,vocab_size))
|
def add(p, q, r):
    """Return p + q + r."""
    total = p + q
    total += r
    return total
def add1(q, p, r):
    """Return p - q + r; note the first positional argument is subtracted."""
    result = -q
    result += p
    result += r
    return result
# forwarding function
def add2(p, q, r):
    # Delegates to add1, which swaps the meaning of its first two
    # parameters — so the result is p - q + r, not a plain sum.
    return add1(p,q,r)
# Demonstrate argument unpacking: add(*d1) == add(1000, 20, 10) == 1030.
d1 = [1000, 20, 10]
s = add(*d1)
print(s)
|
def main():
    # Read a point from stdin and report whether it falls inside one of
    # two hard-coded regions ("dentro" = inside, "fora" = outside).
    x = float(input("Coordenada x: "))
    y = float(input("Coordenada y: "))
    if 1 <= y <= 2 and -3 <= x <= 3:
        print("dentro")
    # NOTE(review): this clause mixes axes — "6 <= x <= 7" sits next to
    # "4 <= y <= 5"; possibly "6 <= y <= 7" was intended. Confirm
    # against the exercise statement.
    elif (4 <= y <= 5 or 6 <= x <= 7) and ( -4 <= x <= -3 or -2 <= x <= -1 or 1 <= x <= 2 or 3 <= x <= 4):
        print("dentro")
    else:
        print("fora")
#-----------------------------------------------------
if __name__ == '__main__': # entry point: call the main function
    main()
|
#import sys
#input = sys.stdin.readline
def main():
    """Read "H M" from stdin and print the minimal angle between the
    hour and minute hands of an analogue clock."""
    hours, minutes = map(int, input().split())
    hours %= 12
    minutes /= 60            # minutes as a fraction of an hour
    hours += minutes
    hours /= 12              # hour hand as a fraction of a full turn
    hours *= 360             # hour-hand angle in degrees
    minutes *= 360           # minute-hand angle in degrees
    diff = abs(hours - minutes)
    print(min(diff, 360 - diff))


if __name__ == '__main__':
    main()
|
def get_user_ip(request):
    """Return the client IP, preferring proxy-set headers over the
    direct peer address."""
    for header in ("X-Forwarded-For", "X-Real-IP"):
        value = request.headers.get(header)
        if value:
            return value
    return request.remote_addr
|
class Copy:
    """Build step that copies source paths into a destination; the
    destination string may carry annotations as "dst:mode" or
    "dst:mode:owner"."""

    def __init__(self, *args):
        # All arguments but the last are sources; the last is the
        # (possibly ":"-annotated) destination.
        self.src = args[:-1]
        self.dst = args[-1]

    def init_build(self, script):
        # Split mode/owner annotations off self.dst.
        count = self.dst.count(':')
        self.mode = None
        self.owner = None
        if count == 2:
            self.dst, self.mode, self.owner = self.dst.split(':')
        elif count == 1:
            # "dst:mode" — owner falls back to the script's user variable.
            self.dst, self.mode = self.dst.split(':')
            self.owner = script.variable('user')

    def build(self, script):
        # NOTE(review): mkdir targets self.dst while the cp/chmod/chown
        # below target $mnt{self.dst}; it also mixes script.run and
        # script.append — confirm both are intentional.
        script.run(f'sudo mkdir -p {self.dst}')
        for item in self.src:
            script.append(f'cp -a {item} $mnt{self.dst}')
        if self.mode:
            script.run(f'sudo chmod {self.mode} $mnt{self.dst}')
        if self.owner:
            script.run(f'sudo chown -R {self.owner} $mnt{self.dst}')
|
import cv2
from darkflow.net.build import TFNet
import numpy as np
import time
import tensorflow as tf
# Let TensorFlow grow GPU memory on demand instead of grabbing it all.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
# YOLOv2 settings: detections below 0.3 confidence are dropped and the
# whole GPU is made available to darkflow.
options = {
    'model':'C:/Users/LENOVO/Anaconda3/darkflow-master/cfg/yolo.cfg',
    'load':'C:/Users/LENOVO/Desktop/project/darkflow-master/darkflow-master/bin/yolov2.weights',
    'threshold':0.3,
    'gpu': 1.0
}
tfnet = TFNet(options) #print modlw arch
count=0
capture = cv2.VideoCapture("C:\\Users\\LENOVO\\Desktop\\project\\mansi\\video.mp4")
# Five random BGR colors, one per drawn detection box.
colors = [tuple(255 * np.random.rand(3)) for i in range(5)]
#it returns a tuple containing 5 (R,G,B) sets
print(colors)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
# NOTE(review): the writer is opened for 1500x800 frames but receives
# 640x480 resized frames below — confirm the output file plays.
out11 = cv2.VideoWriter('C:\\Users\\LENOVO\\Desktop\\project\\mansi\\output2.mp4',fourcc, 20.0, (1500,800))
#This time we create a VideoWriter object. We should specify the output file name (eg: output.avi).
#Then we should specify the FourCC code (details in next paragraph). Then number of frames per
#second (fps) and frame size should be passed. And last one is isColor flag. If it is True,
#encoder expect color frame, otherwise it works with grayscale frame.
count=0
while (capture.isOpened()):
    stime = time.time()
    ret, frame = capture.read()
    if ret:
        results = tfnet.return_predict(frame)
        # zip() with colors limits drawing to at most five detections.
        for color, result in zip(colors, results):
            tl = (result['topleft']['x'], result['topleft']['y'])
            br = (result['bottomright']['x'], result['bottomright']['y'])
            label = result['label']
            frame = cv2.rectangle(frame, tl, br, color, 7)
            frame = cv2.putText(frame, label, tl, cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2)
        resize=cv2.resize(frame,(640,480))
        cv2.imshow('video',resize)
        out11.write(resize)
        print('FPS {:.1f}'.format(1 / (time.time() - stime)))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        # End of stream: release capture and exit the loop.
        capture.release()
        cv2.destroyAllWindows()
        break
print("finished")
out11.release()
cv2.destroyAllWindows()
# -*-coding:utf-8-*-
# AUTHOR:tyltr
# TIME :2018/11/27
import time
def get_timestamp():
    """Return the current Unix timestamp in milliseconds."""
    return int(time.time() * 1000)


if __name__ == '__main__':
    print(get_timestamp())
|
import sys,os,argparse,time
import copy
import numpy as np
import importlib
import torch
import easydict
import utils
sys.stdout.flush()
# Wall-clock start of the whole run (was redundantly assigned twice).
tstart = time.time()
# Run configuration; the commented alternatives document other
# experiment/approach combinations that were tried.
args = easydict.EasyDict({
    "seed": 0,
    # "experiment": 'auto_ML',
    # "experiment": 'split_MNIST',
    "experiment": 'cifar',
    # "experiment": 'mixture',
    # "approach": 'hat_hebb',
    # "approach": 'hebb_bibn_mask',
    # "approach": 'hebb_mask',
    "approach": 'hat_con',
    # "approach": 'joint-automl',
    "nepochs": 400,
    "lr": 0.2,
    "parameter": '',
    "output": 'output_incr_cifar_hat.txt',
    "model_dir": '../models/comarison_cifar_model'
})
if not os.path.isdir(args.model_dir):
    os.makedirs(args.model_dir)
# Default the output file from experiment/approach/seed when none was
# given. The original compared the whole args namespace to '' (always
# False) and would have clobbered args with a plain string.
if args.output == '':
    args.output = '../res/' + args.experiment + '_' + args.approach + '_' + str(args.seed) + '.txt'
print('=' * 100)
print('Arguments =')
for arg in vars(args):
    print('\t' + arg + ':', getattr(args, arg))
print('=' * 100)
########################################################################################################################
# Seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(args.seed)
else:
    print('[CUDA unavailable]'); sys.exit()
# Args -- Experiment: select the dataloader for the chosen benchmark.
if args.experiment == 'mnist2':
    from dataloaders import mnist2 as dataloader
elif args.experiment == 'pmnist':
    from dataloaders import pmnist as dataloader
elif args.experiment == 'cifar':
    from dataloaders import cifar as dataloader
elif args.experiment == 'mixture':
    from dataloaders import mixture as dataloader
elif args.experiment == 'auto_ML':
    from dataloaders import AutoML_loader as dataloader
elif args.experiment == 'split_MNIST':
    from dataloaders import split_MNIST as dataloader
# Args -- Approach: split_MNIST has dedicated variants of some approaches.
if args.experiment == 'split_MNIST' and args.approach == "hat":
    from approaches import hat as approach
elif args.experiment == 'split_MNIST' and args.approach == "hat_hebb":
    from approaches import hat_hebb_smnist as approach
elif args.experiment == 'split_MNIST' and args.approach == "hebb_bibn_mask":
    from approaches import hebb_mask_smnist as approach
else:
    if args.approach == 'random':
        from approaches import random as approach
    elif args.approach == 'sgd':
        from approaches import sgd as approach
    elif args.approach == 'sgd-restart':
        from approaches import sgd_restart as approach
    elif args.approach == 'sgd-frozen':
        from approaches import sgd_frozen as approach
    elif args.approach == 'lwf':
        from approaches import lwf as approach
    elif args.approach == 'lfl':
        from approaches import lfl as approach
    elif args.approach == 'ewc':
        from approaches import ewc as approach
    elif args.approach == 'imm-mean':
        from approaches import imm_mean as approach
    elif args.approach == 'imm-mode':
        from approaches import imm_mode as approach
    elif args.approach == 'progressive':
        from approaches import progressive as approach
    elif args.approach == 'pathnet':
        from approaches import pathnet as approach
    elif args.approach == 'hat-test':
        from approaches import hat_test as approach
    elif args.approach == 'hat-automl':
        from approaches import hat_autoML as approach
    elif args.approach == 'hat':
        from approaches import hat as approach
    elif args.approach == 'joint' or args.approach == 'joint-automl':
        from approaches import joint as approach
    elif args.approach == 'hebb_mask':
        from approaches import hebb_mask as approach
    elif args.approach == 'hat_con':
        from approaches import hat_con as approach
# Args -- Network: pick a backbone matching experiment and approach.
if args.experiment == 'mnist2' or args.experiment == 'pmnist':
    if args.approach == 'hat' or args.approach == 'hat-test':
        from networks import mlp_hat as network
    else:
        from networks import mlp as network
elif args.experiment == 'split_MNIST':
    if args.approach == 'hat' or args.approach == 'hat-test':
        from networks import hat_smnist as network
    elif args.approach == 'sgd':
        from networks import smnist_mlp as network
    elif args.approach == 'hat_hebb':
        from networks import split_mnist_hat_hebb as network
    elif args.approach == 'hebb_bibn_mask':
        from networks import smnist_hebb_mask_net as network
else:
    if args.approach == 'lfl':
        from networks import alexnet_lfl as network
    elif args.approach == 'hat':
        from networks import alexnet_hat as network
    elif args.approach == 'progressive':
        from networks import alexnet_progressive as network
    elif args.approach == 'pathnet':
        from networks import alexnet_pathnet as network
    elif args.approach == 'hat-test':
        from networks import alexnet_hat_test as network
    elif args.approach == 'hat-automl':
        from networks import alexnet_hat_autoML as network
    elif args.approach == 'joint-automl':
        from networks import net_autoML as network
    elif args.approach == 'hebb_mask':
        from networks import alexnet_hebbs_mask as network
    elif args.approach == 'hat_con':
        from networks import alexnet_hat_con as network
    else:
        from networks import alexnet as network
########################################################################################################################
# Load
print('Load data...')
data,taskcla,inputsize=dataloader.get(seed=args.seed)
print('Input size =',inputsize,'\nTask info =',taskcla)
importlib.reload(network)
# Inits
print('Inits...')
net=network.Net(inputsize,taskcla).cuda()
utils.print_model_report(net)
#from importlib import reload
importlib.reload(approach)
appr=approach.Appr(net,nepochs=args.nepochs,lr=args.lr,args=args)
print(appr.criterion)
utils.print_optimizer_config(appr.optimizer)
print('-'*100)
# Loop tasks
# acc holds per-task test accuracies plus 2 timing columns and 5 extra
# columns (filled from `cap` below) appended after the task columns.
acc = np.zeros((len(taskcla), len(taskcla) + 2+5), dtype=np.float32)
lss = np.zeros((len(taskcla), len(taskcla)), dtype=np.float32)
ev_times_epoch_tasks=[]
ev_times_batch_tasks = []
for t, ncla in taskcla:
    # for t, ncla in taskcla:
    # net.initialZeroHebb()
    # t, ncla = taskcla[1]
    print('*' * 100)
    print('Task {:2d} ({:s})'.format(t, data[t]['name']))
    print('*' * 100)
    if args.approach == 'joint' or args.approach == 'joint-automl':
        # Get data. We do not put it to GPU
        print('joint')
        if t == 0:
            xtrain = data[t]['train']['x']
            ytrain = data[t]['train']['y']
            xvalid = data[t]['valid']['x']
            yvalid = data[t]['valid']['y']
            task_t = t * torch.ones(xtrain.size(0)).int()
            task_v = t * torch.ones(xvalid.size(0)).int()
            task = [task_t, task_v]
        else:
            # Joint training accumulates the data of every task so far.
            xtrain = torch.cat((xtrain, data[t]['train']['x']))
            ytrain = torch.cat((ytrain, data[t]['train']['y']))
            xvalid = torch.cat((xvalid, data[t]['valid']['x']))
            yvalid = torch.cat((yvalid, data[t]['valid']['y']))
            task_t = torch.cat((task_t, t * torch.ones(data[t]['train']['y'].size(0)).int()))
            task_v = torch.cat((task_v, t * torch.ones(data[t]['valid']['y'].size(0)).int()))
            task = [task_t, task_v]
    else:
        # Get data
        xtrain = data[t]['train']['x'].cuda()
        ytrain = data[t]['train']['y'].cuda()
        xvalid = data[t]['valid']['x'].cuda()
        yvalid = data[t]['valid']['y'].cuda()
        task = t
    # Train
    # _, ev_time_epoch = appr.train(task, xtrain, ytrain, xvalid, yvalid)
    ev_time_epoch, ev_time_batch, cap = appr.train(task, xtrain, ytrain, xvalid, yvalid)
    ev_times_epoch_tasks.append(ev_time_epoch)
    ev_times_batch_tasks.append(ev_time_batch)
    print('-' * 100)
    # cum_masks.append(mask)
    # masks.append(cum_mask)
    # Test on every task learned so far (measures forgetting).
    for u in range(t + 1):
        xtest = data[u]['test']['x'].cuda()
        ytest = data[u]['test']['y'].cuda()
        # test_loss,test_acc=appr.eval(u, xtest, ytest, appr.s[u], True)
        # test_loss,test_acc=appr.eval(u, xtest.data, ytest, net.max_plasiticity[u])
        test_loss, test_acc = appr.eval(u, xtest.data, ytest)#, appr.s[u])
        print('>>> Test on task {:2d} - {:15s}: loss={:.3f}, acc={:5.1f}% <<<'.format(u, data[u]['name'], test_loss,
                                                                                      100 * test_acc))
        acc[t, u] = test_acc
        lss[t, u] = test_loss
    # acc[t,len(taskcla)]=ev_time_epoch
    acc[t, len(taskcla) ] = ev_time_epoch
    acc[t, len(taskcla) +1] = ev_time_batch
    for i,c in enumerate(cap):
        acc[t, len(taskcla) + 1+i+1] = c
    # Save
    print('Save at ' + args.output)
    np.savetxt(args.output, acc, '%.4f')
# Save Model
# save_checkpoint(net.state_dict(), "_".join(str(x) for x in list(range(1, t+1))))
|
#import serial
import urllib2
import json
'''
# Serial port connection with baud rate of 9600
try:
ser = serial.Serial(/dev/ttyACM0,9600,timeout=1)
except:
ser = serial.Serial(/dev/ttyACM1,9600,timeout=1)
'''
# Flow-sensor totals; `liters` is seeded from the last known reading.
pulses = 0
liters = 581984
APIKEY="YrrR0K4MtS4gdEjXXGfRaNSnsWjCh" # Replace with your APIKEY
DEVICE = "NiravIOTPi@niravjoshi.niravjoshi" # Replace with the id_developer of your devices
class ClientAPIDataPush(object):
    """Minimal Ubidots REST client that POSTs JSON variable values."""

    api_url="http://things.ubidots.com/api/v1.6/devices/niravpiflowsensordata"

    def __init__(self, api_key=None, client_type=json):
        self.client_type = client_type
        self.api_key = api_key
        self.content_type = "application/json"
        # Ubidots authenticates requests via the X-Auth-Token header.
        self.headers = {
            "X-Auth-Token": self.api_key,
            "Content-Type": self.content_type,
        }

    def send_data(self, data):
        """Serialize ``data`` to JSON, POST it, and return the response."""
        self.data = json.dumps(data)
        post = urllib2.Request(ClientAPIDataPush.api_url, self.data, self.headers)
        self.response = urllib2.urlopen(post)
        return self.response
# Create the API client once and reuse it for every push.
client_ubidots = ClientAPIDataPush(APIKEY)
# NOTE(review): the serial-reading code is commented out, so the same
# hard-coded liters value is POSTed in a tight loop with no delay —
# confirm whether the sensor read (and a sleep) should be restored.
while True:
    # read line received
    #line = ser.readline().strip()
    #if line:
    #print (line)
    #remove and cleaning
    #if line.startswith("Litres"):
    #    piece = line.split(":")
    #if 2 == len(piece):
    #liters = int(piece[1].strip())
    print (liters)
    data = {"Timeslot": "100", "littersread": round(liters, 3)}
    client_ubidots_response = client_ubidots.send_data(data)
    print (client_ubidots_response)
|
# This solution works for all scenarios
# 4 minutes first time
# currently takes 2 minutes to run
#!python
import time
from pprint import pprint
from hashtable import HashTable
# import glob
import os
def load_data():
    """
    Returns a list of (prefix, price) tuples read from a file.
    """
    routes = []
    with open('route-costs-4.txt', 'r') as handle:
        for record in handle:
            prefix, raw_price = record.split(',')
            routes.append((prefix, raw_price.replace("\n", "")))
    return routes
def init_hashtable(route_costs):
    """
    Build and return a HashTable mapping each phone prefix (key) to
    its price (value).

    The unused ``output_list`` accumulator from the previous version
    was removed.
    """
    ht = HashTable(len(route_costs))
    for prefix, price in route_costs:
        ht.set(prefix, price)
    return ht
def is_prefix_match_and_get_price(ht, phone_num):
    """
    Shorten ``phone_num`` one digit at a time and return the stored
    price of the longest prefix present in ``ht``; return 0 when no
    prefix (down to the empty string) matches.
    """
    while phone_num:
        if ht.contains(phone_num):
            return ht.get(phone_num)
        phone_num = phone_num[:-1]
    return 0
def get_prices(phone_numbers, is_prefix_match_and_get_price, ht):
    """
    Look up the price for every phone number and append
    "number, price" lines to route-costs-3.txt.

    Returns the (currently always empty) price list, kept for
    interface compatibility with earlier versions that collected
    (number, price) pairs.
    """
    price_list = []
    # Open the output file once instead of re-opening it per number.
    with open('route-costs-3.txt', 'a') as f:
        for number in phone_numbers:
            price = is_prefix_match_and_get_price(ht, number)
            # One "number, price" pair per line.
            f.write("%s, %s \n" % (number, str(price)))
    return price_list
def load_phone_nums():
    """
    Returns a list of phone numbers.
    """
    numbers = []
    with open('../../project/data/phone-numbers-3.txt', 'r') as handle:
        for raw in handle:
            print(raw)
            numbers.append(raw.replace("\n", ""))
    return numbers
if __name__ == '__main__':
    # Time the full pipeline: load routes, build the table, price numbers.
    start = time.time()
    route_costs = load_data()
    ht = init_hashtable(route_costs)
    phone_numbers = load_phone_nums()
    price_list = get_prices(phone_numbers, is_prefix_match_and_get_price, ht)
    print(price_list)
    end = time.time()
    print(end - start)
|
import datetime
import os
import random
import sys
import xbmc
import xbmcaddon
import xbmcplugin
import api
import constants
import utils
from exceptions import ApiError
# Addon metadata resolved once at import time.
ADDON = xbmcaddon.Addon()
APPID = xbmcaddon.Addon().getAddonInfo("id")
NAME = xbmcaddon.Addon().getAddonInfo("name")
VERSION = xbmcaddon.Addon().getAddonInfo("version")
ICON = xbmcaddon.Addon().getAddonInfo("icon")
DATADIR=xbmc.translatePath( ADDON.getAddonInfo('profile') )
api_version = 383
# os.uname() is not available on Windows, so we make this optional.
try:
    uname = os.uname()
    os_string = ' (%s %s %s)' % (uname[0], uname[2], uname[4])
except AttributeError:
    os_string = ''
def get_config(key):
    """Read a plugin setting via the plugin handle in sys.argv[1]."""
    addon_handle = int(sys.argv[1])
    return xbmcplugin.getSetting(addon_handle, key)
def set_config(key, value):
    """Write a plugin setting via the plugin handle in sys.argv[1]."""
    addon_handle = int(sys.argv[1])
    return xbmcplugin.setSetting(addon_handle, key, value)
def set_setting(key, value):
    """Store a string addon setting."""
    return xbmcaddon.Addon(APPID).setSetting(key, value)
def set_setting_bool(key, value):
    """Store a boolean addon setting."""
    return xbmcaddon.Addon(APPID).setSettingBool(key, value)
def get_setting(key):
    """Read a string addon setting."""
    return xbmcaddon.Addon(APPID).getSetting(key)
def get_setting_bool(key):
    """Read a boolean addon setting."""
    return xbmcaddon.Addon(APPID).getSettingBool(key)
def get_unique_id():
    """Return the persisted device UID, generating and storing a new
    16-character lowercase-hex id (never all digits) on first use."""
    existing = get_setting(constants.UID)
    if existing is not None and existing != "":
        return existing
    alphabet = '0123456789' + 'abcdef'
    while True:
        candidate = ''.join(random.choice(alphabet) for _ in range(16))
        # Reject purely numeric ids, matching the original constraint.
        if not candidate.isdigit():
            break
    set_setting(constants.UID, candidate)
    return candidate
def showSettingsGui():
    """Open the addon settings dialog."""
    xbmcaddon.Addon().openSettings()
def showGuiNotification(message):
    """Show a 5-second Kodi notification with the addon name and icon."""
    xbmc.executebuiltin('Notification(%s, %s, %d, %s)' % (NAME, message, 5000, ICON))
def configCheck():
    """Open the settings dialog on the very first run."""
    if not get_setting_bool(constants.CONFIGURED):
        set_setting_bool(constants.CONFIGURED, True)
        showSettingsGui()
    return
def login_check():
    """Log in when no session exists; otherwise refresh the auth token
    once per day."""
    if not get_setting_bool(constants.LOGGED_IN):
        # Ask for credentials if they are missing
        if utils.isEmpty(get_setting(constants.USERNAME)) or utils.isEmpty(get_setting(constants.PASSWORD)):
            showSettingsGui()
            return
        # Log in and show a status notification
        try:
            api.login()
            showGuiNotification("Login successful")
        except ApiError as e:
            # Best-effort: surface the error but keep the addon running.
            showGuiNotification(str(e))
            utils.log(str(e))
            pass
        return
    # Periodically (1 day) force update token because it can expire
    t1 = utils.dateFromString(get_setting(constants.LAST_LOGIN))
    t2 = datetime.datetime.now()
    interval = 1
    update = abs(t2 - t1) > datetime.timedelta(days=interval)
    if update is True:
        utils.log("Refreshing Lattelecom login token")
        set_setting(constants.LAST_LOGIN, utils.stringFromDateNow())
        try:
            api.login(force=True)
        except ApiError as e:
            showGuiNotification(str(e))
            utils.log(str(e))
            pass
    else:
        utils.log("Lattelecom login token seems quite fresh.")
def logout():
    """Drop the stored auth token and mark the session as logged out."""
    utils.log("Clearing token")
    set_setting_bool(constants.LOGGED_IN, False)
    set_setting(constants.TOKEN, "")
    showGuiNotification("Authorization token cleared")
|
import matplotlib.pyplot as plot
def plot_graph(file, img_name):
    """Plot one precision/recall curve per query id found in ``file``
    and save the figure as ``img_name``.png."""
    precision_values = {}
    recall_values = {}
    query_values = {}
    with open(file) as content:
        data = content.read().splitlines()
    data = [s.split() for s in data]
    # Column 0 is the query id; columns 5 and 6 hold precision and
    # recall (kept as strings, as before; matplotlib converts them).
    for row in data:
        qid = int(row[0])
        precision_values.setdefault(qid, []).append(row[5])
        recall_values.setdefault(qid, []).append(row[5 + 1])
        query_values.setdefault(qid, []).append(row[5:])
    for qu_id in query_values:
        plot.plot(recall_values[qu_id], precision_values[qu_id])
    # Use the img_name parameter: the previous version referenced the
    # module-level ``graph_name`` global, which only worked by accident.
    plot.suptitle(img_name)
    plot.xlabel("Recall")
    plot.ylabel("Precision")
    plot.savefig(img_name + '.png')
    plot.clf()
# NOTE(review): raw_input is Python 2 only; the commented "print data"
# debugging lines above suggest the whole file predates Python 3.
file_name = raw_input("Enter the text file name: ")
graph_name = file_name
file_name = file_name + '.txt'
plot_graph(file_name,graph_name)
"""Example on how to read mask version and properties from a KNX actor."""
import asyncio
import sys
from typing import List
from xknx import XKNX
from xknx.core import PayloadReader
from xknx.telegram import IndividualAddress
from xknx.telegram.apci import (
DeviceDescriptorRead,
DeviceDescriptorResponse,
PropertyValueRead,
PropertyValueResponse,
)
async def main(argv: List[str]):
    """Connect and read information from a KNX device. Requires a System B device."""
    # Target device: first CLI argument, else the default 1.1.1.
    if len(argv) == 2:
        address = IndividualAddress(argv[1])
    else:
        address = "1.1.1"
    xknx = XKNX()
    await xknx.start()
    reader = PayloadReader(xknx, address)
    # Read the mask version of the device (descriptor 0).
    payload = await reader.send(
        DeviceDescriptorRead(descriptor=0), response_class=DeviceDescriptorResponse
    )
    if payload is not None:
        print(f"Mask version: {payload.value:04x}")
    # Read the serial number of the device (object 0, property 11).
    payload = await reader.send(
        PropertyValueRead(object_index=0, property_id=11, count=1, start_index=1),
        response_class=PropertyValueResponse,
    )
    if payload is not None:
        print(
            f"Serial number: {payload.data[0]:02x}{payload.data[1]:02x}:"
            f"{payload.data[2]:02x}{payload.data[3]:02x}{payload.data[4]:02x}{payload.data[5]:02x}"
        )
    # Check if the device is in programming mode (object 0, property 54).
    payload = await reader.send(
        PropertyValueRead(object_index=0, property_id=54, count=1, start_index=1),
        response_class=PropertyValueResponse,
    )
    if payload is not None:
        print(f"Programming mode: {'ON' if payload.data[0] else 'OFF'}")
    await xknx.stop()
if __name__ == "__main__":
    asyncio.run(main(sys.argv))
|
import pandas as pd
def add(data_string):
    """Parse CSV text with an "x"/"y" header, append an "x+y" column,
    and return the table serialized as JSON."""
    rows = [line.split(",") for line in data_string.split("\n") if line != ""]
    header = rows[0]
    values = [[float(cell) for cell in row] for row in rows[1:]]
    frame = pd.DataFrame(values, columns=header)
    frame["x+y"] = frame["x"] + frame["y"]
    return frame.to_json()
#created by ahmad on 02-10-2019
def fun():
    # Ask the user for a step count, then draw a descending ASCII
    # staircase with each step numbered, followed by a separator line.
    steps =int(input("How many steps do you want? :"))
    print()
    k0="__"
    k1=" |"
    for i in range(steps):
        print(k0," \--»",i+1)
        print(k1,end='')
        # Each step's riser is pushed further right than the last.
        k1="  "+k1
    print()
    print('-----------------------------------------')
    print()
c="y"
# Repeat until the user answers "n". Note the first staircase is drawn
# before the user is ever asked, because c starts out as "y".
while True:
    if c=="y" or c=="Y":
        fun()
    elif c=='n' or c== "N":
        print('احمد created by')
        break
    else:
        print(' Invalid input !!! \n')
    c=input('Do you want to continue (y/n) ?:')
    print()
# Generated by Django 2.2.3 on 2019-09-10 18:16
import datetime
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.2.3, 2019-09-10).

    Alters the default of ``estados.FechaTurno`` and creates the
    ``estados_inicial`` model.  NOTE(review): the FechaTurno defaults are
    datetimes frozen at generation time (2019-09-10 06:30), not computed at
    runtime — presumably intentional for this auto-generated file; confirm.
    """

    dependencies = [
        ('DatosEmpresa', '0002_auto_20190910_1316'),
        ('estado_paros_deta3', '0001_initial'),
        ('historial', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='estados',
            name='FechaTurno',
            field=models.DateTimeField(default=datetime.datetime(2019, 9, 10, 6, 30)),
        ),
        migrations.CreateModel(
            name='estados_inicial',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('turno', models.FloatField(default=1)),
                ('fecha', models.DateTimeField(default=django.utils.timezone.now, null=True)),
                ('fecha_creacion', models.DateTimeField(default=django.utils.timezone.now)),
                ('FechaTurno', models.DateTimeField(default=datetime.datetime(2019, 9, 10, 6, 30))),
                ('Modo', models.CharField(blank=True, max_length=60, null=True)),
                ('Execution', models.CharField(blank=True, max_length=60, null=True)),
                ('Alarma', models.CharField(blank=True, max_length=60, null=True)),
                ('CorteViruta', models.BooleanField(blank=True, null=True)),
                ('deltaTime', models.FloatField(null=True)),
                ('maquina', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='DatosEmpresa.maquina')),
                ('paros_deta3', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='estado_paros_deta3.paros_deta3')),
            ],
        ),
    ]
|
import logging

# Package logger for onegov.gis; a NullHandler avoids "no handler found"
# warnings when the host application has not configured logging.
log = logging.getLogger('onegov.gis')  # noqa
log.addHandler(logging.NullHandler())  # noqa

# These imports intentionally come after the logger setup (hence the noqa
# markers above keeping the linter quiet about import order).
from onegov.gis.forms import CoordinatesField
from onegov.gis.integration import MapboxApp
from onegov.gis.models import Coordinates, CoordinatesMixin

# Public API of the onegov.gis package.
__all__ = ('Coordinates', 'CoordinatesMixin', 'CoordinatesField', 'MapboxApp')
|
# -*- coding: UTF-8 -*-
import os
import Module
from MFile import MFile
def list_dir(path):
    """Scan *path* for Gradle modules and collect file statistics for each
    module's ``src`` directory, printing a per-module summary as it goes.

    Returns the list of Module objects produced by get_file_count.
    """
    entries = os.listdir(path)
    # Modules excluded from counting.
    excluded = ['pro/AutoNews/Rexxar', 'pro/AutoNews/shauto-lintcheck', 'pro/AutoNews/shauto-comment']
    modules = []
    for entry in entries:
        sub_path = path + "/" + entry
        if os.path.isdir(sub_path) and is_module(sub_path):
            for child in os.listdir(sub_path):
                child_dir = sub_path + "/" + child
                # Only count the module's "src" tree, skipping excluded modules.
                if os.path.isdir(child_dir) and child == "src" and sub_path not in excluded:
                    stats = get_file_count(child_dir, child, [], [], [])
                    print(sub_path)
                    print("Java:", len(stats.java_files), "Xml:", len(stats.xml_files), "Img:", len(stats.img_files))
                    modules.append(stats)
            print()
    return modules
def get_file_count(path, file, list_xml, list_java, list_img):
    """Recursively classify every file under *path* into the shared xml/java/
    image accumulator lists, then wrap the accumulators in a Module.

    *file* is the basename of *path*; the three lists are mutated in place and
    shared across the whole recursion.
    """
    if os.path.isdir(path):
        # Directory: recurse into each child with the same accumulators.
        for child in os.listdir(path):
            get_file_count(path + "/" + child, child, list_xml, list_java, list_img)
    else:
        record = MFile()
        record.fName = file
        record.fPath = path
        if path.endswith('.xml'):
            record.fType = 'xml'
            record.lines = get_file_lines(path)
            list_xml.append(record)
        elif path.endswith('.java'):
            record.fType = 'java'
            record.lines = get_file_lines(path)
            list_java.append(record)
        elif path.endswith(('.jpg', '.gif', '.png')):
            record.fType = 'res'
            list_img.append(record)
    return Module.Module(list_java, list_xml, list_img)
def get_file_lines(path):
    """Return the number of lines in the UTF-8 text file at *path*.

    Fixes two defects in the original: the file handle was never closed, and
    the returned value was the *index* of the last line (from enumerate's
    default start of 0), i.e. one less than the true line count for any
    non-empty file.
    """
    with open(path, encoding='utf-8') as fh:
        return sum(1 for _ in fh)
def has_files(path):
    """Return True if *path* directly contains at least one non-directory
    entry, else False.

    The original fell off the end and implicitly returned None when no plain
    file was found; this returns an explicit bool (still falsy-compatible
    for existing callers).
    """
    return any(not os.path.isdir(path + "/" + entry) for entry in os.listdir(path))
def is_module(dir):
    """Return True if directory *dir* contains an entry whose name includes
    "build.gradle" (i.e. the directory is a Gradle module), else False.

    Fixes the original's direct ``file.__contains__("build.gradle")`` dunder
    call (use the ``in`` operator) and its implicit None return when no match
    was found.
    """
    return any("build.gradle" in entry for entry in os.listdir(dir))
|
#!/usr/bin/env python
# encoding: utf-8
"""
Copy standard input to standard output using low-level (unbuffered) file I/O.

(Original docstring was Japanese; translated to English.)
"""
import sys
import os

# POSIX standard file descriptor numbers.
STDIN_FILENO = 0
STDOUT_FILENO = 1
# Copy buffer size in bytes.
BUFSIZE = 8192

# NOTE: Python 2 syntax ("except Exception, e") — this script will not parse
# under Python 3; kept as-is deliberately.
while True:
    try:
        buf = os.read(STDIN_FILENO, BUFSIZE)
    except Exception, e:
        sys.exit("read error")
    if not buf:
        # os.read returning an empty string means EOF.
        break
    try:
        os.write(STDOUT_FILENO, buf)
    except Exception, e:
        sys.exit("write error")
sys.exit(0)
|
# # 셀레늄 모듈 임포트
# from selenium import webdriver
# import time
# # 크롬 물리드라이버 가동 명령
# driver = webdriver.Chrome("C:\chrome/chromedriver.exe")
# # 물리 드라이버로 사이트 이동 명령
# driver.get("https://www.naver.com")
# time.sleep(1)
# # xpath를 이용하여 자동으로 클릭 제어하기
# login_btn = driver.find_element_by_xpath('//*[@id="account"]/a')
# # //*[@id="account"]/div/a/i # 원래 패스가 이거였는데
# # //*[@id="account"]/a # 최근에 이걸로 바뀜
# login_btn.click()
# # xpath를 이용하여 자동으로 텍스트 작성하기
# time.sleep(2)
# id_input = driver.find_element_by_xpath('//*[@id="id"]')
# id_input.send_keys('naverID')
# time.sleep(2)
# pw_input = driver.find_element_by_xpath('//*[@id="pw"]')
# pw_input.send_keys('naverPassword')
# time.sleep(1)
# login_btn = driver.find_element_by_xpath('//*[@id="log.login"]')
# login_btn.click()
|
import os
import sys
import datetime
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from utils import functions as c_functions
import utils.anchors as l_anchors
# Enable memory growth on every visible GPU so TensorFlow allocates GPU
# memory on demand instead of grabbing it all at process start.
gpus = tf.config.experimental.list_physical_devices("GPU")
if gpus:
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
class Parse:
    """Callable annotation parser used in a tf.data pipeline.

    Each dataset item is a string: "<image_name> <x1,y1,x2,y2,id> ...".
    __call__ loads the image and builds detection targets by assigning every
    ground-truth box to its best-IoU anchor across the pyramid levels.
    """

    def __init__(self, image_dir, anchors, grid_sizes, IMAGE_TARGET_SIZE):
        # Directory containing "<image_name>.jpg" files.
        self.image_dir = image_dir
        # Per-level anchor tensors; the last axis holds box coordinates.
        # NOTE(review): assumed layout (grid_y, grid_x, n_anchors, 4),
        # inferred from the indexing in get_label — confirm.
        self.anchors = anchors
        # Cells per side for each pyramid level; used to bucket box centers.
        self.grid_sizes = grid_sizes
        self.IMAGE_TARGET_SIZE = IMAGE_TARGET_SIZE
        # Anchors flattened to (N, coords) per level; only used by the
        # commented-out vectorized-assignment experiments kept below.
        self.flat_anchors = [tf.reshape(item, (-1, item.shape[-1])) for item in self.anchors]
        # Per-anchor target layout: 4 box coords + objectness + instance id.
        self.last_dim = 6

    def get_image(self, image_name):
        """Read a jpg, resize to the target size, and scale pixels to [-1, 1]."""
        image_real_path = self.image_dir + "/" + image_name + ".jpg"
        image = tf.io.read_file(image_real_path)
        image = tf.image.decode_jpeg(image)
        image = tf.image.resize(image, self.IMAGE_TARGET_SIZE)
        image = tf.cast(image, tf.float32)
        image = image / 127.5 - 1  # [0, 255] -> [-1, 1]
        return image

    def get_label(self, label):
        """Turn one annotation string into a list of per-level target tensors.

        Runs eagerly (uses .numpy()), so __call__ wraps it in tf.py_function.
        Unassigned anchor slots are filled with -1.
        """
        # "x1,y1,x2,y2,id x1,y1,..." -> (n_boxes, 5) float tensor.
        label = tf.strings.regex_replace(label, " ", ",")
        label = tf.strings.split(label, ",")
        label = tf.strings.to_number(label)
        label = tf.reshape(label, (-1, 5))
        boxes = label[..., 0:4]
        instance_ids = label[..., 4]  # NOTE(review): unused — boxes/ids are re-read per item below
        # one_dim_anchors = tf.concat(self.flat_anchors, 0)
        # shape = one_dim_anchors.shape[:-1] + [6]
        # one_dim_labels = tf.zeros(shape)
        # shape = one_dim_anchors.shape[0:1] + [1]
        # tiled_boxes = tf.tile(boxes, shape)
        # shape = one_dim_anchors.shape[0:1] + boxes.shape
        # tiled_boxes = tf.reshape(tiled_boxes, shape)
        # ious = c_functions.calc_iou(one_dim_anchors, tiled_boxes)
        # Start every level with "no target" (-1) everywhere.
        labels = []
        for item in self.anchors:
            shape = item.shape[:-1] + [self.last_dim]
            layer_label = tf.ones(shape) * -1
            labels.append(layer_label)
        for item in label:
            box = item[..., :4]
            id = item[..., 4]  # instance id (shadows the builtin `id`)
            # Box center (assumes corner-format boxes).
            box_xy = box[0:2] + (box[2:4] - box[0:2]) / 2
            max_index = 0
            current_max_iou = 0.
            current_arg_max = None
            current_box_xy_index = None
            # Find the level/cell/anchor with the highest IoU for this box.
            for index, (layer_anchors, cell_size) in enumerate(zip(self.anchors, self.grid_sizes)):
                box_xy_index = box_xy / (1 / cell_size)
                box_xy_index = tf.cast(box_xy_index, tf.int32)
                box_xy_index = box_xy_index.numpy().tolist()
                anchors = layer_anchors[box_xy_index]
                ious = c_functions.calc_iou(box, anchors)
                arg_max = tf.argmax(ious, output_type=tf.int32)
                max_iou = tf.reduce_max(ious)
                if max_iou > current_max_iou:
                    max_index = index
                    current_max_iou = max_iou
                    current_arg_max = arg_max
                    current_box_xy_index = box_xy_index
            # Write (box, objectness=1, id) into the winning anchor slot.
            value = tf.concat((box, [1.0, id]), 0)
            indices = tf.concat((current_box_xy_index, [current_arg_max]), 0)
            indices = indices[tf.newaxis, ...]
            labels[max_index] = tf.tensor_scatter_nd_update(labels[max_index], indices, [value])
            # max_index = 0
            # arg_max_list = []
            # current_max_iou = 0.
            # current_arg_max = None
            # for index, layer_anchors in enumerate(self.flat_anchors):
            #     ious = c_functions.calc_iou(box, layer_anchors)
            #     max_iou = tf.reduce_max(ious)
            #     arg_max = tf.argmax(ious, output_type=tf.int32)
            #     arg_max_list.append(arg_max)
            #     if max_iou > current_max_iou:
            #         current_max_iou = max_iou
            #         max_index = index
            #         current_arg_max = arg_max
            # # arg_max = arg_max_list[max_index]
            # value = tf.concat((box, [1.0, id]), 0)
            # indices = current_arg_max[tf.newaxis, tf.newaxis, ...]
            # labels[max_index] = tf.tensor_scatter_nd_update(labels[max_index], indices, [value])
        return labels
        # NOTE(review): everything below is unreachable dead code (after the
        # return above) — confirm whether the reshape pass is still wanted.
        ret_labels = []
        for layer_labels, layer_anchors in zip(labels, self.anchors):
            target_shape = layer_anchors.shape[:-1] + [self.last_dim]
            ret_layer_labels = tf.reshape(layer_labels, target_shape)
            ret_labels.append(ret_layer_labels)
        return ret_labels

    def __call__(self, item):
        """Map one dataset line to (image, (boxes, objectness, ids, full_label))."""
        splited_list = tf.strings.split(item, " ", 1)
        image_name = splited_list[0]
        image = self.get_image(image_name)
        label = splited_list[1]
        # get_label needs eager execution (.numpy(), Python loops), hence py_function.
        label1, label2, label3 = tf.py_function(self.get_label, [label], [tf.float32, tf.float32, tf.float32])
        # label1.set_shape(self.flat_anchors[0].shape[:1]+[6])
        # return image, label1, label2
        # label1.set_shape(self.anchors[0].shape[:-1] + [6])
        # label2.set_shape(self.anchors[1].shape[:-1] + [6])
        # label3.set_shape(self.anchors[2].shape[:-1] + [6])
        # label = self.get_label(label)
        # Flatten all pyramid levels into one (total_anchors, 6) tensor.
        label1 = tf.reshape(label1, [-1, self.last_dim])
        label2 = tf.reshape(label2, [-1, self.last_dim])
        label3 = tf.reshape(label3, [-1, self.last_dim])
        label = tf.concat((label1, label2, label3), 0)
        return image, (label[..., 0:4], label[..., 4:5], label[..., 5:6], label)
|
# Sequencia de Fibonacci
# Prints the first n Fibonacci terms; 0 and 1 are emitted up front, then the
# remaining n-2 terms are generated iteratively.
n = int(input('Digite quantos termos da sequencia de Fibonacci vc quer ver (digite um número maior que 1): '))
cont = 2  # terms printed so far (0 and 1 are printed below)
t1 = 0  # term n-2
t2 = 1  # term n-1
print('0 → 1 → ', end='')
while cont < n:
    t3 = t1 + t2  # next term
    print('{}'.format(t3), end=' → ')
    # Slide the two-term window forward.
    t1 = t2
    t2 = t3
    cont += 1
print('Fim')
|
import os
import re
import sys
import time
import json
import logging
import traceback
import emoji
import random
import datetime
from arango import ArangoClient
from dotenv import load_dotenv
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def _ensure_collection(db, name):
    """Create ArangoDB collection *name* if it does not exist yet.

    TODO: this service should not be creating database collections and
    hardcoding options like replication_factor.
    """
    if not db.has_collection(name):
        db.create_collection(
            name=name,
            replication_factor=3,
            shard_count=6,
            write_concern=1,
        )


def _sync_document(db, collection, new_doc, label):
    """Insert *new_doc* into *collection*, or update an existing document
    sharing its ``_key`` so every non-key field matches *new_doc*.

    *label* is a lower-case human-readable description used in log messages;
    its first letter is capitalized for the inserted/updated/not-updated lines
    to reproduce the original log output exactly.
    """
    key = new_doc["_key"]
    logging.info(f"Checking if {label} exists...")
    current_doc = db.collection(collection).get({"_key": key})
    cap_label = label[0].upper() + label[1:]
    # Insert if the document doesn't exist
    if current_doc is None:
        db.collection(collection).insert(new_doc)
        logging.info(f"{cap_label} inserted.")
        return
    # NOTE(review): documents fetched from ArangoDB carry _id/_rev fields, so
    # this inequality is likely always true and the update branch always
    # taken — confirm whether a field-wise comparison was intended.
    if current_doc != new_doc:
        # Update every field of the new document except the immutable key.
        # (Bug fix vs. the original: the scan-summary branch omitted the
        # "warning" field from its update, so changed warning criteria were
        # never persisted.)
        update_fields = {k: v for k, v in new_doc.items() if k != "_key"}
        db.collection(collection).update_match({"_key": key}, update_fields)
        logging.info(f"{cap_label} updated.")
    else:
        logging.info(f"{cap_label} not updated.")


def update_guidance(guidance_data, db):
    """Synchronize guidance documents in ArangoDB with the guidance.json data.

    *guidance_data* is a list of entries, each with a "file" name and a
    "guidance" mapping.  Scan/chart summary criteria go to fixed collections;
    any other file is treated as "tags_<type>.json" and synced into
    "<type>GuidanceTags".
    """
    for entry in guidance_data:
        if entry["file"] == "scanSummaryCriteria.json":
            _ensure_collection(db, "scanSummaryCriteria")
            for criteria_type, criteria in entry["guidance"].items():
                new_criteria = {
                    "_key": criteria_type,
                    "pass": criteria.get("pass", []),
                    "fail": criteria.get("fail", []),
                    "warning": criteria.get("warning", []),
                    "info": criteria.get("info", []),
                }
                _sync_document(
                    db,
                    "scanSummaryCriteria",
                    new_criteria,
                    f"scan summary criteria {criteria_type}",
                )
        elif entry["file"] == "chartSummaryCriteria.json":
            _ensure_collection(db, "chartSummaryCriteria")
            for criteria_type, criteria in entry["guidance"].items():
                new_criteria = {
                    "_key": criteria_type,
                    "pass": criteria.get("pass", []),
                    "fail": criteria.get("fail", []),
                }
                _sync_document(
                    db,
                    "chartSummaryCriteria",
                    new_criteria,
                    f"chart summary criteria {criteria_type}",
                )
        else:
            # Guidance tag files are named "tags_<type>.json".
            file_name = entry["file"].split(".json")[0]
            tag_type = file_name.split("tags_")[1]
            collection = f"{tag_type}GuidanceTags"
            _ensure_collection(db, collection)
            for tag_key, tag_data in entry["guidance"].items():
                new_tag = {
                    "_key": tag_key,
                    "en": tag_data["en"],
                    "fr": tag_data["fr"],
                }
                _sync_document(db, collection, new_tag, f"tag {tag_key}")
    logging.info("Guidance update completed.")
if __name__ == "__main__":
    # Pull DB credentials from the environment (.env supported via dotenv).
    load_dotenv()
    DB_USER = os.getenv("DB_USER")
    DB_PASS = os.getenv("DB_PASS")
    DB_NAME = os.getenv("DB_NAME")
    DB_URL = os.getenv("DB_URL")
    logging.info(emoji.emojize("Guidance service started :rocket:"))
    # guidance.json lives next to this script.
    current_directory = os.path.dirname(os.path.realpath(__file__))
    # Use a context manager so the handle is closed even if parsing fails
    # (the original leaked the handle on a json.JSONDecodeError).
    with open(f"{current_directory}/guidance.json") as guidance_file:
        guidance_data = json.load(guidance_file)
    # Establish DB connection
    client = ArangoClient(hosts=DB_URL)
    db = client.db(DB_NAME, username=DB_USER, password=DB_PASS)
    update_guidance(guidance_data, db)
    logging.info("Guidance service shutting down...")
|
# Generated by Django 3.0.7 on 2020-11-09 14:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.0.7, 2020-11-09).

    Adjusts choice sets, nullability and defaults on several cl_table models;
    no structural (table-creating) changes.
    """

    dependencies = [
        ('cl_table', '0080_poshaud_cart_id'),
    ]

    operations = [
        migrations.AlterField(
            model_name='depositaccount',
            name='type',
            field=models.CharField(blank=True, choices=[('Deposit', 'Deposit'), ('Top Up', 'Top Up'), ('CANCEL', 'CANCEL')], db_column='Type', max_length=10, null=True),
        ),
        migrations.AlterField(
            model_name='itemhelper',
            name='sa_date',
            field=models.DateTimeField(auto_now=True, null=True),
        ),
        migrations.AlterField(
            model_name='poshaud',
            name='isvoid',
            field=models.BooleanField(db_column='IsVoid', default=False, null=True),
        ),
        migrations.AlterField(
            model_name='prepaidaccount',
            name='sa_status',
            field=models.CharField(blank=True, choices=[('DEPOSIT', 'DEPOSIT'), ('TOPUP', 'TOPUP'), ('SA', 'SA')], db_column='SA_STATUS', max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='treatment',
            name='sa_status',
            field=models.CharField(blank=True, choices=[('SA', 'SA'), ('VOID', 'VOID'), ('SU', 'SU')], max_length=5, null=True),
        ),
        migrations.AlterField(
            model_name='treatmentaccount',
            name='sa_status',
            field=models.CharField(blank=True, choices=[('SA', 'SA'), ('VOID', 'VOID'), ('SU', 'SU')], max_length=5, null=True),
        ),
    ]
|
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
from torchvision import transforms, models
# Get "features" from VGG19 ("classifier" portion isn't needed for style transfer)
vgg = models.vgg19(pretrained=True).features
# Freeze all VGG params since we're only optimizing the target image, not the net
for param in vgg.parameters():
    param.requires_grad_(False)
# move vgg model to cuda if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
vgg.to(device)
print(vgg)
def load_image(img_path, max_size=400, shape=None):
    """Load and transform an image into a normalized 1xCxHxW float tensor,
    making sure it is <= *max_size* pixels in the x-y dims.

    If *shape* is given (e.g. another image's H/W) it overrides the computed
    size.  Bug fix vs. the original: ``size`` was computed (and ``shape``
    accepted) but the transform hard-coded ``Resize((400, 400))``, ignoring
    both; the computed size is now actually used.
    """
    image = Image.open(img_path).convert('RGB')
    # Large images will slow down processing, so cap the larger dimension.
    if max(image.size) > max_size:
        size = max_size
    else:
        size = max(image.size)
    # An explicit target shape (e.g. matching the content image) wins.
    if shape is not None:
        size = shape
    in_transform = transforms.Compose([
        transforms.Resize(size),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406),
                             (0.229, 0.224, 0.225))
    ])
    # Discard the transparent alpha channel (that's the :3) and add batch dimension
    image = in_transform(image)[:3, :, :].unsqueeze(0)
    return image
# Load content and style image onto the compute device.
content = load_image('style_transfer/imgs/me_buda.jpg').to(device)
# Resize style to match content, to make the later loss code easier.
style = load_image('style_transfer/imgs/dali_memoria.jpg', shape=content.shape[-2:]).to(device)
# Helper to un-normalize a tensor and convert it to a NumPy image for display.
def img_convert(tensor_img):
    """Convert a normalized 1xCxHxW tensor into an HxWxC NumPy image in [0, 1]."""
    arr = tensor_img.to('cpu').clone().detach()
    arr = arr.numpy().squeeze().transpose(1, 2, 0)
    # Undo the ImageNet normalization, then clamp into displayable range.
    std = np.array((0.229, 0.224, 0.225))
    mean = np.array((0.485, 0.456, 0.406))
    arr = (arr * std + mean).clip(0, 1)
    return arr
# Display the images if run on a console; otherwise just comment this out.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
# Content and style side-by-side
ax1.imshow(img_convert(content))
ax2.imshow(img_convert(style))
def get_features(image, model, layers=None):
    """Run *image* forward through *model*, collecting the activations of
    selected layers.

    The default layer mapping picks the VGGNet layers used for content and
    style representations in Gatys et al. (2016).
    """
    if layers is None:
        # Map PyTorch's numeric VGG module names to the paper's layer names.
        layers = {'0': 'conv1_1',
                  '5': 'conv2_1',
                  '10': 'conv3_1',
                  '19': 'conv4_1',
                  '21': 'conv4_2',  # content representation
                  '28': 'conv5_1'}
    features = {}
    activation = image
    # model._modules maps module names to submodules in forward order.
    for module_name, module in model._modules.items():
        activation = module(activation)
        if module_name in layers:
            features[layers[module_name]] = activation
    return features
def gram_matrix(tensor):
    """Calculate the Gram matrix of a 1xDxHxW tensor.

    https://en.wikipedia.org/wiki/Gramian_matrix

    Bug fix vs. the original: the flattening used ``view(d, h * h)``, which
    only happens to work for square feature maps (here 400x400) and raises /
    mis-shapes for anything else; the correct flattened size is ``h * w``.
    (The stray module-level string that floated above this function is now a
    proper docstring.)
    """
    # Get the batch_size, depth, height, and width of the tensor.
    _, d, h, w = tensor.size()
    # Reshape so we're correlating the feature maps of each channel.
    flat = tensor.view(d, h * w)
    # The Gram matrix is the matrix of all channel-pair inner products.
    return torch.mm(flat, flat.t())
# Get content and style features only once before forming the target image.
content_features = get_features(content, vgg)
style_features = get_features(style, vgg)
# Calculate the Gram matrices for each layer of our style representation.
style_grams = {layer: gram_matrix(style_features[layer]) for layer in style_features}
# Create a third "target" image and prep it for change.
# It is a good idea to start off with the target as a copy of our *content* image,
# then iteratively change its style.
target = content.clone().requires_grad_(True).to(device)
# todo -> watch lectures
# Weights for each style layer:
# weighting earlier layers more will result in *larger* style artifacts.
# Notice we are excluding `conv4_2`, our content representation.
style_weights = {'conv1_1': 1.,
                 'conv2_1': 0.75,
                 'conv3_1': 0.2,
                 'conv4_1': 0.2,
                 'conv5_1': 0.2}
content_weight = 1  # alpha
style_weight = 1e6  # beta
# For displaying the target image intermittently.
show_every = 400
# Iteration hyperparameters: we optimize the target image's pixels directly.
optimizer = optim.Adam([target], lr=0.003)
steps = 2000  # decide how many iterations to update your image (5000)
for ii in range(1, steps + 1):
    # Get the features from your target image.
    target_features = get_features(target, vgg)
    # The content loss: MSE between target and content activations at conv4_2.
    content_loss = torch.mean((target_features['conv4_2'] - content_features['conv4_2']) ** 2)
    # The style loss:
    # initialize it to 0 ...
    style_loss = 0
    # ... then add to it each layer's Gram-matrix loss.
    for layer in style_weights:
        # Get the "target" style representation for the layer.
        target_feature = target_features[layer]
        target_gram = gram_matrix(target_feature)
        _, d, h, w = target_feature.shape
        # Get the "style" style representation.
        style_gram = style_grams[layer]
        # The style loss for one layer, weighted appropriately.
        layer_style_loss = style_weights[layer] * torch.mean((target_gram - style_gram) ** 2)
        # Add to the style loss, normalized by the layer size.
        style_loss += layer_style_loss / (d * h * w)
    # Calculate the *total* loss.
    total_loss = content_weight * content_loss + style_weight * style_loss
    # Update your target image.
    optimizer.zero_grad()
    total_loss.backward()
    optimizer.step()
    # Display intermediate images and print the loss.
    if ii % show_every == 0:
        print('Total loss: ', total_loss.item())
        plt.imshow(img_convert(target))
        plt.show()
# Display content and final target image side-by-side.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
ax1.imshow(img_convert(content))
ax2.imshow(img_convert(target))
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import os
import random
import numpy as np
from config import PROJECT_FOLDER
import causaldag as cd
# === FUNCTIONS DEFINING THE DIRECTORY STRUCTURE
# /data
# /nnodes=5,nlatent=3,exp_nbrs=2,ngraphs=100
# /graph0
# /nsamples=100
# samples.npy
# /estimates
# /fci
# alpha=1.00e-01.npy
# ...
# /gspo
# /nsamples=500
# ...
# /graph1
# ...
# /nnodes=10,nlatent=3,exp_nbrs=2,ngraphs=100
# ...
# /results
# /nnodes=5,nlatent=3,exp_nbrs=2,ngraphs=100
# /nsamples=100
# /fci
# /alpha=1.00e-01.npy
# shds_skeleton.npy
# ...
# /gspo
# ...
# /nsamples=500
# ...
# /nnodes=10,nlatent=3,exp_nbrs=2,ngraphs=100
# ...
def get_graphs_string(ngraphs: int, nnodes: int, nlatent: int, exp_nbrs: float):
    """Canonical folder-name string identifying one graph-generation setting."""
    parts = [f"nnodes={nnodes}", f"nlatent={nlatent}", f"exp_nbrs={exp_nbrs}", f"ngraphs={ngraphs}"]
    return ",".join(parts)
def get_graphs_folder(ngraphs: int, nnodes: int, nlatent: int, exp_nbrs: float):
    """Absolute data folder for one graph-generation setting."""
    setting = get_graphs_string(ngraphs, nnodes, nlatent, exp_nbrs)
    return os.path.join(PROJECT_FOLDER, 'data', setting)
def get_graph_folder(ngraphs: int, nnodes: int, nlatent: int, exp_nbrs: float, graph_num: int):
    """Folder holding the artifacts of one sampled graph within a setting."""
    return os.path.join(
        get_graphs_folder(ngraphs, nnodes, nlatent, exp_nbrs),
        f"graph{graph_num}",
    )
def get_samples_folder(ngraphs: int, nnodes: int, nlatent: int, exp_nbrs: float, graph_num: int, nsamples: int):
    """Folder holding the samples drawn from one graph at a given sample size."""
    return os.path.join(
        get_graph_folder(ngraphs, nnodes, nlatent, exp_nbrs, graph_num),
        f"nsamples={nsamples}",
    )
def get_alg_estimate_folder(ngraphs, nnodes, nlatent, exp_nbrs: float, graph_num, nsamples, alg):
    """Folder holding one algorithm's estimates for a graph/sample-size pair."""
    return os.path.join(
        get_samples_folder(ngraphs, nnodes, nlatent, exp_nbrs, graph_num, nsamples),
        "estimates",
        alg,
    )
def to_str(v):
    """Format floats in two-decimal scientific notation; everything else via str()."""
    return f"{v:.2e}" if isinstance(v, float) else str(v)
def dict2str(d):
    """Render a dict as "k1=v1,k2=v2" sorted by key, formatting values via to_str."""
    return ','.join(f"{key}={to_str(val)}" for key, val in sorted(d.items()))
def get_alg_estimate_filename(ngraphs: int, nnodes: int, nlatent: int, exp_nbrs: float, graph_num: int, nsamples: int, alg: str, skeleton=False, **kwargs):
    """Path of one estimate .npy file; hyperparameters are encoded in the basename.

    A "_skeleton" suffix distinguishes skeleton-only estimates.
    """
    folder = get_alg_estimate_folder(ngraphs, nnodes, nlatent, exp_nbrs, graph_num, nsamples, alg)
    suffix = '_skeleton' if skeleton else ''
    return os.path.join(folder, dict2str(kwargs) + suffix + '.npy')
def get_alg_time_filename(ngraphs: int, nnodes: int, nlatent: int, exp_nbrs: float, graph_num: int, nsamples: int, alg: str, **kwargs):
    """Path of the wall-time .npy file matching an estimate file."""
    folder = get_alg_estimate_folder(ngraphs, nnodes, nlatent, exp_nbrs, graph_num, nsamples, alg)
    return os.path.join(folder, dict2str(kwargs) + '_time.npy')
def get_graphs_results_folder(ngraphs: int, nnodes: int, nlatent: int, exp_nbrs: float, nsamples: int):
    """Results folder for one setting/sample-size pair.

    NOTE(review): the subfolder is named "samples=..." while the data side
    uses "nsamples=..." — looks inconsistent; confirm before unifying.
    """
    return os.path.join(
        PROJECT_FOLDER,
        'results',
        get_graphs_string(ngraphs, nnodes, nlatent, exp_nbrs),
        f"samples={nsamples}",
    )
def get_alg_results_folder(ngraphs: int, nnodes: int, nlatent: int, exp_nbrs: float, nsamples: int, alg: str, **kwargs):
    """Results folder for one algorithm/hyperparameter combination."""
    base = get_graphs_results_folder(ngraphs, nnodes, nlatent, exp_nbrs, nsamples)
    return os.path.join(base, alg, dict2str(kwargs))
# === GENERATING SAMPLES
def generate_mags_and_samples(ngraphs, nnodes, nlatent, exp_nbrs: float, nsamples):
    """
    Generates [ngraphs] MAGs with [nnodes] nodes, marginalized from an Erdos-Renyi DAG with [nlatent]
    additional variables and [exp_nbrs] expected neighbors.
    Generates [nsamples] samples from each MAG.
    A random seed is set so that the same MAGs are generated for each setting of (ngraphs,nnodes,nlatent,exp_nbrs).
    """
    # === SKIP IF SAMPLES HAVE ALREADY BEEN GENERATED (assume generated for 1st means generated for all)
    if os.path.exists(get_samples_folder(ngraphs, nnodes, nlatent, exp_nbrs, 0, nsamples)):
        return
    # === SET SEEDS FOR REPRODUCIBILITY
    random.seed(9889772)
    np.random.seed(9898725)
    # === GENERATE DAGS AND MAGS
    # The first [nlatent] node labels act as latent variables and are marginalized out.
    dags = cd.rand.directed_erdos(nlatent+nnodes, exp_nbrs/(nnodes-1), size=ngraphs, as_list=True)
    gdags = [cd.rand.rand_weights(dag) for dag in dags]
    mags = [dag.marginal_mag(set(range(nlatent)), relabel='default') for dag in dags]
    # === GENERATE SAMPLES
    # Drop the first [nlatent] columns so samples contain only observed variables.
    samples_list = [gdag.sample(nsamples)[:, nlatent:] for gdag in gdags]
    # === SAVE GRAPHS AND SAMPLES
    graph_folders = [get_graph_folder(ngraphs, nnodes, nlatent, exp_nbrs, n) for n in range(ngraphs)]
    samples_folders = [get_samples_folder(ngraphs, nnodes, nlatent, exp_nbrs, n, nsamples) for n in range(ngraphs)]
    for graph_folder, samples_folder, mag, samples in zip(graph_folders, samples_folders, mags, samples_list):
        # samples_folder is nested inside graph_folder, so this creates both.
        os.makedirs(samples_folder, exist_ok=True)
        np.save(os.path.join(graph_folder, 'mag_amat.npy'), mag.to_amat())
        np.save(os.path.join(samples_folder, "samples.npy"), samples)
def get_mag_samples(ngraphs: int, nnodes: int, nlatent: int, exp_nbrs: float, graph_num: int, nsamples: int):
    """Load the samples for one graph, generating the whole batch first if missing."""
    folder = get_samples_folder(ngraphs, nnodes, nlatent, exp_nbrs, graph_num, nsamples)
    if not os.path.exists(folder):
        generate_mags_and_samples(ngraphs, nnodes, nlatent, exp_nbrs, nsamples)
    return np.load(os.path.join(folder, "samples.npy"))
def get_true_mags(ngraphs: int, nnodes: int, nlatent: int, exp_nbrs: float):
    """Load the ground-truth MAG of every graph in a setting from its saved
    adjacency matrix."""
    mags = []
    for graph_num in range(ngraphs):
        folder = get_graph_folder(ngraphs, nnodes, nlatent, exp_nbrs, graph_num)
        amat = np.load(os.path.join(folder, 'mag_amat.npy'))
        mags.append(cd.AncestralGraph.from_amat(amat))
    return mags
def get_alg_times(ngraphs: int, nnodes: int, nlatent: int, exp_nbrs: float, nsamples: int, alg: str, **kwargs):
    """Collect per-graph runtimes for one algorithm into a single cached array.

    NOTE(review): `times_filename` is built by plain string concatenation, not
    os.path.join; unless the results folder path ends with a separator the
    file becomes "<...kwargs>times.npy" — confirm whether that is intended.
    """
    times_filename = get_alg_results_folder(ngraphs, nnodes, nlatent, exp_nbrs, nsamples, alg, **kwargs) + 'times.npy'
    # NOTE(review): hardcoded True means the cache-read branch below is dead
    # code and the times are recomputed on every call — confirm intent.
    overwrite = True
    if overwrite or not os.path.exists(times_filename):
        # Gather each graph's saved timing file and stack into one array.
        alg_time_filenames = [
            get_alg_time_filename(ngraphs, nnodes, nlatent, exp_nbrs, graph_num, nsamples, alg, **kwargs)
            for graph_num in range(ngraphs)
        ]
        times = np.array([np.load(fn) for fn in alg_time_filenames])
        np.save(times_filename, times)
        return times
    else:
        return np.load(times_filename)
|
from django.contrib import admin
from django.urls import path
from .views import *
from .water_usage import *
from .friends import *
# URL routes for the site; the empty path and /login/ both serve the login page.
urlpatterns = [
    path('', login_view),
    path('login/', login_view, name="login"),
    path('logout/', logout_view, name="logout"),
    path('register/', register_view, name="register"),
    path('water-usage/', water_usage_view, name="water-usage"),
    path('choices/', choices_view, name="choices"),
    path('household/', household_view, name="household"),
    path('home/', home_view, name="home"),
    path('friends/', friends_view, name="friends"),
]
|
from django.urls import path
from . import views
# Namespace used when reversing URLs, e.g. "blog_app:blog_list".
app_name = "blog_app"

# Contains all urls for the blog app
urlpatterns = [
    path('write/', views.CreateBlog.as_view(), name="write"),
    path('blog_list/', views.BlogList.as_view(), name="blog_list"),
    path('blog_details/<slug>', views.blog_details, name="blog_details"),
    path('my_blogs/', views.MyBlogs.as_view(), name="my_blogs"),
    path('edit_blogs/<int:pk>', views.UpdateBlogs.as_view(), name="edit_blogs"),
    path('liked/<int:pk>', views.liked_blog, name="like"),
    path('unliked/<int:pk>', views.unlike_blog, name="unlike"),
    path("search/", views.search, name="search")
]
|
from typing import Dict, Union, Tuple, Iterable
from pathlib import Path, WindowsPath
from os import sep, utime
import time
import logging
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from tkinter import font as tkfont
import toml
import attr
from attr.validators import instance_of
from appdirs import user_config_dir
from .config_parser import Config
import pysight
from pysight.nd_hist_generator.movie import ImagingSoftware
def is_positive(instance, attribute, value):
    """attrs validator: reject negative TAG-bit values.

    Bug fix: the original *returned* the ValueError instead of raising it, so
    attrs never saw a failure and negative values were silently accepted.
    """
    if value < 0:
        raise ValueError("TAG Bit value has to be greater than 0.")
def end_is_greater(instance, attribute, value):
    """attrs validator: require the TAG-bit 'end' value >= instance.start.

    Bug fix: the original *returned* the ValueError instead of raising it, so
    attrs never saw a failure and invalid ranges were silently accepted.
    """
    if value < instance.start:
        raise ValueError("TAG Bit 'end' value has to be equal or greater to 'start'.")
@attr.s(slots=True)
class TagBits(object):
    """
    Storage for one TAG-lens bit allocation: which data source occupies
    which bit range.
    """

    value = attr.ib(default="None", validator=instance_of(str))  # data-source label; the literal string "None" means unused
    start = attr.ib(default=0, validator=[instance_of(int), is_positive])  # first bit (inclusive)
    end = attr.ib(default=1, validator=[instance_of(int), is_positive, end_is_greater])  # last bit; must be >= start
# Selectable data sources offered by the input-channel comboboxes in the GUI.
DATA_SOURCES = (
    "PMT1",
    "PMT2",
    "PMT3",
    "PMT4",
    "Lines",
    "Frames",
    "Laser",
    "TAG Lens",
    "Empty",
)
class GuiAppLst:
"""
Main GUI for the multiscaler code.
Note - class variables should contain "entry" in their name if they point
to an entry TTK object. Also, no variable should contain "root" in its name.
"""
    def __init__(self):
        """Build the main window and all widget groups, then block until the
        user presses Start/Enter (which destroys the window)."""
        self.root = Tk()
        self.root.title(f"PySight \uFF5C PBLab \uFF5C v{pysight.__version__}")
        self.root.rowconfigure(16, weight=1)
        self.root.columnconfigure(16, weight=1)
        main_frame = ttk.Frame(self.root, width=1000, height=1300)
        main_frame.grid(column=0, row=0)
        main_frame["borderwidth"] = 2
        style = ttk.Style()
        style.theme_use("clam")
        self.normal_font = tkfont.Font(family="Helvetica", size=10)
        self.bold_font = tkfont.Font(family="Helvetica", size=12, weight="bold")
        # Grid row where the config save/load widgets are placed.
        self.config_row = 14
        self.__create_vars()
        # Run widgets
        self.__browse_file(main_frame)
        self.__advanced_win(main_frame)
        self.__input_channels(main_frame)
        self.__num_of_frames(main_frame)
        self.__outputs(main_frame)
        self.__image_size(main_frame)
        self.__tag_bits(main_frame)
        self.__imaging_software(main_frame)
        # Only saving\loading functions after this point
        self.__save_cfg(main_frame)
        self.__load_cfg(main_frame)
        self.__load_last_used_cfg(main_frame)
        # Define the last quit button and wrap up GUI.
        # "Start" destroys the window; execution resumes after wait_window().
        quit_button = ttk.Button(main_frame, text="Start", command=self.root.destroy)
        quit_button.grid(row=16, column=2, sticky="ns")
        self.root.bind("<Return>", self.__dest)  # Enter key also starts
        for child in main_frame.winfo_children():
            child.grid_configure(padx=3, pady=2)
        self.root.wait_window()  # block until the window is destroyed
    def __dest(self, event):
        """<Return> key handler: close the window so __init__ can return."""
        self.root.destroy()
    def __create_vars(self):
        """Define every Tk variable backing the GUI widgets, with defaults."""
        self.debug = BooleanVar(value=False)
        self.phase = DoubleVar(value=-2.78)
        self.reprate = DoubleVar(
            value=80e6
        )  # 80e6 for the Chameleon, 0 to raise ZeroDivisionError
        self.gating = BooleanVar(
            value=False
        )  # difference between pulse and arrival to sample
        self.binwidth = DoubleVar(value=800e-12)  # seconds
        self.tag_freq = DoubleVar(value=0.189e6)
        self.tag_pulses = IntVar(value=1)
        self.tag_offset = IntVar(value=0)
        self.fill_frac = DoubleVar(value=72.0)  # percent
        self.bidir = BooleanVar(value=False)
        self.keep_unidir = BooleanVar(value=False)
        self.flim: BooleanVar = BooleanVar(value=False)
        self.flim_downsampling_space: IntVar = IntVar(value=1)
        self.flim_downsampling_time: IntVar = IntVar(value=1)
        self.censor: BooleanVar = BooleanVar(value=False)
        self.line_freq = DoubleVar(value=7930.0)  # Hz
        self.sweeps_as_lines = BooleanVar(value=False)
        self.frame_delay = DoubleVar(value=0.001)  # sec
        self.interleaved = BooleanVar(value=False)
    def __browse_file(self, main_frame):
        """Row 0: a "Browse" button plus an entry bound to the chosen file path."""
        file_row = 0
        self.filename = StringVar(value="")
        browse_button = ttk.Button(main_frame, text="Browse", command=self.__browsefunc)
        browse_button.grid(column=0, row=file_row, sticky="ns")
        browse_entry = ttk.Entry(main_frame, textvariable=self.filename, width=80)
        browse_entry.grid(column=1, row=file_row, sticky="we", columnspan=2)
def __imaging_software(self, main_frame):
    """Combobox selecting the imaging software that produced the data."""
    imaging_software_label = ttk.Label(
        main_frame, text="Imaging Software", font=self.bold_font
    )
    imaging_software_label.grid(row=5, column=2, sticky="ns")
    self.imaging_software = StringVar()
    cb_image = ttk.Combobox(
        main_frame, textvariable=self.imaging_software, width=10
    )
    cb_image.grid(row=6, column=2, sticky="ns")
    # Default and options come from the ImagingSoftware enum.
    cb_image.set(ImagingSoftware.SCANIMAGE.value)
    cb_image["values"] = [item.value for item in ImagingSoftware]
def __input_channels(self, main_frame):
    """Build the START/STOP1..STOP5 input-channel comboboxes and row labels.

    Each combobox is backed by a StringVar (self.input_start /
    self.input_stopN) and offers the choices in DATA_SOURCES.
    Replaces six near-identical copy-pasted widget blocks with one loop.
    """
    inputs_row = 1
    input_channels_label = ttk.Label(
        main_frame,
        text="Input Channels ",
        font=self.bold_font,
    )
    input_channels_label.grid(column=0, row=inputs_row, columnspan=2)
    self.input_start = StringVar()
    self.input_stop1 = StringVar()
    self.input_stop2 = StringVar()
    self.input_stop3 = StringVar()
    self.input_stop4 = StringVar()
    self.input_stop5 = StringVar()
    # (row label, backing variable, default selection) per channel row.
    channels = (
        ("START", self.input_start, "PMT1"),
        ("STOP1", self.input_stop1, "Empty"),
        ("STOP2", self.input_stop2, "Lines"),
        ("STOP3", self.input_stop3, "Empty"),
        ("STOP4", self.input_stop4, "Empty"),
        ("STOP5", self.input_stop5, "Empty"),
    )
    for offset, (label_text, var, default) in enumerate(channels, start=1):
        cb = ttk.Combobox(main_frame, textvariable=var, width=10)
        cb.grid(column=1, row=inputs_row + offset, sticky="w")
        cb.set(default)
        cb["values"] = DATA_SOURCES
        label = ttk.Label(main_frame, text=label_text, font=self.normal_font)
        label.grid(column=0, row=inputs_row + offset, sticky="ns")
def __num_of_frames(self, main_frame):
    """Entry for the number of frames in the data.

    The entry starts disabled; it is re-enabled by __check_if_empty when the
    channel selection allows it. Every input-channel variable gets two trace
    callbacks (was twelve copy-pasted ``.trace`` calls).
    """
    frame_label = ttk.Label(
        main_frame, text="Number of frames", font=self.normal_font
    )
    frame_label.grid(column=2, row=4, sticky="w")
    self.num_of_frames = IntVar(value=1)
    self.num_frames_entry = ttk.Entry(
        main_frame, textvariable=self.num_of_frames, width=3
    )
    self.num_frames_entry.grid(column=2, row=4, sticky="ns")
    self.num_frames_entry.config(state="disabled")
    # Disable number of frames unless all inputs but one are empty;
    # also re-check whether a TAG Lens channel is selected.
    for var in (
        self.input_start,
        self.input_stop1,
        self.input_stop2,
        self.input_stop3,
        self.input_stop4,
        self.input_stop5,
    ):
        var.trace("w", self.__check_if_empty)
        var.trace("w", self.__check_if_tag_lens_exists)
def __outputs(self, main_frame):
    """ Wanted outputs """
    # Three independent output options: summed stack, in-memory result,
    # and a full stack (the only one enabled by default).
    outputs_row = 9
    outputs_column = 2
    outputs_label = ttk.Label(main_frame, text="Outputs", font=self.bold_font)
    outputs_label.grid(column=outputs_column, row=outputs_row - 1, sticky="ns")
    self.summed = BooleanVar(value=False)
    summed_array = ttk.Checkbutton(
        main_frame, text="Summed Stack", variable=self.summed
    )
    summed_array.grid(column=outputs_column, row=outputs_row, sticky="ns")
    self.memory = BooleanVar(value=False)
    in_memory = ttk.Checkbutton(main_frame, text="In Memory", variable=self.memory)
    in_memory.grid(column=outputs_column, row=outputs_row + 1, sticky="ns")
    self.stack = BooleanVar(value=True)
    tif = ttk.Checkbutton(main_frame, text="Full Stack", variable=self.stack)
    tif.grid(column=outputs_column, row=outputs_row + 2, sticky="ns")
def __image_size(self, main_frame):
    """X/Y/Z image-size entries; Z is disabled until a TAG Lens channel is
    selected (see __check_if_tag_lens_exists)."""
    image_size_row = 1
    image_size_label = ttk.Label(main_frame, text="Image Size", font=self.bold_font)
    image_size_label.grid(column=2, row=image_size_row, sticky="ns", columnspan=1)
    x_size_label = ttk.Label(main_frame, text="X", font=self.normal_font)
    x_size_label.grid(column=2, row=image_size_row + 1, sticky="w")
    y_size_label = ttk.Label(main_frame, text="Y", font=self.normal_font)
    y_size_label.grid(column=2, row=image_size_row + 1, sticky="ns")
    z_size_label = ttk.Label(main_frame, text="Z", font=self.normal_font)
    z_size_label.grid(column=2, row=image_size_row + 1, sticky="e")
    self.x_pixels = IntVar(value=512)
    self.y_pixels = IntVar(value=512)
    self.z_pixels = IntVar(value=1)
    x_pixels_entry = ttk.Entry(main_frame, textvariable=self.x_pixels, width=5)
    x_pixels_entry.grid(column=2, row=image_size_row + 2, sticky="w")
    y_pixels_entry = ttk.Entry(main_frame, textvariable=self.y_pixels, width=5)
    y_pixels_entry.grid(column=2, row=image_size_row + 2, sticky="ns")
    # Kept on self so the trace callbacks can flip its state later.
    self.z_pixels_entry = ttk.Entry(main_frame, textvariable=self.z_pixels, width=5)
    self.z_pixels_entry.grid(column=2, row=image_size_row + 2, sticky="e")
    self.z_pixels_entry.config(state="disabled")
def __debug(self, main_frame):
    """Checkbox that restricts processing to a small portion of the data."""
    chk = ttk.Checkbutton(main_frame, text="Debug?", variable=self.debug)
    chk.grid(column=2, row=10, sticky="ns")
def __interleaved(self, main_frame):
    """ Unmix two data channel in the same PMT1 analog channel """
    # Checkbox bound to self.interleaved.
    inter_check = ttk.Checkbutton(
        main_frame, text="Interleaved?", variable=self.interleaved
    )
    inter_check.grid(column=2, row=9, sticky="ns")
def __mirror_phase(self, main_frame):
    """Label and entry for the mirror phase (microseconds)."""
    lbl = ttk.Label(main_frame, text="Mirror phase [us]: ")
    lbl.grid(column=0, row=1, sticky="w")
    ent = ttk.Entry(main_frame, textvariable=self.phase, width=8)
    ent.grid(column=0, row=1, sticky="e")
def __reprate(self, main_frame):
    """ Laser repetition rate"""
    # Nominal repetition rate in Hz, bound to self.reprate (used for FLIM).
    laser1_label = ttk.Label(main_frame, text="Laser nominal rep. rate (FLIM) [Hz]")
    laser1_label.grid(column=2, row=8, sticky="ns")
    reprate_entry = ttk.Entry(main_frame, textvariable=self.reprate, width=11)
    reprate_entry.grid(column=3, row=8, sticky="ns")
def __gating(self, main_frame):
    """Gating checkbox; starts disabled and is toggled by __check_if_flim."""
    self.gating_check = ttk.Checkbutton(
        main_frame, text="With Gating?", variable=self.gating
    )
    self.gating_check.grid(column=2, row=7, sticky="ns")
    self.gating_check.config(state="disabled")
def __binwidth(self, main_frame):
    """ Binwidth of Multiscaler (for FLIM) """
    # Bound to self.binwidth (seconds).
    binwidth_label = ttk.Label(main_frame, text="Multiscaler binwidth [sec]")
    binwidth_label.grid(column=2, row=1, sticky="ns")
    binwidth_entry = ttk.Entry(main_frame, textvariable=self.binwidth, width=9)
    binwidth_entry.grid(column=3, row=1, sticky="ns")
def __tag_lens(self, main_frame):
    """ TAG lens nominal frequency """
    # One row: frequency entry (center), pulse-count entry (east, disabled),
    # phase-offset entry (west, kept on self so other code can toggle it).
    tag_row = 7
    tag_label = ttk.Label(
        main_frame,
        text="       TAG nominal freq. [Hz]\noffset [deg]      n. pulses",
    )
    tag_label.grid(column=0, row=tag_row, columnspan=2, sticky="w")
    tag_label_entry = ttk.Entry(main_frame, textvariable=self.tag_freq, width=10)
    tag_label_entry.grid(column=0, row=tag_row + 1, sticky="ns")
    tag_pulses_entry = ttk.Entry(main_frame, textvariable=self.tag_pulses, width=3)
    tag_pulses_entry.grid(column=0, row=tag_row + 1, sticky="e")
    tag_pulses_entry.config(state="disabled")
    self.tag_offset_entry = ttk.Entry(
        main_frame, textvariable=self.tag_offset, width=3
    )
    self.tag_offset_entry.grid(column=0, row=tag_row + 1, sticky="w")
def __tag_bits(self, main_frame):
    """TAG-bit allocation: a 'Use?' checkbox plus three (role, start, end)
    bit groups.

    The three identical combobox/label/entry groups are generated in one
    loop (was three copy-pasted blocks), and the redundant
    ``self.tag_bits_dict = {}`` assignment that was immediately overwritten
    has been removed.
    """
    tag_bits_row = 9
    tag_bits_label = ttk.Label(
        main_frame, text="TAG Bits Allocation", font=self.bold_font
    )
    tag_bits_label.grid(column=1, row=tag_bits_row, sticky="ns")
    self.tag_bits = BooleanVar(value=False)
    tag_bit_check = ttk.Checkbutton(main_frame, text="Use?", variable=self.tag_bits)
    tag_bit_check.grid(column=1, row=tag_bits_row, sticky="w")
    self.bits_grp_1_start = IntVar(value=1)
    self.bits_grp_1_end = IntVar(value=3)
    self.bits_grp_2_start = IntVar(value=4)
    self.bits_grp_2_end = IntVar(value=5)
    self.bits_grp_3_start = IntVar(value=6)
    self.bits_grp_3_end = IntVar(value=16)
    self.bits_grp_1_label = StringVar()
    self.bits_grp_2_label = StringVar()
    self.bits_grp_3_label = StringVar()
    self.tag_bits_group_options = (
        "Power",
        "Slow axis",
        "Fast axis",
        "Z axis",
        "None",
    )
    groups = (
        (self.bits_grp_1_label, self.bits_grp_1_start, self.bits_grp_1_end),
        (self.bits_grp_2_label, self.bits_grp_2_start, self.bits_grp_2_end),
        (self.bits_grp_3_label, self.bits_grp_3_start, self.bits_grp_3_end),
    )
    for offset, (label_var, start_var, end_var) in enumerate(groups, start=1):
        cb = ttk.Combobox(main_frame, textvariable=label_var, width=10)
        cb.grid(column=0, row=tag_bits_row + offset, sticky="e")
        cb.set("None")
        cb["values"] = self.tag_bits_group_options
        start_lab = ttk.Label(main_frame, text="\tStart")
        start_lab.grid(column=1, row=tag_bits_row + offset, sticky="w")
        start_ent = ttk.Entry(main_frame, textvariable=start_var, width=3)
        start_ent.grid(column=1, row=tag_bits_row + offset, sticky="ns")
        end_lab = ttk.Label(main_frame, text="End")
        end_lab.grid(column=1, row=tag_bits_row + offset, sticky="e")
        end_ent = ttk.Entry(main_frame, textvariable=end_var, width=3)
        end_ent.grid(column=2, row=tag_bits_row + offset, sticky="w")
    # Snapshot of the current group definitions at construction time.
    self.tag_bits_dict = {
        idx: TagBits(
            value=label_var.get(),
            start=start_var.get(),
            end=end_var.get(),
        )
        for idx, (label_var, start_var, end_var) in enumerate(groups)
    }
def __fill_frac(self, main_frame):
    """ Percentage of time mirrors spend "inside" the image """
    # Label on the west side, entry on the east side of the same cell.
    fill_frac_text = ttk.Label(main_frame, text="Fill fraction [%]: ")
    fill_frac_text.grid(column=0, row=4, sticky="w")
    fill_frac_entry = ttk.Entry(main_frame, textvariable=self.fill_frac, width=8)
    fill_frac_entry.grid(column=0, row=4, sticky="e")
def __browsefunc(self):
    """Open a file-selection dialog for the data file and store the choice.

    Starts in the previously chosen file's directory when one exists,
    otherwise in the current working directory. Deduplicates the two
    near-identical dialog invocations of the original.
    """
    filetypes = [("List files", "*.lst"), ("All files", "*.*")]
    if self.filename.get() != "":
        initialdir = str(Path(self.filename.get()).parent)
    else:
        initialdir = "."
    self.filename.set(
        filedialog.askopenfilename(
            filetypes=filetypes,
            title="Choose a list or pickle file",
            initialdir=initialdir,
        )
    )
def __check_if_empty(self, *args):
    """Trace callback: toggle the num-of-frames entry based on channel choices."""
    list_of_values = [
        self.input_start.get(),
        self.input_stop1.get(),
        self.input_stop2.get(),
        self.input_stop3.get(),
        self.input_stop4.get(),
        self.input_stop5.get(),
    ]
    # NOTE(review): the entry state only changes when exactly two of the six
    # channels read "Empty"; for any other count it is left untouched.
    # Confirm this matches the "all inputs but one are empty" comment at the
    # call-site in __num_of_frames.
    if 2 == list_of_values.count("Empty"):
        if "PMT1" in list_of_values or "PMT2" in list_of_values:
            self.num_frames_entry.config(state="normal")
        else:
            self.num_frames_entry.config(state="disabled")
def __check_if_tag_lens_exists(self, *args):
    """Trace callback: allow editing Z pixels only when a 'TAG Lens' channel is set."""
    channel_vars = (
        self.input_start,
        self.input_stop1,
        self.input_stop2,
        self.input_stop3,
        self.input_stop4,
        self.input_stop5,
    )
    has_tag_lens = any(var.get() == "TAG Lens" for var in channel_vars)
    self.z_pixels_entry.config(state="normal" if has_tag_lens else "disabled")
def __bidir(self, main_frame):
    """ Checkbox for bi-directional scan """
    bidir_check = ttk.Checkbutton(
        main_frame, text="Bi-directional scan", variable=self.bidir
    )
    bidir_check.grid(column=0, row=5, sticky="ns")
    # Toggling bi-directional scanning updates the 'keep unidir' checkbox.
    self.bidir.trace("w", self.__check_if_bidir)
def __check_if_bidir(self, *args):
    """Trace callback: enable 'keep unidirectional' only for bi-directional scans.

    Bug fix: the original tested the BooleanVar object itself
    (``if self.bidir:``), which is always truthy, so the checkbox was enabled
    unconditionally and could never be disabled again. The variable's value
    must be read with ``.get()``.
    """
    if self.bidir.get():
        self.keep_unidir_check.config(state="normal")
    else:
        self.keep_unidir_check.config(state="disabled")
def __keep_unidir_events(self, main_frame):
    """ Checkbox to see if events taken in the returning phase of a resonant mirror should be kept. """
    # Kept on self and disabled by default; re-enabled via __check_if_bidir.
    self.keep_unidir_check = ttk.Checkbutton(
        main_frame, text="Keep unidirectional?", variable=self.keep_unidir
    )
    self.keep_unidir_check.grid(column=0, row=6, sticky="ns")
    self.keep_unidir_check.config(state="disabled")
def __flim(self, main_frame):
    """
    Defines the mapping between one pulse and the missing pulses.
    For example, downsampling factor of 8 means that every pulse that is
    received starts an event of 8 pulses, with the next recorded pulse being the 9th.
    :param main_frame: ttk.Frame
    """
    flim_check: ttk.Checkbutton = ttk.Checkbutton(
        main_frame, variable=self.flim, text="FLIM?"
    )
    flim_check.grid(row=2, column=2, sticky="ns")
    # Toggling FLIM enables/disables its dependent widgets (__check_if_flim).
    self.flim.trace("w", self.__check_if_flim)
def __flim_downsampling_space(self, main_frame):
    """Entry for the spatial FLIM downsampling factor; active only when FLIM is on."""
    downsamping_space_text = ttk.Label(main_frame, text="Downsampling in space:")
    downsamping_space_text.grid(column=2, row=3, sticky="ns")
    self.downsamping_space_entry = ttk.Entry(
        main_frame, textvariable=self.flim_downsampling_space, width=4
    )
    self.downsamping_space_entry.grid(column=3, row=3, sticky="ns")
    # Initial state mirrors the FLIM checkbox; kept in sync by __check_if_flim.
    self.downsamping_space_entry.config(
        state="normal" if self.flim.get() else "disabled"
    )
def __flim_downsampling_time(self, main_frame):
    """Entry for the temporal FLIM downsampling factor (in frames)."""
    downsamping_time_text = ttk.Label(
        main_frame, text="Downsampling in time (frames):"
    )
    downsamping_time_text.grid(column=2, row=4, sticky="ns")
    self.downsamping_time_entry = ttk.Entry(
        main_frame, textvariable=self.flim_downsampling_time, width=4
    )
    self.downsamping_time_entry.grid(column=3, row=4, sticky="ns")
    # Initial state mirrors the FLIM checkbox; kept in sync by __check_if_flim.
    self.downsamping_time_entry.config(
        state="normal" if self.flim.get() else "disabled"
    )
def __censor(self, main_frame):
    """
    If FLIM is active, this checkbox enables the use of censor correction on the generated images.
    :param main_frame: ttk.Frame
    """
    # Disabled by default; re-enabled by __check_if_flim when FLIM is on.
    self.censor_check: ttk.Checkbutton = ttk.Checkbutton(
        main_frame, variable=self.censor, text="Censor Correction"
    )
    self.censor_check.grid(row=5, column=2, sticky="ns")
    self.censor_check.config(state="disabled")
def __check_if_flim(self, *args):
    """Trace callback: keep the FLIM-dependent widgets in sync with the FLIM flag."""
    new_state = "normal" if self.flim.get() else "disabled"
    dependents = (
        self.censor_check,
        self.gating_check,
        self.downsamping_space_entry,
        self.downsamping_time_entry,
    )
    for widget in dependents:
        widget.config(state=new_state)
    self.root.update_idletasks()
def __line_freq(self, main_frame):
    """ Frequency of the line scanning mirror """
    line_freq_label = ttk.Label(main_frame, text="Line freq [Hz]: ")
    line_freq_label.grid(row=3, column=0, sticky="w")
    line_freq_entry = ttk.Entry(main_frame, textvariable=self.line_freq, width=8)
    line_freq_entry.grid(row=3, column=0, sticky="e")
def __sweeps_as_lines(self, main_frame):
    """ Use the sweeps as lines for the image generation """
    sweeps_cb = ttk.Checkbutton(
        main_frame, variable=self.sweeps_as_lines, text="Sweeps as lines?"
    )
    sweeps_cb.grid(row=6, column=2, sticky="ns")
def __advanced_win(self, main_frame):
    """Button that opens the advanced-settings window."""
    button = ttk.Button(main_frame, text="Advanced", command=self.__open_advanced)
    button.grid(row=13, column=2, sticky="ns")
def __open_advanced(self, *args):
    """Build the 'Advanced' Toplevel window and populate it with the
    scanner/hardware settings widgets."""
    self.advanced_win = Toplevel(self.root)
    frame = ttk.Frame(self.advanced_win, width=300, height=300)
    frame.grid(column=0, row=0)
    frame["borderwidth"] = 2
    style = ttk.Style()
    style.theme_use("clam")
    # Column headers first, then the individual setting widgets.
    self.__setup_advanced_frame(frame)
    self.__gating(frame)
    self.__flim(frame)
    self.__flim_downsampling_space(frame)
    self.__flim_downsampling_time(frame)
    self.__censor(frame)
    self.__sweeps_as_lines(frame)
    self.__debug(frame)
    self.__mirror_phase(frame)
    self.__fill_frac(frame)
    self.__reprate(frame)
    self.__binwidth(frame)
    self.__keep_unidir_events(frame)
    self.__bidir(frame)
    # Called once directly (frame lands in *args) to set the initial state
    # of the 'keep unidir' checkbox.
    self.__check_if_bidir(frame)
    self.__tag_lens(frame)
    self.__frame_delay(frame)
    self.__line_freq(frame)
    self.__interleaved(frame)
    for child in frame.winfo_children():
        child.grid_configure(padx=3, pady=2)
def __setup_advanced_frame(self, frame):
    """Place the two bold column headers of the advanced-settings window."""
    scan_lab = ttk.Label(frame, text="      Scanner Settings", font=self.bold_font)
    scan_lab.grid(row=0, column=0, sticky="ns")
    hardware_lab = ttk.Label(
        frame, text="      Hardware Settings", font=self.bold_font
    )
    hardware_lab.grid(row=0, column=2, sticky="ns")
def __frame_delay(self, main_frame):
    """Entry for the delay between consecutive frames (seconds)."""
    frame_delay_label = ttk.Label(main_frame, text="Frame delay [sec]: ")
    frame_delay_label.grid(row=2, column=0, sticky="w")
    frame_delay_entry = ttk.Entry(
        main_frame, textvariable=self.frame_delay, width=8
    )
    frame_delay_entry.grid(row=2, column=0, sticky="e")
####### ONLY SAVE\LOAD FUNCS AFTER THIS POINT ########
def __save_cfg(self, main_frame):
    """ A button to write a .toml with current configs """
    # NOTE(review): self.config_row is defined outside this view — confirm
    # it is set before this method runs.
    config_label = ttk.Label(
        main_frame, text="Configuration File", font=self.bold_font
    )
    config_label.grid(column=1, row=self.config_row, sticky="ns")
    self.save_as: StringVar = StringVar(value="default")
    save_label = ttk.Label(main_frame, text="Config file name to save:")
    save_label.grid(
        column=0, row=self.config_row + 1, sticky="ns", columnspan=2, padx=10
    )
    save_entry = ttk.Entry(main_frame, textvariable=self.save_as, width=8)
    save_entry.grid(column=1, row=self.config_row + 1, sticky="e")
    save_button = ttk.Button(
        main_frame, text="Save cfg", command=self.__callback_save_cur_cfg
    )
    save_button.grid(column=1, row=self.config_row + 2, sticky="w")
def __callback_save_cur_cfg(self) -> None:
    """
    Takes a GUIApp() instance and saves it to a .toml file
    """
    # NOTE(review): despite the name, this holds a Config object, not a dict.
    cfg_dict_to_save = Config.from_gui(self)
    cfg_dict_to_save.to_disk()
def __load_cfg(self, main_frame: ttk.Frame):
    """
    Load a specific .toml file and change all variables accordingly
    """
    self.cfg_filename: StringVar = StringVar(value="default")
    load_button: Button = ttk.Button(
        main_frame, text="Load cfg", command=self.__browsecfg
    )
    load_button.grid(column=1, row=self.config_row + 2, sticky="e")
def __browsecfg(self, new_cfg=None):
    """Pick (or receive) a .toml config path, parse it, and apply it.

    :param new_cfg: optional path; when None a file dialog is shown.
    """
    if not new_cfg:
        self.cfg_filename.set(
            filedialog.askopenfilename(
                filetypes=[("Config files", "*.toml")],
                title=f"Choose a configuration file",
                initialdir=user_config_dir("pysight"),
            )
        )
    else:
        self.cfg_filename.set(new_cfg)
    with open(self.cfg_filename.get(), "r") as f:
        self.config = toml.load(f)
        # Touch the file's mtime so __load_last_used_cfg picks it up as the
        # most recent config next session; lack of permission is non-fatal.
        try:
            utime(self.cfg_filename.get(), (time.time(), time.time()))
        except PermissionError:
            pass
    self.__modify_vars()
def __modify_vars(self):
    """
    Push every value from the loaded TOML dictionary into its matching
    Tk variable (up to two levels of nested tables), then refresh the GUI.
    """
    var_map = self._build_config_dict()
    for key, val in self.config.items():
        if isinstance(val, dict):
            for sub_key, sub_val in val.items():
                if isinstance(sub_val, dict):
                    for leaf_key, leaf_val in sub_val.items():
                        var_map[leaf_key].set(leaf_val)
                else:
                    var_map[sub_key].set(sub_val)
        else:
            var_map[key].set(val)
    self.root.update_idletasks()
def __load_last_used_cfg(self, main_frame):
    """Find the most recently modified .toml config and load it, if any.

    Uses ``stat().st_mtime`` instead of the original magic index
    ``stat()[8]`` for readability (same field of the stat result).
    A file that fails to parse leaves an empty config in place.
    """
    direc = Path(user_config_dir("pysight"))
    latest_filename: str = ""
    latest_file_date: float = 0
    for cfg_file in direc.glob("*.toml"):
        cur_date_modified = cfg_file.stat().st_mtime
        if cur_date_modified > latest_file_date:
            latest_filename = str(cfg_file)
            latest_file_date = cur_date_modified
    if latest_filename != "":
        with open(latest_filename, "r") as f:
            try:
                self.config = toml.load(f)
            except ValueError:
                self.config = {}
        self.__modify_vars()
def _build_config_dict(self):
    """ Helper method to populate a new GUI instance from a config file """
    # Maps each TOML key to the Tk variable it should populate.
    from_config_to_vars = {
        "cfg_title": self.save_as,
        # Input channels
        "stop1": self.input_stop1,
        "stop2": self.input_stop2,
        "stop3": self.input_stop3,
        "stop4": self.input_stop4,
        "stop5": self.input_stop5,
        "start": self.input_start,
        # Image geometry and sources
        "num_of_frames": self.num_of_frames,
        "x_pixels": self.x_pixels,
        "y_pixels": self.y_pixels,
        "z_pixels": self.z_pixels,
        "imaging_software": self.imaging_software,
        "data_filename": self.filename,
        # Outputs
        "summed": self.summed,
        "memory": self.memory,
        "stack": self.stack,
        # Hardware / scan settings
        "debug": self.debug,
        "phase": self.phase,
        "reprate": self.reprate,
        "gating": self.gating,
        "binwidth": self.binwidth,
        "tag_freq": self.tag_freq,
        "tag_pulses": self.tag_pulses,
        "tag_offset": self.tag_offset,
        "fill_frac": self.fill_frac,
        "bidir": self.bidir,
        "keep_unidir": self.keep_unidir,
        "flim": self.flim,
        "flim_downsampling_space": self.flim_downsampling_space,
        "flim_downsampling_time": self.flim_downsampling_time,
        "censor": self.censor,
        "line_freq": self.line_freq,
        "sweeps_as_lines": self.sweeps_as_lines,
        "frame_delay": self.frame_delay,
        "interleaved": self.interleaved,
        # TAG bit groups
        "tag_bits": self.tag_bits,
        "label1": self.bits_grp_1_label,
        "start1": self.bits_grp_1_start,
        "end1": self.bits_grp_1_end,
        "label2": self.bits_grp_2_label,
        "start2": self.bits_grp_2_start,
        "end2": self.bits_grp_2_end,
        "label3": self.bits_grp_3_label,
        "start3": self.bits_grp_3_start,
        "end3": self.bits_grp_3_end,
    }
    return from_config_to_vars
# Launch the configuration GUI when this file is run as a script.
if __name__ == "__main__":
    app = GuiAppLst()
|
# coding=utf-8
"""Demonstrates Selenium cookie handling against github.com.

Fix: the original used Python 2 ``print`` statements, which are a
SyntaxError on Python 3; converted to the print() function.
"""
from selenium import webdriver
import time

driver = webdriver.Chrome()
driver.get("https://github.com/")
# Fetch all cookies for the current session.
cookie = driver.get_cookies()
print(cookie)
driver.add_cookie({'name': 'key-aaaaaa', 'value': 'value-bbbbbb'})
for cookie in driver.get_cookies():
    print("%s --> %s" % (cookie['name'], cookie['value']))
'''
get_cookies()           return all cookie information
get_cookie(name)        return the cookie with the given name
add_cookie(cookie_dict) add a cookie; must contain name and value keys
delete_cookie(name)     delete a specific (partial) cookie
delete_all_cookies()    delete all cookies
'''
from binary_search_tree import BST


def main():
    """Build a small BST and print its pre-order traversal.

    Fix: the original ``main()`` was just ``pass`` while the real work ran
    at module import time, defeating the ``__main__`` guard below; the
    work now lives inside main().
    """
    b = BST()
    for value in (12, 9, 13):
        b.insert(value)
    b.preorder()


if __name__ == '__main__':
    main()
|
from __future__ import unicode_literals
from django.apps import AppConfig
class WvpnConfig(AppConfig):
    """Django application configuration for the 'wvpn' app."""
    name = 'wvpn'
|
""" Runs continuous prediction"""
import ContiniousPrediction as cp

# Script entry point: run prediction over a folder of inputs.
# NOTE(review): "ContiniousPrediction" is misspelled upstream; it must match
# the actual module file name, so it is left unchanged here.
if __name__ == "__main__":
    contpred = cp.ContiniousPrediction()
    contpred.on_folder()
import tkinter as tk
from tkinter import ttk
from tkinter.font import BOLD
from tkinter import scrolledtext
from tkinter.ttk import Style
import pandas as pd
from sys import platform as _platform
def saveinfo():
    """Collect every form field into one row, append it to `data`, then reset
    the form.

    Fixes over the original:
    - dropped dozens of ``valorN = widget.delete(...)`` assignments that
      bound the ``None`` return value of ``delete``/``set`` to unused names;
    - dropped the ``chkX.state()`` calls in the reset section, which only
      queried state and discarded the result (no-ops);
    - the collected row and the reset behavior are unchanged.

    NOTE(review): checkbutton values are captured via ``.state()`` (a tuple
    of ttk state flags) and the checkbuttons are never un-checked on reset —
    both behaviors preserved from the original; confirm they are intended.
    """
    row = [
        nameEntry.get(),
        mobileEntry.get(),
        emailEntry.get(),
        collegechoosen.get(),
        locationchoosen.get(),
        titleEntry.get(),
        detailsEntry.get(),
        budgetEntry.get(),
        dateEntry.get(),
        durationEntry.get(),
        setupEntry.get(),
        wrapEntry.get(),
        startEntry.get(),
        endEntry.get(),
        anticipatedEntry.get(),
        expectedEntry.get(),
        radioCME.get(),
        radioAV.get(),
        chkStudents.state(),
        chkFaculty.state(),
        chkStaff.state(),
        chkAlumni.state(),
        chkCommunity.state(),
        chkPublic.state(),
        radioStudRequirted.get(),
        vipEntry.get(1.0, tk.END),
        chkCampus.state(),
        chkMedia.state(),
        chkOtherAD.state(),
        otherAdtextEntry.get(),
        radioSafety.get(),
        chkMonitor.state(),
        chkCheckIDs.state(),
        chkVIPsafety.state(),
        chkPatrol.state(),
        chkOtherSafety.state(),
        OtherSafetyTextEntry.get(),
        chkWiFi.state(),
        wifiITEntry.get(),
        chkDevices.state(),
        deviceITEntry.get(),
        chkOtherIT.state(),
        otherITtextEntry.get(),
        chkInstall.state(),
        chkCheckup.state(),
        chkOtherTech.state(),
        otherTechtextEntry.get(),
        chkTables.state(),
        tablesSerEntry.get(),
        chkChairs.state(),
        chairsSerEntry.get(),
        chkSignages.state(),
        signageSerEntry.get(),
        chkOtherSer.state(),
        otherSertextEntry.get(),
        radiocatering.get(),
        addRequirEntry.get(1.0, tk.END),
        chkAgree.state(),
    ]
    data.append(row)
    print(data)
    # Reset single-line entries and comboboxes.
    for widget in (
        nameEntry,
        mobileEntry,
        emailEntry,
        collegechoosen,
        locationchoosen,
        titleEntry,
        detailsEntry,
        budgetEntry,
        dateEntry,
        durationEntry,
        setupEntry,
        wrapEntry,
        startEntry,
        endEntry,
        anticipatedEntry,
        expectedEntry,
        otherAdtextEntry,
        OtherSafetyTextEntry,
        wifiITEntry,
        deviceITEntry,
        otherITtextEntry,
        otherTechtextEntry,
        tablesSerEntry,
        chairsSerEntry,
        signageSerEntry,
        otherSertextEntry,
    ):
        widget.delete(0, "end")
    # Reset the radio-button groups (string "0" kept as in the original).
    for radio_var in (radioCME, radioAV, radioStudRequirted, radioSafety, radiocatering):
        radio_var.set("0")
    # Clear the multi-line text areas.
    vipEntry.delete('1.0', tk.END)
    addRequirEntry.delete('1.0', tk.END)
def export():
    """Write all collected form submissions to DataBase.xlsx."""
    frame = pd.DataFrame(data)
    frame.to_excel("DataBase.xlsx")
# --- main ---
df = pd.DataFrame
data = []
# intializing the window
window = tk.Tk()
window.title("Event Form Request")
# configuring size of the window
window.geometry('450x750')
#Create Tab Control
TAB_CONTROL = ttk.Notebook(window)
#Tab1
TAB1 = ttk.Frame(TAB_CONTROL)
TAB_CONTROL.add(TAB1, text=' 1 / 3 ')
#Tab2
TAB2 = ttk.Frame(TAB_CONTROL)
TAB_CONTROL.add(TAB2, text=' 2 / 3 ')
#Tab3
TAB3 = ttk.Frame(TAB_CONTROL)
TAB_CONTROL.add(TAB3, text=' 3 / 3 ')
TAB_CONTROL.pack(expand=1, fill="both")
###############
#TAB 1
###############
ttk.Label(TAB1, text=" College and Event Details", font=("arial", 10, BOLD)).place(x=100, y=20)
ttk.Label(TAB1, text="Name of Incharge Person:").place(x=30, y=50, width=200)
nameEntry = ttk.Entry(TAB1)
nameEntry.place(x=200, y=50, width=160)
ttk.Label(TAB1, text="Mobile:").place(x=30, y=80, width=80)
mobileEntry = ttk.Entry(TAB1)
mobileEntry.place(x=200, y=80, width=160)
ttk.Label(TAB1, text="Email:").place(x=30, y=110, width=80)
emailEntry = ttk.Entry(TAB1)
emailEntry.place(x=200, y=110, width=160)
ttk.Label(TAB1, text="College:", state="readonly").place(x=30, y=140)
# Combobox creation
collegechoosen = ttk.Combobox(TAB1, width=23, state="readonly")
# Adding combobox drop down list
collegechoosen['values'] = (' COM-R',
' COM-J',
' CON-R',
' CON-J',
' CON-A',
' COP',
' COD',
' COAMS-R',
' COAMS-J',
' COAMS-A',
' COSHP-R',
' COSHP-J')
collegechoosen.set('Please Select ..')
collegechoosen.place(x=200, y=140)
collegechoosen.current()
ttk.Label(TAB1, text="Location:").place(x=30, y=170)
# Combobox creation
locationchoosen = ttk.Combobox(TAB1, width=23, state="readonly")
# Adding combobox drop down list
locationchoosen['values'] = (
' CONF. ROOM 12TH FLOOR',
' DINING ROOM 12 FLOOR',
' MAJLIS 12TH FLOOR',
' ROOM 21 M. FLOOR',)
locationchoosen.set('Please Select ..')
locationchoosen.place(x=200, y=170)
locationchoosen.current()
ttk.Label(TAB1, text="Event Title:").place(x=30, y=200, width=80)
titleEntry = ttk.Entry(TAB1)
titleEntry.place(x=200, y=200, width=160)
ttk.Label(TAB1, text="Event Details:").place(x=30, y=230, width=80)
detailsEntry = ttk.Entry(TAB1)
detailsEntry.place(x=200, y=230, width=160)
ttk.Label(TAB1, text="Event Budget:").place(x=30, y=260, width=80)
budgetEntry = ttk.Entry(TAB1)
budgetEntry.place(x=200, y=260, width=160)
ttk.Label(TAB1, text="Event Date:").place(x=30, y=290, width=80)
dateEntry = ttk.Entry(TAB1)
dateEntry.place(x=200, y=290, width=160)
ttk.Label(TAB1, text="Duration:").place(x=30, y=320, width=80)
durationEntry = ttk.Entry(TAB1)
durationEntry.place(x=200, y=320, width=160)
ttk.Label(TAB1, text="Setup Date:").place(x=30, y=350, width=80)
setupEntry = ttk.Entry(TAB1)
setupEntry.place(x=200, y=350, width=160)
ttk.Label(TAB1, text="Wrap Date:").place(x=30, y=380, width=80)
wrapEntry = ttk.Entry(TAB1)
wrapEntry.place(x=200, y=380, width=160)
ttk.Label(TAB1, text="Start Time:").place(x=30, y=410, width=80)
startEntry = ttk.Entry(TAB1)
startEntry.place(x=200, y=410, width=160)
ttk.Label(TAB1, text="End Time:").place(x=30, y=440, width=80)
endEntry = ttk.Entry(TAB1)
endEntry.place(x=200, y=440, width=160)
ttk.Label(TAB1, text="Number of Anticipated:").place(x=30, y=470, width=180)
anticipatedEntry = ttk.Entry(TAB1)
anticipatedEntry.place(x=200, y=470, width=160)
ttk.Label(TAB1, text="Number of Expected\nAttendees:").place(x=30, y=495, width=180)
expectedEntry = ttk.Entry(TAB1)
expectedEntry.place(x=200, y=500, width=160)
######################################### RADIOPOINT
ttk.Label(TAB1, text="Is this Event a CME?").place(x=30, y=540)
radioCME = tk.IntVar()
radioOne = ttk.Radiobutton(TAB1, text='Yes',
variable=radioCME, value=1)
radioTwo = ttk.Radiobutton(TAB1, text='No',
variable=radioCME, value=2)
labelValue = ttk.Label(TAB1, textvariable=radioCME.get())
radioOne.place(x=230, y=540)
radioTwo.place(x=300, y=540)
ttk.Label(TAB1, text="Required Audio/ Visual?").place(x=30, y=570)
radioAV = tk.IntVar()
radioOne = ttk.Radiobutton(TAB1, text='Yes',
variable=radioAV, value=1)
radioTwo = ttk.Radiobutton(TAB1, text='No',
variable=radioAV, value=2)
labelValue = ttk.Label(TAB1, textvariable=radioAV.get())
radioOne.place(x=230, y=570)
radioTwo.place(x=300, y=570)
###############
#TAB 2
###############
ttk.Label(TAB2, text="General Requirement", font=("arial", 10, BOLD)).place(x=100, y=20)
ttk.Label(TAB2, text='Select the Targeted Audience:').place(x=30, y=50, width=160)
#######################################################################
# Audience checkboxes — one IntVar per option (valor20..valor25);
# presumably read back by saveinfo()/export() elsewhere — confirm.
valor20 = tk.IntVar()
chkStudents = ttk.Checkbutton(TAB2, text='Students', variable=valor20)
chkStudents.place( x=30, y=80, width=80)
valor21 = tk.IntVar()
chkFaculty = ttk.Checkbutton(TAB2, text='Faculty', variable=valor21)
chkFaculty.place(x=130, y=80, width=80)
valor22 = tk.IntVar()
chkStaff = ttk.Checkbutton(TAB2, text='Staff',variable=valor22)
chkStaff.place(x=230, y=80, width=80)
valor23 = tk.IntVar()
chkAlumni = ttk.Checkbutton(TAB2, text='Alumni',variable=valor23)
chkAlumni.place(x=330, y=80, width=80)
valor24 = tk.IntVar()
chkCommunity = ttk.Checkbutton(TAB2, text='Healthcare Community',variable=valor24)
chkCommunity.place(x=30, y=110, width=200)
valor25 = tk.IntVar()
chkPublic = ttk.Checkbutton(TAB2, text='Public',variable=valor25)
chkPublic.place(x=230, y=110, width=80)
# Yes(1)/No(2) radio group: are students required to attend?
ttk.Label(TAB2, text="Required Students Attending?").place(x=30, y=150)
radioStudRequirted = tk.IntVar()
radioOne = ttk.Radiobutton(TAB2, text='Yes',
                           variable=radioStudRequirted, value=1)
radioTwo = ttk.Radiobutton(TAB2, text='No',
                           variable=radioStudRequirted, value=2)
# BUG FIX: textvariable needs the Variable object; the original passed the
# int snapshot from .get(), so the label could never track the selection.
labelValue = ttk.Label(TAB2, textvariable=radioStudRequirted)
radioOne.place(x=230, y=150)
radioTwo.place(x=300, y=150)
# Free-text area for VIP/dignitary guests (scrolled, word-wrapped).
ttk.Label(TAB2, text="Please list any dignitaries, VIPs who may attend "
                     "as guest speakers,\npanelists, etc. Or invited guests"
                     "with title and place of employment:").place(x=30, y=200, width=300)
vipEntry = scrolledtext.ScrolledText(TAB2, width=20, height=4, wrap=tk.WORD)
# vipEntry = ttk.Entry(TAB2)
vipEntry.place(x=30, y=240, width=300)
# Advertising channel checkboxes plus a free-text "Other" field.
ttk.Label(TAB2, text="Advertisment & Marketing", font=("arial", 10, BOLD)).place(x=30, y=320, width=400)
ttk.Label(TAB2, text='What will be used to advertise this event?').place(x=30, y=350, width=300)
valor28 = tk.IntVar()
chkCampus = ttk.Checkbutton(TAB2, text='On Campus', variable= valor28)
chkCampus.place(x=30, y=380, width=100)
valor29 = tk.IntVar()
chkMedia = ttk.Checkbutton(TAB2, text='KSAU-HS Social Media', variable= valor29 )
chkMedia.place(x=130, y=380, width=160)
valor30 = tk.IntVar()
chkOtherAD = ttk.Checkbutton(TAB2, text='Other',variable= valor30 )
chkOtherAD.place(x=30, y=410, width=120)
ttk.Label(TAB2, text='Specify:').place(x=100, y=410, width=200)
otherAdtextEntry = ttk.Entry(TAB2)
otherAdtextEntry.place(x=150, y=410, width=200)
# Yes(1)/No(2) radio group: is a public-safety presence required?
ttk.Label(TAB2, text="This event requires a public \nsafety presence?").place(x=30, y=450, width=200)
radioSafety = tk.IntVar()
radioOne = ttk.Radiobutton(TAB2, text='Yes',
                           variable=radioSafety, value=1)
radioTwo = ttk.Radiobutton(TAB2, text='No',
                           variable=radioSafety, value=2)
# BUG FIX: bind the Variable itself (the original passed radioSafety.get(),
# an int snapshot, so the label could never update).
labelValue = ttk.Label(TAB2, textvariable=radioSafety)
radioOne.place(x=230, y=450)
radioTwo.place(x=300, y=450)
# Purpose-of-safety checkboxes (valor33..valor37) with an "Other" free-text.
ttk.Label(TAB2, text='For what purpose:').place(x=30, y=500, width=200)
valor33 = tk.IntVar()
chkMonitor = ttk.Checkbutton(TAB2, text='To Monitor the Event Entrance', variable=valor33)
chkMonitor.place(x=30, y=530, width=200)
valor34 = tk.IntVar()
chkCheckIDs = ttk.Checkbutton(TAB2, text='Check IDs', variable=valor34)
chkCheckIDs.place(x=230, y=530, width=80)
valor35 = tk.IntVar()
chkVIPsafety = ttk.Checkbutton(TAB2, text='VIP Safety', variable=valor35)
chkVIPsafety.place(x=230, y=560, width=80)
valor36 = tk.IntVar()
chkPatrol = ttk.Checkbutton(TAB2, text='Patrol the Event', variable=valor36)
chkPatrol.place(x=30, y=560, width=200)
valor37 = tk.IntVar()
chkOtherSafety = ttk.Checkbutton(TAB2, text='Other', variable=valor37)
chkOtherSafety.place(x=30, y=590, width=120)
ttk.Label(TAB2, text='Specify:').place(x=100, y=590, width=200)
OtherSafetyTextEntry = ttk.Entry(TAB2)
OtherSafetyTextEntry.place(x=150, y=590, width=200)
###############
#TAB 3
###############
ttk.Label(TAB3, text="Technical Requirement", font=("arial", 10, BOLD)).place(x=100, y=20, width=160)
# IT support: checkbox + quantity Entry per item, plus "Other" free-text.
ttk.Label(TAB3, text='IT support requested:').place(x=30, y=55, width=200)
valor39 = tk.IntVar()
chkWiFi = ttk.Checkbutton(TAB3, text='Wi-Fi', variable=valor39)
chkWiFi.place(x=30, y=80, width=80)
ttk.Label(TAB3, text='Qty:').place(x=120, y=80, width=100)
wifiITEntry = ttk.Entry(TAB3)
wifiITEntry.place(x=150, y=80, width=50)
valor41 = tk.IntVar()
chkDevices = ttk.Checkbutton(TAB3, text='Devices', variable=valor41)
chkDevices.place(x=30, y=105, width=60)
ttk.Label(TAB3, text='Qty:').place(x=120, y=105, width=100)
deviceITEntry = ttk.Entry(TAB3)
deviceITEntry.place(x=150, y=105, width=50)
valor43 = tk.IntVar()
chkOtherIT = ttk.Checkbutton(TAB3, text='Other', variable=valor43)
chkOtherIT.place(x=30, y=130, width=80)
ttk.Label(TAB3, text='Specify:').place(x=100, y=130, width=100)
otherITtextEntry = ttk.Entry(TAB3)
otherITtextEntry.place(x=150, y=130, width=200)
# Technical support options.
ttk.Label(TAB3, text='Technical support requested:').place(x=30, y=170, width=200)
valor45 = tk.IntVar()
chkInstall = ttk.Checkbutton(TAB3, text='Installation', variable=valor45)
chkInstall.place(x=30, y=190, width=80)
valor46 = tk.IntVar()
chkCheckup = ttk.Checkbutton(TAB3, text='Check UP', variable=valor46)
chkCheckup.place(x=30, y=215, width=80)
valor47 = tk.IntVar()
chkOtherTech = ttk.Checkbutton(TAB3, text='Other', variable=valor47)
chkOtherTech.place(x=30, y=240, width=80)
ttk.Label(TAB3, text='Specify:').place(x=100, y=240, width=100)
otherTechtextEntry = ttk.Entry(TAB3)
otherTechtextEntry.place(x=150, y=240, width=200)
# Pick-up services: item checkbox + quantity Entry per item.
ttk.Label(TAB3, text='Pick Up Services:').place(x=30, y=280, width=200)
valor49 = tk.IntVar()
chkTables = ttk.Checkbutton(TAB3, text='Tables', variable=valor49)
chkTables.place(x=30, y=305, width=80)
ttk.Label(TAB3, text='Qty:').place(x=120, y=305, width=100)
tablesSerEntry = ttk.Entry(TAB3)
tablesSerEntry.place(x=150, y=305, width=50)
valor51 = tk.IntVar()
chkChairs = ttk.Checkbutton(TAB3, text='Chairs', variable=valor51)
chkChairs.place(x=30, y=330, width=80)
ttk.Label(TAB3, text='Qty:').place(x=120, y=330, width=100)
chairsSerEntry = ttk.Entry(TAB3)
chairsSerEntry.place(x=150, y=330, width=50)
valor53 = tk.IntVar()
chkSignages = ttk.Checkbutton(TAB3, text='Signages', variable=valor53)
chkSignages.place(x=30, y=355, width=80)
ttk.Label(TAB3, text='Qty:').place(x=120, y=355, width=100)
signageSerEntry = ttk.Entry(TAB3)
signageSerEntry.place(x=150, y=355, width=50)
valor55 = tk.IntVar()
chkOtherSer = ttk.Checkbutton(TAB3, text='Other', variable=valor55)
chkOtherSer.place(x=30, y=380, width=80)
ttk.Label(TAB3, text='Specify:').place(x=100, y=380, width=100)
otherSertextEntry = ttk.Entry(TAB3)
otherSertextEntry.place(x=150, y=380, width=200)
# Yes(1)/No(2) radio group: catering requested?
ttk.Label(TAB3, text="Catering Requested?").place(x=30, y=435)
radiocatering = tk.IntVar()
radioOne = ttk.Radiobutton(TAB3, text='Yes',
                           variable=radiocatering, value=1)
radioTwo = ttk.Radiobutton(TAB3, text='No',
                           variable=radiocatering, value=2)
# BUG FIX: bind the Variable itself, not the .get() snapshot, so the label
# can actually follow the selection.
labelValue = ttk.Label(TAB3, textvariable=radiocatering)
radioOne.place(x=170, y=435)
radioTwo.place(x=250, y=435)
# Free-text area for any extra requirements.
ttk.Label(TAB3, text="Additional Requirement", ).place(x=30, y=470, width=300)
addRequirEntry = scrolledtext.ScrolledText(TAB3, width=300, height=4, wrap=tk.WORD)
addRequirEntry.place(x=30, y=490, width=300)
# Responsibility agreement checkbox plus Save/Export/Quit action buttons.
# saveinfo and export are defined elsewhere in this file.
ttk.Label(TAB3, text="Agreeing of taking Responsibility for good condition and the cleanness \n of the Event Location as received:", font=("arial", 8, BOLD) ).place(x=30, y=600, width=400)
valor59 = tk.IntVar()
chkAgree = ttk.Checkbutton(TAB3, text='Agree',variable=valor59)
chkAgree.place(x=30, y=650, width=200)
ttk.Button(TAB3, text="Save", command=saveinfo, ).place(x=150, y=650, width=50)
ttk.Button(TAB3, text="Export", command=export).place(x=250, y=650, width=50)
ttk.Button(TAB3, text="Quit", command=window.destroy).place(x=350, y=650, width=50)
ttk.Label(TAB3, text="KSAU-HS/MUR Dept.", font=("arial", 5, BOLD)).place(x=30, y=700, width=160)
# Enter the Tk event loop (blocks until the window is closed).
window.mainloop()
|
from django.urls import path
from . import views
# Namespace used for {% url 'staff:...' %} reversing.
app_name = 'staff'

# URL routes for the staff app.  Function-based views are referenced
# directly; class-based views via .as_view().
urlpatterns = [
    path('', views.LoginView, name='login'), #localhost:8000
    path('register', views.RegisterView.as_view(), name='register'), #localhost:8000/register
    path('users', views.ViewUsers.as_view(), name='users'), #localhost:8000/users
    path('list', views.ListCompetenceView.as_view(), name='list'), #localhost:8000/list
    path('comp', views.CompentenceCreateView, name='comp'),
    path('comm/<pk>', views.CommentView.as_view(), name='comm'),
    path('update_compentence/<pk>', views.CompetenceUpdateView.as_view(), name='update_compentence'),
    path('update_com/<pk>', views.AppraiseeCommentUpdate.as_view(), name='update_com'),
    path('update_assessment/<pk>', views.AppraiserAndAppraiseeAgreementView, name='update_assessment'),
    path('update_assess/<pk>', views.UpdateAppraiserAndAppraiseeAgreement, name='update_assess'),
    path('success', views.SuccessView.as_view(), name='success'),
    path('coma/<pk>', views.CommentappView.as_view(), name='coma'),
    path('comv/<pk>', views.CommentvcView.as_view(), name='comv'),
    path('prof', views.ProfileView.as_view(), name='prof'),
    path('act/<pk>', views.PerformanceView.as_view(), name='act'),
    path('as/<pk>', views.AssessmentView, name='as'),
    path('both', views.SuperandApprai.as_view(), name='both'),
    # path('profile', views.Profiles.as_view(), name='profile'),
    path('home' , views.IndexView.as_view(), name='index'),
    path('logout', views.LogoutView, name='logout'),
    path('add_user', views.Register, name='add_user'),
    path('detail/<pk>', views.AllDetailViewapp.as_view(), name='detail'),
    path('agreement/<pk>', views.AgreementView.as_view(), name='agreement'),
    path('edit/<pk>', views.EditOutput, name='editing'),
    path('editi/<pk>', views.UpdateOutputall.as_view(), name='editi'),
    path('listvc', views.ListCompetenceVCView.as_view(), name='listvc'),
    path('details/<pk>', views.AllDetailViewVC.as_view(), name='details'),
]
import ex_1
import ex_2

# Exercise the first module's functions (create, then remove, a directory).
ex_1.create_dir()
ex_1.remove_dir()
# Exercise the second module's function.
print(ex_2.choise_list(ex_2.create_list()))
# Parallel global lists: password[i] is the password for name[i].
name = []
password = []
def register():
    """Interactively register a new user.

    Prompts for a unique username (re-prompting while it is taken, bounded),
    then a password typed twice with up to three confirmation retries.
    The global ``name`` and ``password`` lists are appended TOGETHER only on
    success, so they always stay index-aligned (the original could append a
    name without its password on the denial path, desyncing the two lists).
    """
    name_of_user = input('Enter your NEW name : ')
    # Re-prompt until the username is unique (bounded so we cannot loop forever).
    for _ in range(500):
        if name_of_user not in name:
            break
        print('Already exist plz try other username')
        name_of_user = input('Enter your NEW name : ')
    else:
        # for/else: ran out of attempts without finding a unique name.
        print('Too many attempts .. Denied !')
        return
    pass_of_user = input('Enter your NEW password : ')
    retype = input('Retype your NEW password : ')
    if retype != pass_of_user:
        # Allow three more attempts to confirm the password.
        for i in range(3):
            retype = input(f'Retype your password (attempt {i + 1}) : ')
            if retype == pass_of_user:
                break
        else:
            print('Too many attempts .. Denied !')
            return
    # Success: commit both entries atomically so indices stay paired.
    name.append(name_of_user)
    password.append(pass_of_user)
    print('successfully registered :)')
def login():
    """Log a user in, verifying the password stored at that user's index.

    BUG FIX: the original tested ``pas in password``, which accepted ANY
    registered user's password; we now compare against the password stored
    at the same index as the username.
    """
    nam = input('Enter your username : ')
    if nam not in name:
        print('you are not a user')
        return
    expected = password[name.index(nam)]
    pas = input(f'hi {nam} enter your passcode : ')
    if pas == expected:
        print('Login sucess')
        return
    print('OOPs incorrect man')
    # Up to three retries before locking the user out.
    for _ in range(3):
        retry = input('Enter your password again : ')
        if retry == expected:
            print(f'hi {nam} sucessfully loged in ')
            break
    else:
        print('Too many attempts')
def update():
    """Change a user's password after verifying the old one.

    BUG FIX: the original tested ``p in password`` (any user's password was
    accepted); we now verify against the password at this user's own index.
    """
    try:
        n = input('Enter your username : ')
        if n not in name:
            print('No username')
            return
        idx = name.index(n)
        p = input('Enter your old password : ')
        if p != password[idx]:
            # Up to three retries to supply the correct old password.
            for _ in range(3):
                p = input('Enter your old password : ')
                if p == password[idx]:
                    break
            else:
                print('Too many attempts')
                return
        new = input('Enter your new password : ')
        password[idx] = new
        print('Sucessfully updated')
    except ValueError:
        # .index() on an empty/missing entry (kept for parity with original).
        print('list is empty')
# Text-menu driver for the demo (bounded to 1000 rounds as in the original).
for _ in range(1000):
    print()
    print('''WELCOME TO CANARA BANK
1 . Register
2 . Login
3 . Update''')
    # Guard against non-numeric input instead of crashing on int().
    try:
        n = int(input('Enter your operation : '))
    except ValueError:
        print('invalid option..select (1,2,3)')
        continue
    # BUG FIX: the original condition ``n == 1 or 2 or 3`` was always true
    # (non-zero ints are truthy), so "invalid option" could never print.
    if n == 1:
        register()
    elif n == 2:
        login()
    elif n == 3:
        update()
    else:
        print('invalid option..select (1,2,3)')
|
import csv
class IntCodeProgram:
    """Intcode virtual machine (Advent of Code 2019 style).

    Supports parameter modes 0 (positional), 1 (immediate) and 2 (relative),
    opcodes 1-9, and halt opcode 99.  run() returns -99 on halt.
    """

    class Instruction:
        # Decodes a 5-character zero-padded instruction "mmmoo": the last two
        # characters are the opcode; the preceding three are the modes of
        # parameters 1..3, read right-to-left.
        def __init__(self, instruction):
            self.opcode = int(instruction[3:])
            self.mode1 = int(instruction[2])
            self.mode2 = int(instruction[1])
            self.mode3 = int(instruction[0])

    def __init__(self, input_string, silent_mode=False):
        # Program memory, padded with extra zeroed cells so positional/relative
        # accesses beyond the program image don't raise IndexError.
        self.program = [int(i) for i in input_string]
        self.program.extend([0 for i in range(100000)])
        self.silent_mode = silent_mode      # when True, opcode 4 doesn't print
        self.outputs = []                   # every value emitted by opcode 4
        self.preloaded_inputs = []          # queued inputs consumed by opcode 3
        self.index = 0                      # instruction pointer
        self.relative_index = 0             # relative base used by mode 2

    def preload_inputs(self, inputs, clear_others=False):
        # Queue inputs for opcode 3; optionally drop anything already queued.
        if clear_others:
            self.preloaded_inputs = []
        self.preloaded_inputs.extend(inputs)

    def get_last_output(self):
        # Most recent value emitted by opcode 4 (raises IndexError if none).
        return self.outputs[-1]

    def parse_instruction(self):
        # Left-pad the current cell to 5 digits so omitted modes default to 0.
        return self.Instruction(str(self.program[self.index]).rjust(5, '0'))

    def fiddle_startup(self, noun, verb):
        # AoC day-2 style: patch the noun/verb cells before running.
        self.program[1] = noun
        self.program[2] = verb

    def get_value(self, index, mode):
        # Resolve the parameter stored at `index` according to its mode.
        # NOTE(review): an unrecognised mode falls through and returns None.
        param_value = self.program[index]
        if mode == 0: # Positional
            return self.program[param_value]
        elif mode == 1: # Immediate
            return param_value
        elif mode == 2: # Relative
            return self.program[param_value + self.relative_index]

    def set_value(self, index, value, mode):
        # Write `value` through the parameter at `index`; only modes 0 and 2
        # are handled — any other mode is silently ignored.
        param_value = self.program[index]
        if mode == 0:
            self.program[param_value] = value
        elif mode == 2:
            self.program[param_value + self.relative_index] = value

    def run(self, break_on_output=False):
        """Execute until halt (returns -99), or — if break_on_output — until
        the first output instruction (returns that output value)."""
        while True:
            instruction = self.parse_instruction()
            if instruction.opcode == 1:  # add
                a = self.get_value(self.index + 1, instruction.mode1)
                b = self.get_value(self.index + 2, instruction.mode2)
                self.set_value(self.index + 3, a + b, instruction.mode3)
                self.index = self.index + 4
            elif instruction.opcode == 2:  # multiply
                a = self.get_value(self.index + 1, instruction.mode1)
                b = self.get_value(self.index + 2, instruction.mode2)
                self.set_value(self.index + 3, a * b, instruction.mode3)
                self.index = self.index + 4
            elif instruction.opcode == 3:  # input
                # Prefer queued inputs; fall back to interactive prompt.
                if len(self.preloaded_inputs) > 0:
                    val_to_use = self.preloaded_inputs[0]
                    self.preloaded_inputs = self.preloaded_inputs[1:]
                else:
                    val_to_use = int(input("Input: "))
                self.set_value(self.index + 1, val_to_use, instruction.mode1)
                self.index = self.index + 2
            elif instruction.opcode == 4:  # output
                output = self.get_value(self.index + 1, instruction.mode1)
                if not self.silent_mode:
                    print(output)
                self.outputs.append(output)
                self.index = self.index + 2
                if break_on_output:
                    return output
            elif instruction.opcode == 5:  # jump-if-true
                if self.get_value(self.index + 1, instruction.mode1) != 0:
                    self.index = self.get_value(self.index + 2, instruction.mode2)
                else:
                    self.index = self.index + 3
            elif instruction.opcode == 6:  # jump-if-false
                if self.get_value(self.index + 1, instruction.mode1) == 0:
                    self.index = self.get_value(self.index + 2, instruction.mode2)
                else:
                    self.index = self.index + 3
            elif instruction.opcode == 7:  # less-than
                if self.get_value(self.index + 1, instruction.mode1) < self.get_value(self.index + 2, instruction.mode2):
                    self.set_value(self.index + 3, 1, instruction.mode3)
                else:
                    self.set_value(self.index + 3, 0, instruction.mode3)
                self.index = self.index + 4
            elif instruction.opcode == 8:  # equals
                if self.get_value(self.index + 1, instruction.mode1) == self.get_value(self.index + 2, instruction.mode2):
                    self.set_value(self.index + 3, 1, instruction.mode3)
                else:
                    self.set_value(self.index + 3, 0, instruction.mode3)
                self.index = self.index + 4
            elif instruction.opcode == 9:  # adjust relative base
                relative_offset = self.get_value(self.index + 1, instruction.mode1)
                self.relative_index = self.relative_index + relative_offset
                self.index = self.index + 2
            elif instruction.opcode == 99:  # halt sentinel
                return -99
if __name__ == '__main__':
    # Load the comma-separated program; `with` closes the file (the original
    # leaked the handle).
    with open('Day05Input.txt', 'r') as f:
        program_input = [i for i in next(csv.reader(f))]
    program = IntCodeProgram(program_input)
    # BUG FIX: run() signals a halt by returning -99 (it never returns -1),
    # so the original ``while results != -1`` loop spun forever after halt.
    results = 0
    while results != -99:
        results = program.run()
|
# Generated by Django 2.2.3 on 2019-11-07 18:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Wishlist model: session-keyed wishlist entries pointing at a Product."""

    initial = True

    dependencies = [
        ('main', '0039_auto_20191108_0010'),
    ]

    operations = [
        migrations.CreateModel(
            name='Wishlist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('session_key', models.CharField(blank=True, max_length=200, null=True)),
                # NOTE(review): on_delete=False is not a valid deletion handler
                # (Django expects a callable such as models.CASCADE); deleting a
                # referenced Product will likely raise.  Only flagging here —
                # editing an already-applied migration is unsafe.
                ('product', models.ForeignKey(blank=True, null=True, on_delete=False, to='main.Product', verbose_name='Наименование продукта')),
            ],
            options={
                'verbose_name': 'Whishlist',
                'verbose_name_plural': 'Whishlist',
            },
        ),
    ]
|
import cv2
import numpy as np
from datetime import datetime

# Webcam smoothing demo: shows the raw (timestamped) colour frame alongside a
# median-blurred grayscale version until 'q' is pressed.
cap = cv2.VideoCapture(0)  # default camera
backgroundSubtracter = cv2.createBackgroundSubtractorMOG2()  # only used by the commented-out experiments below
kernel = np.ones( (25,25),np.float32 ) / 625  # 25x25 normalized box-filter kernel
font = cv2.FONT_HERSHEY_SIMPLEX
while(1):
    # Take each frame
    _, frame = cap.read()
    grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    smoothed = cv2.filter2D(grayFrame,-1,kernel)
    # Stamp the current wall-clock time onto the colour frame.
    cv2.putText(frame,datetime.now().strftime('%Y-%m-%d %H:%M:%S'),(350,450),font, 0.7,(10,10,10),2,cv2.LINE_AA)
    cv2.imshow(" Original " , frame)
    # cv2.imshow(" Kernel smoothed" , smoothed)
    # subtractedBackground = backgroundSubtracter.apply(smoothed)
    # cv2.imshow(" Median blur ", subtractedBackground )
    # frame , 15 = pixel round of value
    # blur = cv2.GaussianBlur( frame , (25,25) , 0 )
    # cv2.imshow(" Blur " , blur)
    median = cv2.medianBlur( grayFrame , 15 )
    cv2.imshow(" Median blur ", median )
    # bilateral = cv2.bilateralFilter( res , 15 , 75 , 15 )
    # cv2.imshow( "bilateral blur " , bilateral )
    # denoised_gray = cv2.fastNlMeansDenoising(grayFrame, None, 9, 13)
    # cv2.imshow(" Blur " , denoised_gray)
    # ~27 ms between frames; quit when 'q' is pressed.
    if cv2.waitKey(27) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()
cap.release()
|
# -*- coding: utf-8 -
import random
def read_polarity():
    """Read rt-polarity.pos/.neg, prefix each line with its sentiment label,
    and return the combined lines shuffled.

    Returns a list of lines prefixed with "+1 " (positive) or "-1 "
    (negative), in random order.
    """
    lines = []
    # Context managers close the files deterministically (the original
    # leaked both handles).
    with open('rt-polarity.pos', 'r') as pos_file:
        for line in pos_file:
            lines.append("+1 " + line)
    with open('rt-polarity.neg', 'r') as neg_file:
        for line in neg_file:
            lines.append("-1 " + line)
    random.shuffle(lines)
    return lines
def write_sentiment(lines):
    """Write the labelled lines to sentiment.txt.

    Each element of *lines* is expected to already end with a newline.
    Uses a context manager so the file is closed even if a write fails.
    """
    with open('sentiment.txt', 'w') as f:
        for line in lines:
            f.write(line)
# Build the shuffled, labelled corpus and persist it to sentiment.txt.
write_sentiment(read_polarity())
|
# Package entry point: render everything on execution.
# render_all is presumably exported by this package's __init__ — confirm.
from . import render_all
render_all()
|
# Generated by Django 2.2.2 on 2019-08-05 06:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter DocFile.content to a CharField(max_length=20) with its display label."""

    dependencies = [
        ('filemaster', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='docfile',
            name='content',
            field=models.CharField(max_length=20, verbose_name='简介'),
        ),
    ]
|
import simplejson, builder, pprint, os
f = open("suite.txt")
data_structure = simplejson.loads("\n".join(f.readlines()))
f.close()
f = open("results.txt")
bug = simplejson.loads("\n".join(f.readlines()))["bugs"][0]
f.close()
suite = builder.build(data_structure)
suite.evaluate(bug, True)
print suite
#print suite
#pprint.pprint(suite.get_messages())
|
import json
import os, errno
from xml.dom import minidom
from bs4 import BeautifulSoup
import json
import base64
import numpy as np
import cv2
from PIL import Image
import random
import argparse
import os.path as osp
import sys
import io
import PIL.Image
from augmentor import Augmentor
from config import config
import csv
import shutil
import numpy as np
def apply_exif_orientation(image):
    """Return *image* re-oriented according to its EXIF Orientation tag.

    Images without EXIF data (or with orientation 1) are returned unchanged.
    """
    # BUG FIX: PIL.ExifTags and PIL.ImageOps are used below but were never
    # imported anywhere in this file (only PIL.Image was); import them
    # locally so the lookups cannot raise AttributeError.
    import PIL.ExifTags
    import PIL.ImageOps

    try:
        exif = image._getexif()
    except AttributeError:
        # Image type without EXIF support (e.g. PNG).
        exif = None
    if exif is None:
        return image
    # Map numeric EXIF tag ids to their names.
    exif = {
        PIL.ExifTags.TAGS[k]: v
        for k, v in exif.items()
        if k in PIL.ExifTags.TAGS
    }
    orientation = exif.get('Orientation', None)
    if orientation == 1:
        # do nothing
        return image
    elif orientation == 2:
        # left-to-right mirror
        return PIL.ImageOps.mirror(image)
    elif orientation == 3:
        # rotate 180
        return image.transpose(PIL.Image.ROTATE_180)
    elif orientation == 4:
        # top-to-bottom mirror
        return PIL.ImageOps.flip(image)
    elif orientation == 5:
        # top-to-left mirror
        return PIL.ImageOps.mirror(image.transpose(PIL.Image.ROTATE_270))
    elif orientation == 6:
        # rotate 270
        return image.transpose(PIL.Image.ROTATE_270)
    elif orientation == 7:
        # top-to-right mirror
        return PIL.ImageOps.mirror(image.transpose(PIL.Image.ROTATE_90))
    elif orientation == 8:
        # rotate 90
        return image.transpose(PIL.Image.ROTATE_90)
    else:
        # Unknown orientation value: leave the image untouched.
        return image
def convert_2D_to_3D(img):
    """Replicate a single-channel (grayscale) image into a 3-channel RGB PIL image."""
    gray_array = np.array(img)
    rgb_array = cv2.cvtColor(gray_array, cv2.COLOR_GRAY2RGB)
    return Image.fromarray(rgb_array)
def load_image_file(filename):
    """Open an image, apply its EXIF orientation, and return re-encoded bytes.

    Returns JPEG bytes for .jpg/.jpeg inputs and PNG bytes otherwise;
    implicitly returns None when the file cannot be opened.
    """
    try:
        image_pil = PIL.Image.open(filename)
    except IOError:
        print('Failed opening image file: {}'.format(filename))
        return
    # apply orientation to image according to exif
    image_pil = apply_exif_orientation(image_pil)
    with io.BytesIO() as f:
        # Re-encode into a format keyed off the original extension.
        ext = osp.splitext(filename)[1].lower()
        if ext in ['.jpg', '.jpeg']:
            format = 'JPEG'
        else:
            format = 'PNG'
        image_pil.save(f, format=format)
        f.seek(0)
        return f.read()
def getListOfFiles(dirName):
    """Recursively collect the full paths of every file beneath *dirName*.

    Directory entries are visited in os.listdir order; subdirectories are
    expanded in place, so files from a subtree appear where the directory
    itself was listed.
    """
    collected = list()
    for entry_name in os.listdir(dirName):
        entry_path = os.path.join(dirName, entry_name)
        if os.path.isdir(entry_path):
            # Descend and splice the subtree's files into the result.
            collected = collected + getListOfFiles(entry_path)
        else:
            collected.append(entry_path)
    return collected
def noise_creator(image, label, ag):
    """Run image/label through the augmentor's noise step when one is supplied.

    With ag=None the pair is returned untouched.
    """
    if ag is None:
        return image, label
    return ag.addNoise(image, label)
def getText(nodelist):
    """Concatenate the text of every TEXT_NODE under *nodelist*, depth-first."""
    pieces = []
    for child in nodelist:
        if child.nodeType == child.TEXT_NODE:
            pieces.append(child.data)
        else:
            # Element (or other) node: recurse into its children.
            pieces.append(getText(child.childNodes))
    return ''.join(pieces)
def create_json(idx, new_path, annotaded_dir, kind_of_use, label):
    """Write a labelme-style JSON annotation for one image.

    *label* is (xcenter, ycenter, width, height, angle_deg) of the pupil
    ellipse.  The ellipse is sampled at 1-degree steps into a polygon whose
    vertices are clipped to the 192x192 image bounds; the image itself is
    embedded base64-encoded in the JSON.
    """
    xcenter, ycenter = label[0], label[1]
    width, height = label[2], label[3]
    angle = label[4]
    # Sample the axis-aligned ellipse at 1-degree steps...
    theta = np.deg2rad(np.arange(0.0, 360.0, 1))
    x = 0.5 * width * np.cos(theta)
    y = 0.5 * height * np.sin(theta)
    # ...then rotate by `angle` and translate to the centre.
    rtheta = np.radians(angle)
    R = np.array([
        [np.cos(rtheta), -np.sin(rtheta)],
        [np.sin(rtheta), np.cos(rtheta)],
    ])
    x, y = np.dot(R, np.array([x, y]))
    x += xcenter
    y += ycenter
    points = []
    # Clip every vertex into the 192x192 frame.
    for i in range(0, len(x)):
        if x[i]<0 :
            x[i] = 0
        if x[i]> 192 :
            x[i] = 192
        if y[i]<0 :
            y[i] = 0
        if y[i]> 192 :
            y[i] = 192
        points.append([x[i],y[i]])
    img = load_image_file(new_path)
    data = {
        "version":"3.21.1",
        "flags":{},
        "shapes": [
            {
                "label": "pupil",
                "line_color": None,
                "fill_color": None,
                "points":points,
                "shape_type": "polygon",
                "flags": {}
            }
        ],
        "lineColor": [
            0,
            255,
            0,
            128
        ],
        "fillColor": [
            255,
            0,
            0,
            128
        ],
        "imagePath": str(idx) +".jpg",
        "imageData": base64.encodebytes(img).decode("utf-8"),
        "imageHeight": 192,
        "imageWidth": 192
    }
    # NOTE(review): this json.dumps result is discarded — dead statement.
    json.dumps(data, indent=4)
    with open(annotaded_dir + "/" + kind_of_use + "/"+ str(idx) + ".json",'w') as json_file:
        json.dump(data, json_file)
def data_set_changer (all_image_valid_list, valid_list_xml, valid_list, annotaded_dir, kind_of_use, ag_flag, ag_percentage, eval_dataset_flag):
    """Convert the selected image/XML pairs into annotated JPEG+JSON samples.

    For each index in *valid_list*, saves a 3-channel copy of the image plus a
    labelme JSON under annotaded_dir/kind_of_use/, optionally adding a
    noise-augmented duplicate for a random subset, and — for non-"train"
    splits — writes a CSV of ground-truth ellipse parameters.
    """
    ag_list= []
    # Choose which samples get an extra noise-augmented copy (train-time only).
    if ag_flag=="True" and int(ag_percentage)>0 and eval_dataset_flag == "False":
        ag = Augmentor('./data/pupil_noisy_videos', config)
        ag_list = random.sample(range(0,len(valid_list)),len(valid_list) * int(ag_percentage)//100)
    count = 0
    csv_columns = ['ID', 'Original_Ellipse_Center_X', 'Original_Ellipse_Center_Y','Original_Ellipse_W','Original_Ellipse_H','Original_Ellipse_Alpha', 'Predict_Ellipse_Center_X', 'Predict_Ellipse_Center_Y', 'Predict_Ellipse_W','Predict_Ellipse_H','Predict_Ellipse_Alpha', 'Original_Box_X1', 'Original_Box_Y1', 'Original_Box_X2', 'Original_Box_Y2','Predict_Box_X1', 'Predict_Box_Y1', 'Predict_Box_X2', 'Predict_Box_Y2','Ellipse_Center_Euclidean_Distance']
    pupil_center =[]
    for idx, current_index in enumerate(valid_list):
        img = Image.open(all_image_valid_list[current_index])
        img_3D = convert_2D_to_3D(img)
        new_path = annotaded_dir + "/" + kind_of_use + "/" + str(count) + ".jpg"
        img_3D.save(new_path)
        # Ground-truth ellipse (x, y, w, h, angle) from the sidecar XML.
        xml_dir = valid_list_xml[current_index]
        xmldoc = minidom.parse(xml_dir)
        in_label = []
        in_label.append(getText(xmldoc.getElementsByTagName("x")[0].childNodes))
        in_label.append(getText(xmldoc.getElementsByTagName("y")[0].childNodes))
        in_label.append(getText(xmldoc.getElementsByTagName("w")[0].childNodes))
        in_label.append(getText(xmldoc.getElementsByTagName("h")[0].childNodes))
        in_label.append(getText(xmldoc.getElementsByTagName("a")[0].childNodes))
        label = np.asarray(in_label, dtype=np.float32)
        # Skip samples whose centre lies outside the 192x192 frame.
        if label[0] <= 0 or label[0] >= 192:
            print("label for {0} is out of bound".format(img))
            continue
        if label[1] <= 0 or label[1] >= 192:
            print("label for {0} is out of bound".format(img))
            continue
        pupil_center.append({'ID':count, 'Original_Ellipse_Center_X': "{:.4f}".format(label[0]), 'Original_Ellipse_Center_Y':"{:.4f}".format(label[1]), 'Original_Ellipse_W':"{:.4f}".format(label[2]), 'Original_Ellipse_H':"{:.4f}".format(label[3]), 'Original_Ellipse_Alpha':"{:.4f}".format(label[4]), 'Predict_Ellipse_Center_X':"",'Predict_Ellipse_Center_Y':"", 'Predict_Ellipse_W':"", 'Predict_Ellipse_H':"", 'Predict_Ellipse_Alpha':"", 'Original_Box_X1':"", 'Original_Box_Y1':"", 'Original_Box_X2':"", 'Original_Box_Y2':"", 'Predict_Box_X1':"", 'Predict_Box_Y1':"", 'Predict_Box_X2':"", 'Predict_Box_Y2':"",'Ellipse_Center_Euclidean_Distance':"" })
        create_json(count, new_path, annotaded_dir, kind_of_use, label)
        count = count + 1
        if len(ag_list) > 0 and idx in ag_list:
            new_path = annotaded_dir + "/" + kind_of_use + "/" + str(count) + ".jpg"
            image, noise_label = noise_creator(img, label, ag)
            # NOTE(review): noise_creator is called twice back-to-back; the
            # second call overwrites the first — looks accidental, confirm.
            image, noise_label = noise_creator(img, label, ag)
            noise_label = np.asarray(noise_label, dtype=np.float32)
            pupil_center.append({'ID':count, 'Original_Ellipse_Center_X':"{:.4f}".format(noise_label[0]), 'Original_Ellipse_Center_Y':"{:.4f}".format(noise_label[1]), 'Original_Ellipse_W':"{:.4f}".format(noise_label[2]), 'Original_Ellipse_H':"{:.4f}".format(noise_label[3]), 'Original_Ellipse_Alpha':"{:.4f}".format(noise_label[4]),'Predict_Ellipse_Center_X':"", 'Predict_Ellipse_Center_Y':"", 'Predict_Ellipse_W':"", 'Predict_Ellipse_H':"", 'Predict_Ellipse_Alpha':"",'Original_Box_X1':"", 'Original_Box_Y1':"", 'Original_Box_X2':"", 'Original_Box_Y2':"", 'Predict_Box_X1':"", 'Predict_Box_Y1':"", 'Predict_Box_X2':"", 'Predict_Box_Y2':"",'Ellipse_Center_Euclidean_Distance':""})
            img_3D = convert_2D_to_3D(image)
            img_3D.save(new_path)
            create_json(str(count), new_path, annotaded_dir, kind_of_use, noise_label)
            count = count + 1
    # Ground-truth CSV written for evaluation splits only.
    if kind_of_use != "train":
        csv_file = "./results/pupil_Detection_result/data_info.csv"
        try:
            with open(csv_file, 'w') as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
                writer.writeheader()
                for data in pupil_center:
                    writer.writerow(data)
        except IOError:
            print("I/O error")
def main():
    """Build labelme-style annotated datasets from paired image/XML files.

    Scans --images_dir for ``*in.jpg`` images with a matching ``*gt.xml``
    label, then writes annotated train/test (or eval-only) splits under
    --annotaded_dir and prepares ./results/pupil_Detection_result.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('--images_dir', help='Input Image Subdirectory')
    parser.add_argument('--annotaded_dir', help='Output Annotaded Directory')
    parser.add_argument('--ag_flag', help='Agumentor Flag')
    parser.add_argument('--ag_percentage', default=0, help='Percentage of Agumentor')
    # BUG FIX: this flag is compared to the strings "False"/"True" below, but
    # its default was the boolean False, so omitting the option meant NEITHER
    # the train/test branch NOR the eval branch ran.  Default to the string.
    parser.add_argument('--eval_dataset_flag', default='False', help='Create Just Evaluate Dataset')
    args = parser.parse_args()
    # Start from a clean output directory.
    shutil.rmtree(args.annotaded_dir, ignore_errors=True)
    os.makedirs(args.annotaded_dir)
    # Keep only images that have a ground-truth XML sibling.
    all_image_valid_list = []
    valid_list_xml = []
    list_of_files = getListOfFiles(args.images_dir)
    for idx, current_file in enumerate(list_of_files):
        if ".jpg" in current_file:
            xml_dir = "/".join(current_file.split("/")[0:-1])+"/"+current_file.split("/")[-1].replace("in.jpg", "gt.xml")
            if xml_dir in list_of_files:
                all_image_valid_list.append(current_file)
                valid_list_xml.append(xml_dir)
    # Fresh results directory for the downstream evaluation CSV.
    result_export_path = "./results/pupil_Detection_result"
    shutil.rmtree( result_export_path, ignore_errors=True)
    os.makedirs(result_export_path)
    if args.eval_dataset_flag=="False":
        # Random 80/20 train/test split.
        temp_list = random.sample(range(0, len(all_image_valid_list)), len(all_image_valid_list))
        train_list = temp_list[0:((len(temp_list)*80)//100)]
        test_list = temp_list[len(train_list):len(temp_list)]
        os.makedirs(args.annotaded_dir + "/train")
        os.makedirs(args.annotaded_dir + "/test")
        data_set_changer(all_image_valid_list, valid_list_xml, train_list, args.annotaded_dir, "train", args.ag_flag, args.ag_percentage, args.eval_dataset_flag)
        data_set_changer(all_image_valid_list, valid_list_xml, test_list, args.annotaded_dir, "test", args.ag_flag, args.ag_percentage, args.eval_dataset_flag)
    if args.eval_dataset_flag == "True":
        # Evaluation-only dataset: all samples go into the "test" split.
        os.makedirs(args.annotaded_dir + "/test")
        eval_list = random.sample(range(0, len(all_image_valid_list)), len(all_image_valid_list))
        data_set_changer(all_image_valid_list, valid_list_xml, eval_list, args.annotaded_dir, "test", args.ag_flag, args.ag_percentage, args.eval_dataset_flag)
    print('Creating annotaded directory:', args.annotaded_dir)

if __name__ == '__main__':
    main()
|
# BUG FIX: the original line was `print("testchild)` — an unterminated
# string literal, i.e. a SyntaxError; close the quote.
print("testchild")
|
'''
Utility to update ACDD global attributes in a NetCDF file using metadata sourced from GeoNetwork

Created on Apr 7, 2016

@author: Alex Ip, Geoscience Australia
'''
import sys
import subprocess
import re
import os
import netCDF4
from geophys2netcdf import ERS2NetCDF
def main():
    """Update ACDD metadata in every matching NetCDF file under <root_dir>.

    Usage: %s <root_dir> <file_template> [<xml_dir>]
    The optional <xml_dir> supplies per-file sidecar XML metadata.
    (Python 2 module: note the print statements.)
    """
    assert len(
        sys.argv) >= 3 and len(sys.argv) <= 4, 'Usage: %s <root_dir> <file_template> [<xml_dir>]' % sys.argv[0]
    root_dir = sys.argv[1]
    file_template = sys.argv[2]
    if len(sys.argv) == 4:
        xml_dir = sys.argv[3]
    else:
        xml_dir = None
    # Locate all *.nc files under root_dir matching the template via find(1).
    nc_path_list = sorted([filename for filename in subprocess.check_output(
        ['find', root_dir, '-name', file_template]).split('\n') if re.search('\.nc$', filename)])
    for nc_path in nc_path_list:
        print 'Updating metadata in %s' % nc_path
        # Sidecar XML shares the NetCDF file's basename, with a .xml suffix.
        if xml_dir:
            xml_path = os.path.abspath(os.path.join(xml_dir, os.path.splitext(os.path.basename(nc_path))[0] + '.xml'))
        else:
            xml_path = None
        try:
            g2n_object = ERS2NetCDF()
            g2n_object.update_nc_metadata(nc_path, do_stats=True, xml_path=xml_path)
            # Kind of redundant, but possibly useful for debugging
            g2n_object.check_json_metadata()
        except Exception as e:
            # Best-effort: log and continue with the next file.
            print 'Metadata update failed: %s' % e.message

if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
#========================================================================#
# CGLOPS LSWT L3U processing
#------------------------------------------------------------------------#
# Run from command line as:
# python2.7 run_lswt_l3u.py --rerun no --run_l3cdaily no &
#------------------------------------------------------------------------#
# R. Maidment
#========================================================================#
#/group_workspaces/jasmin2/nceo_uor/users/lcarrea01/TEST_OUTPUT_L2P/RESAMPLE
#/group_workspaces/jasmin2/nceo_uor/users/lcarrea01/OUTPUT_L3C/run_collate_l3
#/group_workspaces/jasmin2/nceo_uor/users/lcarrea01/CGLOPS/run_collate_10DAYS
#------------------------------------------------------------------------#
# Import modules
#------------------------------------------------------------------------#
from __future__ import division
from datetime import datetime as dt
from datetime import timedelta
import os
import os.path
import logging
import time
import sys
import shutil
import subprocess
import config_lswt as config
import lswt_operational as so
from threading import Thread
import argparse
#------------------------------------------------------------------------#
# Specify required command line arguments
#------------------------------------------------------------------------#
# Required command-line flags: both take 'yes'/'no' string values.
parser = argparse.ArgumentParser()
parser.add_argument("--rerun", choices=['yes','no'], dest="rerun", metavar="RERUN", help="Run processing regardless if files already exist", required=True)
parser.add_argument("--run_l3cdaily", choices=['yes','no'], dest="run_l3cdaily", metavar="RUN_L3CDAILY", help="Run l3c-daily script when l3u processing has finished", required=True)
args = parser.parse_args()
#------------------------------------------------------------------------#
# Create log file for each day (day being day this script was executed)
#------------------------------------------------------------------------#
logfile = so.log_output(config.log_opsdir,'l3u')
#------------------------------------------------------------------------#
# Determine dates to process
#------------------------------------------------------------------------#
#today = dt.strptime("%s-%s-%s %s:%s:%s" % (2019, 05, 03, 1, 1, 1), '%Y-%m-%d %H:%M:%S')
today = dt.now().replace(microsecond=0)
# dekad_info presumably carries the 10-day ("dekad") processing windows —
# see lswt_operational.get_date for the actual contract.
dekad_info = so.get_date(today, config.lag, config.buffer, config.latency, config.l2p_latency, config.capdates, config.check_startdate)
dekad_info.get_dateinfo()
#------------------------------------------------------------------------#
# Check if previously submitted jobs are finished (if all jobs have
# finished, carry on, otherwise exit script)
#------------------------------------------------------------------------#
so.check_submitted_jobs(config.log_opsdir, dekad_info, type='l2p', pattern='Job_ID')
so.check_submitted_jobs(config.log_opsdir, dekad_info, type='l3u', pattern='Job_ID')
basepath = config.l2path + '/'
pathout = config.l3upath + '/'
if os.path.isdir(config.l3upath)==False:
os.makedirs(pathout)
#------------------------------------------------------------------------#
# Determine startday, endday and R value
#------------------------------------------------------------------------#
#x1 = dt.strptime("%s-%s-%s" % (2002, 5, 20), '%Y-%m-%d')
#x2 = dt.strptime("%s-%s-%s" % (2018, 11, 1), '%Y-%m-%d')
#dr = [x2 - timedelta(days=x) for x in range(0, (x2-x1).days+1)]
#len(dr)
d0 = dt.strptime("%s-%s-%s" % (2002, 5, 20), '%Y-%m-%d')
d1 = dekad_info.dekad_list[-1:][0][1] #d1 = dekad_info.pdekad_start
d2 = dekad_info.dekad_list[-1:][0][2] #d2 = dekad_info.pdekad_end
daterange = [d2 - timedelta(days=x) for x in range(0, (d2-d1).days+1)]
daterange.reverse()
i = d1 - d0
j = d2 - d0
startday = i.days
endday = j.days
start_time = 674697600 # Seconds since 1981-01-01 00:00:00
R = start_time + i.days*60*60*24
#------------------------------------------------------------------------#
# Submit jobs (one job per day)
#------------------------------------------------------------------------#
logging.info("===============================================")
logging.info(" Checking if l3u files need processing ...")
logging.info("===============================================")
joblist = list()
for idx, i in enumerate(range(startday, endday+1)):
    # R is the processed day expressed as seconds since 1981-01-01 00:00:00.
    R = start_time + i*60*60*24
    date = daterange[idx]
    logging.info(" Checking date: %s (R: %s) " % (dt.strftime(date, '%Y-%m-%d'), R))
    # Create gbcs log directory for given day
    l3ulogpath_day = os.path.join(config.log_l3udir, dt.strftime(date, '%Y'), dt.strftime(date, '%m'), dt.strftime(date, '%d'))
    if not os.path.exists(l3ulogpath_day):
        logging.info(" Making l3u log directory: %s" % l3ulogpath_day)
        os.makedirs(l3ulogpath_day)
    # Prepare job command ('lswt_l3u_resample.py' was previously 'resample_l2p_withPLOT_AVHRR.py')
    pycommand = 'lswt_l3u_resample.py', '--start', R, '--rerun n', '--basepath', basepath, '--pathout', pathout
    job = ['bsub',
           '-q', 'short-serial',
           '-W', '24:00',
           '-o', os.path.join(l3ulogpath_day, '%J.out'),
           '-e', os.path.join(l3ulogpath_day, '%J.err'),
           '-R', 'rusage[mem=50000]',
           'python2.7',
           os.path.join(config.homedir, so.tidyup_job(pycommand))]
    # Create daily directory if it doesn't already exist
    daydir = os.path.join(config.l3upath, dt.strftime(date, '%Y'), dt.strftime(date, '%m'), dt.strftime(date, '%d'))
    if not os.path.exists(daydir):
        logging.info(" Making directory for l3u SLSTR files: %s" % daydir)
        os.makedirs(daydir)
    # Only attempt to process l3u files if l2p files exist for the given day
    l2p_files = so.list_files(os.path.join(config.l2path, dt.strftime(date, '%Y'), dt.strftime(date, '%m'), dt.strftime(date, '%d')), "nc")
    if len(l2p_files) > 0:
        # Check if l3u files already exist for given day
        filelist = so.list_files(daydir, "nc")
        logging.info(" -> %s l3u files detected" % len(filelist))
        if len(filelist) == 0:
            logging.info("... submitting job: %s" % job)
            result = subprocess.check_output(so.tidyup_job(job), shell=True)
            logging.info("Job_ID: %s" % result)
            logging.info("Job_PATH: <%s>" % l3ulogpath_day)
            joblist.append((result, l3ulogpath_day, date, 'l3u'))
        else:
            if args.rerun == 'yes':
                logging.info(" -> Parameter 'rerun' set to: %s " % args.rerun)
                logging.info("... submitting job: %s" % job)
                result = subprocess.check_output(so.tidyup_job(job), shell=True)
                logging.info("Job_ID: %s" % result)
                # BUG FIX: this branch previously logged and appended
                # 'l3clogpath_day', an undefined name copied from the l3c
                # script, raising NameError on every rerun submission.
                logging.info("Job_PATH: <%s>" % l3ulogpath_day)
                joblist.append((result, l3ulogpath_day, date, 'l3u'))
            elif args.rerun == 'no':
                logging.info(" -> Parameter 'rerun' set to '%s' " % args.rerun)
    else:
        logging.info(" -> No l2p files exist!")
        logging.info(" Unsent email notification <" + "LSWT Operations Warning" + ": No l2p files found for date:- %s" % dt.strftime(date, '%Y-%m-%d') + ">")
    logging.info(" . . . . .")
#------------------------------------------------------------------------#
# Check progress of each job
#------------------------------------------------------------------------#
if len(joblist) > 0:
    logging.info("\n-----------------------------------------------")
    logging.info(" Checking progress of submitted job(s) ...")
    logging.info("-----------------------------------------------")
    # Create threads for each job submitted and execute
    # NOTE(review): args=(i) is NOT a one-tuple — it passes the 4-tuple i as
    # four positional arguments to so.check_job. If check_job expects a
    # single tuple argument this should be args=(i,); confirm its signature.
    threads = [Thread(target=so.check_job, args=(i)) for i in joblist]
    [t.start() for t in threads]
    [t.join() for t in threads]
    # Now check log files for problems - if problems found, send email
    #warnings = so.check_logfile(filename, 'l3u')
    #if any key terms found, stop processing and send email notification!
#------------------------------------------------------------------------#
# When l3u processing is done and no errors found, submit l3c-daily processing
#------------------------------------------------------------------------#
if args.run_l3cdaily == 'yes':
    logging.info("\n-----------------------------------------------")
    logging.info(" Now executing l3c-daily processing ...")
    logging.info("-----------------------------------------------")
    # Chain the next pipeline stage (l3c-daily) as its own batch job.
    pycommand = 'run_lswt_l3cdaily.py', '--rerun', 'no', '--run_l3cdekadal', 'yes'
    job = ['bsub',
           '-q', 'short-serial',
           '-W', '12:00',
           '-oo', os.path.join(config.log_opsdir,'submit_l3cdaily.out'),
           '-eo', os.path.join(config.log_opsdir,'submit_l3cdaily.err'),
           '-R', 'rusage[mem=40000]',
           '-M', '40000',
           'python2.7',
           os.path.join(config.homedir, so.tidyup_job(pycommand))]
    result = subprocess.check_output(so.tidyup_job(job), shell=True)
elif args.run_l3cdaily == 'no':
    logging.info("\n-----------------------------------------------")
    logging.info(" No l3c-daily processing instructed.")
    logging.info("-----------------------------------------------")
logging.info("\n--- End of script ---")
|
"""Backend for a proteomics database."""
from flask import Flask, jsonify, make_response, render_template
from ctesi.ldap import LDAPUserDatastore, LDAPLoginForm
from http import HTTPStatus
from redis import StrictRedis
from flask_sqlalchemy import SQLAlchemy
from flask_security import Security
from flask_migrate import Migrate
from flask_login import current_user
from flask_allows import Allows
from celery import Celery
import config.config as config
app = Flask(__name__)
app.config.from_object(config.config)
allows = Allows(app=app, identity_loader=lambda: current_user)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
celery = Celery()
celery.config_from_object(config.CeleryConfig)
redis = StrictRedis(host='redis')
# Register blueprints
from ctesi.views import home, users, api_blueprint
app.register_blueprint(home)
app.register_blueprint(users)
app.register_blueprint(api_blueprint, url_prefix='/api')
# Setup Flask-Security with LDAP goodness
from ctesi.core.models import User, Role
user_datastore = LDAPUserDatastore(db, User, Role)
security = Security(app, user_datastore, login_form=LDAPLoginForm)
@app.errorhandler(HTTPStatus.UNAUTHORIZED)
@app.errorhandler(HTTPStatus.NOT_FOUND)
def error(error):
    """Render the shared error page with the handled error's HTTP status code."""
    page = render_template('error.html', error=error)
    return page, error.code
|
# Read the element count `a` and the target value `b`, then the values.
# Prints "yes" if the target occurs (numerically) among the first `a`
# values, else "no".
# Improvements over the original: drops the redundant `c=[]` pre-init and
# the manual index loop + occurrence counter in favour of any().
a, b = input().split()
values = input().split()[:int(a)]
target = int(b)
if any(int(v) == target for v in values):
    print("yes")
else:
    print("no")
|
import sys
import os.path as op
import backslant
from flask import Flask, render_template
# Install backslant's import hook so templates in this directory can be
# imported as Python modules under the 'bsviews' hook name.
sys.meta_path.insert(0, backslant.PymlFinder(op.dirname(__file__), hook="bsviews"))
from bsviews.templates import index
app = Flask(__name__)
app.debug = True
@app.route('/')
def hello_world():
    """Render the landing page via the backslant template."""
    s = ''.join(index.render(title="Backslant Sample Server"))
    return s
@app.route('/j2')
def j2():
    """Render the equivalent page via a Jinja2 template for comparison."""
    return render_template('index.html')
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=9000)
"""
The MIT License (MIT)
Copyright (c) 2017 Marvin Teichmann
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
import scipy as scp
import warnings
import deepdish as dd
import logging
from tables.exceptions import NaturalNameWarning
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
class Logger():
def __init__(self, filename=None):
self.data = {}
self.steps = []
self.filename = filename
def init_step(self, step):
self.steps.append(step)
if len(self.steps) > 1:
# Check that step size is constant.
assert(self.steps[-1] - self.steps[-2] ==
self.steps[1] - self.steps[0])
def add_value(self, value, name, step):
assert(self.steps[-1] == step)
if len(self.steps) == 1:
self.data[name] = [value]
else:
self.data[name].append(value)
assert(len(self.data[name]) == len(self.steps))
def add_values(self, value_dict, step, prefix=None):
for name, value in value_dict.items():
if prefix is not None:
name = prefix + "\\" + name
self.add_value(value, name, step)
def save(self, filename):
if filename is None:
assert(self.filename is not None)
filename = self.filename
save_dict = {'data': self.data,
'steps': self.steps}
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=NaturalNameWarning)
dd.io.save(filename, save_dict)
def load(self, filename):
load_dict = dd.io.load(filename)
self.data = load_dict['data']
self.steps = load_dict['steps']
return self
def reduce_step(self, step):
reduced_data = {}
assert(step >= 0)
assert(step <= len(self.steps))
for key, value in self.data.items():
reduced_data[key] = value[step]
return reduced_data
def discard_data(self, step):
reduced_data = {}
assert(step >= 0)
assert(step <= len(self.steps))
for key, value in self.data.items():
reduced_data[key] = value[0:step]
self.data = reduced_data
self.steps = self.steps[0:step]
return
if __name__ == '__main__':
    # Smoke test: confirms the module imports and logging is configured.
    logging.info("Hello World.")
|
# Blender add-on metadata read by the add-on manager.
bl_info = {
    'name':'SaveIncOperator',
    'category':'User',
    'author':'miguel'
}
import bpy
import string
class SaveIncOperator(bpy.types.Operator):
    """Save the current .blend file under an auto-incremented numeric suffix."""
    bl_idname = "object.save_incremental"
    bl_label = "Save scene incrementally"

    @classmethod
    def poll(cls, context):
        # Only available while the scene has an active object.
        return context.active_object is not None

    def execute(self, context):
        # get current blender file name
        blend_name = bpy.path.basename(bpy.data.filepath)
        print('blend_name: ', blend_name)
        # remove the '.blend' extension (6 characters)
        name = blend_name[0:(len(blend_name) - 6)]
        print('name: ' + name)
        if len(name) == 0:
            name = 'untitled'
        # count how many trailing characters of the base name are digits
        digit_counter = 0
        for i in range(len(name))[::-1]:
            if name[i].isdigit():
                digit_counter += 1
                continue
            else:
                break
        print('digits_counter: ', digit_counter)
        if digit_counter == 0:
            new_file_name = name + '001.blend'
            print('new_file_name: ', new_file_name)
        else:
            # extract number from file name
            file_number = ''.join(name[-digit_counter:])
            print('file_number: ', file_number)
            # convert number to int and apply the increment step
            number = int(file_number)
            print('number: ', number)
            number += 1
            # BUG FIX: '%03.d' space-pads (the 0 flag is ignored once a
            # precision is specified), producing names like 'scene  2.blend';
            # '%03d' zero-pads as intended ('scene002.blend').
            new_file_name = name[0:(len(name) - digit_counter)] + ('%03d' % number) + '.blend'
            print('new_file_name: ', new_file_name)
        # extract the directory part of the current file path
        path = bpy.data.filepath[:-len(blend_name)]
        print('Path to file %s' % path)
        if len(path) <= 0:
            # NOTE(review): hard-coded fallback directory for never-saved
            # files — consider bpy.app.tempdir instead; confirm intent.
            final_path = '/home/miguel/' + new_file_name
            print('final_path: ', final_path)
        else:
            final_path = path + new_file_name
        bpy.ops.wm.save_as_mainfile(filepath=final_path, check_existing=False)
        return {'FINISHED'}
def register():
    """Register the operator class with Blender."""
    bpy.utils.register_class(SaveIncOperator)
def unregister():
    """Remove the operator class from Blender."""
    bpy.utils.unregister_class(SaveIncOperator)
if __name__ == "__main__":
    # Allow running directly from Blender's text editor.
    register()
    # test call
    bpy.ops.object.save_incremental()
|
#Ejercicio 04
def find_needle(needle, haystack):
    """Return True if ``needle`` occurs as a contiguous substring of ``haystack``.

    BUG FIX: the original loop never advanced ``posicion_haystack`` (an
    infinite loop for any non-empty haystack) and never returned anything.
    """
    # An empty needle is trivially contained.
    if needle == "":
        return True
    for start in range(len(haystack) - len(needle) + 1):
        if haystack[start:start + len(needle)] == needle:
            return True
    return False
def encontrar(cadena, subcadena):
    """Return True if ``subcadena`` occurs within ``cadena``.

    BUG FIX: the original evaluated ``subcadena in cadena`` but discarded
    the result instead of returning it (the function returned None).
    """
    return subcadena in cadena

encontrar("Tiktoker", "to")
from typing import Generator
import numpy as np
from keras.callbacks import Callback
from keras.utils import GeneratorEnqueuer
from sklearn.metrics import precision_score, roc_auc_score
# TODO: Add extra metrics to history so that they are saved in the file
class PrecisionCallback(Callback):
    """Log weighted precision on the held-out validation data after each epoch."""

    def on_train_begin(self, logs=None):
        # One precision score appended per finished epoch.
        self.prfs = []

    def on_epoch_end(self, epoch, logs=None):
        y_pred = self.model.predict(self.validation_data[0])
        # Targets and predictions are one-hot / probability rows, so take the
        # argmax over axis 1 to recover class labels before scoring.
        self.prfs.append(
            precision_score(np.argmax(self.validation_data[1], axis=1), np.argmax(y_pred, axis=1),
                            average='weighted'))
        print('Precision Score is %s' % self.prfs[-1])
        # BUG FIX: removed a leftover debug statement (print('random')).
class AucCallback(Callback):
    """Compute ROC-AUC on validation data every ``interval`` epochs and at train end.

    ``validation_data`` is either an (x, y) tuple or a generator; when a
    generator is supplied, ``validation_steps`` must be given as well.
    """

    def __init__(self, validation_data, validation_steps=None, interval=1):
        # BUG FIX: call the base initializer so Keras performs its normal
        # Callback bookkeeping (model/params attributes).
        super().__init__()
        self.interval = interval
        self.validation_data = validation_data
        self.validation_steps = validation_steps

    def on_train_begin(self, logs=None):
        # One AUC score appended per evaluation.
        self.auc = []

    def auc_eval(self):
        """Collect predictions and targets, then append the ROC-AUC score."""
        if isinstance(self.validation_data, Generator):
            assert self.validation_steps is not None, \
                'If validation data is a generator, validation steps must be provided'
            y_pred = []
            y_true = []
            # Pull batches off the generator in a background worker.
            enqueuer = GeneratorEnqueuer(self.validation_data,
                                         use_multiprocessing=False,
                                         wait_time=.01)
            enqueuer.start(workers=1, max_queue_size=10)
            output_generator = enqueuer.get()
            for _ in range(self.validation_steps):
                generator_output = next(output_generator)
                if not hasattr(generator_output, '__len__'):
                    raise ValueError('Output of generator should be a tuple '
                                     '(x, y, sample_weight) '
                                     'or (x, y). Found: ' +
                                     str(generator_output))
                if len(generator_output) == 2:
                    x, y = generator_output
                elif len(generator_output) == 3:
                    x, y, _ = generator_output
                else:
                    raise ValueError('Output of generator should be a tuple '
                                     '(x, y, sample_weight) '
                                     'or (x, y). Found: ' +
                                     str(generator_output))
                outs = self.model.predict_on_batch(x)
                y_pred += outs.tolist()
                y_true += y.tolist()
            enqueuer.stop()
        else:
            y_pred = self.model.predict(self.validation_data[0])
            # BUG FIX: np.bool was removed in NumPy 1.24; the builtin bool
            # is the documented replacement and behaves identically here.
            y_true = self.validation_data[1].astype(bool)
        roc_auc = roc_auc_score(y_true=y_true, y_score=y_pred)
        self.auc.append(roc_auc)
        print('AUC Score is %s' % self.auc[-1])

    def on_epoch_end(self, epoch, logs=None):
        if epoch % self.interval == 0:
            self.auc_eval()

    def on_train_end(self, logs=None):
        self.auc_eval()
|
"""
Usage: python dealer_socket_functions.py
This script is used to intake dealerSocket data in csv format and save it to a simple json format after differential data is merged with the full data set provided via ftp in the morning.
The script expects there to be a "../data/3213_ctc_activities_update.csv" file containing original data and a "../data/3213_ctc_activities_used_as_diff.csv" file containing differential data. These can be changed within the main() function.
The script uses the following libraries: datetime, date, time and timedelta from the library datetime; csv; copy and json.
The script filters out data that is not relevant for the date set as target day, as well as duplicate records.
Finally, the data is saved to file in .json format, with the name suffixed by "sales" or "service".
Output validation can be done through
http://www.jsoneditoronline.org. or viewed using http://www.dirtymarkup.com/
If you have any questions, please feel free to contact me via:
email: theiligers@infermadiks.com
skype: tinaheiligers
"""
import csv
import copy
from datetime import datetime, date, time, timedelta
import json
def check_input(data, target_date=None):
    '''Validate parsed activity records against the input specification.

    Input: data parsed from file; optional target_date for the due-date
    check (defaults to today minus 7 days, matching the rest of the module).
    Returns a dict mapping each check name to True/False.

    BUG FIXES versus the original:
      * check_fields() was called on the records instead of the field names;
      * the dict was initialised with key "field ok:" but updated under
        "field ok", so the initial False entry was never overwritten;
      * check_unique_activity_ids() returns a tuple, so ``== True`` never held;
      * split_data_by_category() returns a 3-tuple, so index [3] raised
        IndexError;
      * target_date was an undefined global name.
    '''
    if target_date is None:
        target_date = date.today() + timedelta(days=-7)
    data_status = {"field ok": False, "activity_ids_unique": False, "all_data_today_data": False, "record_status": False, "complete_date_check": False, "all_valid_events": False}
    if check_fields(get_fields(data)) == [True, True]:
        data_status["field ok"] = True
    if check_unique_activity_ids(data)[0] == True:
        data_status["activity_ids_unique"] = True
    if due_date_is_target(data, target_date)[2] == True:
        data_status["all_data_today_data"] = True
    if len(record_status(data)[0]) == len(record_status(data)[1]):
        data_status["record_status"] = True
    if len(blank_complete_date(data)) == len(data):
        data_status["complete_date_check"] = True
    if split_data_by_category(data)[2] == []:
        data_status["all_valid_events"] = True
    return data_status
def get_data(file):
    '''Input: path to a csv file.
    Returns: list of row dicts parsed with csv.DictReader; this is the raw
    data consumed by all other functions in the module.'''
    with open(file) as csvfile:
        return [row for row in csv.DictReader(csvfile)]
def get_fields(data):
    '''Input: parsed data from file.
    Returns: list of distinct field names across all records, in
    first-seen order.'''
    seen = []
    for record in data:
        for field in record.keys():
            if field not in seen:
                seen.append(field)
    return seen
def check_fields(data_fields):
    '''Input: list of field names from get_fields().
    Raises ValueError on the first unexpected field, then on the first
    required field that is missing.
    Returns: [found_any_required, all_required_present] flags — [True, True]
    when the field set matches the specification.'''
    req_fields = ["ActivityId", "EntityId", "EventId",
                  "FirstName", "LastName", "CompanyName", "AssignedToName", "DueDate", "StatusDesc", "CompleteDate", "EventCategoryDesc"]
    field_check = [False, False]
    for candidate in data_fields:
        if candidate not in req_fields:
            raise ValueError("extra field in input data: {}".format(candidate))
        field_check[0] = True
    for required in req_fields:
        if required not in data_fields:
            raise ValueError("missing field from input data: {}".format(required))
        field_check[1] = True
    return field_check
def get_data_check_fields(file1, file2):
    '''Input: full paths to the current and differential csv files.
    Loads both files and validates their field names; check_fields raises
    ValueError when either file deviates from the specification.
    Returns: (cur_data, diff_data).'''
    cur_data = get_data(file1)
    diff_data = get_data(file2)
    # Validation is performed for its side effect only (raises on bad input).
    check_fields(get_fields(cur_data))
    check_fields(get_fields(diff_data))
    return (cur_data, diff_data)
def concatenate_name(data):
    '''Input: parsed data from get_data().
    Adds a "FullName" field: capitalised "First Last", falling back to
    CompanyName when both name parts are blank.
    Returns: the same list with FullName added to every record.

    BUG FIX: the original concatenation always contained the joining space,
    so blank first/last names produced " " (never the empty string) and the
    CompanyName fallback was unreachable.
    '''
    for record in data:
        first = record["FirstName"].lower().capitalize()
        last = record["LastName"].lower().capitalize()
        full = (first + " " + last).strip()
        record["FullName"] = full if full else record["CompanyName"]
    return data
def update_data(data, diff_data):
    '''Input: two record lists (both have FullName added via concatenate_name).
    Merges differential records into the original data: a diff record
    replaces the first original record sharing its ActivityId or FullName;
    unmatched diff records are appended as new appointments.
    Returns: the merged list.

    BUG FIXES: the original rebound the loop variable (``rec_d = rec_d_d``),
    which never wrote the update into the merged list, and removed items
    from ``diff_data_copy`` while iterating over it, skipping elements.
    '''
    cur_data_copy = concatenate_name(data)[:]
    diff_data_copy = concatenate_name(diff_data)[:]
    unmatched = []
    for diff_rec in diff_data_copy:
        for pos, cur_rec in enumerate(cur_data_copy):
            if (cur_rec["ActivityId"] == diff_rec["ActivityId"]
                    or cur_rec["FullName"] == diff_rec["FullName"]):
                cur_data_copy[pos] = diff_rec
                break
        else:
            # adding extra appointments not in original data set
            unmatched.append(diff_rec)
    cur_data_copy.extend(unmatched)
    return cur_data_copy
def concat_names_update_data(cur_data, diff_data):
    '''Input: cur_data, diff_data.
    Adds FullName fields to both record sets, then merges diff records into
    the current data (new records are appended).
    Returns: the merged data.'''
    cur_with_names = concatenate_name(cur_data)
    diff_with_names = concatenate_name(diff_data)
    return update_data(cur_with_names, diff_with_names)
def check_unique_activity_ids(data):
    '''Input: parsed csv records.
    Returns: (all_unique, id_set) — whether every ActivityId is distinct,
    and the set of distinct ActivityIds.'''
    id_set = {record["ActivityId"] for record in data}
    return (len(id_set) == len(data), id_set)
def split_dates(data):
    '''Input: parsed csv records.
    Adds DDate/DTime parsed from DueDate and CDate/CTime parsed from
    CompleteDate; a blank CompleteDate maps to date.max/time.max.
    Returns: the same list with the four new fields added.'''
    fmt = "%m/%d/%Y %I:%M:%S %p"
    for record in data:
        due = datetime.strptime(record["DueDate"], fmt)
        record["DDate"] = due.date()
        record["DTime"] = due.time()
        if record["CompleteDate"] != '':
            done = datetime.strptime(record["CompleteDate"], fmt)
            record["CDate"] = done.date()
            record["CTime"] = done.time()
        else:
            record["CDate"] = date.max
            record["CTime"] = time.max
    return data
def check_update_split_dates(all_data):
    '''Input: merged, updated records.
    Verifies the merge via ActivityId uniqueness; when duplicates exist,
    only records whose ActivityId survived the uniqueness pass are used.
    Returns: records with separate date and time fields added.'''
    activity_ids_unique, activity_id_set = check_unique_activity_ids(all_data)
    if activity_ids_unique:
        return split_dates(all_data)
    unique_records = [rec for rec in all_data if rec["ActivityId"] in activity_id_set]
    return split_dates(unique_records)
def due_date_is_target(data, target_date=None):
    '''Input: data and an optional target date; defaults to today minus 7
    days, computed per call (adjust the timedelta offset when testing on
    old data).
    Internally splits dates and times using the split_dates() function.
    Returns: (today_data, not_today_data, all_data_today_data) — two record
    lists and a boolean saying whether everything was due on the target date.

    BUG FIX: the original default ``date.today() + timedelta(days=-7)`` was
    evaluated once at import time, so a long-running process kept a stale
    target date; it is now derived from a None sentinel at call time.
    '''
    if target_date is None:
        target_date = date.today() + timedelta(days=-7)
    split_data = split_dates(data)
    today_data = []
    not_today_data = []
    for record in split_data:
        if record["DDate"] == target_date:
            today_data.append(record)
        else:
            not_today_data.append(record)
    all_data_today_data = len(today_data) == len(split_data)
    return (today_data, not_today_data, all_data_today_data)
def order_by_due_date_then_time(data):
    '''Input: records (split_dates() is applied internally).
    Returns: records ordered by due date, then due time.'''
    prepared = split_dates(data)
    return sorted(prepared, key=lambda rec: (rec["DDate"], rec["DTime"]))
def get_todays_data_reorder_by_time(all_data_dates_split):
    '''Input: records with datetime already split into date and time fields.
    Keeps records due on the target date (today minus 7 days; change the
    timedelta offset manually if a different window is needed) and orders
    them by increasing due time.
    Returns: the reordered subset for the target date.'''
    target_date = date.today() + timedelta(days=-7)
    todays_data = due_date_is_target(all_data_dates_split, target_date)[0]
    return order_by_due_date_then_time(todays_data)
def record_status(data):
    '''Input: parsed csv records.
    Partitions records on StatusDesc: "Cancelled" and "No Show" records are
    not needed; everything else is relevant.
    Returns: (relevant_data, irrelevant_data).'''
    excluded = ("Cancelled", "No Show")
    relevant_data = [entry for entry in data if entry["StatusDesc"] not in excluded]
    irrelevant_data = [entry for entry in data if entry["StatusDesc"] in excluded]
    return (relevant_data, irrelevant_data)
def blank_complete_date(data):
    '''Input: records.
    Internally splits dates using split_dates(), then keeps records whose
    completion appears later than — or earlier-dated than — their due date.
    Returns the kept subset.
    Not used in the main() pipeline.
    NOTE(review): ``entry["CDate"] != [0]`` compares a date to a list and is
    therefore always True — presumably intended as a blank-date guard. The
    "within two hours" rule mentioned in the original description is not
    implemented (see TODO comment below). The if/else pairing here was
    reconstructed from flattened source — verify against the author's intent.'''
    split_data = split_dates(data)
    complete_date_data = []
    for entry in split_data:
        if entry["CDate"] != [0]:
            if entry["CDate"] > entry["DDate"]:
                if entry["CTime"] > entry["DTime"]:
                    # adapt the take the 2 hours into account
                    complete_date_data.append(entry)
            else:
                complete_date_data.append(entry)
    return complete_date_data
def split_data_by_category(data):
    '''Input: records.
    Partitions records by EventCategoryDesc into "Sales", "Service" and
    everything else.
    Returns: (data_sales, data_service, data_invalid_event).'''
    data_invalid_event = []
    buckets = {"Sales": [], "Service": []}
    for entry in data:
        buckets.get(entry["EventCategoryDesc"], data_invalid_event).append(entry)
    return (buckets["Sales"], buckets["Service"], data_invalid_event)
def dump_all_data(data, number_of_records=28):
    '''Input: fully filtered records and number_of_records, the batch size to
    format for output per call.
    Splits the data into the batch to dump and the remainder.
    Returns: (output_data, cur_data) — the json-ready batch and the records
    not yet dumped.
    Note: optional output fields are kept commented out below for ease of
    re-enabling at a later stage; do not remove them.

    BUG FIX: the remainder was previously sliced as
    ``data[number_of_records:-1]``, silently dropping the final record from
    every oversized batch; it is now ``data[number_of_records:]``.
    '''
    if len(data) > number_of_records:
        data_to_dump = data[0:number_of_records]
        cur_data = data[number_of_records:]
    else:
        data_to_dump = data
        cur_data = []
    output_data = []
    for entry in data_to_dump:
        output_data.append({
            "FullName": entry["FullName"],
            "AssignedToName": entry["AssignedToName"],
            "DueTime": str(time.strftime(entry["DTime"], "%H:%M")),
            # "StatusDesc": entry["StatusDesc"],
            # "CompleteDate": str(entry["CompleteDate"])
            # "EventId": entry["EventId"],
            # "CDate": date.strftime(entry["CDate"], "%m/%d/%Y"),
            # "CTime": time.strftime(entry["CTime"], "%H:%M"),
            # "CompanyName": entry["CompanyName"],
            # "EventCategoryDesc": entry["EventCategoryDesc"],
            # "DDate": date.strftime(entry["DDate"], "%m/%d/%Y"),
            # "ActivityId": entry["ActivityId"],
            # "EntityId": entry["EntityId"],
            # "DueDate": entry["DueDate"]
        })
    return (output_data, cur_data)
def save_sales_data_to_json(data):
    '''Input: the sales subset of the formatted data.
    Writes it to ../data/xxx_activities_ds_sales.json.
    Returns: nothing.'''
    with open('../data/xxx_activities_ds_sales.json', 'w') as outfile:
        json.dump(data, outfile)
    return
def save_service_data_to_json(data):
    '''Input: the service subset of the formatted data.
    Writes it to ../data/xxx_activities_ds_service.json.
    Returns: nothing.'''
    with open('../data/xxx_activities_ds_service.json', 'w') as outfile:
        json.dump(data, outfile)
    return
def write_sales_service_to_json_file(todays_data_ordered, number_of_records=28):
    '''Input: today's relevant records and the number of records to write per
    file (defaults to 28; fewer are written when fewer exist).
    Splits the data into "Sales" and "Service", formats each to json and
    writes it to file.
    Returns nothing.

    BUG FIX: the ``number_of_records`` argument was previously ignored —
    both dump_all_data() calls hard-coded 28.
    '''
    data_sales, data_service, _ = split_data_by_category(todays_data_ordered)
    sales_dumped, _ = dump_all_data(data_sales, number_of_records)
    save_sales_data_to_json(sales_dumped)
    # service data:
    service_dumped, _ = dump_all_data(data_service, number_of_records)
    save_service_data_to_json(service_dumped)
    return
def main():
    '''Run the pipeline end to end: load & validate both csv files, merge the
    differential into the current data, split datetimes, keep and order
    today's records, then write the sales/service json files.
    Note: functions returning tuples (check_unique_activity_ids,
    due_date_is_target) must be unpacked carefully by callers.
    Relies on the module-level file1/file2 paths set in the __main__ guard.
    '''
    # (Dropped the unused cur_file/diff_file locals from the original.)
    cur_data, diff_data = get_data_check_fields(file1, file2)
    all_data = concat_names_update_data(cur_data, diff_data)
    all_data_dates_split = check_update_split_dates(all_data)
    todays_data_ordered = get_todays_data_reorder_by_time(all_data_dates_split)
    write_sales_service_to_json_file(todays_data_ordered, 28)
    return
if __name__ == '__main__':
    #get the file paths from the server
    # file1: full morning ftp dump; file2: differential update to merge in.
    file1 = "../data/3213_ctc_activities_update.csv"
    file2 = "../data/3213_ctc_activities_used_as_diff.csv"
    main()
|
import os
import requests
import numpy as np
import json
import pandas as pd
import glob
import subprocess
import math
from pathlib import Path
from hashlib import md5
import shutil
PHOTO_FOLDER = "./photos/"
# Adapted directly from Andrew Wheeler:
# https://andrewpwheeler.wordpress.com/2015/12/28/using-python-to-grab-google-street-view-imagery/
# Usage example:
# >>> download_streetview_image((46.414382,10.012988))
def download_streetview_image(apikey_streetview, lat_lon, filename="image", savepath=PHOTO_FOLDER, picsize="600x300",
                              heading=151.78, pitch=-0, fi=".jpg", fov=90, get_metadata=False, verbose=True,
                              outdoor=True, radius=5):
    """Download one Google Street View image, or its metadata.

    apikey_streetview: API key string; lat_lon: (lat, lon) tuple or an
    address string; picsize: any size up to "640x640"; fov: effectively the
    zoom level, between 0 and 120; radius: search radius in metres.
    Returns the parsed metadata dict when get_metadata is True, otherwise
    the path of the saved image file.

    BUG FIX: the radius query parameter was emitted as "&radius5" (the '='
    was missing), so the API silently ignored the requested search radius.
    """
    assert type(radius) is int
    Path(PHOTO_FOLDER).mkdir(parents=True, exist_ok=True)
    base = "https://maps.googleapis.com/maps/api/streetview"
    if get_metadata:
        # Metadata API: https://developers.google.com/maps/documentation/streetview/intro#size
        base = base + "/metadata?parameters"
    if type(lat_lon) is tuple:
        lat_lon_str = str(lat_lon[0]) + "," + str(lat_lon[1])
    elif type(lat_lon) is str:
        # We expect a latitude/longitude tuple, but providing a string address works too.
        lat_lon_str = lat_lon
    if outdoor:
        outdoor_string = "&source=outdoor"
    else:
        outdoor_string = ""
    url = base + "?size=" + picsize + "&location=" + lat_lon_str + "&heading=" + str(heading) + "&pitch=" + str(
        pitch) + "&fov=" + str(fov) + outdoor_string + "&radius=" + str(radius) + "&key=" + apikey_streetview
    if verbose:
        print(url)
    if get_metadata:
        with requests.get(url) as response:
            data = json.loads(response.content)
        return data
    else:
        r = requests.get(url)
        with open(savepath + filename + fi, 'wb') as f:
            f.write(r.content)
        return savepath + filename + fi
# Gist copied from https://gist.github.com/jeromer/2005586 which is in the public domain:
def calculate_initial_compass_bearing(pointA, pointB):
    """Initial compass bearing in degrees [0, 360) from pointA to pointB.

    Both points are (latitude, longitude) tuples in decimal degrees.
    Raises TypeError when either argument is not a tuple.
    """
    if type(pointA) != tuple or type(pointB) != tuple:
        raise TypeError("Only tuples are supported as arguments")
    lat_a = math.radians(pointA[0])
    lat_b = math.radians(pointB[0])
    delta_lon = math.radians(pointB[1] - pointA[1])
    x = math.sin(delta_lon) * math.cos(lat_b)
    y = (math.cos(lat_a) * math.sin(lat_b)
         - math.sin(lat_a) * math.cos(lat_b) * math.cos(delta_lon))
    bearing = math.degrees(math.atan2(x, y))
    # Normalise from (-180, 180] to a compass range.
    return (bearing + 360) % 360
def haversine(a_gps, b_gps):
    """
    Great-circle distance in metres between two points on the earth,
    each given as a (latitude, longitude) pair in decimal degrees.
    """
    lat1 = math.radians(a_gps[0])
    lon1 = math.radians(a_gps[1])
    lat2 = math.radians(b_gps[0])
    lon2 = math.radians(b_gps[1])
    # Haversine formula.
    half_dlat = (lat2 - lat1) / 2
    half_dlon = (lon2 - lon1) / 2
    a = math.sin(half_dlat) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(half_dlon) ** 2
    central_angle = 2 * math.asin(math.sqrt(a))
    # Earth radius of 6367 km, expressed in metres.
    return 6367000.0 * central_angle
# Given two GPS points (lat/lon), interpolate a sequence of GPS points in a straight line
def interpolate_points(a_gps, b_gps, n_points=10, hop_size=None):
    """Linearly interpolate GPS points from a_gps to b_gps (endpoints included).

    When hop_size (metres) is given it overrides n_points so that adjacent
    points are at most hop_size apart. Returns an iterator of (lat, lon)
    pairs.
    """
    start = np.array(a_gps, dtype=float)
    end = np.array(b_gps, dtype=float)
    if hop_size is not None:
        distance = haversine(start, end)
        n_points = int(np.ceil(distance * 1.0 / hop_size))
    lats = np.linspace(start[0], end[0], n_points)
    lons = np.linspace(start[1], end[1], n_points)
    return zip(lats, lons)
# Short script to process the lookpoints from the above "interpolate points" function.
def clean_look_points(look_points):
    """Keep each point that differs from its successor.

    Consecutive duplicates are dropped; note the final point has no
    successor and so is never emitted.
    """
    kept = []
    for current_pt, next_pt in zip(look_points[:-1], look_points[1:]):
        delta = np.array(current_pt) - np.array(next_pt)
        if np.any(np.abs(delta) > 0):
            kept.append(current_pt)
    return kept
def download_images_for_path(apikey_streetview, filestem, look_points, orientation=1, picsize="640x320"):
    """Download one Street View image per GPS point along a path.

    The camera at each point is oriented toward the point `orientation` steps
    ahead; when no such point exists, the previously computed heading is
    reused. Images are only fetched when a metadata probe reports status OK
    with Google-owned imagery.

    Parameters:
        apikey_streetview: API key forwarded to download_streetview_image.
        filestem: filename prefix; the point index is appended.
        look_points: sequence of (lat, lon) points.
        orientation: look-ahead distance in points (int >= 1).
        picsize: requested image size string.
    """
    assert type(orientation) is int
    assert orientation >= 1
    # Fix: prev_heading was previously unbound if the very first point had no
    # point `orientation` steps ahead (len(look_points) <= orientation).
    prev_heading = 0.0
    for i in range(len(look_points)):
        gps_point = look_points[i]
        if i + orientation >= len(look_points):
            heading = prev_heading
        else:
            heading = calculate_initial_compass_bearing(gps_point, look_points[i + orientation])
        # Probe metadata first so we only fetch imagery that exists and is
        # Google-owned.
        probe = download_streetview_image(apikey_streetview, gps_point, filename="", heading=heading, picsize=picsize,
                                          get_metadata=True)
        if probe['status'] == "OK" and 'Google' in probe['copyright']:
            dest_file = download_streetview_image(apikey_streetview, gps_point, filename=filestem + str(i),
                                                  heading=heading, picsize=picsize, get_metadata=False)
        prev_heading = heading
def get_turn_headings(h1, h2, stepsize=15):
    """Return headings (degrees, in [0, 360)) sweeping from h1 to h2 along the
    shorter rotational direction, with at most `stepsize` degrees per step.
    """
    if h2 < h1:
        h2 += 360
    clockwise = (h2 - h1 < 180)
    if not clockwise:
        # Counter-clockwise: sweep downward from h1 + 360 to h2.
        h1 += 360
    # Fix: np.linspace requires an integer sample count; np.ceil returns a
    # float, which raises TypeError on modern numpy.
    n_points = int(np.ceil(np.abs((h1 - h2) * 1.0 / stepsize)))
    headings = np.linspace(h1, h2, n_points)
    return np.mod(headings, 360)
# def execute_turn(apikey_streetview, filestem, gps_point, h1, h2, picsize="640x320", stepsize=15):
# if h2 < h1:
# h2 += 360
# clockwise = (h2 - h1 < 180)
# if not clockwise:
# h1 += 360
# n_points = np.ceil(np.abs( (h1 - h2)*1.0 /stepsize))
# headings = np.linspace(h1,h2,n_points)
# probe = download_streetview_image(apikey_streetview, gps_point, filename="", heading=headings[0], picsize=picsize, get_metadata=True)
# if probe['status']=="OK" and 'Google' in probe['copyright']:
# for h_i,h in enumerate(headings):
# dest_file = download_streetview_image(apikey_streetview, gps_point, filename="{0}_turn_{1}".format(filestem,h_i), heading=h, picsize=picsize, get_metadata=False)
#
def generate_download_sequence(gps_points, savename):
    # Build a DataFrame of (lat, lon) points, probe Street View metadata for
    # each, expand the probe fields into columns, and pickle the result to
    # `savename`. Returns the DataFrame.
    # NOTE(review): relies on a module-level `apikey_streetview` global (it is
    # not a parameter here) -- verify it is defined before calling.
    # Create dataframe with GPS points
    pt_list = pd.DataFrame(index=range(len(gps_points)), data=gps_points, columns=["lat", "lon"])
    # Each point's heading looks toward its successor; the last point reuses
    # the previous heading so the column length matches.
    headings = [calculate_initial_compass_bearing(pt[0], pt[1]) for pt in zip(gps_points[:-1], gps_points[1:])]
    pt_list['heading'] = headings + [headings[-1]]
    # Set up probes and collect all in raw form
    pt_list['probe'] = [{} for i in pt_list.index]
    for i in pt_list.index:
        # NOTE(review): chained assignment (df['col'][i] = ...) may not write
        # through on newer pandas; consider pt_list.at[i, 'probe'].
        pt_list['probe'][i] = download_streetview_image(apikey_streetview, (pt_list["lat"][i], pt_list["lon"][i]),
                                                        filename="", heading=pt_list["heading"][i], get_metadata=True)
    # Assign probe items to their own columns:
    probe_items = ['copyright', 'date', 'location', 'pano_id', 'status']
    for p_item in probe_items:
        pt_list[p_item] = [x[p_item] for x in pt_list['probe']]
    pt_list.to_pickle(savename)
    return pt_list
def create_itinerary_df(gps_points):
    """Build an (unprobed) itinerary DataFrame for a list of (lat, lon) points.

    Columns cover coordinates, per-point heading, probe placeholders, and two
    boolean download-status flags. Unpopulated probe cells are filled with ''.
    """
    columns = ["lat", "lon", "heading", "probe", "copyright", "date", "location", "pano_id",
               "status", "downloaded_1", "downloaded_array"]
    itinerary = pd.DataFrame(index=range(len(gps_points)), columns=columns)
    lats, lons = zip(*gps_points)
    itinerary['lat'] = lats
    itinerary['lon'] = lons
    itinerary['downloaded_1'] = False
    itinerary['downloaded_array'] = False
    # Each point looks toward its successor; the final point repeats the last
    # computed heading so the column has the right length.
    point_pairs = zip(gps_points[:-1], gps_points[1:])
    headings = [calculate_initial_compass_bearing(a, b) for (a, b) in point_pairs]
    itinerary['heading'] = headings + [headings[-1]]
    return itinerary.fillna('')
def probe_itinerary_items(itinerary_df, indlist, apikey_streetview, redo=False):
    # Fetch Street View metadata for the rows listed in `indlist` and write
    # each returned field into its own column of `itinerary_df` (mutated in
    # place). Rows whose 'status' is already non-empty are skipped unless
    # redo=True.
    # NOTE(review): this assert evaluates a (non-empty) list, which is always
    # truthy -- it never actually validates that the indices exist.
    assert [i in itinerary_df.index for i in indlist]
    # (Not referenced below -- the loop iterates probe_result.keys() instead.)
    probe_items = ['copyright', 'date', 'location', 'pano_id', 'status']
    for i in indlist:
        if (itinerary_df['status'][i] == '') or (redo):
            print(i)
            probe_result = download_streetview_image(apikey_streetview,
                                                     (itinerary_df["lat"].loc[i], itinerary_df["lon"][i]), filename="",
                                                     heading=itinerary_df["heading"][i], get_metadata=True)
            # itinerary_df.loc[i]["probe"] = probe_result
            # Assign probe items to their own columns:
            # NOTE(review): chained assignment -- may not write through on
            # newer pandas; consider itinerary_df.at[i, p_item].
            for p_item in probe_result.keys():
                itinerary_df[p_item][i] = probe_result[p_item]
def process_pointlist(pt_list=None, pt_list_filename=None):
    """Clean a probed point list and insert interpolated 'turn' rows.

    Pass the DataFrame directly (pt_list) or a pickle path (pt_list_filename).
    Duplicate panoramas and rows that are not status 'OK' with Google-owned
    imagery are dropped; wherever consecutive headings differ by more than 5
    degrees, intermediate rows with smoothly swept headings are inserted.

    Returns the cleaned, re-indexed DataFrame.
    """
    if pt_list is None and pt_list_filename is not None:
        pt_list = pd.read_pickle(pt_list_filename)
    # Remove duplicate / invalid points: keep the first row of each pano_id,
    # and only rows that probed OK with Google imagery.
    unique_panos = np.unique(pt_list.pano_id)
    panoid_to_ind = {panoid: pt_list.pano_id.eq(panoid).idxmax() for panoid in unique_panos}
    keepers = [i for i in sorted(panoid_to_ind.values()) if
               pt_list.status[i] == 'OK' and 'Google' in pt_list.copyright[i]]
    new_list = pt_list.loc[keepers]
    new_list.index = np.arange(new_list.shape[0])
    crit_diff = 5
    # np.diff yields n-1 values; pair each with the index of the FIRST row of
    # its pair. (Fix: the original applied this length n-1 boolean mask via
    # .loc across all n rows, which raises an indexing error.)
    heading_jumps = np.abs(np.diff(new_list.heading))
    turn_indices = new_list.index[:-1][heading_jumps > crit_diff]
    new_rows = []
    for ti in turn_indices:
        # Fix: the column is named 'heading'; the original read the
        # nonexistent attribute 'headings' and raised AttributeError here.
        h1 = new_list.heading[ti]
        h2 = new_list.heading[ti + 1]
        # Interior headings only: the endpoints already exist as real rows.
        headings = get_turn_headings(h1, h2, stepsize=1)[1:-1]
        tmp_df = pd.DataFrame(np.tile(new_list.loc[ti], (len(headings), 1)))
        tmp_df.columns = new_list.columns
        tmp_df.heading = headings
        # Fractional indices place the turn rows between ti and ti+1 on sort.
        tmp_df.index = np.linspace(ti + 0.01, ti + 0.99, len(headings))
        new_rows += [tmp_df]
    final_list = pd.concat([new_list] + new_rows)
    final_list = final_list.sort_index()
    final_list.index = np.arange(final_list.shape[0])
    return final_list
def download_pics_from_list(item_list, apikey_streetview, filestem, picsize, redownload=False, index_filter=None):
    """Fetch one Street View image per row of `item_list`, then mark the row's
    'downloaded_1' flag. Rows already flagged are skipped unless
    redownload=True. Only rows in `index_filter` (default: all) are processed.
    """
    rows_to_fetch = item_list.index if index_filter is None else index_filter
    for idx in rows_to_fetch:
        entry = item_list.loc[idx]
        already_done = entry['downloaded_1']
        if redownload or not already_done:
            download_streetview_image(apikey_streetview, (entry['lat'], entry['lon']),
                                      filename=filestem + str(idx), heading=entry['heading'],
                                      picsize=picsize, get_metadata=False)
            item_list["downloaded_1"].loc[idx] = True
def download_tableaux_from_list(item_list, apikey_streetview, filestem, picsize, fov, fov_step, pitch, grid_dim,
                                index_filter=None, redownload=False):
    """Download a grid of zoomed views for each row and composite each grid
    into one image, then mark the row's 'downloaded_array' flag.

    Fixes: `redownload` was referenced but never defined (NameError); it is
    now a keyword argument defaulting to False, matching
    download_pics_from_list. The per-point download is guarded by the same
    already-downloaded check as the compositing step.
    """
    if index_filter is None:
        index_filter = item_list.index
    for i in index_filter:
        row = item_list.loc[i]
        lat, lon, heading, downloaded = row['lat'], row['lon'], row['heading'], row['downloaded_array']
        if (not downloaded) or redownload:
            download_images_for_point(apikey_streetview, (lat, lon), filestem + str(i), "./photos/", heading,
                                      fov, fov_step, pitch, grid_dim)
            assemble_grid_of_images(filestem + str(i), "./photos/", "./photos/composite-{0}-{1}".format(filestem, i),
                                    grid_dim, crop_dim="640x640+0+0")
            item_list["downloaded_array"].loc[i] = True
# Download a set of zoomed-in views to be composited into a larger image
def download_images_for_point(apikey_streetview, lat_lon, filestem, savepath, heading, fov=30, fov_step=30, pitch=15,
                              grid_dim=[4, 2]):
    """Download a grid_dim[0] x grid_dim[1] grid of narrow-FOV tiles around
    `heading`/`pitch` for one location, saving each tile as
    "<filestem>_<panel index>" under `savepath`.
    """
    # Horizontal pan offsets are centered on the requested heading; vertical
    # tilts are centered on the requested pitch, top row first.
    pan_offsets = (np.arange(grid_dim[0]) - (grid_dim[0] - 1) / 2.0) * fov_step
    tilt_values = (np.arange(grid_dim[1])[::-1] - (grid_dim[1] - 1) / 2.0) * fov_step + pitch
    # Panel numbering matches the layout used by assemble_grid_of_images.
    panel_ids = np.reshape(np.arange(np.prod(grid_dim)), grid_dim, 1).transpose()
    for col, pan in enumerate(pan_offsets):
        for row, tilt in enumerate(tilt_values):
            panel = panel_ids[row, col]
            print(panel)
            view_heading = heading + pan
            view_pitch = tilt
            print(view_heading, view_pitch)
            download_streetview_image(apikey_streetview, lat_lon, filename="{0}_{1}".format(filestem, panel),
                                      savepath=savepath, picsize="640x640", heading=view_heading, pitch=view_pitch,
                                      fi=".jpg", fov=fov, get_metadata=False, verbose=True, outdoor=True, radius=5)
def assemble_grid_of_images(filestem, savepath, outfilestem, grid_dim, crop_dim="640x640+0+0"):
    """Shell out to ImageMagick `convert` to composite the downloaded panel
    tiles into one grid image named "<outfilestem>.jpg".

    Each row of panels is h-appended inside an escaped group, then the rows
    are v-appended together.
    """
    panel_ids = np.reshape(np.arange(np.prod(grid_dim)), grid_dim, 1).transpose()
    row_groups = []
    for pindrow in panel_ids:
        cells = ["{0}/{1}_{2}.jpg -crop {3}".format(savepath, filestem, pind, crop_dim) for pind in pindrow]
        row_groups.append(" \( " + " ".join(cells + ["+append"]) + " \) ")
    command_string = "convert " + " ".join(row_groups) + " -append {0}.jpg".format(outfilestem)
    subprocess.call(command_string, shell=True)
# Line up files in order to make a video using ffmpeg.
# ffmpeg requires all image files numbered in sequence, with no gaps.
# However, some images will not have been downloaded, so we need to shift
# everything to tidy up gaps. Also, some images will be duplicates, and we can
# remove them. A user may also manually discard images that are clearly out of
# step with the path (e.g., a view inside a building, or slightly down a
# cross-street). After manually removing files, re-running this will line the
# files up again.
def line_up_files(filestem, new_dir="./movie_lineup", command="mv", override_nums=None):
    # Pack the surviving photo files into a gapless 0..N-1 numbering under
    # new_dir, as ffmpeg image sequences require.
    # NOTE(review): `command` is only used in the progress print -- the move
    # is always performed with shutil.move; `override_nums` is never used.
    if not os.path.exists(new_dir):
        os.makedirs(new_dir)
    files = glob.glob("./photos/" + filestem + "*.jpg")
    # 9 == len("./photos/"); the slice extracts the numeric suffix before ".jpg".
    file_nums = [int(filename[9 + len(filestem):-4]) for filename in files]
    file_sort = [files[i] for i in np.argsort(file_nums)]
    # First, remove file_nums that represent duplicate files.
    # NOTE(review): prune_repeated_images_from_list() ignores `file_sort` and
    # rescans the photos folder itself -- confirm this is intended; file_sort
    # is otherwise unused.
    file_keepers = prune_repeated_images_from_list()
    # (A commented-out `diff`-based duplicate check previously lived here.)
    # Now, shuffle the files into a packed numbering:
    for i in range(len(file_keepers)):
        old_filename = file_keepers[i]
        new_filename = os.path.join(new_dir, "{}{}.jpg".format(filestem, i))
        print("{0} {1} {2}".format(command, old_filename, new_filename))
        shutil.move(old_filename, new_filename)
# Refactor line_up_files as separate steps:
def line_up_files_with_numbers_script(filestem, numbers, new_dir):
    """Deduplicate the photos named <filestem><n>.jpg for the given numbers,
    then copy the survivors into ./photos/<new_dir>/ as a packed sequence."""
    selected = ["./photos/{0}{1}.jpg".format(filestem, num) for num in sorted(numbers)]
    survivors = prune_repeated_images_from_list(selected)
    copy_files_to_sequence(survivors, "./photos/{0}/{1}".format(new_dir, filestem))
def copy_files_to_sequence(list_of_files, new_filestem, command='cp'):
    """Shell out to `command` (default: cp) to copy each file, in order, into
    a packed "<new_filestem><i>.jpg" sequence, echoing each command."""
    for seq_num, src in enumerate(list_of_files):
        dest = "{0}{1}.jpg".format(new_filestem, seq_num)
        shell_cmd = "{0} {1} {2}".format(command, src, dest)
        print(shell_cmd)
        os.system(shell_cmd)
def file_hash(filepath):
    """Return the hex MD5 digest of the file's full contents."""
    with open(filepath, 'rb') as fh:
        digest = md5(fh.read())
    return digest.hexdigest()
def prune_repeated_images_from_list(files=None, photo_folder="./photos/"):
    """Delete duplicate images (by MD5 content hash), keeping the first
    occurrence of each distinct file, and return the kept file paths.

    Parameters:
        files: optional explicit list of file paths to consider. When None,
            every file directly inside photo_folder is scanned.
            (Fixes: the original accepted no arguments even though
            line_up_files_with_numbers_script passes a file list, and it
            referenced an undefined PHOTO_FOLDER constant.)
        photo_folder: directory scanned when `files` is None.

    Returns:
        List of unique (kept) file paths, in first-seen order.
    """
    if files is None:
        files = [os.path.join(photo_folder, name) for name in os.listdir(photo_folder)]
    duplicates = []
    hash_keys = {}
    for index, filepath in enumerate(files):
        if os.path.isfile(filepath):
            filehash = file_hash(filepath)
            if filehash not in hash_keys:
                hash_keys[filehash] = filepath
            else:
                duplicates.append((index, filepath))
    # Remove the later copies from disk; keep the first of each hash.
    for index, filepath in duplicates:
        os.remove(filepath)
    return list(hash_keys.values())
def make_video(base_string, rate=20, video_string=None, picsize="640x640", basepath="./photos"):
    """Encode the image sequence "<basepath>/<base_string>%d.jpg" into
    "<video_string>.mp4" (default: base_string) via ffmpeg/libx264,
    echoing the command first."""
    target = base_string if video_string is None else video_string
    command = "ffmpeg -r {0} -f image2 -s {3} -i {4}/{1}%d.jpg -vcodec libx264 -crf 25 -pix_fmt yuv420p {2}.mp4 -y".format(
        rate, base_string, target, picsize, basepath)
    print(command)
    subprocess.call(command, shell=True)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.utils.timezone import utc
import datetime
class Migration(migrations.Migration):
    """Drop Appointment.comment and add a required Appointment.hour field."""

    dependencies = [
        ('app', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='appointment',
            name='comment',
        ),
        migrations.AddField(
            model_name='appointment',
            name='hour',
            # The fixed timestamp is the auto-generated one-off default used
            # to backfill existing rows; preserve_default=False keeps it out
            # of the model definition afterwards.
            field=models.TimeField(default=datetime.datetime(2015, 12, 5, 23, 41, 17, 642953, tzinfo=utc)),
            preserve_default=False,
        ),
    ]
|
# ex002.py: read a file line by line using readline()
f = open("story.txt")
while True:
    line = f.readline()
    if line:
        # print() adds its own newline on top of the one readline() keeps,
        # so output is double-spaced (original behavior).
        print(line)
    else:
        break
# Fix: the original said `f.close` (no parentheses), which only references
# the method and leaves the file handle open.
f.close()
|
class FileFormatException(Exception):
    """Raised when a workflow file does not match the expected format."""
|
__author__ = 'leah'

from data_import_tools import import_arcadia_archived_data
from matplotlib import pyplot as plt

# Load the archived Arcadia detector data and scatter-plot volume against
# occupancy for every detector at station id 3092 (presumably the
# Huntington/Baldwin station -- inferred from the variable name; confirm
# against the data source).
detector_dict, detector_list = import_arcadia_archived_data()
huntington_baldwin = detector_dict[3092]
for det in huntington_baldwin:
    plt.figure()
    df = det.data
    # df.volume.plot()
    plt.plot(df.occupancy, df.volume, 'bo')  # blue dots, one per sample
    plt.xlabel('occupancy')
    plt.ylabel('volume')
# NOTE(review): original indentation was lost; show() placed after the loop
# (one blocking call displaying all figures) -- confirm intent.
plt.show()
#! /usr/bin/env python
# -*- coding: utf-8 -*-
##########################################################################################
# si_2mode.py
#
# scipy implementation of two mode SI shapers, both positive and Unity Magnitude (UM)
#
# NOTE: UM case is still *very* sensitive to initial guess. Optimization will fail.
#
# NOTE: Any plotting is set up for output, not viewing on screen.
# So, it will likely be ugly on screen. The saved PDFs should look
# better.
#
# Created: 12/31/14
# - Joshua Vaughan
# - joshua.vaughan@louisiana.edu
# - http://www.ucs.louisiana.edu/~jev9637
#
# -
#
##########################################################################################
import numpy as np
from scipy import optimize
import InputShaping as shaping
import pdb
def func(x, *args):
    """Optimization objective: the squared time of the final impulse.

    x packs [A_1..A_n, t_1..t_n]; impulses are sorted by time first so the
    last row holds the latest impulse.
    """
    half = np.round(len(x) / 2).astype(int)
    impulses = np.reshape(x, (2, half)).T
    by_time = impulses[impulses[:, 1].argsort()]
    last_time = by_time[-1, 1]
    return last_time ** 2
def func_deriv(x, *args):
    """Gradient of func: zero everywhere except the final (time) entry of x,
    which is 2 * t_n after sorting the impulses by time."""
    half = np.round(len(x) / 2).astype(int)
    impulses = np.reshape(x, (2, half)).T
    by_time = impulses[impulses[:, 1].argsort()]
    grad = np.zeros_like(x)
    grad[-1] = 2.0 * by_time[-1, 1]
    return grad
def vib(x, f_min, f_max, zeta, ic_1=np.array([0,0]),ic_2=np.array([0,0])):
    """Residual vibration of an impulse sequence over a band of frequencies.

    Inputs:
        x = flat [A_1..A_n, t_1..t_n] array of impulse amplitudes and times
        f_min = the minimum frequency to limit vibration at (Hz)
        f_max = the maximum frequency to limit vibration at (Hz)
        zeta = damping ratio
        ic_1, ic_2 = accepted for signature compatibility; unused here

    Returns:
        Array of 50 proportional-vibration values, one per sampled frequency.
    """
    x = np.asarray(x)
    n_imp = np.round(len(x) / 2).astype(int)
    amps = x[0:n_imp]
    times = x[n_imp:]
    n_freqs = 50
    result = np.zeros(n_freqs,)
    band = np.linspace(f_min * (2*np.pi), f_max * (2*np.pi), n_freqs)
    for k, w in enumerate(band):
        wd = w * np.sqrt(1 - zeta**2)
        growth = np.exp(zeta * w * times)
        cos_part = np.sum(amps * growth * np.cos(wd * times))
        sin_part = np.sum(amps * growth * np.sin(wd * times))
        # Decay factor references the final impulse time x[-1].
        result[k] = np.exp(-zeta * w * x[-1]) * np.sqrt(cos_part**2 + sin_part**2)
    return result
def amp_sum(x):
    """Sum of the impulse amplitudes (the first half of the packed vector x),
    used by the unit-amplitude-sum equality constraint."""
    n_imp = np.round(len(x) / 2).astype(int)
    return np.sum(x[0:n_imp])
def form_constraints(x, args):
    """Build the SLSQP constraint dicts for the two-mode SI shaper problem.

    Constraints: residual vibration below Vtol over each mode's band, unit
    amplitude sum, t_1 = 0, and non-decreasing impulse times.
    """
    f1_min, f1_max, f2_min, f2_max, zeta1, zeta2, Vtol1, Vtol2, UMFlag, ic_1, ic_2 = args
    num_imp = np.round(len(x) / 2).astype(int)
    # Define the constraints for vibration, amplitude-sum, and t_1 = 0
    consts = ({'type': 'ineq',
               'fun': lambda x: Vtol1 - vib(x, f1_min, f1_max, zeta1, ic_1, ic_2)},
              {'type': 'ineq',
               'fun': lambda x: Vtol2 - vib(x, f2_min, f2_max, zeta2)},
              {'type': 'eq',
               'fun': lambda x: amp_sum(x) - 1.0},  # sum(Ai) = 1
              {'type': 'eq',
               'fun': lambda x: x[num_imp]}  # t1 = 0
              )
    # Ordering constraints t_i >= t_{i-1}.
    # Fix: bind ii as a default argument -- the original lambdas closed over
    # the loop variable, so after the loop EVERY ordering constraint compared
    # the same (last) pair of times.
    for ii in range(1, np.round(num_imp).astype(int)):
        consts += ({'type': 'ineq',
                    'fun': lambda x, ii=ii: x[num_imp + ii] - x[num_imp + (ii - 1)]},)
    return consts
def si_2mode(UMFlag, neg, X0, f1_min, f1_max, f2_min, f2_max, zeta1, zeta2, Vtol1, Vtol2, deltaT,
             ic_1=np.array([0,0]),ic_2=np.array([0,0])):
    """
    Function to solve the optimization for a two-mode SI (Specified
    Insensitivity) input shaper.

    res, shaper = si_2mode(UMFlag, neg, X0, f1_min, f1_max, f2_min, f2_max, zeta1, zeta2, Vtol1, Vtol2, deltaT)

    Inputs:
      - UMFlag = 0 -> positive shaper, 1 -> Unity Magnitude (UM) shaper,
                 2 -> SNA shaper (not implemented)
      - neg = maximum amplitude of the negative impulses (not currently used - 12/22/14)
      - X0 = initial guess in [ti Ai] column form; if None, a guess is
             generated automatically from curve fits of known solutions
      - f1_min / f1_max = frequency range (Hz) to suppress for the first mode
      - f2_min / f2_max = frequency range (Hz) to suppress for the second mode
      - zeta1, zeta2 = damping ratios of modes 1 and 2
      - Vtol1, Vtol2 = tolerable vibration levels (0.05 = 5%)
      - deltaT = sampling time of the digital seed shapers
      - ic_1, ic_2 = initial-condition arrays forwarded to the mode-1
        vibration constraint (currently unused inside vib)

    Outputs:
      - res = the scipy.optimize.minimize (SLSQP) result object, see:
        http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize
      - shaper = the resulting shaper as [ti Ai] columns ([] on failure)
    """
    seeking_solution = True
    #print(X0)
    # NOTE(review): the loop exists so a shortened solution (after merging
    # near-repeated impulse times) could be re-solved, but every path below
    # sets seeking_solution = False after one pass -- confirm intent.
    while seeking_solution:
        # If a unity magnitude shaper, force the negative impulses to UM
        if UMFlag:
            neg = 1.0
        # Determine the median frequency of each range to be suppressed and
        # the corresponding Insensitivity (normalized suppressed bandwidth).
        w1min = 2 * np.pi * f1_min  # convert to rad/s
        w1max = 2 * np.pi * f1_max  # convert to rad/s
        wn1 = (w1min + w1max) / 2.0  # median frequency (rad/s)
        f1 = wn1 / (2.0 * np.pi)  # median frequency (Hz)
        Ins1 = (w1max - w1min) / wn1  # Insensitivity
        w2min = 2 * np.pi * f2_min  # convert to rad/s
        w2max = 2 * np.pi * f2_max  # convert to rad/s
        wn2 = (w2min + w2max) / 2.0  # median frequency (rad/s)
        f2 = wn2 / (2.0 * np.pi)  # median frequency (Hz)
        Ins2 = (w2max - w2min) / wn2  # Insensitivity
        # Calculate periods, used for the initial time guess
        tau1 = 2 * np.pi/wn1
        tau2 = 2 * np.pi/wn2
        # TODO: delete these after better initial guess - 12/31/14
        tau = np.max([tau1, tau2])  # get the longest period
        Ins = np.max([Ins1, Ins2])
        zeta = np.max([zeta1, zeta2])
        #pdb.set_trace()
        # If no initial guess is given, then create one based on curve fits of
        # known solutions. As of 12/22/14, these initial guesses assume no
        # damping, so damped solutions may fail.
        if X0 is None:  # if I.C.'s unknown, create X0
            # For pos shaper
            if not UMFlag:
                # Seed with the closed-form 2-mode shaper whose insensitivity
                # just covers the request (ZV < ZVD < ZVDD < ZVDDD).
                if Ins <= 0.06:
                    zv = shaping.ZV_2mode(f1, zeta1, f2, zeta2, deltaT)
                    X0 = np.hstack((zv.amps, zv.times))
                elif Ins <= 0.3992:
                    zvd = shaping.ZVD_2mode(f1, zeta1, f2, zeta2, deltaT)
                    X0 = np.hstack((zvd.amps, zvd.times))
                elif Ins <= 0.7262:
                    zvdd = shaping.ZVDD_2mode(f1, zeta1, f2, zeta2, deltaT)
                    X0 = np.hstack((zvdd.amps, zvdd.times))
                elif Ins <= 0.9654:
                    zvddd = shaping.ZVDDD_2mode(f1, zeta1, f2, zeta2, deltaT)
                    X0 = np.hstack((zvddd.amps, zvddd.times))
                else:
                    raise ValueError('Code only works (as of 02/16/15) for positive shapers up to I(5%) = 0.9654.')
            # for negative shapers
            elif UMFlag == 1:
                print('As of 12/22/14, UM shaper solution is extremely sensitive to initial guess.')
                print('So, you may want to supply one, rather than use the default.\n')
                print('As of 02/17/15, UM shaper solution may result in non-alternating signed amplitudes.')
                print('Please check the results carefully.\n')
                # Seed with the convolution of two single-mode UM-EI-family
                # shapers; the thresholds are curve fits in `neg`.
                if Ins <= 0.0333*neg**2 - 0.0672*neg + 0.3956:  # UM-EI
                    umei1 = shaping.UMEI(f1, zeta1, 0.05, deltaT)
                    umei2 = shaping.UMEI(f2, zeta2, 0.05, deltaT)
                    shaper = shaping.seqconv(umei1.shaper, umei2.shaper)
                    X0 = np.hstack((shaper[:,1], shaper[:,0]))
                elif Ins <= 0.0604*neg**2 - 0.1061*neg + 0.7186:  # UM-Two-Hump EI
                    um2ei1 = shaping.UM2EI(f1, zeta1, 0.05, deltaT)
                    um2ei2 = shaping.UM2EI(f2, zeta2, 0.05, deltaT)
                    shaper = shaping.seqconv(um2ei1.shaper, um2ei2.shaper)
                    X0 = np.hstack((shaper[:,1], shaper[:,0]))
                elif Ins <= .2895*neg**4 - 0.6258*neg**3 + 0.5211*neg**2 - 0.2382*neg + 0.9654:
                    um3ei1 = shaping.UM3EI(f1, zeta1, 0.05, deltaT)
                    um3ei2 = shaping.UM3EI(f2, zeta2, 0.05, deltaT)
                    shaper = shaping.seqconv(um3ei1.shaper, um3ei2.shaper)
                    X0 = np.hstack((shaper[:,1], shaper[:,0]))
                # (A commented-out hand-tuned 11-impulse seed for Ins <= 1.2
                # was removed for readability; see version control history.)
                else:
                    raise ValueError('Code only works (as of 02/17/15) for negative shapers up to I(5%) = 0.912')
                seeking_solution = False
            elif UMFlag == 2:
                raise NotImplementedError('As of 02/17/15, the SNA shaper implementation is not finished.')
                # (A large commented-out block of curve-fit SNA initial
                # guesses -- EI, 2-Hump EI, and 3-Hump EI nodes -- previously
                # lived here; removed for readability. See version control
                # history if the SNA implementation is resumed.)
            else:
                raise ValueError('You entered UMFlag = {}. Please enter a proper UMFlag value.\n 0 = positive shaper\n 1 = UM shaper\n 2 = SNA Shaper'.format(UMFlag))
        else:
            # Initial guess supplied in [ti Ai] columns; flatten into the
            # packed [A1..An t1..tn] vector the optimizer works on.
            X0 = np.hstack((X0[:,1],X0[:,0]))
            ShaperLength = len(X0) / 2
        # Create the bounds on impulse amplitudes and times
        amp_tol = 1e-5
        imp_minSpace = 0.0
        bnds = ()
        if X0 is not None:
            ShaperLength = len(X0) / 2
        # Set the bounds on the impulse amplitudes and times - These are *also* constraints
        if UMFlag:
            # Unity Magnitude Shaper
            for ii in range(np.round(ShaperLength).astype(int)):
                # create bounds on impulse amplitudes of +/-1 (alternating sign)
                bnds = bnds + (((-1)**ii - amp_tol, (-1)**ii + amp_tol),)
            # force imp_minSpace time spacing between impulses
            # Not strictly necessary, but may be for some numerical solutions
            for ii in range(np.round(ShaperLength).astype(int)):
                bnds = bnds + ((0.0 + ii * imp_minSpace, 3*tau),)
        else:
            # Positive Shaper
            for ii in range(np.round(ShaperLength).astype(int)):
                # create bounds on impulse amplitudes of 0 < A_i < 1
                bnds = bnds + ((0.0, 1.0),)
            # force imp_minSpace time spacing between impulses
            # Not strictly necessary, but may be for some numerical solutions
            for ii in range(np.round(ShaperLength).astype(int)):
                bnds = bnds + ((0.0 + ii * imp_minSpace, 3*tau),)
        args = (f1_min, f1_max, f2_min, f2_max, zeta1, zeta2, Vtol1, Vtol2, UMFlag,ic_1,ic_2)
        # Form the constraints:
        #   Sum of impulses = 1
        #   t1 = 0
        #   Vib < V_tol
        #   t_i > t_{i-1}
        cons = form_constraints(X0, args)
        #pdb.set_trace()
        # Call the optimization routine
        res = optimize.minimize(func, X0, args, jac = func_deriv, bounds = bnds,
                                constraints = cons, method='SLSQP', tol = 1e-6,
                                options={'maxiter':1e3, 'eps':1e-9, 'disp':True})
        if res.success:
            # Put the result in standard shaper form
            num_impulses = np.round(len(res.x)/2).astype(int)
            amps = res.x[0:num_impulses]
            amps = amps.reshape(num_impulses,1)
            times = res.x[num_impulses:len(res.x)]
            times = times.reshape(num_impulses,1)
            shaper = np.hstack((times,amps))
            # Check for near-repeated impulse times; if any exist, merge them
            # into single impulses with summed amplitudes.
            if len(amps) > 2:
                times = []
                amps = []
                idx = np.argsort(shaper[:,0])
                shap_sort = shaper[idx, :]
                tms_X0 = shap_sort[:,0]
                amps_X0 = shap_sort[:,1]
                times = np.append(times, tms_X0[0])
                amps = np.append(amps, amps_X0[0])
                for ii in range(1, np.round(num_impulses).astype(int)):
                    # Merge impulses whose times agree to within 1e-4 s.
                    if np.abs(tms_X0[ii] - tms_X0[ii-1]) < 1e-4:
                        print('\nRepeated Times. Shortening and resolving...')
                        amps[-1] = amps[-1] + amps_X0[ii]
                    else:
                        times = np.append(times, tms_X0[ii])
                        amps = np.append(amps, amps_X0[ii])
                # Force first impulse time to 0
                times[0] = 0.0
                # create new initial guess (kept for a potential re-solve)
                X0 = np.hstack((amps, times))
                # Put the (possibly shortened) result in standard shaper form
                num_impulses = len(amps)
                amps = amps.reshape(num_impulses,1)
                times = times.reshape(num_impulses,1)
                shaper = np.hstack((times,amps))
            seeking_solution = False
        else:
            # Optimization failed: return an empty shaper and advise the user.
            shaper = []
            seeking_solution = False
            print('\nOptimization failed.\n')
            print('Possible Solutions:')
            print(' * Improve your initial guess. Options include:')
            print(' - Use the "closest" closed-form shaper for the initial guess.')
            print(' - Solve for a "nearby" point and use it as the initial guess.')
            print(' * Try a slightly different Insensitivity range.')
            print(' * Try a slightly different damping ratio.')
            print(' * Normalize the range of frequencies by the midpoint.')
            print('\nAs of 12/22/14, solutions nearest to EI-form shapers work best ')
            print(' when no initial guess is given.')
            print(res.x)
    return res, shaper
# If running this as a script, then execute the below.
# This is also representative of an example use.
if __name__ == "__main__":
    # --- Mode 1 design parameters ---
    wn1 = 1.0 * 2.0 * np.pi  # Natural Freq (rad/s)
    f1 = wn1 / (2*np.pi)  # Natural Freq (Hz)
    tau1 = 1.0 / f1  # period (s)
    zeta1 = 0.0  # Damping Ratio
    Vtol1 = 0.05  # Tolerable level of vibration (0.05 = 5%)
    w1_min = 0.85 * wn1  # minimum freq in range to suppress (rad/s)
    w1_max = 1.15 * wn1  # maximum freq in range to suppress (rad/s)
    f1_min = w1_min / (2*np.pi)  # minimum freq in range to suppress (Hz)
    f1_max = w1_max / (2*np.pi)  # maximum freq in range to suppress (Hz)
    # --- Mode 2 design parameters ---
    wn2 = 2.5 * 2.0 * np.pi  # Natural Freq (rad/s)
    f2 = wn2 / (2*np.pi)  # Natural Freq (Hz)
    tau2 = 1.0 / f2  # period (s)
    zeta2 = 0.0  # Damping Ratio
    Vtol2 = 0.05  # Tolerable level of vibration (0.05 = 5%)
    w2_min = 0.9 * wn2  # minimum freq in range to suppress (rad/s)
    w2_max = 1.1 * wn2  # maximum freq in range to suppress (rad/s)
    f2_min = w2_min / (2*np.pi)  # minimum freq in range to suppress (Hz)
    f2_max = w2_max / (2*np.pi)  # maximum freq in range to suppress (Hz)
    deltaT = 0.01  # sampling time for digitization
    x0 = None  # let si_2mode construct its own initial guess
    UMFlag = 0  # solve for a positive shaper
    PLOT_SENSCURVE = True  # Plot a sensitivity curve for the result?
    # Call the SI function to solve
    print('\n\n')  # Empty line to improve readability of answer
    res, shaper = si_2mode(UMFlag, 1.0, x0, f1_min, f1_max, f2_min, f2_max, zeta1, zeta2, Vtol1, Vtol2, deltaT)
    # If we get a solution, print it to the terminal in a "pretty" manner
    if res.success:
        times = shaper[:,0]
        amps = shaper[:,1]
        # (Unicode pretty-printing of the [Ai; ti] matrix left disabled.)
        #print(u'\n⎡ Ai⎤ ⎡ ' + ' '.join(str('{:.4f}'.format(n)) for n in amps) + u'⎤')
        #print(u'⎪ ⎪ = ⎪' + u''.ljust(7*len(times)) + u'⎪')
        #print(u'⎣ ti⎦ ⎣ ' + ' '.join(str('{:.4f}'.format(n)) for n in times) + u'⎦')
    # Want to plot the sensitivity curve to check the solution?
    if PLOT_SENSCURVE:
        from matplotlib.pyplot import *

        def sensplot(seq,fmin,fmax,zeta,points,plotflag):
            '''Original MATLAB preamble

            sensplot Plot the residual over range of frequencies
            list = sensplot(seq,fmin,fmax,zeta,points,plotflag)
            seq is the shaping sequence
            fmin is the low end of the frequency range
            fmax is the high end of the frequency range
            zeta is the damping ratio of the system
            points is the number of points to calculate
            plotflag plots the data if plotflag=1

            Converted to Python on 2/26/13 by Joshua Vaughan (joshua.vaughan@louisiana.edu)'''
            fmax = float(fmax)  # force one value to be floating point, to ensure floating point math
            df = (fmax-fmin)/points
            [rows,cols]=np.shape(seq)
            tn = seq[-1,0]
            frequency = np.zeros((points,1))
            amplitude = np.zeros((points,1))
            # the vibration percentage formulation is:
            #   t(i) is seq(i,1)
            #   A(i) is seq(i,2)
            #   tn is seq(num_of_rows_in_seq,1)
            for nn in range(points):
                sintrm = 0
                costrm = 0
                freq = (fmin + nn*df) * 2*np.pi
                for i in range(rows):
                    sintrm = sintrm + seq[i,1]*np.exp(zeta*freq*seq[i,0])*np.sin(freq*np.sqrt(1-zeta**2)*seq[i,0])
                    costrm = costrm + seq[i,1]*np.exp(zeta*freq*seq[i,0])*np.cos(freq*np.sqrt(1-zeta**2)*seq[i,0])
                frequency[nn,0] = freq / (2*np.pi)
                amplitude[nn,0] = np.exp(-zeta*freq*tn) * np.sqrt(sintrm**2+costrm**2)
            if plotflag:
                plot(frequency, amplitude*100)
                xlabel(r'Frequency (Hz)',family='CMU Serif',fontsize=22,weight='bold',labelpad=5)
                ylabel(r'Percentage Vibration',family='CMU Serif',fontsize=22,weight='bold',labelpad=8)
                show()
            return frequency, amplitude

        # Call sensplot function to get the sensitivity curve data
        # - plot over twice the shaper's design Insensitivity
        Insens = f2_max - f1_min
        freq, amp = sensplot(shaper, f1-1.5*Insens, f2+1.5*Insens, zeta1, 2000, 0)
        # Now, actually plot the senscurve
        # Set the plot size - 3x2 aspect ratio is best
        fig = figure(figsize=(6,4))
        ax = gca()
        subplots_adjust(bottom=0.17,left=0.17,top=0.96,right=0.96)
        # Change the axis units to CMUSerif-Roman
        setp(ax.get_ymajorticklabels(),fontsize=18)
        setp(ax.get_xmajorticklabels(),fontsize=18)
        ax.spines['right'].set_color('none')
        ax.spines['top'].set_color('none')
        ax.xaxis.set_ticks_position('bottom')
        ax.yaxis.set_ticks_position('left')
        # Turn on the plot grid and set appropriate linestyle and color
        ax.grid(True,linestyle=':',color='0.75')
        ax.set_axisbelow(True)
        # Define the X and Y axis labels
        xlabel('Frequency (Hz)',fontsize=22,weight='bold',labelpad=5)
        ylabel('Percentage Vibration',fontsize=22,weight='bold',labelpad=10)
        plot(freq, amp*100, linewidth=2, linestyle="-",label=r'Sensitvity Curve')
        # plot([f1 - 1.5*Insens, f2 + 1.5*Insens], [Vtol1*100, Vtol1*100], linewidth = 1, linestyle = '--', label=r'$V_{tol}$')
        # set limits if needed
        xlim(np.max([0, f1 - 1.5*Insens]), f2 + 1.5*Insens)
        ylim(0, 100)
        # Adjust the page layout, filling the page, using tight_layout
        tight_layout(pad=0.5)
        # save the figure as a high-res pdf in the current folder
        # savefig('SI_sensplot.pdf')
        # show the figure
        show()
import warnings
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def Visualize_4DTensor(tensor, channels, threshold=1E-6, savefile="Visualize_4DTensor.png"):
    """
    Render selected channels of a 4D voxel tensor as stacked 3D subplots.

    Parameters
    ----------
    tensor : numpy array, assumed (channel, x, y, z) -- must be 4-dimensional.
    channels : iterable of int
        Channel indices to draw; each channel gets its own subplot,
        titled with the corresponding entry of Channel_Titles.
    threshold : float
        Voxel values below this are zeroed (treated as empty).
    savefile : str
        Path the figure is written to.
    """
    Channel_Titles = ["Energy Grid","H","O", "N", "C", "P", "Cu","Co","Ag","Zn","Cd", "Fe"]
    if (len(tensor.shape) != 4):
        print("Tensor must be 4-dimensional. Tensor shape was: ", tensor.shape)
    else:
        fig = plt.figure()
        counter = 1
        for channel in channels:
            ax = fig.add_subplot(len(channels), 1, counter, projection='3d')
            # Bug fix: copy before thresholding. `tensor[channel]` is a
            # view, so the original zeroed values in the caller's tensor;
            # Visualize_MOF below already copies for the same reason.
            grid = np.copy(tensor[channel])
            grid[grid < threshold] = 0
            ax.voxels(grid)
            ax.set_title(Channel_Titles[channel])
            counter += 1
        plt.legend()
        plt.savefig(savefile)
def Visualize_MOF(tensor, channels, threshold=1E-1, savefile="MOF.png"):
    """
    Draw several channels of a MOF voxel tensor into one 3D axes and save.

    Parameters
    ----------
    tensor : numpy array, assumed (channel, x, y, z).
    channels : iterable of int channel indices to overlay in one plot.
    threshold : float; voxel values below this are zeroed (treated as empty).
    savefile : str output image path.
    """
    fig = plt.figure()
    # fig.gca(projection='3d') was deprecated in matplotlib 3.4 and
    # removed in 3.6; add_subplot is the supported equivalent.
    ax = fig.add_subplot(projection='3d')
    for channel in channels:
        # Copy so thresholding does not mutate the caller's tensor.
        grid = np.copy(tensor[channel])
        grid[grid < threshold] = 0
        ax.voxels(grid)
    plt.savefig(savefile)
def Visualize_MOF_Split(tensor, channels, threshold=1E-1, savefile="MOF.png"):
    """
    Like Visualize_MOF, but saves a snapshot after each channel is added.

    Channels are drawn cumulatively into the same axes; after drawing the
    n-th channel, the figure so far is saved to a file whose name is
    prefixed with that channel's element name.
    """
    fig = plt.figure()
    # gca(projection='3d') was removed in matplotlib 3.6; use add_subplot.
    ax = fig.add_subplot(projection='3d')
    Channel_Titles = ["Energy Grid","H","O", "N", "C", "P", "Cu","Co","Ag","Zn","Cd", "Fe"]
    for channel in channels:
        grid = np.copy(tensor[channel])
        grid[grid < threshold] = 0
        ax.voxels(grid)
        # Bug fix: name the file after the channel just drawn. The
        # original indexed Channel_Titles with the loop counter, which is
        # wrong whenever `channels` is not exactly [0, 1, 2, ...].
        plt.savefig(Channel_Titles[channel] + "_" + savefile)
### RUN THIS TO MAKE ALL USED DIRECTORIES FOR PROGRAM ###
import os

# Every directory the program expects to find at runtime.
directories = [
    'ColorMaps',
    'DifferenceMaps',
    'ImageSets',
    'InputImages',
    'Photomosaics'
]

for d in directories:
    try:
        os.mkdir(d)
    except OSError:
        # Narrowed from a bare `except:`: only filesystem errors (most
        # commonly "directory already exists") are expected here, and a
        # bare except would also swallow KeyboardInterrupt/SystemExit.
        print("Failed to create directory " + d)
# sample_two.py
import sys
import os
import platform
import wx
# class My_Printout
# class My_Frame
# class My_App
#-------------------------------------------------------------------------------
# Report the host platform once, as the module loads.
if os.name == "posix":
    print("\nPlatform : UNIX - Linux")
elif os.name in ('nt', 'dos', 'ce'):
    # All historic Windows flavours share the same message.
    print("\nPlatform : Windows")
else:
    # Fall back to whatever the platform module reports.
    print("\nPlatform : ", platform.system())
#-------------------------------------------------------------------------------
class My_Printout(wx.Printout):
    """
    A two-page wx.Printout that renders the supplied text plus demo shapes.
    """

    def __init__(self, text, title):
        wx.Printout.__init__(self, title)
        self.lines = text

    # ------------------------------------------------------------------
    # Lifecycle hooks -- each simply defers to the base implementation.
    # ------------------------------------------------------------------
    def OnBeginDocument(self, start, end):
        """Forward to the base class."""
        return super(My_Printout, self).OnBeginDocument(start, end)

    def OnEndDocument(self):
        """Forward to the base class."""
        super(My_Printout, self).OnEndDocument()

    def OnBeginPrinting(self):
        """Forward to the base class."""
        super(My_Printout, self).OnBeginPrinting()

    def OnEndPrinting(self):
        """Forward to the base class."""
        super(My_Printout, self).OnEndPrinting()

    def OnPreparePrinting(self):
        """Forward to the base class."""
        super(My_Printout, self).OnPreparePrinting()

    def HasPage(self, page):
        """The document is exactly two pages long."""
        return page <= 2

    def GetPageInfo(self):
        """Return (min page, max page, page from, page to)."""
        return (1, 2, 1, 2)

    def OnPrintPage(self, page):
        """Render one page onto the printout's device context."""
        dc = self.GetDC()
        # Work in printer points so output is device independent.
        # (wx.MM_METRIC would make each logical unit 1 mm instead.)
        dc.SetMapMode(wx.MM_POINTS)
        # The text, in a large bold red font.
        dc.SetTextForeground("red")
        dc.SetFont(wx.Font(20, wx.SWISS, wx.NORMAL, wx.BOLD))
        dc.DrawText(self.lines, 50, 100)
        # Demo shapes: red (R, V, B) outline with a green fill.
        dc.SetPen(wx.Pen(wx.Colour(255, 20, 5)))
        dc.SetBrush(wx.Brush(wx.Colour(30, 255, 20)))
        dc.DrawCircle(100, 275, 25)        # x, y, radius
        dc.DrawEllipse(100, 275, 75, 50)   # x, y, width, height
        return True
#-------------------------------------------------------------------------------
class My_Frame(wx.Frame):
    """
    Create a main frame for my application.

    Hosts a text control plus menu entries and buttons for page setup,
    print preview and direct printing (via My_Printout).
    """

    def __init__(self, parent, id, title=""):
        wx.Frame.__init__(self,
                          parent,
                          id,
                          title,
                          size=(600, 350),
                          style=wx.DEFAULT_FRAME_STYLE)
        # Simplified init method: each concern lives in its own helper.
        self.SetProperties()
        self.CreateMenu()
        self.CreateCtrls()
        self.CreatePrintData()
        self.BindEvents()
        self.DoLayout()
        self.CenterOnScreen()

    #---------------------------------------------------------------------------
    def SetProperties(self):
        """
        Set the main frame properties (title, icon...).
        """
        self.SetTitle("Printing test...")

    def CreateMenu(self):
        """
        Make the frame menus.
        """
        menub = wx.MenuBar()
        fmenu = wx.Menu()
        fmenu.Append(wx.ID_PAGE_SETUP, "Page set&up\tCtrl+U")
        fmenu.Append(wx.ID_PREVIEW, "Print pre&view\tCtrl+V")
        fmenu.Append(wx.ID_PRINT, "&Print\tCtrl+P")
        fmenu.AppendSeparator()
        fmenu.Append(wx.ID_EXIT, "E&xit\tCtrl+X")
        menub.Append(fmenu, "&File")
        self.SetMenuBar(menub)

    def CreateCtrls(self):
        """
        Make widgets for my app.
        """
        font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
        font.SetWeight(wx.BOLD)
        font.SetPointSize(10)
        # First create the controls.
        self.panel = wx.Panel(self,
                              id=-1,
                              style=wx.BORDER_THEME|
                                    wx.TAB_TRAVERSAL)
        self.text = wx.StaticText(self.panel,
                                  id=-1,
                                  label="Demonstrating :")
        self.text.SetFont(font)
        self.info = wx.StaticText(self.panel,
                                  id=-1,
                                  label="1) Direct printing,\n"
                                        "2) Printout class,\n"
                                        "3) Preview,\n"
                                        "4) Menu,\n"
                                        "5) Page setup.")
        self.info.SetForegroundColour("red")
        font.SetWeight(wx.NORMAL)
        self.info.SetFont(font)
        self.tc = wx.TextCtrl(self.panel,
                              id=-1,
                              size=(200, -1),
                              value="Hello, World ! A sample text.")
        self.btnSetup = wx.Button(self.panel,
                                  id=wx.ID_PAGE_SETUP,
                                  label="Page set&up")
        self.btnPreview = wx.Button(self.panel,
                                    id=wx.ID_PREVIEW,
                                    label="Print pre&view")
        self.btnPreview.SetFocus()
        self.btnPrint = wx.Button(self.panel,
                                  id=wx.ID_PRINT,
                                  label="&Print")
        self.btnClose = wx.Button(self.panel,
                                  id=wx.ID_CLOSE,
                                  label="E&xit")

    def CreatePrintData(self):
        """
        Create the printing defaults shared by all print/preview dialogs.
        """
        self.printdata = wx.PrintData()
        self.printdata.SetPrinterName('')
        self.printdata.SetOrientation(wx.PORTRAIT)
        self.printdata.SetPaperId(wx.PAPER_A4)
        self.printdata.SetQuality(wx.PRINT_QUALITY_DRAFT)
        # Black and white printing if False.
        self.printdata.SetColour(True)
        self.printdata.SetNoCopies(1)
        self.printdata.SetCollate(True)

    def BindEvents(self):
        """
        Bind all the events related to my application.
        """
        # Bind some menu events to an events handler.
        self.Bind(wx.EVT_MENU, self.OnBtnPageSetup, id=wx.ID_PAGE_SETUP)
        self.Bind(wx.EVT_MENU, self.OnBtnPreview, id=wx.ID_PREVIEW)
        self.Bind(wx.EVT_MENU, self.OnBtnPrint, id=wx.ID_PRINT)
        self.Bind(wx.EVT_MENU, self.OnBtnClose, id=wx.ID_EXIT)
        # Bind the close event to an event handler.
        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
        # Bind some buttons events to an events handler.
        self.Bind(wx.EVT_BUTTON, self.OnBtnPageSetup, self.btnSetup)
        self.Bind(wx.EVT_BUTTON, self.OnBtnPreview, self.btnPreview)
        self.Bind(wx.EVT_BUTTON, self.OnBtnPrint, self.btnPrint)
        self.Bind(wx.EVT_BUTTON, self.OnBtnClose, self.btnClose)

    def DoLayout(self):
        """
        Manage widgets Layout.
        """
        # MainSizer is the top-level one that manages everything.
        mainSizer = wx.BoxSizer(wx.VERTICAL)
        hBox1 = wx.BoxSizer(wx.HORIZONTAL)
        hBox1.Add(self.info, 0, wx.ALL, 15)
        hBox2 = wx.BoxSizer(wx.HORIZONTAL)
        hBox2.Add(self.btnSetup, 0, wx.ALL, 10)
        hBox2.Add(self.btnPreview, 0, wx.ALL, 10)
        hBox2.Add(self.btnPrint, 0, wx.ALL, 10)
        hBox2.Add(self.btnClose, 0, wx.ALL, 10)
        mainSizer.Add(self.text, 0, wx.ALL, 10)
        mainSizer.Add(wx.StaticLine(self.panel),
                      0, wx.EXPAND|wx.TOP|wx.BOTTOM, 5)
        mainSizer.Add(self.tc, 0, wx.ALL, 15)
        mainSizer.Add(hBox1, 0, wx.ALL, 5)
        mainSizer.Add(hBox2, 0, wx.ALL, 5)
        # Finally, tell the panel to use the mainSizer for layout.
        self.panel.SetSizer(mainSizer)

    def OnBtnPageSetup(self, event):
        """
        Show the PrinterSetup dialog.
        """
        psdd = wx.PageSetupDialogData(self.printdata)
        psdd.EnablePrinter(True)
        dlg = wx.PageSetupDialog(self, psdd)
        dlg.ShowModal()
        # This makes a copy of the wx.PrintData instead of just saving
        # a reference to the one inside the PrintDialogData that will
        # be destroyed when the dialog is destroyed
        self.printdata = wx.PrintData(dlg.GetPageSetupData().GetPrintData())
        dlg.Destroy()

    def OnBtnPreview(self, event):
        """
        Show the print preview.
        """
        text = self.tc.GetValue()
        data = wx.PrintDialogData(self.printdata)
        # The preview needs two printouts: one to display, one to print
        # from if the user starts a print job from the preview window.
        printout1 = My_Printout(text, "- My printing object")
        printout2 = My_Printout(text, "- My printing object")
        printPreview = wx.PrintPreview(printout1, printout2, data)
        # Initial zoom value.
        if "__WXMAC__" in wx.PlatformInfo:
            printPreview.SetZoom(50)
        else:
            printPreview.SetZoom(35)
        if not printPreview.IsOk():
            wx.MessageBox(("There was a problem printing.\nPerhaps "\
                           "your current printer is \nnot "\
                           "set correctly ?"),
                          ("Printing"),
                          wx.OK)
            return
        else:
            previewFrame = wx.PreviewFrame(printPreview, None, "Print preview")
            previewFrame.Initialize()
            previewFrame.SetPosition(self.GetPosition())
            previewFrame.SetSize(self.GetSize())
            # Or full screen :
            # previewFrame.Maximize()
            previewFrame.Show(True)
            previewFrame.Layout()

    def OnBtnPrint(self, event):
        """
        Prints the document.
        """
        text = self.tc.GetValue()
        pdd = wx.PrintDialogData(self.printdata)
        pdd.SetPrintData(self.printdata)
        pdd.SetMinPage(1)
        pdd.SetMaxPage(1)
        pdd.SetFromPage(1)
        pdd.SetToPage(1)
        pdd.SetPrintToFile(False)
        printer = wx.Printer(pdd)
        myPrintout = My_Printout(text, "- My printing object")
        if not printer.Print(self, myPrintout, True):
            wx.MessageBox(("There was a problem printing.\nPerhaps "\
                           "your current printer is \nnot "\
                           "set correctly ?"),
                          ("Printing"),
                          wx.OK)
            return
        else:
            # Bug fix: the original assigned to self.printData (capital D),
            # creating a brand-new attribute, so settings chosen in the
            # print dialog were silently dropped instead of being kept in
            # self.printdata for the next dialog.
            self.printdata = wx.PrintData(printer.GetPrintDialogData().GetPrintData())
        myPrintout.Destroy()

    def OnBtnClose(self, event):
        """
        Close the frame (triggers OnCloseWindow).
        """
        self.Close(True)

    def OnCloseWindow(self, event):
        """
        Destroy the frame when it is closed.
        """
        self.Destroy()
#-------------------------------------------------------------------------------
class My_App(wx.App):
    """Application object: builds the main frame and shows it."""

    def OnInit(self):
        # Force English for the stock dialog strings.
        self.locale = wx.Locale(wx.LANGUAGE_ENGLISH)
        frame = My_Frame(None, id=-1)
        self.SetTopWindow(frame)
        frame.Show(True)
        return True
#-------------------------------------------------------------------------------
def main():
    """Entry point: create the application object and run the event loop."""
    # False: do not redirect stdout/stderr into a wx window.
    My_App(False).MainLoop()

#-------------------------------------------------------------------------------
if __name__ == "__main__":
    main()
|
#!/usr/bin/python
'''
Authors : Goerges, Wahn
Description : Methods to predict the secondary structure based on RNA
sequences
Requirements: * http://www.tbi.univie.ac.at/RNA/
- Download and extract source code package
- Navigate into the package with terminal
- Enter the following commands:
+ ./configure --with-python
+ sudo make
+ sudo make install
* epstopdf:
+ sudo apt-get install texlive-font-utils
* xetex:
+ http://wiki.ubuntuusers.de/XeTeX
Mac OS users:
$ ./configure --disable-openmp \
--disable-dependency-tracking \
CFLAGS="-arch i386 -arch x86_64 -O2" \
CXXFLAGS="-arch i386 -arch
x86_64 -O2" --with-python
'''
import os
import subprocess
try:
import RNA
from Bio import SeqIO
except ImportError, e:
pass
def predict_RNA_sec_structure(tmp, fasta, entry, changestatus):
    """
    Predict 2nd rna structure for the sequence in the fasta file that
    corresponds to the entry.

    Folds the matching sequence with RNAfold (ViennaRNA), renders a radial
    structure plot, typesets a small LaTeX report with xelatex, cleans up
    the intermediates and returns the path of the resulting PDF.

    Parameters:
        tmp          -- working directory (created if missing). NOTE: this
                        function chdir()s into it and never chdir()s back.
        fasta        -- path to the fasta file.
        entry        -- description line identifying the sequence to fold.
        changestatus -- status callback; NOTE(review): currently unused.

    Raises:
        RuntimeError if the entry is not found in the fasta file.
        IOError if xelatex did not produce a PDF.
    """
    # Read inputs
    seq = None
    structure = None
    for x in parse_fasta(fasta):
        if x["description"] == entry:
            seq = x["seq"]
            desc = x["description"]
            # RNA.fold returns (dot-bracket structure, mfe); only [0] is
            # used below.
            structure = RNA.fold(x["seq"])
            break
    if seq is None:
        raise RuntimeError("No such entry")
    # Keep backward compatibility in case support for multiple entries is wanted
    entries = [{"seq": seq, "structure": structure, "description": desc}]
    #check and create tmp directory if not present
    if not os.path.exists(tmp):
        os.makedirs(tmp)
    # change working dir
    os.chdir(tmp)
    # Draw plots
    entry_c = 0
    # NOTE(review): the loop variable shadows the `entry` parameter (which
    # is no longer needed at this point).
    for entry in entries:
        entry_c += 1
        entry["ID"] = entry_c
        RNA.PS_rna_plot(entry["seq"], entry["structure"][0],
                        "radiate" + str(entry_c) + ".ps")
    # Converts ps to pdf
    for i in range(1,entry_c + 1):
        subprocess.call(["epstopdf", "radiate" + str(i) + ".ps"])
    # Write result into a tex file
    with open("rna_struct_result.tex", "w") as handler:
        handler.write("\\documentclass[a4paper, 11pt]{article}\n")
        handler.write("\\usepackage{graphicx}\n")
        handler.write("\\begin{document}\n")
        handler.write("\\section*{RNA structure prediction}\n")
        if entry_c > 1:
            handler.write("The following structures have been predicted using RNAfold of the ViennaRNA package.\n")
            handler.write("\\smallskip \n")
        else:
            handler.write("The following structure has been predicted using RNAfold of the ViennaRNA package. \n")
            handler.write("\\smallskip \n")
        # One subsection per folded entry: description, dot-bracket string,
        # then the radial plot image.
        for i in range(1,entry_c + 1):
            handler.write("\\subsection*{Structure " + str(i) + ":}\n")
            handler.write("Description:\n")
            for entry in entries:
                if entry["ID"] == i:
                    # handle hyphenation for long words in description
                    handler.write("\\flushleft{\\sloppypar{" + entry["description"] + "}} \n")
                    handler.write("\\bigskip \n")
                    handler.write("\\newline \n")
            handler.write("Dot-Bracket format: \n")
            handler.write("\\medskip \n")
            handler.write("\\newline \n")
            for entry in entries:
                if entry["ID"] == i:
                    # Wrap the dot-bracket string into 80-character lines.
                    splits = [entry["structure"][0][j:j+80] for j in range(0, len(entry["structure"][0]), 80)]
                    for j in range(len(splits)):
                        if j == len(splits) - 1:
                            handler.write(splits[j] + "\n")
                            handler.write("\\bigskip \n")
                            handler.write("\\newline \n")
                        else:
                            handler.write(splits[j] + "\n")
                            handler.write("\\newline \n")
                    break
            handler.write("Radial format:\n")
            handler.write("\\medskip \n")
            handler.write("\\newline \n")
            handler.write("\\begin{figure}[h]\n")
            handler.write("\\centering\n")
            handler.write("\\includegraphics[width=0.5\\textwidth]{radiate" + str(i) +".pdf}\n")
            handler.write("\\end{figure}\n")
            handler.write("\\newpage")
        handler.write("\\end{document}\n")
    # Compile
    subprocess.call(["xelatex", "-interaction=nonstopmode", "rna_struct_result.tex"])
    # adrian: Why is that needed?
    #os.chdir("..")
    #Clean tmp folder
    for i in range(1,entry_c + 1):
        os.remove("radiate" + str(i) + ".ps")
        os.remove("radiate" + str(i) + ".pdf")
    if os.path.isfile("rna_struct_result.aux"):
        os.remove("rna_struct_result.aux")
    if os.path.isfile("rna_struct_result.log"):
        os.remove("rna_struct_result.log")
    if os.path.isfile("rna_struct_result.tex"):
        os.remove("rna_struct_result.tex")
    if os.path.exists("rna_struct_result.pdf"):
        # NOTE(review): the cwd is already `tmp` here, so joining tmp again
        # only yields a valid path when `tmp` is absolute -- verify callers.
        return os.path.join(tmp, "rna_struct_result.pdf")
    else:
        raise IOError("Was unable to create pdf")
def parse_fasta(fasta):
    """
    Parse fasta to entries.

    Returns a list of dicts, one per record, each with:
        "description" -- the record's description line
        "seq"         -- the upper-cased sequence as a plain string
    """
    entries = []
    with open(fasta, "r") as handle:
        # Parse from the already-open handle. The original opened the file
        # but then passed the *path* to SeqIO.parse, opening it a second
        # time and leaving `handle` unused.
        for record in SeqIO.parse(handle, format="fasta"):
            entries.append({
                "description": record.description,
                "seq": str(record.seq.upper()),
            })
    return entries
def main():
    """Manual test driver for predict_RNA_sec_structure."""
    def report(msg):
        # Stand-in for the changestatus callback: just echo the message.
        print(msg)
    #Test
    predict_RNA_sec_structure(
        "tmp",
        os.path.abspath("testdata/virus_rna.fasta"),
        "gi|325022|gb|K00849.1|FLAS06M Influenza A/Puerto Rico/8/34 (H1N1), subgenomic RNA 6 (from seg 1), in defective interfering virus",
        report)

if __name__ == "__main__":
    main()
|
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
import re
import time
import os
# main
from method import *
from PreProcess import NormalEstimate
from ransac import RANSAC
from Projection import Plane2DProjection, Plane3DProjection
# zenrin
import figure2d as F
#from IoUtest import CalcIoU, CalcIoU2, LastIoU
from GA import *
from MakeDataset import MakePointSet, MakePointSet3D, MakeSign3D
from TransPix import MakeOuterFrame, MakeOuterFrame2
from ClossNum import CheckClossNum, CheckClossNum2, CheckClossNum3
def PlaneDetect(points, normals, epsilon, alpha):
    """
    Detect the dominant plane in a point cloud and project the fitted
    points into the plane's 2D (u, v) coordinate system.

    Parameters:
        points  -- point array; assumed (N, 3) -- TODO confirm upstream.
        normals -- per-point normals consumed by RANSAC.
        epsilon, alpha -- RANSAC thresholds, passed straight through.

    Returns:
        UVvector    -- 2D coordinates of the fitted points in the plane
        plane       -- the fitted plane model
        u, v, O     -- the plane's in-plane basis vectors and origin
        selectIndex -- boolean mask over `points` marking the fitted subset
    """
    # Plane detection.
    # index: indices (into points) of the points that fit the plane.
    plane, index, num = RANSAC(points, normals, epsilon=epsilon, alpha=alpha)
    selectIndex = np.array([True if i in index else False for i in range(points.shape[0])])
    # Project the fitted points onto the plane.
    # plane_points: projected points as 3D coordinates
    # UVvector: projected points as 2D (u, v) coordinates
    plane_points, UVvector, u, v, O = Plane2DProjection(points[index], plane)
    # # Plot setup (debug visualization, kept for reference)
    # fig = plt.figure()
    # ax = Axes3D(fig)
    # ax.set_xlabel("X")
    # ax.set_ylabel("Y")
    # ax.set_zlabel("Z")
    # # Draw the point cloud
    # X, Y, Z = Disassemble(points)
    # MX, MY, MZ = X[index], Y[index], Z[index]
    # PX, PY, PZ = Disassemble(plane_points)
    # ax.plot(X, Y, Z, marker="o", linestyle='None', color="white")
    # ax.plot(MX, MY, MZ, marker=".", linestyle='None', color="red")
    # ax.plot(PX, PY, PZ, marker=".", linestyle='None', color="blue")
    # # Draw the fitted plane
    # plot_implicit(ax, plane.f_rep, points, AABB_size=1, contourNum=15)
    # plt.show()
    # plt.close()
    # # Draw the projected 2D point set
    # UX, UY = Disassemble2d(UVvector)
    # plt.plot(UX, UY, marker="o",linestyle="None",color="red")
    # plt.show()
    # plt.close()
    return UVvector, plane, u, v, O, selectIndex
def SelectIndex(input, output):
    """
    Merge a coarse boolean selection with a finer one.

    `input` marks which of the original items survived a first filter;
    `output` holds one flag per *surviving* item from a second filter.
    The result has one flag per original item: False wherever `input` is
    False, otherwise the next flag consumed from `output`.

    Example:
        input:  [1,1,1,1,0,1,1,0]
        output: [1,1,1,1,0,1]
        result: [1,1,1,1,0,0,1,0]
    """
    finer = iter(output)
    merged = []
    for kept in input:
        if not kept:
            # Dropped by the first filter: never selected.
            merged.append(False)
        else:
            # Consume the second filter's verdict for this survivor.
            merged.append(bool(next(finer)))
    return np.array(merged)
def ConfusionLabeling(trueIndex, optiIndex):
    """
    Label each (ground truth, prediction) boolean pair with its confusion
    category:

        TP (true 1, opti 1) -> 1
        TN (true 0, opti 0) -> 2
        FP (true 0, opti 1) -> 3
        FN (true 1, opti 0) -> 4
    """
    _LABEL = {(True, True): 1,
              (False, False): 2,
              (False, True): 3,
              (True, False): 4}
    return np.array([_LABEL[bool(gt), bool(pred)]
                     for gt, pred in zip(trueIndex, optiIndex)])
def write_dataset(fig_type, num, dir_path="data/dataset/tri/"):
    """
    Generate `num` synthetic 2D point-set samples and save them as a dataset.

    For each sample: creates a random point set for the requested figure
    type, builds its outer frame, keeps only the points inside the frame,
    then saves the per-sample point arrays plus dataset-wide arrays of
    figure parameters, AABBs and outer-frame areas under `dir_path`.

    Parameters:
        fig_type -- figure type, passed through to MakePointSet.
        num      -- number of samples to generate.
        dir_path -- output directory; must NOT already exist (os.mkdir
                    raises if it does).
    """
    fig_list = []
    AABB_list = []
    out_area_list = []
    points_list = []
    out_points_list = []
    # Create the output folder layout (intermediate image folders are
    # filled by MakeOuterFrame).
    os.mkdir(dir_path)
    os.mkdir(dir_path+"origin")
    # os.mkdir(dir_path+"origin2")
    os.mkdir(dir_path+"dil")
    os.mkdir(dir_path+"close")
    os.mkdir(dir_path+"open")
    os.mkdir(dir_path+"add")
    os.mkdir(dir_path+"contour")
    os.mkdir(dir_path+"outPoints")
    os.mkdir(dir_path+"points")
    # os.mkdir(dir_path+"GA")
    for i in range(num):
        print("epoch:{}".format(i))
        rate = Random(0.5, 1)
        # Create the 2D figure point set.
        fig, sign2d, AABB, _ = MakePointSet(fig_type, 500, rate=rate)
        # Build the outer frame.
        # out_points, out_area = MakeOuterFrame(sign2d, dir_path, i,
        #                 dilate_size=30, close_size=20, open_size=50, add_size=50)
        out_points, out_area = MakeOuterFrame(sign2d, dir_path, i,
                        dilate_size1=30, close_size1=20, open_size1=50, add_size1=50)
        # Keep only the points that fall inside the outer frame.
        # (The comprehension's `i` is scoped to the comprehension in Py3
        # and does not clobber the epoch counter.)
        inside = np.array([CheckClossNum3(sign2d[i], out_points) for i in range(sign2d.shape[0])])
        #inside = CheckClossNum2(sign2d, out_points)
        sign2d = sign2d[inside]
        fig_list.append(fig.p)
        AABB_list.append(AABB)
        out_area_list.append(out_area)
        # points_list.append(sign2d)
        # out_points_list.append(out_points)
        np.save(dir_path+"points/"+str(i), np.array(sign2d))
        np.save(dir_path+"outPoints/"+str(i), np.array(out_points))
        print("p:{}".format(fig.p))
        print("AABB:{}".format(AABB))
        print("outArea:{}".format(out_area))
        print("points{}:{}".format(i, sign2d.shape))
        print("outPoints{}:{}".format(i, out_points.shape))
    # Save the dataset-wide arrays once all samples are written.
    print("fig:{}".format(np.array(fig_list).shape))
    print("AABB:{}".format(np.array(AABB_list).shape))
    print("outArea:{}".format(np.array(out_area_list).shape))
    np.save(dir_path+"fig", np.array(fig_list))
    np.save(dir_path+"AABB", np.array(AABB_list))
    np.save(dir_path+"outArea", np.array(out_area_list))
def use_dataset(fig_type, num, dir_path="data/dataset/tri/", out_path="data/GAtest/tri/"):
    """
    Run the GA fitting experiments over a dataset written by write_dataset.

    For each of the first `num` samples under `dir_path`: loads the ground
    truth (figure parameters, AABB, outer-frame area) and the per-sample
    point sets, runs EntireGA with four scoring functions, computes each
    winner's IoU against the ground-truth figure (re-scoring the alternate
    individual whenever a run reports failure, i.e. -1) and appends the
    results as one CSV row under `out_path`.

    Parameters:
        fig_type -- 0 = circle, 1 = triangle, otherwise rectangle.
        num      -- number of samples to process.
        dir_path -- dataset directory (see write_dataset).
        out_path -- directory for score plots, IoU images and the CSV.
    """
    # `csv` is not imported at this module's top; star imports above may or
    # may not provide it, so import it locally to be safe.
    import csv

    # Load the dataset-wide arrays.
    fig_list = np.load(dir_path + "fig.npy")
    AABB_list = np.load(dir_path + "AABB.npy")
    outArea_list = np.load(dir_path + "outArea.npy")
    print("fig:{}".format(np.array(fig_list).shape))
    print("AABB:{}".format(np.array(AABB_list).shape))
    print("outArea:{}".format(np.array(outArea_list).shape))

    # points / outPoints are stored one .npy per sample: collect the paths,
    # sorted numerically by the trailing index in each filename.
    points_paths = sorted(glob(dir_path + "points/**.npy"),
                          key=lambda s: int(re.findall(r'\d+', s)[-1]))
    outPoints_paths = sorted(glob(dir_path + "outPoints/**.npy"),
                             key=lambda s: int(re.findall(r'\d+', s)[-1]))

    for i in range(num):
        print("epoch:{}".format(i))
        # Load this sample's point sets.
        points = np.load(points_paths[i])
        outPoints = np.load(outPoints_paths[i])
        # Look up the matching ground-truth metadata.
        fig_p = fig_list[i]
        AABB = AABB_list[i]
        outArea = outArea_list[i]
        # Rebuild the ground-truth figure object.
        if fig_type == 0:
            fig = F.circle(fig_p)
        elif fig_type == 1:
            fig = F.tri(fig_p)
        else:
            fig = F.rect(fig_p)

        # Run the GA with four scoring functions, timing each run.
        start = time.time()
        best3, alt3 = EntireGA(points, outPoints, outArea, CalcIoU1, out_path + "score3/" + str(i) + ".png", fig_type,
                               tournament_size=25, n_epoch=600, add_num=30, half_reset_num=15, all_reset_num=9)
        mid1 = time.time()
        best4, alt4 = EntireGA(points, outPoints, outArea, calc_score2, out_path + "score4/" + str(i) + ".png", fig_type,
                               tournament_size=25, n_epoch=600, add_num=30, half_reset_num=15, all_reset_num=9)
        mid2 = time.time()
        best5, alt5 = EntireGA(points, outPoints, outArea, calc_score1_5, out_path + "score5/" + str(i) + ".png",
                               fig_type, tournament_size=25, n_epoch=600, add_num=30, half_reset_num=15, all_reset_num=9)
        mid3 = time.time()
        best6, alt6 = EntireGA(points, outPoints, outArea, calc_score1_2, out_path + "score6/" + str(i) + ".png",
                               fig_type, tournament_size=25, n_epoch=600, add_num=30, half_reset_num=15, all_reset_num=9)
        end = time.time()
        print("score3:{}s, score4:{}s, score5:{}s, score6:{}s".format((mid1-start), (mid2-mid1), (mid3-mid2), (end-mid3)))

        # Score each winner against the ground truth. LastIoU returns -1
        # on failure, in which case the alternate individual is scored too.
        # NOTE(review): the LastIoU import is commented out at the top of
        # this file; presumably `from GA import *` provides it -- verify.
        rec_list = []
        IoU3 = LastIoU(fig, best3.figure, AABB, out_path + "IoU3/" + str(i) + ".png")
        IoU4 = LastIoU(fig, best4.figure, AABB, out_path + "IoU4/" + str(i) + ".png")
        IoU5 = LastIoU(fig, best5.figure, AABB, out_path + "IoU5/" + str(i) + ".png")
        IoU6 = LastIoU(fig, best6.figure, AABB, out_path + "IoU6/" + str(i) + ".png")
        rec_list.append(IoU3)
        if IoU3 == -1:
            rec_list.append(LastIoU(fig, alt3.figure, AABB, out_path + "IoU3/" + str(i) + "re.png"))
        rec_list.append(IoU4)
        if IoU4 == -1:
            rec_list.append(LastIoU(fig, alt4.figure, AABB, out_path + "IoU4/" + str(i) + "re.png"))
        rec_list.append(IoU5)
        if IoU5 == -1:
            rec_list.append(LastIoU(fig, alt5.figure, AABB, out_path + "IoU5/" + str(i) + "re.png"))
        rec_list.append(IoU6)
        # Bug fix: the original re-tested IoU5 here, so a failed IoU6 run
        # never had its alternate scored, while a failed IoU5 run scored
        # alt6 spuriously.
        if IoU6 == -1:
            rec_list.append(LastIoU(fig, alt6.figure, AABB, out_path + "IoU6/" + str(i) + "re.png"))
        print(rec_list)

        # Append this sample's results as one row of the results CSV.
        with open(out_path + "circle_k.csv", 'a', newline="") as f:
            writer = csv.writer(f)
            writer.writerow(rec_list)
def check_exam(fig_type, i, dir_path="data/dataset/tri/", out_path="data/GAtest/tri/"):
    """
    Inspect a single dataset sample (index `i`).

    Loads the dataset arrays and the sample's point sets exactly like
    use_dataset does. The GA / IoU steps below are currently commented
    out, so for now this only loads data and prints array shapes.
    """
    # Load the dataset-wide arrays.
    fig_list = np.load(dir_path+"fig.npy")
    AABB_list = np.load(dir_path+"AABB.npy")
    outArea_list = np.load(dir_path+"outArea.npy")
    print("fig:{}".format(np.array(fig_list).shape))
    print("AABB:{}".format(np.array(AABB_list).shape))
    print("outArea:{}".format(np.array(outArea_list).shape))
    # points / outPoints: first collect the per-sample file paths, sorted
    # numerically by the trailing index in each filename.
    points_paths = sorted(glob(dir_path + "points/**.npy"),\
        key=lambda s: int(re.findall(r'\d+', s)[len(re.findall(r'\d+', s))-1]))
    outPoints_paths = sorted(glob(dir_path + "outPoints/**.npy"),\
        key=lambda s: int(re.findall(r'\d+', s)[len(re.findall(r'\d+', s))-1]))
    print("epoch:{}".format(i))
    # Load this sample's points / outPoints.
    points = np.load(points_paths[i])
    outPoints = np.load(outPoints_paths[i])
    # Look up the matching ground-truth metadata.
    fig_p = fig_list[i]
    AABB = AABB_list[i]
    outArea = outArea_list[i]
    # Rebuild the ground-truth figure object.
    if fig_type==0:
        fig = F.circle(fig_p)
    elif fig_type==1:
        fig = F.tri(fig_p)
    else:
        fig = F.rect(fig_p)
    # Optimal-parameter search via GA (all variants currently disabled):
    # best3 = EntireGA(points, outPoints, outArea, CalcIoU1, out_path+str(i)+".png",
    #                  n_epoch=300, N=100, add_num=30, half_reset_num=15, all_reset_num=9)
    #IoU = LastIoU(fig, best3.figure, AABB, path=out_path)
    #print(IoU)
# def test3D(fig_type, loop):
# count = 0
# while count != loop:
# # 2d図形点群作成
# para3d, sign3d, AABB3d, trueIndex = MakePointSet3D(fig_type, 500, rate=0.8)
# # 平面検出, 2d変換
# sign2d, plane, u, v, O, index1 = PlaneDetect(sign3d)
# # 外枠作成
# out_points, out_area = MakeOuterFrame(sign2d, path="data/GAtest/" + str(count) + ".png")
# # 外枠内の点群だけにする
# index2 = np.array([CheckClossNum3(sign2d[i], out_points) for i in range(sign2d.shape[0])])
# #inside = CheckClossNum2(sign2d, out_points)
# sign2d = sign2d[index2]
# # GAにより最適パラメータ出力
# #best = GA(sign)
# best = EntireGA(sign2d, out_points, out_area)
# print("="*50)
# X, Y = Disassemble2d(sign2d)
# index3 = (best.figure.f_rep(X, Y) >= 0)
# estiIndex = SelectIndex(index1, SelectIndex(index2, index3))
# print(best[fig_type].figure.p)
# count+=1
# Script driver: run the GA experiments over a pre-written dataset.
# (Dataset generation and other experiments are left commented out.)
# import time
# start = time.time()
# write_dataset(0, 10, dir_path="dataset/circle_GA/")
use_dataset(2, 50, dir_path="data/dataset/rect4/", out_path="data/result/rect_k/")
# end = time.time()
# print("time:{}m".format((end-start)/60))
#test2D(1, 3, "data/GAtest/IoU.csv")
#check_exam(1, 1, dir_path="data/dataset/2D/tri4/", out_path="data/GAtest/")
"""
2. Написать программу, которая запрашивает у пользователя ввод числа.
На введенное число она отвечает сообщением, целое оно или дробное.
Если дробное — необходимо далее выполнить сравнение чисел до и после запятой.
Если они совпадают, программа должна возвращать значение True, иначе False.
"""
def check_number(str_number):
    """
    Classify a user-supplied numeric string.

    Returns a message (in Russian, per the assignment) saying whether the
    value is an integer or a fraction; for fractions, also reports whether
    the digit strings before and after the decimal point coincide.
    Non-numeric input yields an error message instead of raising.
    """
    try:
        number = float(str_number)
        # int(float('inf')) raises OverflowError (and int(nan) raises
        # ValueError); the original only caught ValueError, so input like
        # 'inf' crashed instead of being reported as "not a number".
        if int(number) == number:
            return f'{str_number} - число целое'
        result = [f'{str_number} - число дробное, ']
        left, right = str_number.split('.')
        if left == right:
            result.append('\nЛевая и правая части совпадают')
        else:
            result.append('\nЛевая и правая части не совпадают')
        return ''.join(result)
    except (ValueError, OverflowError):
        return f'{str_number} - не число'
# Interactive entry point: prompt once and report the classification.
print(check_number(input('Введите число: ')))
import os
import sys
from .communication import CommunicationManager, ProcessDiedException
class FileNoComs(CommunicationManager):
    """
    IPC via fileno i.e. pipes.

    Wraps a pair of file descriptors as text streams; -1 selects the
    process's own stdin/stdout, which is how the child end of a pipe is
    typically wired up.
    """

    def __init__(self, is_child, read_fileno=-1, write_fileno=-1):
        super().__init__(is_child)
        rd = sys.stdin.fileno() if read_fileno == -1 else read_fileno
        wr = sys.stdout.fileno() if write_fileno == -1 else write_fileno
        self._read = os.fdopen(rd, 'r')
        self._write = os.fdopen(wr, 'w')

    def _close(self):
        # Close both ends of the channel.
        for stream in (self._read, self._write):
            stream.close()

    def _send_str(self, msg: str):
        # Flush immediately: the peer blocks on readline() until a
        # complete message arrives.
        self._write.write(msg)
        self._write.flush()

    def _recv_str(self) -> str:
        while True:
            line = self._read.readline()
            if not line:
                # readline() returns '' only at EOF, i.e. the peer's end
                # of the pipe was closed -- the other process went away.
                raise ProcessDiedException
            return line
"""Auto-complete selection widgets using Django and jQuery UI."""

# Package version string (single source of truth; bump on release).
__version__ = '0.6.2'
|
# Generated by Django 2.1.7 on 2019-04-01 01:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional middle_initial field to the main.Prof model."""

    dependencies = [
        ('main', '0010_prof_suffix'),
    ]

    operations = [
        migrations.AddField(
            model_name='prof',
            name='middle_initial',
            # blank=True: the field may be left empty in forms/admin.
            field=models.CharField(blank=True, max_length=5),
        ),
    ]
|
import pandas as pd
import json
import sys
from casos import casos_positivos, casos_fallecidos
# Population figures for the department and its provinces.
poblacion_pasco = 270575
poblacion_pasco_pasco = 138810
poblacion_pasco_daniel_alcide_carrion = 46771
poblacion_pasco_oxapampa = 84994

# Life stages as (label, min age, max age inclusive); None = open-ended.
# Order matters: it fixes the key order of the emitted JSON.
ETAPAS_DE_VIDA = [
    ("primera_infancia", 0, 5),
    ("infancia", 6, 11),
    ("adolescencia", 12, 18),
    ("juventud", 19, 26),
    ("adultez", 27, 59),
    ("persona_mayor", 60, None),
]


def _contar(df):
    """Number of cases (rows) in *df*; replaces the original list(df.shape)[0]."""
    return df.shape[0]


def _casos_en(df, columna, lugar):
    """Rows of *df* whose *columna* equals *lugar*."""
    return df[df[columna] == lugar]


def _fallecidos_por_etapa(fallecidos):
    """Death counts per life stage, keyed exactly as in the original output."""
    conteos = {}
    for etapa, desde, hasta in ETAPAS_DE_VIDA:
        seleccion = fallecidos['EDAD_DECLARADA'] >= desde
        if hasta is not None:
            seleccion = seleccion & (fallecidos['EDAD_DECLARADA'] <= hasta)
        conteos[etapa] = _contar(fallecidos[seleccion])
    return conteos


def _resumen_provincia(lugar, nombre, poblacion):
    """Summary dict for one province (key order matches the original JSON)."""
    positivos = _casos_en(casos_positivos, 'PROVINCIA', lugar)
    fallecidos = _casos_en(casos_fallecidos, 'PROVINCIA', lugar)
    return {
        "name": nombre,
        "positivos": _contar(positivos),
        "poblacion": poblacion,
        "hombres_infectados": _contar(positivos[positivos['SEXO'] == "MASCULINO"]),
        "mujeres_infectados": _contar(positivos[positivos['SEXO'] == "FEMENINO"]),
        "fallecidos": _contar(fallecidos),
        "hombres_fallecidos": _contar(fallecidos[fallecidos['SEXO'] == "MASCULINO"]),
        "mujeres_fallecidos": _contar(fallecidos[fallecidos['SEXO'] == "FEMENINO"]),
        "type": "Provincia",
        "etapa_de_vida_fallecidos": _fallecidos_por_etapa(fallecidos),
    }


# Department-level case sets.
# NOTE(review): the original also exposed dozens of intermediate module-level
# counters (positivos_pasco, fallecidos_hombres_pasco, ...); they appear to
# exist only to build the dict below — confirm nothing imports them.
_positivos_dep = _casos_en(casos_positivos, 'DEPARTAMENTO', "PASCO")
_fallecidos_dep = _casos_en(casos_fallecidos, 'DEPARTAMENTO', "PASCO")

pasco = {
    "name": "Pasco",
    "poblacion": poblacion_pasco,
    "positivos": _contar(_positivos_dep),
    "hombres_infectados": _contar(_positivos_dep[_positivos_dep['SEXO'] == "MASCULINO"]),
    "mujeres_infectados": _contar(_positivos_dep[_positivos_dep['SEXO'] == "FEMENINO"]),
    "fallecidos": _contar(_fallecidos_dep),
    "hombres_fallecidos": _contar(_fallecidos_dep[_fallecidos_dep['SEXO'] == "MASCULINO"]),
    "mujeres_fallecidos": _contar(_fallecidos_dep[_fallecidos_dep['SEXO'] == "FEMENINO"]),
    "type": "Departamento",
    "etapa_de_vida_fallecidos": _fallecidos_por_etapa(_fallecidos_dep),
    "url": "pasco",
    "provincias": [
        _resumen_provincia("PASCO", "Pasco", poblacion_pasco_pasco),
        _resumen_provincia("DANIEL ALCIDES CARRION", "Daniel Alcides Carrion",
                           poblacion_pasco_daniel_alcide_carrion),
        _resumen_provincia("OXAPAMPA", "Oxapampa", poblacion_pasco_oxapampa),
    ],
}

# Emit the summary as a single JSON document on stdout (flushed so a
# parent process reading the pipe sees it immediately).
print(json.dumps(pasco))
sys.stdout.flush()
|
import argparse
import os

import numpy as np
# usage: python3 gender_projection.py '/Users/boyuliu/Dropbox (MIT)/nlp_project/data/topic_embeddings/trump_embeddings.npy'
# Default direction vector: PCA gender direction saved as a .npy file
# (local path; override with --vec on the command line).
gender_vector = '/Users/boyuliu/Dropbox (MIT)/nlp_project/data/gender_bias/trump_tweets_gender_pca_vector.npy'
def project_embed(embed, vec, onto=False):
    """Project row embeddings with respect to a direction vector.

    Args:
        embed: (n, d) array of embeddings, one per row.
        vec: (d,) direction vector. NOTE: the projection is not divided
            by ||vec||^2 here — presumably *vec* is unit-norm upstream;
            TODO confirm.
        onto: if True, return each row's component ALONG *vec*;
            otherwise return the embeddings with that component removed.

    Returns:
        (n, d) array: either the projections or the residuals.
    """
    assert vec.shape[0] == embed.shape[1]
    direction = vec.reshape(1, -1)
    # Row-wise dot product with the direction, kept as a column vector.
    coeffs = (embed * direction).sum(axis=1).reshape(-1, 1)
    component = np.matmul(coeffs, direction)
    if onto:
        return component
    return embed - component
def process_embedding(embed_file, vec, onto=False, vec_name='gender'):
    """Load embeddings, project them w.r.t. *vec*, and save the result.

    The output is written next to *embed_file* as
    ``<stem>_{onto|remove}_<vec_name>.npy``.

    Args:
        embed_file: path to a .npy file of shape (n, d).
        vec: (d,) direction vector passed to project_embed.
        onto: forwarded to project_embed (project onto vs. remove).
        vec_name: label used in the output file name.
    """
    # BUG FIX: the original used str.strip(embed_name) / strip('.npy') to
    # remove the directory and suffix; strip() removes a *character set*
    # from both ends, corrupting names that start or end with those
    # characters (e.g. 'npy_file.npy' -> '_file').
    embed_folder, embed_name = os.path.split(embed_file)
    stem, _ = os.path.splitext(embed_name)
    embeddings = np.load(embed_file)
    transformed_embedding = project_embed(embeddings, vec, onto=onto)
    postfix = '_%s_%s.npy' % ('onto' if onto else 'remove', vec_name)
    np.save(os.path.join(embed_folder, stem + postfix), transformed_embedding)
# Command-line driver: load the direction vector and transform one
# embeddings file in place next to its source.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('embed_file',
                        help='path to topic_embedding file')
    parser.add_argument('--vec', default=gender_vector, type=str,
                        help='vector representing dimension you are trying to remove or project onto')
    parser.add_argument('--onto', action='store_true',
                        help='whether to project onto the direction or remove that direction')
    parser.add_argument('--topic', default='gender', type=str,
                        help='topic to decide new embeddings file name')
    args = parser.parse_args()
    # The direction vector is stored on disk as a .npy array.
    vector = np.load(args.vec)
    # trump_folder = '/Users/boyuliu/Dropbox (MIT)/nlp_project/data/extractions/trump/str_match'
    # embed_file = '/20200101_from_string_match_embeddings.npy'
    # embeddings = np.load(trump_folder+embed_file)
    # trump_folder = '/Users/boyuliu/Dropbox (MIT)/nlp_project/data/topic_embeddings/'
    # embed_file = '/trump_embeddings.npy'
    # embed_file = '/Users/boyuliu/Dropbox (MIT)/nlp_project/data/topic_embeddings/trump_embeddings.npy'
    process_embedding(args.embed_file, vector, onto=args.onto, vec_name=args.topic)
|
#!/usr/bin/env python
import rospy
import numpy as np
from detection_filter import Filter, Hypothesis
from linemod_detector.msg import NamedPoint
class SampleDetectionFilter(object):
    """ROS node logic: filter raw sample detections and republish them.

    Subscribes to 'raw_points' (NamedPoint); detections whose name matches
    the configured sample are fed into a Hypothesis-based Filter, and
    accepted positions are republished on 'filtered_points' in the
    configured filter frame.
    """
    def __init__(self):
        # Sequence number of the message stream currently being processed;
        # used to detect when a new frame starts so the filter can decay.
        self.current_sequence = None
        # node parameters
        self.sample_name = rospy.get_param("~sample_name", "pre_cached")
        self.filter_tolerance = rospy.get_param("~filter_tolerance", 0.1)
        self.filter_alpha = rospy.get_param("~filter_alpha", 0.25)
        self.filter_unsupported_step = rospy.get_param("~filter_unsupported_step", 0.5)
        self.filter_threshold = rospy.get_param("~filter_threshold", 5)
        self.filter_frame = rospy.get_param("~filter_frame", "odom")
        self.filter = Filter(
            Hypothesis(tolerance=self.filter_tolerance,
                       alpha=self.filter_alpha,
                       unsupported_step=self.filter_unsupported_step),
            self.filter_frame)
        rospy.Subscriber('raw_points', NamedPoint, queue_size=1,
                         callback=self.handle_point)
        # NOTE(review): rospy.Publisher without queue_size is deprecated on
        # modern ROS and emits a warning; consider queue_size=1 — confirm
        # desired buffering before changing.
        self.publisher = rospy.Publisher('filtered_points', NamedPoint)
    def handle_point(self, msg):
        """Subscriber callback: update the filter and publish on acceptance."""
        if msg.name == self.sample_name:
            if self.current_sequence is not None:
                rospy.logdebug("%d %d", self.current_sequence, msg.header.seq)
            # A changed sequence number means a new frame: decay old support.
            if msg.header.seq != self.current_sequence:
                self.current_sequence = msg.header.seq
                self.filter.decay()
            # update() returns a hypothesis only once it clears the threshold.
            e = self.filter.update(msg, self.filter_threshold)
            if e is not None:
                fp=NamedPoint()
                fp.header = msg.header
                # Re-stamp into the filter's frame (e.g. "odom").
                fp.header.frame_id = self.filter_frame
                fp.point.x = e.position[0]
                fp.point.y = e.position[1]
                fp.point.z = e.position[2]
                fp.name = self.sample_name
                rospy.logdebug("publish: %s", fp)
                self.publisher.publish(fp)
def start_node():
    """Initialise the ROS node and spin until shutdown."""
    rospy.init_node('sample_detection_filter', log_level=rospy.DEBUG)
    # NOTE: local name shadows the builtin 'filter'; harmless here but
    # worth renaming in a behavior-changing pass.
    filter = SampleDetectionFilter()
    rospy.spin()
if __name__=="__main__":
    start_node()
|
#!/usr/bin/env python3
import argparse
import binascii
from pyfiglet import Figlet
from getpass import getpass
import os
import pyudev
import subprocess
import sys
import xxtea
def intro():
    """Render the ASCII-art banner followed by the copyright line."""
    banner = Figlet(font='graffiti')
    print(banner.renderText("NoRKSEC"))
    print('usbWatchdog.py - (c) 2017 NoRKSEC - no rights reserved\n')
def cls():
    """Clear the terminal on both Windows ('cls') and POSIX ('clear')."""
    command = "cls" if os.name == "nt" else "clear"
    os.system(command)
def panicButton():
    """Wipe free RAM with sdmem, then power the machine off.

    Requires the secure-delete toolset (sdmem) and root privileges.
    """
    # BUG FIX: shell= expects a bool; the original passed the string
    # "TRUE", which only worked because any non-empty string is truthy.
    subprocess.call("sdmem -llf", shell=True)
    os.popen("shutdown -h now")
def encryptFile(fname, key):
    """Encrypt *fname* in place with XXTEA using *key*.

    Args:
        fname: path to the file to encrypt.
        key: 16-character secret key (UTF-8 encoded before use).
    """
    uKey = bytes(key, encoding='utf-8')
    # FIX: renamed the handle from 'input' (which shadowed the builtin);
    # the explicit close() was also redundant inside a 'with' block.
    with open(fname, 'r+b') as infile:
        plaintext = infile.read()
        ciphertext = xxtea.encrypt(plaintext, uKey)
        infile.seek(0)
        infile.write(ciphertext)
        infile.truncate()
def decryptFile(fname, key):
    """Decrypt *fname* in place with XXTEA using *key*.

    Args:
        fname: path to the file to decrypt.
        key: 16-character secret key (UTF-8 encoded before use).
    """
    uKey = bytes(key, encoding='utf-8')
    # FIX: renamed the handle from 'input' (which shadowed the builtin);
    # the explicit close() was also redundant inside a 'with' block.
    with open(fname, 'r+b') as infile:
        ciphertext = infile.read()
        plaintext = xxtea.decrypt(ciphertext, uKey)
        infile.seek(0)
        infile.write(plaintext)
        infile.truncate()
def passPrompt():
    """Prompt twice for a 16-character key and store it in the global userKey.

    Returns:
        True if the key is exactly 16 characters and both entries match;
        False otherwise (an explanatory message is printed).
    """
    global userKey
    userKey = getpass(" [*] Enter key: ")
    checkKey = getpass(" [*] Re-enter key: ")
    # FIX: removed the original 'while verifyKey == False' loop — it could
    # never iterate, since every branch returned on the first pass.
    if len(userKey) != 16:
        print(' [-] Key must be 16 characters.')
        return False
    if userKey != checkKey:
        print(' [-] Keys do not match.')
        return False
    return True
def watchdog(encFlag, nukeFlag):
    """Block until any USB device is added or removed, then react.

    Takes a snapshot of the USB devices present at startup and polls
    pyudev for changes. On any change: encrypt the configured file list
    (encFlag), delete it (nukeFlag), or do neither — then wipe RAM and
    shut the machine down via panicButton().

    Relies on module-level globals set by main(): encFile, nukeFile,
    userKey.

    Args:
        encFlag: encrypt the files listed in encFile before shutdown.
        nukeFlag: delete the files listed in nukeFile before shutdown.
    """
    print(' [+] Starting usbWatchdog...')
    context = pyudev.Context()
    # Snapshot of USB devices present at startup.
    initList = []
    for device in context.list_devices(subsystem='usb'):
        initList.append(device)
    checkSum = False
    # NOTE(review): tight polling loop with no sleep — burns a full CPU
    # core; consider a short delay per iteration.
    while (checkSum == False):
        checkList = []
        for device in context.list_devices(subsystem='usb'):
            checkList.append(device)
        # Device present now but not at startup -> something was plugged in.
        for i in checkList:
            if not i in initList:
                checkSum = True
            else:
                pass
        # Device present at startup but gone now -> something was removed.
        for j in initList:
            if not j in checkList:
                checkSum = True
            else:
                pass
        if (checkSum == True):
            print(' [+] Shit is going down, hang on...')
            if encFlag == True:
                # Encrypt every file named in the list, then wipe/shutdown.
                input_file = open(encFile)
                for i in input_file.readlines():
                    fileName = os.path.expanduser(i).strip('\n')
                    print(' [*] Attempting to encrypt file: ' + str(fileName))
                    if not os.path.isfile(fileName):
                        print(' [-] Error: file does not exist. Skipping...')
                    else:
                        encryptFile(fileName, userKey)
                        print(' [+] Successfully encrypted file.')
                print(' [+] Finished encrypting file list.')
                panicButton()
                os._exit(1)
            elif nukeFlag == True:
                # Delete every file named in the list, then wipe/shutdown.
                input_file = open(nukeFile)
                for i in input_file.readlines():
                    fileName = os.path.expanduser(i).strip('\n')
                    print(' [*] Attempting to nuke file: ' + str(fileName))
                    if not os.path.isfile(fileName):
                        print(' [-] Error: file does not exist. Skipping...')
                    else:
                        try:
                            os.remove(fileName)
                            print(' [+] ' + fileName + ' successfully removed.')
                        except:
                            print(' [-] Unable to remove file.')
                panicButton()
                os._exit(1)
            else:
                # No file actions configured: just wipe RAM and shut down.
                panicButton()
                os._exit(1)
def main():
    """Parse the command line and dispatch to decrypt / encrypt / nuke modes.

    With no arguments the watchdog runs with no file actions. -d decrypts a
    file list and exits (no watchdog); -e arms encryption before starting
    the watchdog; -n arms deletion before starting the watchdog. The three
    modes are mutually exclusive.
    """
    parser = argparse.ArgumentParser(prog='usbwatchdog.py', description='monitor your usb ports for activity and wipe ram/shutdown if anything is plugged in or removed.')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-d', '--decrypt', type=str, help='decrypt files from a list, requires directory and filename of list (e.g.: ./files.txt).')
    group.add_argument('-e', '--encrypt', type=str, help='encrypt files from a list when watchdog executes, requires directory and filename of list (e.g., ./files.txt) - will ask for encryption key and then start watchdog.')
    group.add_argument('-n', '--nuke', type=str, help='deletes files from a list, requires directory and filename of list (e.g., ./files.txt) - the nuclear option, for when you just want everything gone before shutdown.')
    args = parser.parse_args()
    # Shared state consumed by watchdog()/passPrompt() at trigger time.
    global encFlag, userKey, encFile, decFile, nukeFlag, nukeFile
    encFlag = False
    if (args.decrypt == None) and (args.encrypt == None) and (args.nuke == None):
        # No mode selected: plain watchdog (wipe + shutdown only).
        encFlag = False
        nukeFlag = False
        watchdog(encFlag, nukeFlag)
    elif not args.decrypt == None:
        # Decrypt mode: one-shot, does not start the watchdog.
        decFile = os.path.expanduser(args.decrypt)
        if not os.path.isfile(decFile):
            print(' [-] Error: File list to decrypt does not exist. Exiting...')
            os._exit(1)
        else:
            # Keep prompting until a 16-character key is supplied.
            verifyKey = False
            while verifyKey == False:
                userKey = getpass(" [*] Enter 16-character key: ")
                if (len(userKey) == 16):
                    verifyKey = True
                else:
                    print(' [-] Key must be 16 characters long.')
                    verifyKey = False
            # NOTE(review): input_file is never closed, and the 'break' on a
            # missing file aborts the whole list rather than skipping.
            input_file = open(decFile)
            for i in input_file.readlines():
                fileName = os.path.expanduser(i).strip('\n')
                print(' [*] Attempting to decrypt: ' + fileName)
                if not os.path.isfile(fileName):
                    print(' [-] File does not exist. Skipping...')
                    break
                else:
                    try:
                        decryptFile(fileName, userKey)
                        print(' [+] File successfully decrypted.')
                    except:
                        print(' [-] Error decrypting file. Is the key correct?')
    elif not args.encrypt == None:
        # Encrypt mode: establish a key, then arm the watchdog.
        encFile = os.path.expanduser(args.encrypt)
        if not os.path.isfile(encFile):
            print(' [-] File list to encrypt does not exist. Skipping encryption...')
            encFlag = False
            nukeFlag = False
            watchdog(encFlag, nukeFlag)
        else:
            print(' [*] Establishing key for file encryption. Key entered must be 16 characters long.')
            # Loop until passPrompt() reports a valid, confirmed key.
            isSame = False
            while isSame == False:
                isSame = passPrompt()
                if isSame == False:
                    print(' [-] Error with key entered.')
                else:
                    print(' [+] Key set.')
            encFlag = True
            nukeFlag = False
            watchdog(encFlag, nukeFlag)
    elif not args.nuke == None:
        # Nuke mode: arm file deletion, then start the watchdog.
        nukeFile = os.path.expanduser(args.nuke)
        if not os.path.isfile(nukeFile):
            print(' [-] File list to nuke does not exist. Disarming nuclear option...')
            encFlag = False
            nukeFlag = False
            watchdog(encFlag, nukeFlag)
        else:
            print(' [+] Nuclear option online - say Goodbye to Moscow.')
            encFlag = False
            nukeFlag = True
            watchdog(encFlag, nukeFlag)
# Script entry point: clear the screen, show the banner, then dispatch.
if __name__ == '__main__':
    cls()
    intro()
    main()
|
# coding: utf-8
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import concurrent.futures
import os
def datagen(filename, destination):
    """Write ~50 randomly augmented copies of one image into *destination*.

    Augmentations: small rotation/shift/shear/zoom, rescaling, and random
    horizontal flips; output files are JPEGs prefixed with the source
    file's base name.

    Args:
        filename: path to the source image.
        destination: directory that receives the augmented images.
    """
    # NOTE(review): rotation_range is in degrees, so 0.2 is nearly a
    # no-op — possibly 20 was intended. TODO confirm before changing.
    generator = ImageDataGenerator(
        rotation_range=0.2,
        width_shift_range=0.2,
        height_shift_range=0.2,
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest')
    img = load_img(filename)
    x = img_to_array(img)
    x = x.reshape((1,) + x.shape)  # flow() expects a batch dimension
    # BUG FIX: filename.split('/')[1] breaks for bare or nested paths;
    # os.path.basename handles every case. (Also stopped reusing the
    # function's own name for a local variable.)
    prefix = os.path.basename(filename)
    count = 0  # number of images generated so far
    # flow() is an infinite generator, so we must break out manually.
    for _ in generator.flow(x, batch_size=1,
                            save_to_dir=destination, save_prefix=prefix,
                            save_format='jpg'):
        count += 1
        if count > 50:
            break
# Source images live directly in this directory.
photoDir = 'female_faces/'
photoList = os.listdir(photoDir)
# Augmented output is written back into the same directory.
direction = 'female_faces/'
# Fan out one augmentation job per photo across a thread pool.
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
    for photo in photoList:
        try:
            filename = photoDir+photo
            # NOTE(review): submit() does not surface the job's own
            # exceptions here; this try/except only catches submission
            # errors. Inspect the returned Future to see job failures.
            executor.submit(datagen, filename, direction)
        except Exception as exc:
            print(exc)
|
from __future__ import print_function
import datetime
import hashlib
import json
import os
import sqlite3
import time
import uuid
from functools import wraps
import numpy as np
import optuna
import pandas as pd
import torch
import yaml
from box import Box
from clearml import Task
from ludos.models import common
from ludos.utils import dictionary, orm, s3
# Workspace root; must be set in the environment (KeyError otherwise).
ROOT_DIR = os.environ['ROOT_DIR']
def spin_on_error(function):
    """Decorator for long-running training entry points.

    Runs *function*; on any exception it prints the error and then sleeps
    (effectively forever) instead of letting the process die, so the
    surrounding job/container stays alive for debugging.

    Returns:
        The decorated function. On success the wrapper returns the
        wrapped function's result; on error it never returns.
    """
    @wraps(function)
    def wrapper(*args, **kwargs):
        print('*' * 50)
        print('TrainingLand')
        print('*' * 50)
        try:
            # BUG FIX: the original discarded the wrapped function's
            # return value; pass it through.
            return function(*args, **kwargs)
        except Exception as e:
            print(e)
            print('sleeping now')
            time.sleep(10000000)
    # BUG FIX: the original never returned the wrapper, so any function
    # decorated with @spin_on_error silently became None.
    return wrapper
def retrieve_summary(
    model_task='.*',
    model_name='.*',
    dataset_name='.*',
    expname='.*',
    score_name='.*',
    split='.*',
    model_id=None):
    """Retrieve previously computed modelzoo summary rows as a DataFrame.

    All string arguments are regex patterns matched with the database's
    ``~`` operator; the default '.*' matches everything. *model_id*, when
    given, is matched exactly.

    Returns:
        pd.DataFrame with one row per matching modelzoo record.
    """
    with orm.session_scope() as sess:
        query = sess.query(orm.Modelzoo).\
            filter(orm.Modelzoo.model_name.op('~')(model_name)).\
            filter(orm.Modelzoo.dataset_name.op('~')(dataset_name)).\
            filter(orm.Modelzoo.model_task.op('~')(model_task)).\
            filter(orm.Modelzoo.score_name.op('~')(score_name)).\
            filter(orm.Modelzoo.split.op('~')(split)).\
            filter(orm.Modelzoo.expname.op('~')(expname))
        if model_id is not None:
            query = query.filter(orm.Modelzoo.model_id == model_id)
        records = query.all()
        entries = []
        for record in records:
            # Copy the ORM row into a plain mapping and drop the
            # SQLAlchemy-internal bookkeeping attribute.
            record = Box(record.__dict__)
            record.pop('_sa_instance_state')
            entries.append(record)
        return pd.DataFrame(entries)
class Experiment(object):
    """Experiment manager - given a task and a model, the next step
    is usually to experiment with the parameters to optimize some
    kind of metrics.

    The class helps you manage your experiment process by creating a
    new experiment ID for each new trial (a new set of parameters).
    We use task/trial interchangeably in the following :).

    Args:
        model_task (str): Task of the model
        model_name (str): Name of the model
        config_name (str): Name of the config
    """
    def __init__(self, model_task: str, model_name: str, config_name: str):
        self.model_task = model_task
        self.model_name = model_name
        self.config_name = config_name
        # Shared artefact storage for all experiments.
        self.bucket = s3.S3Bucket(bucket_name='s3ludos')
        self.model_folder = os.path.join(
            ROOT_DIR, "models", self.model_task, self.model_name)

    def get_model_id(self, expname):
        """Return a unique, deterministic ID for (task, model, expname).

        The ID is the SHA-1 of the JSON-serialised identifying fields, so
        the same triple always maps to the same model_id.
        """
        entry = dict(
            model_task=self.model_task,
            model_name=self.model_name,
            expname=expname)
        return hashlib.sha1(json.dumps(entry).encode()).hexdigest()

    def next_trial_name(self):
        """Return the next free trial name, '<config_name>t<N+1>'."""
        with orm.session_scope() as sess:
            results = sess.query(orm.Modelzoo).filter(
                orm.Modelzoo.model_task == self.model_task).filter(
                    orm.Modelzoo.model_name == self.model_name).filter(
                        orm.Modelzoo.expname.op('~')(
                            "{}.*".format(self.config_name))).all()
            expnames = [r.expname for r in results]
            if not expnames:
                idx = 0
            else:
                # Trial names look like '<config>tN'; take the largest N.
                idx = max([int(expname.split('t')[-1]) for expname in expnames])
            expname = "{}t{}".format(self.config_name, idx + 1)
            return expname

    def log_task(self, expname: str):
        """Insert a 'started' record for this trial into the modelzoo."""
        with orm.session_scope() as sess:
            entry = dict(
                model_task=self.model_task,
                model_name=self.model_name,
                expname=expname)
            model_id = self.get_model_id(expname)
            entry = orm.Modelzoo(
                model_id=model_id,
                status="started",
                created_on=datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
                **entry)
            sess.add(entry)

    def update_task(self, expname, **kwargs):
        """Update the modelzoo record for this trial with **kwargs."""
        with orm.session_scope() as sess:
            # CONSISTENCY FIX: the original re-derived the SHA-1 inline;
            # reuse get_model_id so the ID scheme lives in one place.
            model_id = self.get_model_id(expname)
            entry = sess.query(
                orm.Modelzoo).filter(orm.Modelzoo.model_id == model_id).all()[0]
            for key, value in kwargs.items():
                setattr(entry, key, value)

    def start(self, expname: str):
        """Mark the trial as started."""
        self.log_task(expname)

    def end(self, expname: str, status: str):
        """Mark the trial as finished with *status*."""
        self.update_task(expname, status=status)
class LightningExperiment(Experiment):
    """Experiment taking advantage of trains (from allegro.ai).
    The trains client Task will log everything for you : ).

    Examples:
        .. code-block:: python

            # Get your config
            cfg = get_config()
            task = LightningExperiment(model_task="instance_segmentation",
                                       model_name="fancy_detectron2",
                                       config_name=config_name)
            # Get next trial name for this exp -- config_nametX
            expname = task.next_trial_name()
            # Get a Trains task
            logger = task.start(expname)
            # Connect the config - should be a dict
            logger.connect(cfg)
            # Train your model and get your results as a dict
            ...
            # report back
            task.upload_checkpoints(get_checkpoint_file(),
                                    expname,
                                    cfg=trainer.cfg_to_dict(tr.cfg),
                                    meta_data=meta_data,
                                    train_meta=train_meta,
                                    **results)
    """
    def start(self, expname: str):
        """Log the trial to the modelzoo and return a fresh ClearML Task."""
        self.log_task(expname)
        task = Task.init(
            project_name=self.model_task,
            task_name="{}/{}".format(self.model_name, expname),
            reuse_last_task_id=False)
        return task

    def upload_checkpoints(self, checkpoint_path, expname, **kwargs):
        """Embed **kwargs into the checkpoint's 'extra' key and upload to S3.

        Raises:
            ValueError: if *checkpoint_path* is empty.
        """
        if checkpoint_path == "":
            raise ValueError('Could not retrieve checkpoints')
        # add some stuff in there
        data = torch.load(checkpoint_path)
        data['extra'] = kwargs
        torch.save(data, checkpoint_path)
        # upload to s3
        key = 'models/{}/{}/{}_weights.pth'.format(
            self.model_task, self.model_name, expname)
        print('Uploading {} to s3'.format(key))
        self.bucket.upload_from_file(checkpoint_path, key, overwrite=True)

    def end(
        self,
        expname: str,
        dataset_name: str,
        score_name: str,
        score,
        split: str = "validation",
        status: str = "success",
        maintainer: str = "clement"):
        """Record the trial's final score/status in the modelzoo."""
        self.update_task(
            expname,
            dataset_name=dataset_name,
            score_name=score_name,
            score=score,
            status=status,
            split=split,
            maintainer=maintainer)
class Optuna(object):
    """Optuna based hyperparameters explorer.
    The task of finding a good set of hyperparameters can be
    tiring... or you can use Optuna :).

    Args:
        model_task (str): Task of the model
        model_name (str): Name of the model
        exploration_name (str): Name of the exploration
        training_method (str): Training method
        direction (str): Pass to optuna to decide what is the good
            direction to look for

    Note:
        The exploration should exist in
        training_config/models/model_task/model_name/explorations/exploration_name.yaml
        It should have three sections:
        1. common: defined the arguments ultimately passed to the train method
        2. base_cfg: defined the base config (which overwrite the config.py) common to all
        3. parameter_space: defined the parameter space itself

    Example:
        .. code-block::

            parameter_space:
                g0:
                    method: 'suggest_loguniform'
                    name: 'solver.default_lr'
                    values: [0.00001, 0.01]
                g1:
                    method: 'suggest_categorical'
                    name: 'loss.loss_prob.params.weight'
                    values: [[[1.0, 1.0], [0.49,0.51]]]
                g2:
                    method: 'suggest_categorical'
                    name: 'model.arch_details.head.lin_ftrs'
                    values: [[[512,1024,2048], [4096, 4096], [512,256], [256,256], [1024, 1024]]]
    """
    def __init__(
        self,
        model_task,
        model_name,
        exploration_name,
        training_method,
        direction='minimize',
        reset=False):
        """Create (or resume) an Optuna study backed by a local sqlite DB.

        Args:
            model_task: task of the model (selects the config folder).
            model_name: name of the model (selects the config folder).
            exploration_name: YAML exploration spec to load.
            training_method: callable invoked as training_method(cfg_name, **common).
            direction: 'minimize' or 'maximize', forwarded to Optuna.
            reset: if True, delete any existing study database first.
        """
        self.config_folder = os.path.join(
            common.CONFIG_FOLDER, 'models', model_task, model_name)
        self.exploration_name = exploration_name
        path = os.path.join(
            self.config_folder, 'explorations',
            '{}.yaml'.format(exploration_name))
        self.exploration = Box.from_yaml(open(path, 'r'))
        optuna_folder = os.path.join(
            self.config_folder, 'explorations', 'optuna')
        if not os.path.isdir(optuna_folder):
            os.makedirs(optuna_folder)
        # One sqlite DB per exploration so studies are resumable.
        db_path = '{}/{}.db'.format(optuna_folder, self.exploration_name)
        if reset and os.path.isfile(db_path):
            os.remove(db_path)
        self.study = optuna.create_study(
            study_name=self.exploration_name,
            storage='sqlite:///{}'.format(db_path),
            load_if_exists=True,
            direction=direction)
        self.training_method = training_method

    def suggest_params(self, trial):
        """Draw one parameter set from the configured space via *trial*."""
        params = dict()
        parameter_space = self.exploration.parameter_space.to_dict()
        for key, param in parameter_space.items():
            v = param['values']
            n = param['name']
            if param['method'] == 'suggest_categorical':
                # suggest_categorical takes the whole choice list as a
                # single positional argument, so re-wrap it before the
                # *v unpacking below.
                v = [param['values']]
            params[param['name']] = getattr(trial, param['method'])(n, *v)
        return params

    def optimize(self, trial):
        """Optuna objective: materialise a config for this trial and train.

        Writes a uuid-suffixed YAML config merging base_cfg with the
        trial's suggested parameters, then runs the training method on it.
        """
        base_cfg = self.exploration.base_cfg.to_dict()
        base = dictionary.flatten(base_cfg, sep='.')
        params = self.suggest_params(trial)
        base.update(params)
        new_cfg = dictionary.unflatten(base, sep='.')
        cfg_name = '{}_{}'.format(self.exploration_name, uuid.uuid4().hex)
        with open(os.path.join(self.config_folder, '{}.yaml'.format(cfg_name)),
                  'w+') as f:
            yaml.dump(new_cfg, f)
        return self.training_method(cfg_name, **self.exploration.common)

    def run(self, n_trials=1):
        """Run *n_trials* optimisation trials against the study."""
        self.study.optimize(self.optimize, n_trials=n_trials)
class Bender(object):
    """Random exploration for hyperparameters

    Args:
        model_task (str): Task for the model
        model_name (str): Name of the model
        exploration_name (str): Name of the exploration

    Note:
        The exploration should exist in
        training_config/models/model_task/model_name/explorations/exploration_name.yaml
        It should have three sections:
        1. common: defined the arguments ultimately passed to the train method
        2. base_cfg: defined the base config (which overwrite the config.py) common to all
        3. parameter_space: defined the parameter space itself

    Example:
        Each param can be off three types [uniform, params, constant]

        .. code-block::

            parameter_space:
                g0:
                    type: 'uniform'
                    name: 'input.radius'
                    values:
                        low: 0.08
                        high: 0.15
                g1:
                    name: 'input.voxelize'
                    type: 'params'
                    values: [0.001, 0.005, 0.01]
                g2:
                    type: 'uniform'
                    name: 'solver.default_lr'
                    values:
                        low: 0.00006
                        high: 0.0005
    """
    def __init__(self, model_task, model_name, exploration_name):
        """Load the exploration spec for (model_task, model_name)."""
        self.config_folder = os.path.join(
            common.CONFIG_FOLDER, 'models', model_task, model_name)
        self.exploration_name = exploration_name
        path = os.path.join(
            self.config_folder, 'explorations',
            '{}.yaml'.format(exploration_name))
        self.exploration = Box.from_yaml(open(path, 'r'))

    def _suggest_params(self):
        """Randomly draw one value per entry of the parameter space."""
        params = dict()
        parameter_space = self.exploration.parameter_space.to_dict()
        for key, param in parameter_space.items():
            if param['type'] == 'params':
                # Optional 'probs' weights the discrete choices.
                probs = param.get('probs', None)
                values = param['values']
                # Choose an index and map back to the value:
                # np.random.choice cannot sample directly from a list of
                # non-scalar values (e.g. list-of-lists).
                p = dict(zip(range(len(values)), values))
                value = p[np.random.choice(list(p.keys()), p=probs).item()]
                params[param['name']] = value
            elif param['type'] == 'uniform':
                values = param['values']
                params[param['name']] = np.random.uniform(**values)
            elif param['type'] == 'constant':
                # NOTE(review): constants read 'value' (singular) while the
                # other types read 'values' — confirm the spec schema.
                params[param['name']] = param['value']
            else:
                raise RuntimeError('Wrong parameter type')
        return params

    def suggest(self):
        """Suggest a new combination of parameters

        Writes a uuid-suffixed YAML config merging base_cfg with the
        randomly drawn parameters.

        Returns:
            cfg_name (name): Name of the autogenerated config
                with the new parameters
        """
        base_cfg = self.exploration.base_cfg.to_dict()
        base = dictionary.flatten(base_cfg, sep='.')
        params = self._suggest_params()
        base.update(params)
        new_cfg = dictionary.unflatten(base, sep='.')
        cfg_name = '{}_{}'.format(self.exploration_name, uuid.uuid4().hex)
        with open(os.path.join(self.config_folder, '{}.yaml'.format(cfg_name)),
                  'w+') as f:
            yaml.dump(new_cfg, f)
        return cfg_name
|
# Generated by Django 3.0.6 on 2020-06-10 18:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: creates the Promotions table in the
    # Home app.

    dependencies = [
        # Must be applied after the previous auto-generated Home migration.
        ('Home', '0008_auto_20200610_1323'),
    ]

    operations = [
        migrations.CreateModel(
            name='Promotions',
            fields=[
                # NOTE(review): 'promoion_id' is a misspelling of
                # 'promotion_id', but it is the persisted column name —
                # renaming requires a follow-up migration, not an edit here.
                ('promoion_id', models.AutoField(primary_key=True, serialize=False)),
                ('promotion_title', models.CharField(max_length=100)),
                ('promotion_name', models.CharField(max_length=100)),
                ('promotion_desc', models.TextField()),
                # Discount percentage/amount of the promotion ("up to N").
                ('promotion_up_to', models.IntegerField()),
                # NOTE(review): 'promtion_cover1' is also misspelled; same
                # caveat as above. upload_to='' stores files at MEDIA_ROOT.
                ('promtion_cover1', models.ImageField(upload_to='')),
                # NOTE(review): despite its name ('catagory' sic), this FK
                # targets Home.Product — confirm against the app's models.
                ('promotion_catagory', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Home.Product')),
                ('promotion_item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Home.Item')),
            ],
        ),
    ]
|
'''
Crie uma lista, inicialmente vazia, para armazenar uma listagem de nomes.
Criar uma função para inserir um nome na lista
Criar uma função que recebe como parâmetro a lista e uma posição (índice) dessa lista e retornar o nome que está nessa posição.
Essa função deve gerar e tratar uma exceção do tipo IndexError caso o índice não exista na lista.
'''
def inserir_nome(nome, lista=None):
    """Append *nome* to a list of names and return that list.

    Args:
        nome (str): Name to insert.
        lista (list, optional): List to append to. When omitted, the
            module-level ``nomes`` list is used, preserving the original
            ``inserir_nome(nome)`` call style. Passing the list explicitly
            also supports the exercise's intended two-argument usage.

    Returns:
        list: The list the name was appended to.
    """
    if lista is None:
        # Backward compatible: the original always mutated the global list.
        lista = nomes
    lista.append(nome)
    return lista
def posicao_lista(nomes, posicao):
    """Return the element of *nomes* stored at index *posicao*.

    As required by the exercise, an out-of-range index is handled via an
    IndexError: a warning is printed and None is returned.
    """
    try:
        resultado = nomes[posicao]
    except IndexError:
        print('Índice inexistente')
        return None
    return resultado
# Interactive driver for the exercise: read a name and an index up front,
# insert the name into the shared list, then look up the requested position.
nomes = list()
nome = input('Nome: ')
posicao = int(input('Posição: '))
inserir_nome(nome)
print(nomes)
posicao_lista(nomes, posicao)
'''
#Solução do professor:
inserir_nome(nomes, 'João')
inserir_nome(nomes, 'Maria')
print(posicao_lista(nomes, 0))
print(posicao_lista(nomes, 5))
''' |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import itemgetter
import time
from openerp import SUPERUSER_ID
from openerp import pooler, tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round
import openerp.addons.decimal_precision as dp
_logger = logging.getLogger(__name__)
class pos_order(osv.osv):
    # Extends pos.order with a flat money discount ('discount_money') and a
    # recomputation of subtotal/tax/total using a hard-coded 19% VAT rate
    # (Chilean IVA, presumably — confirm with the module's intent).
    _inherit = "pos.order"

    def _amount_base(self, cr, uid, ids, name, args, context=None):
        """Function-field computer for the discount/base/total columns.

        NOTE(review): this method calls self.write() with the computed
        values (see loop below), which is unusual inside a function-field
        computation and can retrigger recomputation — confirm intended.
        """
        # Rounding configuration lives in the single-row
        # pos.order.config.settings table (see its create() below).
        config_obj=self.pool.get('pos.order.config.settings')
        config_id=config_obj.search(cr, uid, [('id', '>', 0)])
        config=config_obj.browse(cr, uid, config_id[0])

        def amount_all(cr, uid, ids, name, args, context=None):
            # Local re-implementation of the standard pos.order amount
            # computation: per order, sum payments/returns and derive the
            # tax as (tax-included subtotal) - (tax-excluded subtotal).
            tax_obj = self.pool.get('account.tax')
            cur_obj = self.pool.get('res.currency')
            res = {}
            for order in self.browse(cr, uid, ids, context=context):
                res[order.id] = {
                    'amount_paid': 0.0,
                    'amount_return':0.0,
                    'amount_tax':0.0,
                }
                val1 = val2 = 0.0
                cur = order.pricelist_id.currency_id
                for payment in order.statement_ids:
                    res[order.id]['amount_paid'] += payment.amount
                    # Negative payments are money returned to the customer.
                    res[order.id]['amount_return'] += (payment.amount < 0 and payment.amount or 0)
                for line in order.lines:
                    # val1 tax-included, val2 tax-excluded.
                    val1 += line.price_subtotal_incl
                    val2 += line.price_subtotal
                res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val1 - val2)
                res[order.id]['amount_total'] = cur_obj.round(cr, uid, cur, val1)
            return res

        #res = super(pos_order, self)._amount_all(cr, uid, ids, name, args, context=context)
        res = amount_all(cr, uid, ids, name, args, context=context)
        for id in ids:
            # Base = total minus tax, rounded to whole currency units.
            res[id]['amount_base']=round((res[id]['amount_total']-res[id]['amount_tax']),0)
            screen=self.browse(cr,uid,id)
            # A discount larger than the base would make the order negative.
            if screen.discount_money > res[id]['amount_base']:
                raise osv.except_osv(('Error!'), ('No se puede asignar un descuento mayor al valor base'))
            res[id]['discount_money']=screen.discount_money
            res[id]['sub_total_discount']=round((res[id]['amount_base']-screen.discount_money),0)
            # Recompute 19% VAT on the discounted subtotal.
            res[id]['amount_tax']=round((res[id]['sub_total_discount']*0.19),0)
            res[id]['amount_total']=round((res[id]['amount_tax']+res[id]['sub_total_discount']),0)
            if config.round_method == 'line' and screen.discount_money < 1:
                # Per-line rounding mode (no discount): recompute the tax by
                # rounding each line's 19% share individually.
                new_tax=0
                line_obj=self.pool.get('pos.order.line')
                for l in line_obj.search(cr, uid, [('order_id', '=', id)]):
                    line=line_obj.browse(cr, uid, l)
                    new_tax+=round((line.qty*line.price_unit*((100-line.discount)/100)*0.19),0)
                res[id]['amount_tax']=new_tax
                res[id]['amount_total']=new_tax+res[id]['sub_total_discount']
            # NOTE(review): persists computed values from inside the compute.
            self.write(cr, uid, id, res[id])
        return res

    _columns = {
        'discount_money' : fields.integer('Descuento', states={'draft': [('readonly', False)]},multi='all'),
        'sub_total_discount' : fields.function(_amount_base, digits_compute=dp.get_precision('Point Of Sale'), string='Subtotal', multi='all'),
        'amount_base' : fields.function(_amount_base, digits_compute=dp.get_precision('Point Of Sale'), string='Base', multi='all'),
        'amount_total': fields.function(_amount_base, string='Total', digits_compute=dp.get_precision('Point Of Sale'),multi='all'),
        #'amount_tax': fields.function(_amount_base, string='Taxes', digits_compute=dp.get_precision('Point Of Sale'), multi='all'),
        #'amount_paid': fields.function(_amount_base, string='Paid', states={'draft': [('readonly', False)]}, readonly=True, digits_compute=dp.get_precision('Point Of Sale'), multi='all'),
        #'amount_return': fields.function(_amount_base, 'Returned', digits_compute=dp.get_precision('Point Of Sale'), multi='all'),
    }

    def actualizar(self, cr, uid, ids, context=None):
        """Persist the discount passed in context['discount_money'].

        NOTE(review): the SQL below is built with %-interpolation. The
        interpolated values are an int and a record id here, but this
        should use cr.execute's parameter binding to be injection-safe.
        """
        base=discount=sub_total=tax=total=0.0
        screen=self.browse(cr,uid,ids[0])
        base=screen.amount_base
        discount=context['discount_money']
        # NOTE(review): sub_total/tax/total are computed but never used —
        # only the discount is stored; totals are refreshed by _amount_base.
        sub_total=base-discount
        tax=sub_total*0.19
        total=sub_total+tax
        cr.execute("""update pos_order set
discount_money = %d where id=%d"""%(context['discount_money'],ids[0]))
        return True

    def test_paid(self, cr, uid, ids, context=None):
        """A Point of Sale order is paid when the payments cover the
        recomputed (discount-aware) total.
        @return: True
        """
        for order in self.browse(cr, uid, ids, context=context):
            # A zero-total order with lines counts as paid (free order).
            if order.lines and not order.amount_total:
                return True
            #if (not order.lines) or (not order.statement_ids) or (abs(order.amount_total - order.amount_paid) > 0.00001):
            if (not order.lines) or (not order.statement_ids):
                return False
            # Recompute totals before comparing against the amount paid.
            vals=self._amount_base(cr, uid, ids, [], [], context)
            if abs(vals[ids[0]]['amount_total'] - order.amount_paid) > 0.00001:
                return False
        return True

pos_order()
class pos_order_config_settings(osv.osv):
    """Singleton configuration table selecting the POS rounding method."""
    _name = "pos.order.config.settings"

    _columns = {
        'name': fields.char('Nombre'),
        'round_method': fields.selection(
            [('global','Redondeo Global'), ('line','Redondeo por linea')],
            'Metodo de redondeo', required=True),
    }

    _defaults = {
        'name':'Metodo de redondeo',
    }

    def create(self, cr, uid, args, context=None):
        # Enforce a single configuration row: wipe any previous rows before
        # delegating to the standard ORM create().
        cr.execute('delete from pos_order_config_settings')
        return super(pos_order_config_settings, self).create(
            cr, uid, args, context=context)

pos_order_config_settings()
#!/usr/bin/python3
# vim:fileencoding=utf-8:ts=2:sw=2:expandtab
# Launch one or more EC2 instances of the DocStruct jobs processor.

# Setup the path so the DocStruct package (in ../Python) is importable.
import os, os.path, sys; sys.path.insert(1, os.path.abspath(sys.path[0] + "/../Python"))

import json
from base64 import b64encode

try:
  from DocStruct import Setup
  from DocStruct.Config import EnvironmentConfig
except ImportError:
  print()
  print("Seems like your environment is not setup up correctly.")
  print("Please make sure DocStruct.Setup is importable before running this script.")
  print()
  # BUG FIX: exit with a non-zero status (was sys.exit(0)) so shells and
  # callers can detect the failure.
  sys.exit(1)

import argparse

parser = argparse.ArgumentParser(description="Launch an instance of the DocStruct jobs processor.")
# Path to the CSV file holding the AWS credentials.
parser.add_argument(
  "credsfilepath", type=lambda s: os.path.abspath(s),
  help="Path to the CSV file to use for credentials to access AWS"
  )
# Environment within which to launch.
parser.add_argument(
  "environment_id", type=str,
  help="ID of the environment within which we are going to launch the instance"
  )
# AMI to launch an instance of.
parser.add_argument(
  "ami", type=str,
  help="ID of the AMI we want to launch an instance of"
  )
# How many instances to start.
parser.add_argument(
  "--num-instances", metavar="num_instances", type=int, nargs="?", default=1,
  help="Number of instances to launch"
  )
# Parse arguments
args = parser.parse_args()

# Validate the credentials file.
# BUG FIX: plain checks instead of assert — asserts are stripped when Python
# runs with -O, which would silently skip all validation.
if not os.path.exists(args.credsfilepath):
  print("Could not find credential file at %s. Please make sure the file actually exists before continuing..." % args.credsfilepath)
  sys.exit(1)

# Validate that the environment exists (its user ARN must be configured).
envconf = EnvironmentConfig(CredsFilePath=args.credsfilepath, EnvironmentID=args.environment_id)
if not envconf.User_Arn:
  print("Could not find environment named {0}. Please make sure the environment exists before calling this script.".format(args.environment_id))
  sys.exit(1)

# Supply this config to the LaunchInstance call
instances = Setup.LaunchInstances(
  AMI=args.ami,
  EnvironmentConfig=envconf,
  NumInstances=args.num_instances,
  )

# Print out new instance information
for instance in instances:
  print("Launched instance: {0}".format(instance['InstanceId']))
def fevereiro_bissexto(ano):
    """Return True when February of *ano* has 29 days (leap year).

    BUG FIX: the original condition was
    ``ano % 4 == 0 or 100 != 0 and 400 == 0``, which tested the literals
    100 and 400 instead of ``ano % 100`` / ``ano % 400``, so every year
    divisible by 4 (including 1900) was reported as a leap year. The
    Gregorian rule is: divisible by 4, and either not divisible by 100 or
    divisible by 400.
    """
    return ano % 4 == 0 and (ano % 100 != 0 or ano % 400 == 0)


if __name__ == "__main__":
    print("questao 2")
    # The original assigned the (always-None) result of print() to 'mes';
    # the assignment served no purpose and was dropped.
    print("Mes de Fevereiro")
    ano = int(input("Digite o ano "))
    if fevereiro_bissexto(ano):
        print("fevereiro tem 29 dias", ano)
    else:
        print("fevereiro tem 28 dias", ano)
|
import sqlite3
def changePassword(user_id, new_password):
    """Set a new password for *user_id* in Database.db3.

    SECURITY FIX: the original built the UPDATE statement by string
    concatenation, allowing SQL injection through either argument. The
    query now uses sqlite3 parameter binding ('?' placeholders).

    Args:
        user_id (str): Key of the user row to update.
        new_password (str): Password value to store.
            NOTE(review): stored in plain text, as in the original —
            password hashing should be added separately.

    Returns:
        bool: Always True (matches the original contract).
    """
    conn = sqlite3.connect('Database.db3')
    try:
        conn.execute(
            "UPDATE User SET password = ? WHERE user_id = ?",
            (new_password, user_id),
        )
        conn.commit()
    finally:
        # Close even when the UPDATE raises (the original leaked the
        # connection on error).
        conn.close()
    return True
|
# ¿Acaso hubo buhos aca?
# Definir una función que detecte si
# una palabra es un palíndromo y devuelve True o False.
# Ejemplos:
# palindromo( "python" ) => False
# palindromo( "reconocer" ) => True
# palindromo( "Neuquén" ) => False
# ★★ Challenge: Modificar la función para
# ignorar espacios, signos de puntuación, y que
# no haga distinción entre mayúsculas y minúsculas
# (pueden usar str.lower). Sugerimos usar el nombre
# del desafío como un palindromo de ejemplo.
def palindromo(palabra):
    """Return True when *palabra* is a palindrome.

    Case-insensitive; spaces, punctuation and any other non-letter
    characters are ignored (only ``str.isalpha`` characters of the
    lowercased word are compared).
    """
    limpia = "".join(c for c in palabra.lower() if c.isalpha())
    # Pair each character with its mirror and require every pair to match.
    return all(a == b for a, b in zip(limpia, reversed(limpia)))
def palin(palabra):
    """Palindrome check via slicing: keep only the lowercase letters and
    compare the sequence with its own reverse."""
    letras = [c for c in palabra.lower() if c.isalpha()]
    return letras == letras[::-1]


print(palin("¿Acaso hubo buhos aca?"))
# Algorithm that counts how many digits the number read from the user has.


def basamak_sayisi(n):
    """Return the number of decimal digits of integer *n*.

    BUG FIX: the original loop never ran for n == 0 (reporting 0 digits —
    zero has one digit) and looped forever for negative inputs, because
    floor division pins negatives at -1 (e.g. -5 // 10 == -1 and
    -1 // 10 == -1). The sign is not counted as a digit.
    """
    n = abs(n)
    if n == 0:
        return 1
    basamak = 0
    while n != 0:
        n //= 10
        basamak += 1
    return basamak


if __name__ == "__main__":
    n = int(input("Bir sayı giriniz: "))
    print(basamak_sayisi(n))
    # Alternative: read the input as a string and use len() — but note that
    # would count a leading '-' sign as a digit.
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.