class Nodo:
    def __init__(self, name="", weight=0, type='', data=None, len=0):
        self.name = name
        self.data = data if data is not None else {}  # avoid sharing a mutable default dict
self.weight = weight
self.len = len
self.type = type
self.nodes = []
def addList(self, list):
for n in list:
self.nodes += [n]
def add(self, node, type_ = None):
node.type = type_
self.nodes += [node]
return node
    def get(self, enc_string):
        # walk the tree following the child indexes encoded in the string
        if len(enc_string) > 0:
            i = int(enc_string[0])
            if len(self.nodes) > i:
                return self.nodes[i].get(enc_string[1:])
        return self
def print(self, tab="", i=0, print_node=True, print_index=True, only_leafs = False):
text = tab
if self.type == 'root':
text += ' ROOT: \"' + self.name + '\" ' + str(self.weight)
else:
if print_node:
text += "node-"
if print_index:
text += str(i)+':'
if (not only_leafs) or self.type == 'leaf':
text += ' \"{}\" {}'.format(self.name, self.weight)
print(text)
for i in range(len(self.nodes)):
node = self.nodes[i]
node.print(tab+' ', i, print_node, print_index, only_leafs)
# root = Nodo("root")
# root.add(Nodo())
# n0 = root.add(Nodo())
# n1 = Nodo()
# n2 = Nodo()
# n0.add(n1)
# n0.add(n2)
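# A minimal usage sketch for get() (assuming the tree built above): the argument is a
# string of child indexes, so root.get("10") walks to child 1 of root (n0) and then to
# child 0 of n0, returning n1.
# assert root.get("10") is n1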
# root.print() |
# global imports
import os
import threading, queue
import multiprocessing as mp
import numpy as np
import tensorflow as tf
import time
import signal
# local imports
from centraltrainer.request_handler import RequestHandler
from centraltrainer.collector import Collector
from environment.environment import Environment
from utils.logger import config_logger
from utils.queue_ops import get_request, put_response
from utils.data_transf import arrangeStateStreamsInfo, getTrainingVariables, allUnique
from training import a3c
from training import load_trace
# ---------- Global Variables ----------
S_INFO = 6 # bandwidth_path_i, path_i_mean_RTT, path_i_retransmitted_packets + path_i_lost_packets
S_LEN = 8 # take how many frames in the past
A_DIM = 2 # two actions -> path 1 or path 2
ACTOR_LR_RATE = 0.0001
CRITIC_LR_RATE = 0.001
# TRAIN_SEQ_LEN = 100 # take as a train batch
TRAIN_SEQ_LEN = 100 # take as a train batch
MODEL_SAVE_INTERVAL = 64
PATHS = [1, 3] # correspond to path ids
DEFAULT_PATH = 1 # default path without agent
RANDOM_SEED = 42
RAND_RANGE = 1000000
GRADIENT_BATCH_SIZE = 8
SUMMARY_DIR = ''
LOG_FILE = ''
NN_MODEL = ''
EPOCH = 0
SSH_HOST = '192.168.122.157'
def environment(bdw_paths: mp.Array, stop_env: mp.Event, end_of_run: mp.Event):
rhostname = 'mininet' + '@' + SSH_HOST
config = {
'server': 'ipc:///tmp/zmq',
'client': 'tcp://*:5555',
'publisher': 'tcp://*:5556',
'subscriber': 'ipc:///tmp/pubsub'
}
logger = config_logger('environment', filepath='./logs/environment.log')
env = Environment(bdw_paths, logger=logger, mconfig=config, remoteHostname=rhostname)
    # Let's measure how long each environment run takes
while not stop_env.is_set():
# Only the agent can unblock this loop, after a training-batch has been completed
while not end_of_run.is_set():
try:
# update environment config from session
if env.updateEnvironment() == -1:
stop_env.set()
end_of_run.set()
break
# run a single session & measure
#-------------------
now = time.time()
env.run()
end = time.time()
#-------------------
diff = int (end - now)
logger.debug("Time to execute one run: {}s".format(diff))
end_of_run.set() # set the end of run so our agent knows
# env.spawn_middleware() # restart middleware
except Exception as ex:
logger.error(ex)
break
time.sleep(0.1)
env.close()
def agent():
np.random.seed(RANDOM_SEED)
# Create results path
if not os.path.exists(SUMMARY_DIR):
os.makedirs(SUMMARY_DIR)
# Spawn request handler
tqueue = queue.Queue(1)
rhandler = RequestHandler(1, "rhandler-thread", tqueue=tqueue, host=SSH_HOST, port='5555')
rhandler.start()
# Spawn collector thread
cqueue = queue.Queue(0)
collector = Collector(2, "collector-thread", queue=cqueue, host=SSH_HOST, port='5556')
collector.start()
# Spawn environment # process -- not a thread
bdw_paths = mp.Array('i', 2)
stop_env = mp.Event()
end_of_run = mp.Event()
env = mp.Process(target=environment, args=(bdw_paths, stop_env, end_of_run))
env.start()
# keep record of threads and processes
tp_list = [rhandler, collector, env]
# Main training loop
logger = config_logger('agent', './logs/agent.log')
logger.info("Run Agent until training stops...")
with tf.Session() as sess, open(LOG_FILE, 'w') as log_file:
actor = a3c.ActorNetwork(sess,
state_dim=[S_INFO, S_LEN], action_dim=A_DIM,
learning_rate=ACTOR_LR_RATE)
critic = a3c.CriticNetwork(sess,
state_dim=[S_INFO, S_LEN],
learning_rate=CRITIC_LR_RATE)
summary_ops, summary_vars = a3c.build_summaries()
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(SUMMARY_DIR, sess.graph) # training monitor
saver = tf.train.Saver() # save neural net parameters
# # restore neural net parameters
nn_model = NN_MODEL
        if nn_model:  # nn_model is the path to the checkpoint file; skip restore when empty
saver.restore(sess, nn_model)
print("Model restored.")
epoch = EPOCH
time_stamp = 0
path = DEFAULT_PATH
action_vec = np.zeros(A_DIM)
action_vec[path] = 1
s_batch = [np.zeros((S_INFO, S_LEN))]
a_batch = [action_vec]
r_batch = []
entropy_record = []
actor_gradient_batch = []
critic_gradient_batch = []
list_states = []
while not end_of_run.is_set():
# Get scheduling request from rhandler thread
request, ev1 = get_request(tqueue, logger, end_of_run=end_of_run)
# end of iterations -> exit loop -> save -> bb
if stop_env.is_set():
break
if request is None and end_of_run.is_set():
logger.info("END_OF_RUN => BATCH UPDATE")
# get all stream_info from collector's queue
stream_info = []
with cqueue.mutex:
for elem in list(cqueue.queue):
stream_info.append(elem)
# clear the queue
cqueue.queue.clear()
# Validate
# Proceed to next run
# logger.info("len(list_states) {} == len(stream_info) {}".format(len(list_states), len(stream_info)))
if len(list_states) != len(stream_info) or len(list_states) == 0:
entropy_record = []
del s_batch[:]
del a_batch[:]
del r_batch[:]
stream_info.clear()
list_states.clear()
end_of_run.clear()
time.sleep(0.01)
continue
# Re-order rewards
stream_info = arrangeStateStreamsInfo(list_states, stream_info)
list_ids = [stream['StreamID'] for stream in stream_info]
logger.info("all unique: {}".format(allUnique(list_ids, debug=True)))
# for i, stream in enumerate(stream_info):
# logger.info(stream)
# logger.info(list_states[i]) # print this on index based
# For each stream calculate a reward
completion_times = []
for index,stream in enumerate(stream_info):
path1_smoothed_RTT, path1_bandwidth, path1_packets, \
path1_retransmissions, path1_losses, \
path2_smoothed_RTT, path2_bandwidth, path2_packets, \
path2_retransmissions, path2_losses, \
= getTrainingVariables(list_states[index])
normalized_bwd_path0 = (bdw_paths[0] - 1.0) / (100.0 - 1.0)
normalized_bwd_path1 = (bdw_paths[1] - 1.0) / (100.0 - 1.0)
normalized_srtt_path0 = ((path1_smoothed_RTT * 1000.0) - 1.0) / (120.0)
normalized_srtt_path1 = ((path2_smoothed_RTT * 1000.0) - 1.0) / (120.0)
normalized_loss_path0 = ((path1_retransmissions + path1_losses) - 0.0) / 20.0
normalized_loss_path1 = ((path2_retransmissions + path2_losses) - 0.0) / 20.0
# aggr_bdw = normalized_bwd_path0 + normalized_bwd_path1
aggr_srtt = normalized_srtt_path0 + normalized_srtt_path1
aggr_loss = normalized_loss_path0 + normalized_loss_path1
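                    # Reward: normalized bandwidth of the chosen path (the one-hot action
                    # weights the two paths), minus stream completion time, aggregate
                    # smoothed RTT and aggregate retransmissions/losses.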
reward = (a_batch[index][0]* normalized_bwd_path0 + a_batch[index][1]*normalized_bwd_path1) - stream['CompletionTime'] - (0.8*aggr_srtt) - (1.0 * aggr_loss)
r_batch.append(reward)
completion_times.append(stream['CompletionTime'])
                # If s_batch holds one more entry than r_batch (the first state has no matching stream), pad r_batch with a leading 0
tmp_s_batch = np.stack(s_batch[:], axis=0)
tmp_r_batch = np.vstack(r_batch[:])
if tmp_s_batch.shape[0] > tmp_r_batch.shape[0]:
logger.debug("s_batch({}) > r_batch({})".format(tmp_s_batch.shape[0], tmp_r_batch.shape[0]))
logger.debug(tmp_s_batch[0])
r_batch.insert(0, 0)
# Save metrics for debugging
# log time_stamp, bit_rate, buffer_size, reward
for index, stream in enumerate(stream_info):
path1_smoothed_RTT, path1_bandwidth, path1_packets, \
path1_retransmissions, path1_losses, \
path2_smoothed_RTT, path2_bandwidth, path2_packets, \
path2_retransmissions, path2_losses, \
= getTrainingVariables(list_states[index])
log_file.write(str(time_stamp) + '\t' +
str(PATHS[path]) + '\t' +
str(bdw_paths[0]) + '\t' +
str(bdw_paths[1]) + '\t' +
str(path1_smoothed_RTT) + '\t' +
str(path2_smoothed_RTT) + '\t' +
str(path1_retransmissions+path1_losses) + '\t' +
str(path2_retransmissions+path2_losses) + '\t' +
str(stream['CompletionTime']) + '\t' +
str(stream['Path']) + '\n')
log_file.flush()
time_stamp += 1
# Single Training step
# ----------------------------------------------------------------------------------------------------
actor_gradient, critic_gradient, td_batch = \
                    a3c.compute_gradients(s_batch=np.stack(s_batch[1:], axis=0), # ignore the first chunk
a_batch=np.vstack(a_batch[1:]), # since we don't have the
r_batch=np.vstack(r_batch[1:]), # control over it
terminal=True, actor=actor, critic=critic)
td_loss = np.mean(td_batch)
actor_gradient_batch.append(actor_gradient)
critic_gradient_batch.append(critic_gradient)
logger.debug ("====")
logger.debug ("Epoch: {}".format(epoch))
msg = "TD_loss: {}, Avg_reward: {}, Avg_entropy: {}".format(td_loss, np.mean(r_batch[1:]), np.mean(entropy_record[1:]))
logger.debug (msg)
logger.debug ("====")
# ----------------------------------------------------------------------------------------------------
# Print summary for tensorflow
# ----------------------------------------------------------------------------------------------------
summary_str = sess.run(summary_ops, feed_dict={
summary_vars[0]: td_loss,
summary_vars[1]: np.mean(r_batch),
summary_vars[2]: np.mean(entropy_record),
summary_vars[3]: np.mean(completion_times)
})
writer.add_summary(summary_str, epoch)
writer.flush()
# ----------------------------------------------------------------------------------------------------
# Update gradients
if len(actor_gradient_batch) >= GRADIENT_BATCH_SIZE:
assert len(actor_gradient_batch) == len(critic_gradient_batch)
for i in range(len(actor_gradient_batch)):
actor.apply_gradients(actor_gradient_batch[i])
critic.apply_gradients(critic_gradient_batch[i])
epoch += 1
if epoch % MODEL_SAVE_INTERVAL == 0:
save_path = saver.save(sess, SUMMARY_DIR + "/nn_model_ep_" + str(epoch) + ".ckpt")
entropy_record = []
# Clear all before proceeding to next run
del s_batch[:]
del a_batch[:]
del r_batch[:]
stream_info.clear()
list_states.clear()
end_of_run.clear()
else:
ev1.set() # let `producer` (rh) know we received request
list_states.append(request)
                # The bandwidth metrics reported by MPQUIC are not reliable here: they stay
                # constant and are never updated, so the measured bdw_paths values are used instead.
path1_smoothed_RTT, path1_bandwidth, path1_packets, \
path1_retransmissions, path1_losses, \
path2_smoothed_RTT, path2_bandwidth, path2_packets, \
path2_retransmissions, path2_losses, \
= getTrainingVariables(request)
time_stamp += 1 # in ms
last_path = path
# retrieve previous state
if len(s_batch) == 0:
state = np.zeros((S_INFO, S_LEN))
else:
state = np.array(s_batch[-1], copy=True)
# dequeue history record
state = np.roll(state, -1, axis=1)
# this should be S_INFO number of terms
state[0, -1] = (bdw_paths[0] - 1.0) / (100.0 - 1.0) # bandwidth path1
state[1, -1] = (bdw_paths[1] - 1.0) / (100.0 - 1.0) # bandwidth path2
state[2, -1] = ((path1_smoothed_RTT * 1000.0) - 1.0) / (120.0) # max RTT so far 120ms
state[3, -1] = ((path2_smoothed_RTT * 1000.0) - 1.0) / (120.0)
state[4, -1] = ((path1_retransmissions + path1_losses) - 0.0) / 20.0
state[5, -1] = ((path2_retransmissions + path2_losses) - 0.0) / 20.0
s_batch.append(state)
action_prob = actor.predict(np.reshape(state, (1, S_INFO, S_LEN)))
action_cumsum = np.cumsum(action_prob)
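                # Inverse-CDF sampling: draw a uniform number and take the first action
                # whose cumulative probability exceeds it.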
path = (action_cumsum > np.random.randint(1, RAND_RANGE) / float(RAND_RANGE)).argmax()
action_vec = np.zeros(A_DIM)
action_vec[path] = 1
a_batch.append(action_vec)
logger.debug("PATH: {}".format(path))
entropy_record.append(a3c.compute_entropy(action_prob[0]))
# prepare response
response = [request['StreamID'], PATHS[path]]
response = [str(r).encode('utf-8') for r in response]
ev2 = threading.Event()
put_response((response, ev2), tqueue, logger)
ev2.wait() # blocks until `consumer` (i.e. rh) receives response
# send kill signal to all
stop_env.set()
rhandler.stophandler()
collector.stophandler()
# wait for threads and process to finish gracefully...
for tp in tp_list:
tp.join()
def main():
agent()
if __name__ == '__main__':
main()
|
# Generated by Django 3.1.7 on 2021-05-04 21:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0003_auto_20210427_0001'),
('main', '0005_auto_20210427_2323'),
]
operations = [
]
|
from tkinter import Tk, Scale, Label, Entry, Button, font
from tkinter.constants import COMMAND
import serial
ventana = Tk()
ventana.title("Control de motor a pasos")
#ventana.geometry("400x300")
ventana.iconbitmap(r"D:\52556\Downloads\ejercicios_dia3\cas.ico")
ventana.config(bg="#3498db")  # blue
ventana.resizable(0,0)
grande = "Arial 14"
mediana = "Arial 12"
chica = "Arial 10"
conexion = None  # single shared serial connection, opened by conectar()
def conectar():
    global conexion
    if cajaTextoCom.get() != "":
        try:
            puerto = "COM" + cajaTextoCom.get()
            conexion = serial.Serial(puerto, 9600)
            etiqueta2["text"] = "Conectado"
            etiqueta2["bg"] = "#2ecc71"
            conexion.write("u\n\r".encode("ascii"))
            mostrarWidgets()
        except serial.serialutil.SerialException:
            etiqueta2["text"] = "No se encuentra el puerto COM ingresado"
            etiqueta2["bg"] = "#e74c3c"
    else:
        etiqueta2["text"] = "No has ingresado un valor"
        etiqueta2["bg"] = "#f1c40f"
def ocultarWidgets():
etiqueta3.grid_forget()
scl.grid_forget()
etiqueta4.grid_forget()
cajaGrados.grid_forget()
botonConfirmar.grid_forget()
def mostrarWidgets():
etiqueta3.grid(row=2, column=0, columnspan=3, padx=3, pady=3)
scl.grid(row=3, column=0, columnspan=3, padx=3, pady=3)
etiqueta4.grid(row=4, column=0, padx=3,pady=3)
cajaGrados.grid(row=4, column=1, padx=3, pady=3)
botonConfirmar.grid(row=4, column=2, padx=3, pady=3)
def cerrarPuerto():
    if conexion is not None:
        conexion.close()
    ventana.destroy()
def enviar():
    if cajaGrados.get() != "":
        grados = cajaGrados.get() + "\n\r"
        conexion.write(grados.encode("ascii"))
        scl.set(int(cajaGrados.get()))
        cajaGrados.delete("0", "end")
    else:
        grados = str(scl.get()) + "\n\r"
        conexion.write(grados.encode("ascii"))
etiqueta1 = Label(ventana, text= "Ingresa el puerto COM: ", bg = "#3498db", font=grande)
etiqueta1.grid(row=0, column=0, padx=3, pady=3)
cajaTextoCom = Entry(ventana, width=3, font=grande)
cajaTextoCom.grid(row=0,column=1, padx=3, pady=3)
botonConectar = Button(ventana, text="Conectar", font=mediana, command = conectar)
botonConectar.grid(row=0, column=2, padx=3, pady=3)
etiqueta2 = Label(ventana, text="...", bg="#3498db",font=chica)
etiqueta2.grid(row=1, column=0, columnspan=3, padx=3, pady=3)
etiqueta3 = Label(ventana, text="Posición en grados", bg="#3498db", font= grande)
etiqueta3.grid(row=2, column=0, columnspan=3, padx=3, pady=3)
scl = Scale(ventana, from_=0, to=360, length=300, resolution=1, orient="horizontal", bg="#3498db",troughcolor="#ecf0f1", activebackground="#2c3e50", highlightbackground="#3498db", font=grande)
scl.grid(row=3, column=0, columnspan=3, padx=3, pady=3)
etiqueta4 = Label(ventana, text="Ingresa la posición angular:", bg="#3498db", font= grande)
etiqueta4.grid(row=4, column=0, padx=3,pady=3)
cajaGrados = Entry(ventana, width=3, font=grande)
cajaGrados.grid(row=4, column=1, padx=3, pady=3)
botonConfirmar = Button(ventana, text="Enviar", font=mediana, command=enviar)
botonConfirmar.grid(row=4, column=2, padx=3, pady=3)
ventana.protocol("WM_DELETE_WINDOW", cerrarPuerto)
ocultarWidgets()
ventana.mainloop() |
'''
Task: the list Lista_Tweets holds the text of 5 tweets.
Create a new list variable, Lista_Palabras, and place in it the strings of every word that appears in the 5 tweets.
Extra: make words that are repeated across tweets appear only once in the list (you may need to look up a list or set method for that).
Extra 2: make sure the links that appear in some tweets do not end up in the word list.
'''
Lista_Tweets=['Pentágono: Tenemos controlado al #CoheteChino El cohete: https://t.co/YCpSeHw7m8',
'Cuando te querías suicidar y te enteras que estás cerca de la zona de impacto del #CoheteChino https://t.co/Cf3IyIvlSP',
'Este video queda mejor con el opening de Dragon Ball Z 😌 #CoheteChino https://t.co/Uzfn33oVME',
'Ahora los medios internacionales se refieren al #CoheteChino como Long March 5B lo mismo ocurrió con el #VirusChino al cual prefirieron llamarlo SARSCoV2 pero sus variantes sí son nombradas según el país donde son descubiertas… El comunismo controlando el mundo.',
'#CoheteChino //// nosotros vemos la luna llena (completamente redonda) los chinos también (!!!!????!!?)']
'''
Lista_Palabras=[]
for tweet in Lista_Tweets:
tweet=tweet.split(' ')
#Lista_Palabras.extend(tweet)
Lista_Palabras.extend([palabra for palabra in tweet if 'https:' not in palabra])
Lista_Palabras=list(set(Lista_Palabras))
'''
Lista_Palabras=[]
for tweet in Lista_Tweets:
tweet=tweet.split(' ')
for palabra in tweet:
if palabra not in Lista_Palabras:
if 'https:' not in palabra:
Lista_Palabras.append(palabra)
|
#!/usr/bin/python3
"""
Outlook online calendar event popup notifications for the absent-minded
Requires O365, dateutil, and PyQt5 packages
Note: multiple-desktop users should configure their window manager to
make the popup appear on all desktops.
"""
import os, sys, time, datetime, pickle, threading
import dateutil
import O365
from PyQt5 import QtCore, QtWidgets
event_cache_file = os.path.expanduser('~/.reminders_event_cache')
config_file = os.path.expanduser('~/.reminders')
if not os.path.exists(config_file):
print("Create a configuration file ~/.reminders with your username and password on the first two lines.")
sys.exit(-1)
u,p = map(str.strip, open(config_file, 'r').readlines())
sch = O365.Schedule((u, p))
del p
try:
sch.getCalendars()
except Exception:
raise Exception("Error retrieving calendar; check username/password.")
calendar = [c for c in sch.calendars if c.name == 'Calendar'][0]
def utc_to_local(utc_dt):
return utc_dt.replace(tzinfo=datetime.timezone.utc).astimezone(tz=None)
def event_times(ev):
start = utc_to_local(datetime.datetime(*ev.getStart()[:6]))
end = utc_to_local(datetime.datetime(*ev.getEnd()[:6]))
return start, end
event_cache = None
if os.path.exists(event_cache_file):
    try:
        event_cache = pickle.load(open(event_cache_file, 'rb'))
        print("Read events from cache file.")
    except Exception:
        print("Error loading event cache; will pull from server instead.")
def get_events():
global event_cache
return event_cache['events']
def download_events():
"""Download and return all events sorted by start time.
Note: we download all old events because otherwise some repeated events
will be missed.
"""
global calendar
start = time.strftime(calendar.time_string, time.localtime(3600*24*365*40))
end = time.strftime(calendar.time_string, time.localtime(time.time() + 3600*240))
calendar.getEvents(start=start, end=end, eventCount=1000000)
return sorted(calendar.events, key=lambda ev: ev.getStart())
def recache_events():
"""Download all events and write to the cache.
"""
global event_cache
print("Reloading events..")
all_events = download_events()
now = time.time()
event_cache = {'events': all_events, 'time': now}
pickle.dump(event_cache, open(event_cache_file+'.tmp', 'wb'))
os.rename(event_cache_file+'.tmp', event_cache_file)
print(" done; downloaded %d events" % len(all_events))
# Populate the cache on first start-up, now that recache_events is defined.
if event_cache is None:
    recache_events()
def poll_events():
global event_cache, run_poller
while run_poller:
now = time.time()
if now > event_cache['time'] + 1200:
try:
recache_events()
except Exception:
print("Error loading events from server:")
sys.excepthook(*sys.exc_info())
time.sleep(1200)
run_poller = True
poller = threading.Thread(target=poll_events)
poller.daemon = True
poller.start()
def upcoming_events(window=20*60, verbose=False):
now = datetime.datetime.now(datetime.timezone.utc).astimezone()
today = now.date()
    try:
        events = get_events()[::-1]
    except Exception:
        sys.excepthook(*sys.exc_info())
        events = []
    upcoming = []
# if verbose:
# print("[filtering %d events..]" % len(events))
n_cancelled = 0
n_not_today = 0
n_passed = 0
for i,ev in enumerate(events):
# print(ev.getSubject())
evid = ev.json['Id']
if ev.json['IsCancelled']:
# print(" cancelled")
n_cancelled += 1
continue
start, end = event_times(ev)
if start.date() > today:
# print(" not today")
n_not_today += 1
continue
if end < now - datetime.timedelta(seconds=3600*8):
# print(" already passed")
# n_passed = len(events) - i
# break
n_passed += 1
continue
sec = (start - now).total_seconds()
upcoming.append((ev, sec))
# print(" in %d sec" % sec)
if verbose:
print("[found %d upcoming events; %d cancelled, %d not today, %d already passed]" % (len(upcoming), n_cancelled, n_not_today, n_passed))
return upcoming[::-1]
class ScreenPrint(object):
"""Simple interface for printing full-screen text
"""
def __init__(self):
self.current_row = 0
self.rows = 100
self.columns = 100
def reset(self):
self.current_row = 0
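        # ANSI escape: move the cursor to the top-left corner before redrawing the screen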
print("\033[0;0f")
self.rows, self.columns = list(map(int, os.popen('stty size', 'r').read().split()))
def print_line(self, msg):
while len(msg) > self.columns:
line, msg = msg[:self.columns], msg[self.columns:]
print(line)
self.current_row += 1
print(("{:%ds}" % self.columns).format(msg))
self.current_row += 1
def clear_to_bottom(self):
for i in range(self.current_row, self.rows-1):
self.print_line("")
screen = ScreenPrint()
class ReminderDialog(QtWidgets.QWidget):
def __init__(self, check_interval=10.0):
self.current_event = None
self.dismissed_events = {}
self.snoozed_events = {}
self.format = """
<div style="font-weight: bold; font-size: 20pt; text-align: center">{start_time} ({how_soon} minutes)</div>
<div style="font-weight: bold; font-size: 15pt; text-align: center">{subject}</div>
<div style="font-weight: bold; font-size: 15pt; text-align: center">{location}</div>
<br>
<hr>
<div style="text-align: left; font-size: 12pt; font-weight: normal;">{body}</div>
"""
QtWidgets.QWidget.__init__(self)
self.layout = QtWidgets.QGridLayout()
self.setLayout(self.layout)
self.desc = QtWidgets.QTextBrowser()
self.layout.addWidget(self.desc, 0, 0, 1, 3)
self.dismissBtn = QtWidgets.QPushButton('dismiss')
self.layout.addWidget(self.dismissBtn, 1, 0)
self.dismissBtn.clicked.connect(self.dismiss)
self.snoozeBtn = QtWidgets.QPushButton('snooze until')
self.layout.addWidget(self.snoozeBtn, 1, 1)
self.snoozeBtn.clicked.connect(self.snooze)
self.snoozeSpin = QtWidgets.QSpinBox()
self.snoozeSpin.setSuffix(' minutes before')
self.snoozeSpin.setValue(3)
self.snoozeSpin.setMinimum(0)
self.layout.addWidget(self.snoozeSpin, 1, 2)
self.resize(600, 400)
self.window().setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
self.window().setWindowTitle("Event reminder")
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.check_events)
self.timer.start(int(check_interval*1000))
self.check_events()
def show_event(self, ev):
new_event = self.current_event is not ev
self.current_event = ev
start, end = event_times(ev)
start_str = start.strftime("%H:%M")
now = datetime.datetime.now(datetime.timezone.utc).astimezone()
mins = int((start - now).total_seconds() / 60)
desc = self.format.format(how_soon=mins, start_time=start_str, subject=ev.getSubject(), location=ev.getLocation()['DisplayName'], body=ev.getBody())
scroll = self.desc.verticalScrollBar().value()
self.desc.setHtml(desc)
if not new_event:
self.desc.verticalScrollBar().setValue(scroll)
self.snoozeSpin.setValue(mins//3)
self.show()
self.desc.setFocus()
self.activateWindow()
def dismiss(self):
self.dismissed_events[self.current_event.json['Id']] = True
if not self.check_events():
self.hide()
def snooze(self):
self.snoozed_events[self.current_event.json['Id']] = self.snoozeSpin.value() * 60
if not self.check_events():
self.hide()
def check_events(self, verbose=True):
"""Check upcoming events and show a notification for the next event,
if any.
"""
global screen
if verbose:
screen.reset()
events = upcoming_events(verbose=verbose)
if verbose:
screen.reset()
screen.print_line("Upcoming events:")
ret = False
for ev, how_soon in events:
if verbose:
screen.print_line(" (%d min) %s [%s]" % (how_soon//60, ev.getSubject(), ev.getLocation()['DisplayName']))
if ev.json['Id'] in self.dismissed_events:
if verbose:
screen.print_line(" already dismissed")
continue
if ev.json['Id'] in self.snoozed_events:
if how_soon > self.snoozed_events[ev.json['Id']]:
if verbose:
screen.print_line(" snoozing...")
continue
if how_soon > 20*60:
if verbose:
screen.print_line(" not time for notification yet.")
continue
if verbose:
screen.print_line(" notify!")
self.show_event(ev)
ret = True
if verbose:
screen.clear_to_bottom()
return ret
app = QtWidgets.QApplication([])
dlg = ReminderDialog()
def check():
dlg.check_events(verbose=True)
if sys.flags.interactive == 0:
app.exec_()
|
A, B = map(int, input().split())
print(max([A + A - 1, B + A, B + B - 1]))
|
current_users = ['aaA', 'bbb', 'ccc', 'ddd', 'eee']
new_users = ['qqq', 'aAa', 'www', 'BBB', 'ppp']
for new_user in new_users:
flag = 0
for current_user in current_users:
if new_user.lower() == current_user.lower():
flag = 1
break
if flag == 1:
print(new_user + " has already been used.")
else:
print(new_user + " has not been used.") |
import requests
from credentials import client_id, client_secret
from urllib.parse import urlencode
from flask import Flask, request
from pprint import pprint
access_token_url = "https://github.com/login/oauth/access_token"
authorize_url = "https://github.com/login/oauth/authorize"
data = {
"client_id": client_id,
"redirect_uri": "http://localhost:8080",
"scope": "",
"state": "supersecretstate"
}
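# GitHub OAuth web flow: the user visits the authorize URL printed below, GitHub redirects
# back to redirect_uri with a temporary ?code=..., and the Flask route exchanges that code
# for an access token at access_token_url.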
print("Go to:")
print("%s?%s" % (authorize_url, urllib.urlencode(data)))
print("Waiting for response...")
app = Flask(__name__)
@app.route("/")
def get_access_token():
# parse code
code = request.args.get('code')
    print(code)
data = {
"client_id": client_id,
"client_secret": client_secret,
"code": code
}
headers = {
"Accept": "application/json"
}
    r = requests.post(access_token_url, data=data, headers=headers)
    if r.status_code == 200:
        return str(r.json())
    return "Token exchange failed with status %d" % r.status_code
if __name__ == "__main__":
app.run('localhost', 8080)
|
from nmt.modeling.transformer.multiheadattention import MultiHeadAttentionLayer
from nmt.modeling.transformer.positionwiseff import PositionwiseFeedforwardLayer
from nmt.modeling.transformer.encoder import Encoder
from nmt.modeling.transformer.decoder import Decoder
from nmt.modeling.transformer.transformer import Transformer |
class Node:
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
def construct_tree(pre_seq, mid_seq, tree, left):
"""前序遍历第一个节点就是根节点,在中序遍历里面,1左边的节点都是左子树的节点,右边是右子树的节点
那么根据相应的前序遍历结果确定左子树和右子树,递归处理"""
if not pre_seq or not mid_seq:
return
root_val = pre_seq[0]
_next = Node(root_val)
if left:
tree.left = _next
else:
tree.right = _next
root_pos = mid_seq.index(root_val)
left_mid_seq = mid_seq[:root_pos]
right_mid_seq = mid_seq[root_pos+1:]
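    # The left subtree occupies the next len(left_mid_seq) elements of the pre-order
    # sequence; everything after that belongs to the right subtree.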
left_pre_seq = pre_seq[1:len(left_mid_seq)+1]
right_pre_seq = pre_seq[len(left_mid_seq)+1:]
construct_tree(left_pre_seq, left_mid_seq, _next, left=True)
construct_tree(right_pre_seq, right_mid_seq, _next, left=False)
if __name__ == "__main__":
pre_seq = [1, 2, 4, 7, 3, 5, 6, 8]
mid_seq = [4, 7, 2, 1, 5, 3, 8, 6]
tree = Node(pre_seq[0])
root_pos = mid_seq.index(pre_seq[0])
left_mid_seq = mid_seq[:root_pos]
right_mid_seq = mid_seq[root_pos + 1:]
left_pre_seq = pre_seq[1:len(left_mid_seq)+1]
right_pre_seq = pre_seq[len(left_mid_seq) + 1:]
construct_tree(left_pre_seq, left_mid_seq, tree, left=True)
construct_tree(right_pre_seq, right_mid_seq, tree, left=False)
print("pre order")
    # pre-order traversal
def traverse_pre(root):
if root is None:
return
print(root.val)
traverse_pre(root.left)
traverse_pre(root.right)
traverse_pre(tree)
print("mid order")
    # in-order traversal
def traverse_mid(root):
if root is None:
return
traverse_mid(root.left)
print(root.val)
traverse_mid(root.right)
traverse_mid(tree)
print("post order")
    # post-order traversal
def traverse_post(root):
if root is None:
return
traverse_post(root.left)
traverse_post(root.right)
print(root.val)
traverse_post(tree)
|
# coding:utf-8
# Roulette wheel (fitness-proportionate) selection
import numpy
import random
from calfitValue import calfitValue
def gamble(pop):
temp = []
newfit_value = []
pop_len = len(pop)
    # total fitness of the population
for i in range(pop_len):
fit = calfitValue(pop[i])
temp.append(fit)
total_fit = sum(temp)
for i in range(len(temp)):
newfit_value.append(temp[i] / total_fit)
    # cumulative selection probabilities
newfit_value = numpy.cumsum(newfit_value)
ms = []
for i in range(pop_len):
ms.append(random.random())
ms.sort()
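    # Sorting the random draws lets the wheel be scanned in a single pass with two
    # indices: newin walks the draws, fitin walks the cumulative probabilities.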
fitin = 0
newin = 0
    newpop = pop[:]  # copy, so selection always reads the original population
    # spin the wheel: advance through the sorted draws and the cumulative distribution together
while newin < pop_len:
if(ms[newin] < newfit_value[fitin]):
newpop[newin] = pop[fitin]
newin = newin + 1
else:
fitin = fitin + 1
pop = newpop
return pop
if __name__ == '__main__':
pop = [
[0, 15, 2, 5, 10, 9, 3, 8, 1, 13, 14, 7, 11, 6, 4, 12, 177018.55000000002],
[14, 15, 4, 1, 12, 2, 3, 8, 6, 5, 9, 10, 11, 13, 7, 0, 451486.0549999999],
[1, 2, 14, 13, 5, 15, 3, 8, 4, 9, 6, 11, 0, 10, 12, 7, 406772.40499999997],
[10, 5, 7, 14, 9, 15, 3, 8, 6, 0, 12, 1, 11, 13, 2, 4, 282844.04500000004],
[12, 15, 11, 0, 9, 1, 3, 8, 7, 4, 14, 5, 6, 13, 10, 2, 341203.6900000001],
]
new_pop = gamble(pop)
    print(new_pop)
|
from django.http import request
NAME = "PERPUSTAKAAN"
context = {
"name":NAME,
"request":request,
} |
from optparse import OptionParser
import os
import numpy as np
from itertools import product
from subprocess import call
def parse_args():
parser = OptionParser()
parser.set_defaults()
parser.add_option("--n_seeds", type="int", dest="n_seeds")
(options, args) = parser.parse_args()
return options
def main(logs_dir='/dccstor/roxy1/fairness/metric_learning/logs/'):
options = parse_args()
print(options)
n_seeds = options.n_seeds
    os.makedirs(logs_dir, exist_ok=True)
seeds = list(range(n_seeds))
# Grids
eps_grid = [0.001,0.05,0.01,0.5,0.1] #5
fe_grid = [20,40,60,80,100] #5
slr_grid = [1,5,10,15,20] #5
se_grid = [20,40,60,80,100] #5
lr_grid = [1e-3, 5e-4, 1e-4, 5e-5, 1e-5] #5
# # Test grids
# eps_grid = [0.001] #5
# fe_grid = [20] #5
# slr_grid = [1] #5
# se_grid = [20] #5
# lr_grid = [1e-3] #5
# # Selection
# eps_grid = [0.01]
# fe_grid = [50]
# slr_grid = [5., 10., 20.]
# se_grid = [50]
# lr_grid = [1e-5]
hypers = [eps_grid, fe_grid, slr_grid, se_grid, lr_grid]
names = ['eps', 'fe', 'slr', 'se', 'lr']
names += ['flr', 'seed']
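    # product(*hypers) enumerates the full Cartesian grid over the five value lists
    # (5**5 = 3125 configurations per seed); each one is submitted as a separate job via jbsub.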
exp_idx = 0
for seed in seeds:
for pack in product(*hypers):
values = list(pack)
(
eps,
full_epoch,
subspace_step,
subspace_epoch,
lr
) = pack
full_step = eps/10
values.append(full_step)
values.append(seed)
exp_descriptor = []
for n, v in zip(names, values):
exp_descriptor.append(':'.join([n,str(v)]))
exp_name = '_'.join(exp_descriptor)
print(exp_name)
job_cmd='python ' +\
'adult_ccc.py ' +\
' --eps ' + str(eps) +\
' --fe ' + str(full_epoch) +\
' --flr ' + str(full_step) +\
' --se ' + str(subspace_epoch) +\
' --slr ' + str(subspace_step) +\
' --lr ' + str(lr) +\
' --idx ' + str(exp_idx) +\
' --seed ' + str(seed)
# queue = np.random.choice(['x86_6h','x86_12h', 'x86_24h'],1,p=[0.5, 0.3, 0.2])[0]
queue = np.random.choice(['x86_12h','x86_24h', 'x86_7d'],1,p=[0.5, 0.3, 0.2])[0]
# queue = np.random.choice(['x86_24h', 'x86_7d'],1,p=[0.65, 0.35])[0]
call(['jbsub', '-proj', 'explore', '-cores', '1', '-mem', '10g', '-queue', queue,
'-name', exp_name,
# '-require', '(v100) && (hname != dccxc326)',
'-out', logs_dir + exp_name + '.out',
'-err', logs_dir + exp_name + '.err',
job_cmd])
exp_idx += 1
return
if __name__ == '__main__':
main()
|
#!/usr/bin/python
import subprocess
import os
import sys
"""
This script attempts to disable hyperthreading by forcing offline all
hyperthreads on a each core except one.
"""
def disable(coreId):
os.system("/bin/echo 0 > /sys/devices/system/cpu/cpu{}/online".format(coreId))
print "Disabled Core {}.".format(coreId)
def main():
if not os.geteuid() == 0:
sys.exit('This script must be run as root!')
totalCores = int(subprocess.check_output("getconf _NPROCESSORS_CONF",
shell=True))
thread_siblings = "/sys/devices/system/cpu/cpu{}/topology/thread_siblings_list"
complete = False
while not complete:
complete = True
        for i in range(totalCores):
try:
with open(thread_siblings.format(i)) as sibling_list:
siblings = sibling_list.read()
if '-' in siblings:
hyperthreads = siblings.split("-")
disable(hyperthreads[1].strip())
complete = False
elif ',' in siblings:
hyperthreads = siblings.split(",")
for h in hyperthreads[1:]:
disable(h.strip())
complete = False
            except Exception:
pass
if __name__ == '__main__':
main()
|
"""
This example uses the distributed training aspect of Determined
to quickly and efficiently train a state-of-the-art architecture
for ImageNet found by a leading NAS method called GAEA:
https://arxiv.org/abs/2004.07802
We will add swish activation and squeeze-and-excite modules in this
model to further improve upon the published 24.0 test error on imagenet.
We assume that you already have imagenet downloaded and the train and test
directories set up.
"""
import os
from collections import namedtuple
from typing import Any, Dict, Tuple, cast
import torch
import torchvision.transforms as transforms
from torch import nn
import determined as det
from data import ImageNetDataset
from determined.pytorch import DataLoader, LRScheduler, PyTorchTrial, reset_parameters
from model import NetworkImageNet
from utils import AutoAugment, CrossEntropyLabelSmooth, Cutout, HSwish, Swish, accuracy
Genotype = namedtuple("Genotype", "normal normal_concat reduce reduce_concat")
activation_map = {"relu": nn.ReLU, "swish": Swish, "hswish": HSwish}
class ImageNetTrial(PyTorchTrial):
def __init__(self, context: det.TrialContext) -> None:
self.context = context
self.data_config = context.get_data_config()
self.criterion = CrossEntropyLabelSmooth(
context.get_hparam("num_classes"), # num classes
context.get_hparam("label_smoothing_rate"),
)
self.last_epoch_idx = -1
def build_training_data_loader(self) -> DataLoader:
bucket_name = self.data_config["bucket_name"]
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
train_transforms = transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(
brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2
),
transforms.ToTensor(),
normalize,
]
)
if self.context.get_hparam("cutout"):
train_transforms.transforms.append(
Cutout(self.context.get_hparam("cutout_length"))
)
if self.context.get_hparam("autoaugment"):
            train_transforms.transforms.insert(0, AutoAugment())  # instantiate the transform
train_data = ImageNetDataset(
"train",
bucket_name,
streaming=self.data_config["streaming"],
data_download_dir=self.data_config["data_download_dir"],
transform=train_transforms,
)
train_queue = DataLoader(
train_data,
batch_size=self.context.get_per_slot_batch_size(),
shuffle=True,
pin_memory=True,
num_workers=self.data_config["num_workers_train"],
)
return train_queue
def build_validation_data_loader(self) -> DataLoader:
bucket_name = self.data_config["bucket_name"]
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
valid_data = ImageNetDataset(
"validation",
bucket_name,
streaming=self.data_config["streaming"],
data_download_dir=self.data_config["data_download_dir"],
transform=transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]
),
)
valid_queue = DataLoader(
valid_data,
batch_size=self.context.get_per_slot_batch_size(),
shuffle=False,
pin_memory=True,
num_workers=self.data_config["num_workers_val"],
)
return valid_queue
def build_model(self) -> nn.Module:
genotype = Genotype(
normal=[
("skip_connect", 1),
("skip_connect", 0),
("sep_conv_3x3", 2),
("sep_conv_3x3", 1),
("sep_conv_5x5", 2),
("sep_conv_3x3", 0),
("sep_conv_5x5", 3),
("sep_conv_5x5", 2),
],
normal_concat=range(2, 6),
reduce=[
("max_pool_3x3", 1),
("sep_conv_3x3", 0),
("sep_conv_5x5", 1),
("dil_conv_5x5", 2),
("sep_conv_3x3", 1),
("sep_conv_3x3", 3),
("sep_conv_5x5", 1),
("max_pool_3x3", 2),
],
reduce_concat=range(2, 6),
)
activation_function = activation_map[self.context.get_hparam("activation")]
model = NetworkImageNet(
genotype,
activation_function,
self.context.get_hparam("init_channels"),
self.context.get_hparam("num_classes"),
self.context.get_hparam("layers"),
auxiliary=self.context.get_hparam("auxiliary"),
do_SE=self.context.get_hparam("do_SE"),
)
        # If loading backbone weights, either skip reset_parameters() or call it
        # before loading the backbone weights.
reset_parameters(model)
return model
def optimizer(self, model: nn.Module) -> torch.optim.Optimizer: # type: ignore
self._optimizer = torch.optim.SGD(
model.parameters(),
lr=self.context.get_hparam("learning_rate"),
momentum=self.context.get_hparam("momentum"),
weight_decay=self.context.get_hparam("weight_decay"),
)
return self._optimizer
def create_lr_scheduler(self, optimizer):
self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, self.context.get_hparam("cosine_annealing_epochs")
)
step_mode = LRScheduler.StepMode.MANUAL_STEP
return LRScheduler(self.scheduler, step_mode=step_mode)
def train_batch(
self, batch: Any, model: nn.Module, epoch_idx: int, batch_idx: int
) -> Dict[str, torch.Tensor]:
if batch_idx == 0 or self.last_epoch_idx < epoch_idx:
self.scheduler.step()
current_lr = self.scheduler.get_last_lr()[0]
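        # Linear warm-up: for the first 5 epochs scale the base learning rate by
        # (epoch_idx + 1) / 5 before the cosine-annealing schedule takes over.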
if epoch_idx < 5:
lr = self.context.get_hparam("learning_rate")
for param_group in self._optimizer.param_groups:
param_group["lr"] = lr * (epoch_idx + 1) / 5.0
print(
"Warming-up Epoch: {}, LR: {}".format(
epoch_idx, lr * (epoch_idx + 1) / 5.0
)
)
else:
print("Epoch: {} lr {}".format(epoch_idx, current_lr))
input, target = batch
model.drop_path_prob = 0
logits, logits_aux = model(input)
loss = self.criterion(logits, target)
if self.context.get_hparam("auxiliary"):
loss_aux = self.criterion(logits_aux, target)
loss += self.context.get_hparam("auxiliary_weight") * loss_aux
top1, top5 = accuracy(logits, target, topk=(1, 5))
self.last_epoch_idx = epoch_idx
return {"loss": loss, "top1_accuracy": top1, "top5_accuracy": top5}
def evaluate_batch(self, batch: Any, model: nn.Module) -> Dict[str, Any]:
input, target = batch
logits, _ = model(input)
loss = self.criterion(logits, target)
top1, top5 = accuracy(logits, target, topk=(1, 5))
return {"loss": loss, "top1_accuracy": top1, "top5_accuracy": top5}
|
# REST Framework
from rest_framework import generics, permissions, status
from rest_framework.decorators import api_view
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.test import APIRequestFactory, APITestCase
# User class from django
from django.contrib.auth.models import User, UserManager
# Models and Serializers
from main.serializers import UserSerializer, TopicSerializer, FeedSerializer, PostSerializer
from main.models import Topic, Feed, Post
from django.forms.models import model_to_dict
## Transaction Management
from django.db import transaction
# Python built-ins required for tests
import time
import datetime
import pytz
import traceback
class FeedCreateTests(APITestCase):
@classmethod
def setUpClass(cls):
cls.user = User.objects.create_user(username="FeedTests")
cls.user.save()
cls.f1_url = "http://home.uchicago.edu/~jharriman/example-rss.xml"
cls.f1_id = None
cls.f1 = None
cls.f1_post_list = [
{
"id": 6,
"feedURL": "http://www.nytimes.com/services/xml/rss/nyt/US.xml",
"author": "By KATIE HAFNER",
"category": [],
"rights": "",
"title": "Bracing for the Falls of an Aging Nation",
"subtitle": "",
"content": "As Americans live longer, fall-related injuries and deaths are rising, and homes for the elderly are tackling the problem in ways large and small \u2014 even by changing the color of their carpeting and toilet seats.<img border=\"0\" height=\"1\" src=\"http://rss.nytimes.com/c/34625/f/642562/s/4014157b/sc/36/mf.gif\" width=\"1\" /><br clear=\"all\" />",
"generator": "",
"guid": "http://www.nytimes.com/interactive/2014/11/03/health/bracing-for-the-falls-of-an-aging-nation.html",
"url": "http://rss.nytimes.com/c/34625/f/642562/s/4014157b/sc/36/l/0L0Snytimes0N0Cinteractive0C20A140C110C0A30Chealth0Cbracing0Efor0Ethe0Efalls0Eof0Ean0Eaging0Enation0Bhtml0Dpartner0Frss0Gemc0Frss/story01.htm",
"contributor": "",
"pubDate": "2014-11-02T13:43:10Z",
"updated": "2014-11-02T13:43:10Z",
"ackDate": 1415855355.56354,
"feed": 2
},
{
"id": 5,
"feedURL": "http://www.nytimes.com/services/xml/rss/nyt/US.xml",
"author": "By LYNN VAVRECK",
"category": ["Elections, Senate","United States Politics and Government","Elections, House of Representatives", "Voting and Voters", "Midterm Elections (2014)"],
"rights": "",
"title": "Midterm Calculus: The Economy Elects Presidents. Presidents Elect Congress.",
"subtitle": "",
"content": "While presidential elections are shaped largely by economic performance, the largest factor in midterm elections is the president.",
"generator": "",
"guid": "http://www.nytimes.com/2014/11/03/upshot/the-economy-elects-presidents-presidents-elect-congress.html",
"url": "http://rss.nytimes.com/c/34625/f/642562/s/40134217/sc/1/l/0L0Snytimes0N0C20A140C110C0A30Cupshot0Cthe0Eeconomy0Eelects0Epresidents0Epresidents0Eelect0Econgress0Bhtml0Dpartner0Frss0Gemc0Frss/story01.htm",
"contributor": "",
"pubDate": "2014-11-02T14:00:22Z",
"updated": "2014-11-02T14:00:22Z",
"ackDate": 1415855355.55587,
"feed": 2
}]
cls.f1_details = {
"id": cls.f1_id,
"author": "",
"category": "",
"contributor": "",
"description": "US",
"docURL": "",
"editorAddr": "",
"generator": "",
"guid": "",
"language": "en-us",
"logo": "http://graphics8.nytimes.com/images/misc/NYT_logo_rss_250x40.png",
"rights": "Copyright 2014 The New York Times Company",
"subtitle": "US",
"title": "NYT > U.S.",
"webmaster": "",
"URL": "http://www.nytimes.com/services/xml/rss/nyt/US.xml",
"ttl": 5,
"skipDays": None,
"skipHours": None,
"pubDate": "2014-11-02T16:13:02Z",
"updated": "2014-11-06T01:00:31Z",
"posts": [2,1]
}
@classmethod
def tearDownClass(cls):
cls.user.topics.get(name="Uncategorized").delete()
cls.user.delete()
# Make sure to delete the feed so we don't run into other tests
def test_create_feed(cls):
"""Test that Feed can be created by URL"""
response = cls.client.post('/feeds/create/', {"url" : cls.f1_url})
cls.assertEqual(response.status_code, 200)
# response = cls.client.get('/feeds/')
# cls.assertEqual(response.status_code, 200)
# cls.assertEqual(response.data, [{'id': 1, 'author': u'', 'category': u'',
# 'contributor': u'', 'description': u'US',
# 'docURL': u'http://www.nytimes.com/pages/national/index.html?partner=rss&emc=rss',
# 'editorAddr': u'', 'generator': u'', 'guid': u'',
# 'language': u'en-us',
# 'logo': u'http://graphics8.nytimes.com/images/misc/NYT_logo_rss_250x40.png',
# 'rights': u'Copyright 2014 The New York Times Company',
# 'subtitle': u'US', 'title': u'NYT > U.S.', 'webmaster': u'',
# 'URL': u'http://home.uchicago.edu/~jharriman/example-rss.xml',
# 'ttl': 5, 'skipDays': None, 'skipHours': None,
# 'pubDate': datetime.datetime(2014, 11, 2, 16, 13, 2, tzinfo=pytz.UTC),
# 'updated': datetime.datetime(2014, 11, 6, 1, 0, 31, tzinfo=pytz.UTC),
# 'posts': [2, 1]}])
#
# #gets newly created feed object and its id
# cls.f1 = Feed.objects.get(id=response.data[0]["id"])
# cls.f1_id = cls.f1.id
# cls.f1.delete()
class FeedTests(APITestCase):
@classmethod
def setUpClass(cls):
cls.user = User.objects.create_user(username="FeedTests")
cls.user.save()
cls.f1_url = "http://home.uchicago.edu/~jharriman/example-rss.xml"
cls.f1_id = None
cls.f1 = None
cls.f1_post_list = [
{
"feedURL": u"http://www.nytimes.com/services/xml/rss/nyt/US.xml",
"author": u"By KATIE HAFNER",
"category": [],
"rights": u"",
"title": u"Bracing for the Falls of an Aging Nation",
"subtitle": u"",
"content": u"As Americans live longer, fall-related injuries and deaths are rising, and homes for the elderly are tackling the problem in ways large and small \u2014 even by changing the color of their carpeting and toilet seats.<img border=\"0\" height=\"1\" src=\"http://rss.nytimes.com/c/34625/f/642562/s/4014157b/sc/36/mf.gif\" width=\"1\" /><br clear=\"all\" />",
"generator": u"",
"guid": u"http://www.nytimes.com/interactive/2014/11/03/health/bracing-for-the-falls-of-an-aging-nation.html",
"url": u"http://rss.nytimes.com/c/34625/f/642562/s/4014157b/sc/36/l/0L0Snytimes0N0Cinteractive0C20A140C110C0A30Chealth0Cbracing0Efor0Ethe0Efalls0Eof0Ean0Eaging0Enation0Bhtml0Dpartner0Frss0Gemc0Frss/story01.htm",
"contributor": u"",
"pubDate": u"2014-11-02T13:43:10Z",
"updated": u"2014-11-02T13:43:10Z",
"ackDate": 1415855355.56354,
"feed": 2
},
{
"feedURL": u"http://www.nytimes.com/services/xml/rss/nyt/US.xml",
"author": u"By LYNN VAVRECK",
"category": ["Elections, Senate","United States Politics and Government","Elections, House of Representatives", "Voting and Voters", "Midterm Elections (2014)"],
"rights": u"",
"title": u"Midterm Calculus: The Economy Elects Presidents. Presidents Elect Congress.",
"subtitle": u"",
"content": u"While presidential elections are shaped largely by economic performance, the largest factor in midterm elections is the president.",
"generator": u"",
"guid": u"http://www.nytimes.com/2014/11/03/upshot/the-economy-elects-presidents-presidents-elect-congress.html",
"url": u"http://rss.nytimes.com/c/34625/f/642562/s/40134217/sc/1/l/0L0Snytimes0N0C20A140C110C0A30Cupshot0Cthe0Eeconomy0Eelects0Epresidents0Epresidents0Eelect0Econgress0Bhtml0Dpartner0Frss0Gemc0Frss/story01.htm",
"contributor": u"",
"pubDate": u"2014-11-02T14:00:22Z",
"updated": u"2014-11-02T14:00:22Z",
"ackDate": 1415855355.55587,
"feed": 2
}]
cls.f1 = Feed.createByURL(cls.f1_url)
cls.p1 = Post.objects.get(guid="http://www.nytimes.com/interactive/2014/11/03/health/bracing-for-the-falls-of-an-aging-nation.html")
cls.p2 = Post.objects.get(guid="http://www.nytimes.com/2014/11/03/upshot/the-economy-elects-presidents-presidents-elect-congress.html")
cls.f1_details = {
"id": cls.f1_id,
"author": u"",
"category": u"",
"contributor": u"",
"description": u"US",
"docURL": u"",
"editorAddr": u"",
"generator": u"",
"guid": u"",
"language": u"en-us",
"logo": u"http://graphics8.nytimes.com/images/misc/NYT_logo_rss_250x40.png",
"rights": u"Copyright 2014 The New York Times Company",
"subtitle": u"US",
"title": u"NYT > U.S.",
"webmaster": u"",
"URL": u"http://www.nytimes.com/services/xml/rss/nyt/US.xml",
"ttl": 5,
"skipDays": None,
"skipHours": None,
"pubDate" : datetime.datetime(2014, 11, 2, 16, 13, 2, tzinfo=pytz.UTC),
"updated": datetime.datetime(2014, 11, 6, 1, 0, 31, tzinfo=pytz.UTC),
"posts": [cls.p1.id,cls.p2.id]
}
cls.f1_id = cls.f1.id
@classmethod
def tearDownClass(cls):
cls.user.topics.get(name="Uncategorized").delete()
cls.user.delete()
cls.f1.delete()
# Make sure to delete the feed so we don't run into other tests
def test_feed_detail_exists(cls):
"""Test accuracy of feed details"""
response = cls.client.get("/feeds/%d" % (cls.f1_id, ))
cls.assertEqual(response.status_code, 200)
cls.assertItemsEqual(response.data, cls.f1_details)
def test_post_list_exists(cls):
"""Test accuracy of post list"""
response = cls.client.get("/feeds/%d/posts/" % (cls.f1_id, ))
cls.assertEqual(response.status_code, 200)
# Delete the ids, since they are added by the server and not really relevant to checking correctness
for post in response.data:
del post["id"]
        for res, exp in zip(response.data, cls.f1_post_list):
cls.assertItemsEqual(res, exp)
def test_delete_feed(cls):
"""Feed deletion should fail - to build our database, a feed is never deleted"""
response = cls.client.delete("/feeds/%d" % (cls.f1_id,))
cls.assertEqual(response.status_code, 405)
class PostTests(APITestCase):
@classmethod
def setUpClass(cls):
cls.f1 = Feed.createByURL("http://home.uchicago.edu/~jharriman/example-rss.xml")
cls.f1.save()
cls.f1_id = cls.f1.id
cls.p1_id = cls.f1.posts.all()[0].id
cls.p1_data = {
'Length': 0,
'enclosures': [],
u'id': cls.p1_id,
'feedURL': u'http://www.nytimes.com/services/xml/rss/nyt/US.xml',
'author': u'By KATIE HAFNER',
'category': [],
'rights': u'',
'title': u'Bracing for the Falls of an Aging Nation',
'subtitle': u'',
'content': u'As Americans live longer, fall-related injuries and deaths are rising, and homes for the elderly are tackling the problem in ways large and small \u2014 even by changing the color of their carpeting and toilet seats.<img border="0" height="1" src="http://rss.nytimes.com/c/34625/f/642562/s/4014157b/sc/36/mf.gif" width="1" /><br clear="all" />',
'generator': u'',
'guid': u'http://www.nytimes.com/interactive/2014/11/03/health/bracing-for-the-falls-of-an-aging-nation.html',
'url': u'http://rss.nytimes.com/c/34625/f/642562/s/4014157b/sc/36/l/0L0Snytimes0N0Cinteractive0C20A140C110C0A30Chealth0Cbracing0Efor0Ethe0Efalls0Eof0Ean0Eaging0Enation0Bhtml0Dpartner0Frss0Gemc0Frss/story01.htm',
'contributor': u'',
'pubDate': datetime.datetime(2014, 11, 2, 13, 43, 10, tzinfo=pytz.UTC),
'updated': datetime.datetime(2014, 11, 2, 13, 43, 10, tzinfo=pytz.UTC),
'ackDate': 1415858199.31228,
'feed': cls.f1_id,
}
@classmethod
def tearDownClass(cls):
cls.f1.delete()
def test_post_detail_exists(cls):
"""Test accuracy of post"""
response = cls.client.get('/feeds/%d/posts/' % (cls.f1_id, ))
cls.assertEqual(response.status_code, 200)
cls.assertItemsEqual([p for p in response.data if p['id'] == cls.p1_id][0], cls.p1_data)
|
from bs4 import BeautifulSoup as soup
import re
cpfs = []
with open(input("digite o nome dos dados: ")+'.txt','r') as arq:
arqui = arq.read().strip()
regex = re.findall('([0-9]{2}[\.]?[0-9]{3}[\.]?[0-9]{3}[\/]?[0-9]{4}[-]?[0-9]{2})|([0-9]{3}[\.]?[0-9]{3}[\.]?[0-9]{3}[-]?[0-9]{2})',arqui)
for dados in regex:
    if dados[1]:  # keep only the CPF capture group; skip matches from the CNPJ pattern
        cpfs.append(dados[1])
with open('cpfs.txt', 'a') as cp:
    for cpf in cpfs:
        cp.write(cpf + '\n')
        print(cpf)
|
from flask_login import UserMixin
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from werkzeug.security import generate_password_hash, check_password_hash
# database & migrations
migrate = Migrate()
db = SQLAlchemy()
class User(UserMixin, db.Model):
"""
User authentication
source: https://hackersandslackers.com/flask-login-user-authentication/
"""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(32))
email = db.Column(db.String(32), unique=True, nullable=False)
password = db.Column(db.String(128), nullable=False)
def set_password(self, password):
"""
Used during registration
"""
self.password = generate_password_hash(password, method='sha256')
def check_password(self, password):
"""
Used on login
"""
return check_password_hash(self.password, password)
|
##
# Copyright: Copyright (c) MOSEK ApS, Denmark. All rights reserved.
#
# File: breaksolver.py
#
# Purpose: Show how to break a long-running task.
##
import sys
import mosek.fusion
from mosek.fusion import *
import random
import threading
import time
def main():
timeout = 5
n = 200 # number of binary variables
m = n // 5 # number of constraints
p = n // 5 # Each constraint picks p variables and requires that exactly half of them are 1
R = random.Random(1234)
print("Build problem...")
##TAG:begin-model
with Model('SolveBinary') as M:
M.setLogHandler(sys.stdout)
x = M.variable("x", n, Domain.binary())
M.objective(ObjectiveSense.Minimize, Expr.sum(x))
L = list(range(n))
for i in range(m):
R.shuffle(L)
M.constraint(Expr.sum(x.pick(L[:p])),Domain.equalsTo(p // 2))
##TAG:end-model
print("Start thread...")
##TAG:begin-create-thread
T = threading.Thread(target = M.solve)
##TAG:end-create-thread
T0 = time.time()
try:
T.start() # optimization now running in background
##TAG:begin-check-condition
# Loop until we get a solution or you run out of patience and press Ctrl-C
while True:
                if not T.is_alive():
print("Solver terminated before anything happened!")
break
                elif time.time() - T0 > timeout:
                    print("Solver terminated due to timeout!")
                    M.breakSolver()
                    break
                time.sleep(0.1)
except KeyboardInterrupt:
print("Signalling the solver that it can give up now!")
M.breakSolver()
##TAG:end-check-condition
##TAG:begin-check-return
finally:
try:
T.join() # wait for the solver to return
except:
pass
##TAG:end-check-return
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import sys
try:
import pyqtgraph
except ImportError:
raise ImportError("Required package `pyqtgraph` not found. "
"Please install it to proceed.")
try:
import fabio
except ImportError:
raise ImportError("Required package `fabio` not found. "
"Please install it to proceed.")
if len(sys.argv)<2:
print("see install.txt for installation instructions.")
setup( name = "atlasccd",
version = "0.1",
packages = ["atlasccd"],
package_dir = {"atlasccd":"lib"},
author = "Carsten Richter",
author_email = "carsten.richter@physik.tu-freiberg.de",
url = "https://github.com/carichte/atlasccd",
description = "functions to open and visualize images from agilents atlas ccd based on fabio and pyqtgraph",
)
|
#=========================================================================
# pisa_jalr_test.py
#=========================================================================
import pytest
import random
import pisa_encoding
from pymtl import Bits
from PisaSim import PisaSim
from pisa_inst_test_utils import *
#-------------------------------------------------------------------------
# gen_basic_test
#-------------------------------------------------------------------------
def gen_basic_test():
return """
# Use r3 to track the control flow pattern
addiu r3, r0, 0
nop
nop
nop
nop
nop
nop
nop
nop
lui r1, %hi[label_a]
ori r1, r0, %lo[label_a]
nop
nop
nop
nop
nop
nop
nop
nop
jalr r2, r1
ori r3, r3, 0b01
nop
nop
nop
nop
nop
nop
nop
nop
label_a:
ori r3, r3, 0b10
# Only the second bit should be set if jump was taken
mtc0 r3, proc2mngr > 0b10
"""
#-------------------------------------------------------------------------
# gen_link_byp_test
#-------------------------------------------------------------------------
def gen_link_byp_test():
return [
gen_jalr_link_byp_test( 5, reset_test_count=True ),
gen_jalr_link_byp_test( 4, reset_test_count=False ),
gen_jalr_link_byp_test( 3, reset_test_count=False ),
gen_jalr_link_byp_test( 2, reset_test_count=False ),
gen_jalr_link_byp_test( 1, reset_test_count=False ),
gen_jalr_link_byp_test( 0, reset_test_count=False ),
]
#-------------------------------------------------------------------------
# gen_src_byp_test
#-------------------------------------------------------------------------
def gen_src_byp_test():
return [
gen_jalr_src_byp_test( 5 ),
gen_jalr_src_byp_test( 4 ),
gen_jalr_src_byp_test( 3 ),
gen_jalr_src_byp_test( 2 ),
gen_jalr_src_byp_test( 1 ),
gen_jalr_src_byp_test( 0 ),
]
#-------------------------------------------------------------------------
# gen_jump_test
#-------------------------------------------------------------------------
def gen_jump_test():
return """
# PC
# Use r3 to track the control flow pattern #
addiu r3, r0, 0 # 0x00000400
#
lui r1, %hi[label_a] # 0x00000404
ori r1, r0, %lo[label_a] # 0x00000408
jalr r30, r1 # j -. # 0x0000040c
# | #
ori r3, r3, 0b000001 # | # 0x00000410
# | #
label_b: # <--+-. #
ori r3, r3, 0b000010 # | | # 0x00000414
addiu r5, r29, 0 # | | # 0x00000418
# | | #
lui r1, %hi[label_c] # | | # 0x0000041c
ori r1, r0, %lo[label_c] # | | # 0x00000420
jalr r28, r1 # j -+-+-. # 0x00000424
# | | | #
ori r3, r3, 0b000100 # | | | # 0x00000428
# | | | #
label_a: # <--' | | #
ori r3, r3, 0b001000 # | | # 0x0000042c
addiu r4, r30, 0 # | | # 0x00000430
# | | #
lui r1, %hi[label_b] # | | # 0x00000434
ori r1, r0, %lo[label_b] # | | # 0x00000438
jalr r29, r1 # j ---' | # 0x0000043c
# | #
ori r3, r3, 0b010000 # | # 0x00000440
# | #
label_c: # <------' #
ori r3, r3, 0b100000 # # 0x00000444
addiu r6, r28, 0 # # 0x00000448
# Only the second bit should be set if jump was taken
mtc0 r3, proc2mngr > 0b101010
# Check the link addresses
mtc0 r4, proc2mngr > 0x00000410
mtc0 r5, proc2mngr > 0x00000440
mtc0 r6, proc2mngr > 0x00000428
"""
#-------------------------------------------------------------------------
# test_basic
#-------------------------------------------------------------------------
@pytest.mark.parametrize( "name,test", [
asm_test( gen_basic_test ),
asm_test( gen_link_byp_test ),
asm_test( gen_src_byp_test ),
asm_test( gen_jump_test ),
])
def test( name, test ):
sim = PisaSim( trace_en=True )
sim.load( pisa_encoding.assemble( test() ) )
sim.run()
|
import queue
import threading
import time
class OrangeBall_Handler:
def __init__(self, queue_size):
#using a queue to store unfiltered data
self.ball_queue = queue.Queue(queue_size)
self.ball_data = queue.Queue(queue_size)
    def put(self, message):
        if self.ball_queue.full():
            # queue is full: drop the oldest entry to make room for the new message
            self.ball_queue.get(timeout=2)
        self.ball_queue.put(message, timeout=2)
    def get(self):
        if self.ball_queue.empty():
            return None
        return self.ball_queue.get(timeout=2)
def loop(self):
while True:
if(self.ball_queue.empty()):
pass
else:
self.ball_queue.get(2) #to be determined
time.sleep(1)
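# Minimal usage sketch (not part of the original module); the message format
# below is hypothetical, any object describing a detected ball would work.
if __name__ == '__main__':
    handler = OrangeBall_Handler(queue_size=10)
    handler.put({'x': 120, 'y': 80, 'radius': 14})
    print(handler.get())  # -> {'x': 120, 'y': 80, 'radius': 14}
    print(handler.get())  # -> None (queue is empty)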
|
from django.contrib import admin
from task_management.models import TaskList
# Register your models here.
admin.site.register(TaskList)
|
# Generated by Django 3.2.3 on 2021-05-29 21:05
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Airport',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(blank=True)),
('municipality', models.CharField(max_length=150)),
('iatacode', models.CharField(max_length=10, verbose_name='IATA Code')),
],
),
migrations.CreateModel(
name='Luggage',
fields=[
('tag_id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True)),
('time_stamp', models.TimeField(default=django.utils.timezone.now, verbose_name='Time Last Scanned')),
('origin_airport', models.CharField(max_length=150)),
('destination_airport', models.CharField(max_length=150)),
('status', models.CharField(choices=[('Checked In', 'Checked In'), ('In Transit', 'In Transit'), ('Arrived At Destination', 'Arrived At Destination'), ('Retrieved', 'Retrieved')], default='Checked In', max_length=30)),
('flagged', models.CharField(choices=[('N', 'N'), ('Y', 'Y')], default='N', max_length=1)),
('digital_signature', models.CharField(choices=[('Awaiting Signature', 'Awaiting Signature'), ('Missing Luggage', 'Missing Luggage'), ('Delayed', 'Delayed'), ('Approved', 'Approved'), ('Disapproved', 'Disapproved')], default='Awaiting Signature', max_length=50)),
],
),
migrations.CreateModel(
name='Blocks',
fields=[
('index', models.IntegerField(primary_key=True, serialize=False)),
('timestamp', models.FloatField()),
('prevHash', models.CharField(max_length=150)),
('nonce', models.IntegerField(default=0)),
('hash_curr', models.CharField(max_length=150)),
('transactions', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='LuggageTrackerWebApp.luggage')),
],
),
]
|
# Generated by Django 3.1.1 on 2020-09-20 15:38
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MovieDetails',
fields=[
('place', models.IntegerField(primary_key=True, serialize=False)),
('movie_title', models.CharField(max_length=255)),
('year', models.IntegerField()),
('star_cast', models.CharField(max_length=255)),
('rating', models.FloatField()),
('vote', models.CharField(max_length=255)),
('link', models.CharField(max_length=255)),
('images', models.CharField(blank=True, max_length=255, null=True)),
],
),
]
|
from flask import Flask, render_template, request
from werkzeug.debug import DebuggedApplication
from marvel_characters import character_info, character_images
import random
app = Flask(__name__)
app.wsgi_app = DebuggedApplication(app.wsgi_app, True)
heroes = character_images.keys()
real_name = character_images.values()
def score_counter(count):
    # increment the score only once it is non-zero; a zero count stays zero
    if count != 0:
        count += 1
    return count
# from jinja2 import Template
# t = '''{% set correct_count=correct_count or 0 %}Count= {{ correct_count }}'''
# template = Template(t)
# print(template.render(correct_count=1))
# print(template.render())
@app.route('/')
def index():
return render_template('index.html', heroes=heroes, character_info=character_info, character_images=character_images)
@app.route('/question')
def show_question():
random_hero = random.choice(list(character_images.keys()))
heroes = character_images.keys()
return render_template('answer.html', random_hero=random_hero, character_images=character_images, heroes=heroes, character_info=character_info)
@app.route('/answer', methods=['POST'])
def show_answer():
random_hero = random.choice(list(character_images.keys()))
answer = request.form['answer']
previous_hero = request.form['random_hero']
previous_hero_real_name = character_info[previous_hero]['Real Name']
right_answers = int(request.form['correct_count'])
if right_answers > 0:
print(score_counter(right_answers))
return render_template('answer.html', random_hero=random_hero, character_images=character_images, heroes=heroes, character_info=character_info, answer=answer, previous_hero=previous_hero, real_name=real_name, previous_hero_real_name=previous_hero_real_name, right_answers=right_answers)
if __name__ == "__main__":
app.config['TRAP_BAD_REQUEST_ERRORS'] = True
app.run(debug=True)
|
from wtforms import Form,StringField,IntegerField
from wtforms.validators import Length,Regexp,EqualTo,ValidationError,InputRequired,Email
from utils.memcached import mc
from apps.models import OrderModel
from .models import UserModel
from flask import g
class Verify_regist(Form):  # front-end user registration validation
    mobile=StringField(validators=[Regexp(r'^1(3|4|5|7|8)\d{9}$',message='Invalid mobile number')])
    code=StringField(validators=[Regexp(r'\d{4}',message='Invalid verification code format')])
    username=StringField(validators=[Length(2,16,message='Username must be 2 to 16 characters')])
    password=StringField(validators=[Regexp(r'\w{6,16}',message='Password must be 6 to 16 characters: digits, letters or underscores only')])
    repassword=StringField(validators=[EqualTo('password',message='The two passwords do not match')])
    def validate_code(self,field):  # check that the verification code is correct
        mobile=self.mobile.data
        if mobile:
            code=mc.get(mobile)
            if not code or code!=field.data:
                raise ValidationError(message='Incorrect verification code')
    def validate_mobile(self,field):  # check whether the mobile number is already registered
        mobile=field.data
        user=UserModel.query.filter_by(mobile=mobile).first()
        if user:
            raise ValidationError(message='This mobile number is already registered')
class Verify_login(Form):  # front-end login validation (mobile + password)
    mobile=StringField(validators=[Regexp(r'^1(3|4|5|7|8)\d{9}$',message='Invalid mobile number')])
    password=StringField(validators=[Regexp(r'\w{6,16}',message='Incorrect password')])
class Verify_code_login(Form):  # front-end login validation via verification code
    mobile=StringField(validators=[Regexp(r'^1(3|4|5|7|8)\d{9}$',message='Invalid mobile number')])
    code=StringField(validators=[Regexp(r'\d{4}',message='Invalid verification code format')])
    def validate_code(self,field):  # check that the verification code is correct
        mobile=self.mobile.data
        if mobile is not None:
            code=mc.get(mobile)
            if not code or code!=field.data:
                raise ValidationError(message='Incorrect verification code')
class Verify_resetpassword(Form):  # password change validation
    password = StringField(validators=[Regexp(r'[0-9a-zA-Z_]{6,16}', message='Password must be 6 to 16 characters: digits, letters or underscores only')])
    newpassword = StringField(validators=[Regexp(r'[0-9a-zA-Z_]{6,16}', message='New password must be 6 to 16 characters: digits, letters or underscores only')])
    newpassword2 = StringField(validators=[EqualTo('newpassword',message='The two passwords do not match')])
class Verify_GenerateOrder(Form):  # order creation validation
    # quantity (number), unit price (price), product id (good_id)
    number=IntegerField(validators=[InputRequired('Quantity is required')])
    price=IntegerField(validators=[InputRequired(message='Price is required')])
    good_id=IntegerField(validators=[InputRequired(message='Product ID is required')])
    address_id=IntegerField(validators=[InputRequired(message='Address is required')])
class Verify_aCart(Form):  # add-to-cart validation
    goods_id=IntegerField(validators=[InputRequired(message='Product id missing')])  # product id
    number=IntegerField(validators=[InputRequired(message='Invalid quantity')])  # Regexp cannot run on integer data, so require the field instead
class Verify_dCart(Form):  # remove item from cart
    types=IntegerField(validators=[InputRequired(message='Invalid type parameter')])
    good_id = IntegerField(validators=[InputRequired(message='Product id missing')])  # product id
class Verify_apost(Form):  # add-post validation
    title=StringField(validators=[InputRequired(message='Title is required')])  # title
    content=StringField(validators=[InputRequired(message='Content is required')])  # content
    img_code=StringField(validators=[Regexp(r'\d{4}',message='Invalid verification code')])  # verification code, kept as a string like the other forms
    def validate_img_code(self,field):
        img_code=field.data
        user_id=g.front_user.id
        code=mc.get(user_id)
        if not code or img_code!=code:
            raise ValidationError(message='Incorrect verification code')
class Verify_refer_Verify(Form):
    order_code=IntegerField(validators=[InputRequired(message='Please enter the order number')])
    passwd=StringField(validators=[InputRequired(message='Please enter the password')])
class Verify_aAddress(Form):
    mobile=StringField(validators=[Regexp(r'^1(3|4|5|7|8)\d{9}$',message='Invalid mobile number')])
    name=StringField(validators=[InputRequired(message='Please enter the recipient name')])
    address=StringField(validators=[InputRequired(message='Please enter the delivery address')])
class Verify_upPersonal(Form):  # user profile update validation
    username=StringField(validators=[Length(min=2,max=50,message='Username must be 2 to 50 characters')])
    intr=StringField(validators=[InputRequired(message='Please enter your signature')])
    email=StringField(validators=[InputRequired(message='Please enter an email'), Email(message='Invalid email format')])  # validate the email format
    def validate_email(self,field):
        user=UserModel.query.filter_by(Email=field.data).first()
        if user:
            raise ValidationError(message='Email already exists')
|
#Sorting Contours
"""
Sorting Contours is quite useful when doing image processing
Sorting by Area can assist in Object Recognition (using contour area)
means firt the shape with largest area will be contoured and then it follows the decresing order
-Eliminate small contours that may be noise
-Extract the largest contour
Sorting by spatial position(using the contour centrouid)
-Sort characters left to right
-Process images in specific order
"""
import cv2
import numpy as np
#Function we'll use to display contour area
def get_contour_areas(contours):
all_areas = []
for cnt in contours:
area = cv2.contourArea(cnt)
all_areas.append(area)
return all_areas
#to load an image
image = cv2.imread('C:/Users/LENOVO IDEAPAD 320/OneDrive/Desktop/Python_Projects/CV2 Learning Codes/Required Images/shapes.jpg')
original_image = image.copy()  # work on a copy so the source image stays unmodified
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#Find Canny edges
edged = cv2.Canny(gray, 30, 200)
cv2.imshow("edged image", edged)
cv2.waitKey(0)
# OpenCV 3.x returns (image, contours, hierarchy); on OpenCV 4.x drop the first value
extra, contours, hierarchy = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
print("Contour areas before sorting")
print(get_contour_areas(contours))
sorted_contours = sorted(contours, key = cv2.contourArea, reverse = True)
print("Contour area after sorting")
print(get_contour_areas(sorted_contours))
#Iterate over our contours, drawing and showing one at a time
for c in sorted_contours:
    cv2.drawContours(original_image, [c], -1, (255, 0, 0), 3)
    cv2.imshow("Contours by area", original_image)
    #waitKey(0) blocks until any key is pressed, stepping to the next contour
    cv2.waitKey(0)
#This closes all windows
cv2.destroyAllWindows()
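#The docstring above also mentions sorting by spatial position; here is a minimal
#sketch (not in the original) that orders contours left to right by centroid x,
#computed from image moments:
def sort_left_to_right(contours):
    def centroid_x(cnt):
        M = cv2.moments(cnt)
        #fall back to 0 for degenerate contours with zero area
        return M['m10'] / M['m00'] if M['m00'] else 0
    return sorted(contours, key=centroid_x)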
|
import prism.settings
from prism.server import Server
from prism.prism import Prism
prism.settings.init()
files_dict = prism.settings.prism.getFiles(\
prism.settings.prism.getVideos()['maluco'])
Server(5000, files_dict).start() |
# curriculum.py
#
# written by yuxq in 2018/9/15. all rights reserved.
class Info:
class_name = ""
holding_school = ""
teacher_name = ""
teacher_title = ""
population = 0
def get_full_teacher_name(self):
return self.teacher_name + " " + self.teacher_title
class Arrangement:
    # Concrete parameters for a single class session.
    # One course may use several weekday/time/classroom combinations within
    # a term, described by an array of Arrangement objects.
    week_day = 0
    # Day of the week: by convention 1-7 mean Monday through Sunday.
    start_lesson = 0
    # first lesson slot of the session
    end_lesson = 0
    # last lesson slot of the session
    classroom = ''
    # classroom where the session is held
    def print_me(self):
        print('\t', end='')
        print(self.week_day)
        print('\t', end='')
        print(self.start_lesson)
        print('\t', end='')
        print(self.end_lesson)
        print('\t', end='')
        print(self.classroom)
        print()
class Curriculum:
    def __init__(self):
        self.odd_week = []
        # class sessions on odd-numbered weeks
        self.even_week = []
        # class sessions on even-numbered weeks
    holder_school = ''
    # department offering the course
    teacher_name = ''
    # teacher name
    teacher_title = ''
    # teacher title
    title_name = ''
    # course name
    identifier = ''
    # unique course identifier
    learn_hour = 0
    # class hours
    credit_score = 0.0
    # credits
    start_week = 0
    # first week of the course
    end_week = 0
    # last week of the course
    notes = ''
    # remarks
    target_grade = 0
    # target student grade/year
    school_year = 0
    # academic year
    term = 0
    # term
    student_number = 0
    # number of students
def related_rooms(self):
classrooms = []
for i in self.odd_week:
if not i.classroom in classrooms:
classrooms.append(i.classroom)
for i in self.even_week:
if not i.classroom in classrooms:
classrooms.append(i.classroom)
return classrooms
def print_me(self):
print(self.title_name)
print(self.teacher_name)
print(self.teacher_title)
print(self.holder_school)
print(self.identifier)
print(self.learn_hour)
print(self.credit_score)
for i in self.odd_week:
i.print_me()
for i in self.even_week:
i.print_me()
print(self.notes)
print(self.target_grade)
print(self.school_year)
print(self.term)
print(self.student_number)
print()
|
# DEEP BELIEF NETWORK (DBN)
'''
One problem with traditional multilayer perceptrons/artificial neural networks is that backpropagation can
often lead to "local minima". This happens when your "error surface" contains multiple grooves and, as you
perform gradient descent, you fall into a groove that is not the lowest one possible.
Deep belief networks address this problem with an extra step called pre-training. Pre-training is done
before backpropagation and can lead to an error rate not far from optimal. This puts us in the "neighborhood"
of the final solution. Then we use backpropagation to slowly reduce the error rate from there.
DBNs can be divided into two major parts. The first is a stack of Restricted Boltzmann Machines (RBMs)
used to pre-train the network. The second is a feed-forward backpropagation network that further refines
the results from the RBM stack.
'''
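# Overview of the pipeline implemented below (added for orientation):
#   1. greedily pre-train a stack of RBMs, feeding each RBM's output
#      into the next RBM as its input;
#   2. copy the learned RBM weights/biases into a feed-forward network;
#   3. fine-tune that network with ordinary backpropagation on the labels.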
import math
import tensorflow as tf
import numpy as np
from PIL import Image
import utils1 # downloaded in same folder from: http://deeplearning.net/tutorial/code/utils.py
from utils1 import tile_raster_images
## implement a class to create Restricted Boltzmann Machines (RBM)s to be used in DBN
#Class that defines the behavior of the RBM
class RBM(object):
def __init__(self, input_size, output_size):
#Defining the hyperparameters
self._input_size = input_size #Size of input
self._output_size = output_size #Size of output
self.epochs = 5 #Amount of training iterations
self.learning_rate = 1.0 #The step used in gradient descent
self.batchsize = 100 #The size of how much data will be used for training per sub iteration
#Initializing weights and biases as matrices full of zeroes
self.w = np.zeros([input_size, output_size], np.float32) #Creates and initializes the weights with 0
self.hb = np.zeros([output_size], np.float32) #Creates and initializes the hidden biases with 0
self.vb = np.zeros([input_size], np.float32) #Creates and initializes the visible biases with 0
#Fits the result from the weighted visible layer plus the bias into a sigmoid curve
def prob_h_given_v(self, visible, w, hb):
#Sigmoid
return tf.nn.sigmoid(tf.matmul(visible, w) + hb)
#Fits the result from the weighted hidden layer plus the bias into a sigmoid curve
def prob_v_given_h(self, hidden, w, vb):
return tf.nn.sigmoid(tf.matmul(hidden, tf.transpose(w)) + vb)
#Generate the sample probability
def sample_prob(self, probs):
return tf.nn.relu(tf.sign(probs - tf.random_uniform(tf.shape(probs))))
#Training method for the model
def train(self, X):
#Create the placeholders for our parameters
_w = tf.placeholder("float", [self._input_size, self._output_size])
_hb = tf.placeholder("float", [self._output_size])
_vb = tf.placeholder("float", [self._input_size])
prv_w = np.zeros([self._input_size, self._output_size], np.float32) #Creates and initializes the weights with 0
prv_hb = np.zeros([self._output_size], np.float32) #Creates and initializes the hidden biases with 0
prv_vb = np.zeros([self._input_size], np.float32) #Creates and initializes the visible biases with 0
cur_w = np.zeros([self._input_size, self._output_size], np.float32)
cur_hb = np.zeros([self._output_size], np.float32)
cur_vb = np.zeros([self._input_size], np.float32)
v0 = tf.placeholder("float", [None, self._input_size])
#Initialize with sample probabilities
h0 = self.sample_prob(self.prob_h_given_v(v0, _w, _hb))
v1 = self.sample_prob(self.prob_v_given_h(h0, _w, _vb))
h1 = self.prob_h_given_v(v1, _w, _hb)
#Create the Gradients
positive_grad = tf.matmul(tf.transpose(v0), h0)
negative_grad = tf.matmul(tf.transpose(v1), h1)
#Contrastive Divergence updates for the weights and biases
update_w = _w + self.learning_rate *(positive_grad - negative_grad) / tf.to_float(tf.shape(v0)[0])
update_vb = _vb + self.learning_rate * tf.reduce_mean(v0 - v1, 0)
update_hb = _hb + self.learning_rate * tf.reduce_mean(h0 - h1, 0)
#Find the error rate
err = tf.reduce_mean(tf.square(v0 - v1))
#Training loop
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
#For each epoch
for epoch in range(self.epochs):
#For each step/batch
for start, end in zip(range(0, len(X), self.batchsize),range(self.batchsize,len(X), self.batchsize)):
batch = X[start:end]
#Update the rates
cur_w = sess.run(update_w, feed_dict={v0: batch, _w: prv_w, _hb: prv_hb, _vb: prv_vb})
cur_hb = sess.run(update_hb, feed_dict={v0: batch, _w: prv_w, _hb: prv_hb, _vb: prv_vb})
cur_vb = sess.run(update_vb, feed_dict={v0: batch, _w: prv_w, _hb: prv_hb, _vb: prv_vb})
prv_w = cur_w
prv_hb = cur_hb
prv_vb = cur_vb
error = sess.run(err, feed_dict={v0: X, _w: cur_w, _vb: cur_vb, _hb: cur_hb})
print ('Epoch: %d' % epoch,'reconstruction error: %f' % error)
self.w = prv_w
self.hb = prv_hb
self.vb = prv_vb
#Create expected output for our DBN
def rbm_outpt(self, X):
input_X = tf.constant(X)
_w = tf.constant(self.w)
_hb = tf.constant(self.hb)
out = tf.nn.sigmoid(tf.matmul(input_X, _w) + _hb)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
return sess.run(out)
##
'''
since 'from tensorflow.examples.tutorials.mnist import input_data' is deprecated in newer versions of TF,
we had to download the old 'input_data.py', put it in the same folder as the project, import that, and use it
on the mnist dataset downloaded under MNIST_data folder
new version shows this as proper way (https://www.tensorflow.org/tutorials/quickstart/advanced) :
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
...
...
'''
import input_data
## MNIST
#The one-hot = True argument only means that, in contrast to Binary representation, the labels will be
#presented in a way that to represent a number N, the Nth bit is 1 while the other bits are 0
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
##
## create the DBN
'''
For our example, we are going to use 3 RBMs: one with 500 hidden units, the second with 200 and the last
with 50. We are generating a deep hierarchical representation of the training data.
'''
RBM_hidden_sizes = [500, 200 , 50 ] #create 3 layers of RBM with size 500, 200 and 50
#Since we are training, set input as training data
inpX = trX
#Create list to hold our RBMs
rbm_list = []
#Size of inputs is the number of inputs in the training set
input_size = inpX.shape[1]
#For each RBM we want to generate
for i, size in enumerate(RBM_hidden_sizes):
print ('RBM: ',i,' ',input_size,'->', size)
rbm_list.append(RBM(input_size, size))
input_size = size
print()
##
## RBM train
'''
We will now begin the pre-training step and train each of the RBMs in our stack by individually calling
the train function, getting the current RBM's output and using it as the next RBM's input.
'''
#For each RBM in our list
for rbm in rbm_list:
print ('New RBM:')
#Train a new one
rbm.train(inpX)
#Return the output layer
inpX = rbm.rbm_outpt(inpX)
print()
##
## construct the neural network
'''
Now we can convert the learned representation of the input data into a supervised prediction, e.g. with a linear classifier.
Specifically, we use the output of the last hidden layer of the DBN to classify digits using a shallow neural network.
'''
class NN(object):
def __init__(self, sizes, X, Y):
#Initialize hyperparameters
self._sizes = sizes
self._X = X
self._Y = Y
self.w_list = []
self.b_list = []
self._learning_rate = 1.0
self._momentum = 0.0
self._epoches = 10
self._batchsize = 100
input_size = X.shape[1]
#initialization loop
for size in self._sizes + [Y.shape[1]]:
#Define upper limit for the uniform distribution range
max_range = 4 * math.sqrt(6. / (input_size + size))
#Initialize weights through a random uniform distribution
self.w_list.append(
np.random.uniform( -max_range, max_range, [input_size, size]).astype(np.float32))
#Initialize bias as zeroes
self.b_list.append(np.zeros([size], np.float32))
input_size = size
#load data from rbm
def load_from_rbms(self, dbn_sizes,rbm_list):
#Check if expected sizes are correct
assert len(dbn_sizes) == len(self._sizes)
for i in range(len(self._sizes)):
#Check if for each RBN the expected sizes are correct
assert dbn_sizes[i] == self._sizes[i]
#If everything is correct, bring over the weights and biases
for i in range(len(self._sizes)):
self.w_list[i] = rbm_list[i].w
self.b_list[i] = rbm_list[i].hb
#Training method
def train(self):
#Create placeholders for input, weights, biases, output
_a = [None] * (len(self._sizes) + 2)
_w = [None] * (len(self._sizes) + 1)
_b = [None] * (len(self._sizes) + 1)
_a[0] = tf.placeholder("float", [None, self._X.shape[1]])
y = tf.placeholder("float", [None, self._Y.shape[1]])
#Define variables and activation function
for i in range(len(self._sizes) + 1):
_w[i] = tf.Variable(self.w_list[i])
_b[i] = tf.Variable(self.b_list[i])
for i in range(1, len(self._sizes) + 2):
_a[i] = tf.nn.sigmoid(tf.matmul(_a[i - 1], _w[i - 1]) + _b[i - 1])
#Define the cost function
cost = tf.reduce_mean(tf.square(_a[-1] - y))
#Define the training operation (Momentum Optimizer minimizing the Cost function)
train_op = tf.train.MomentumOptimizer(self._learning_rate, self._momentum).minimize(cost)
#Prediction operation
predict_op = tf.argmax(_a[-1], 1)
#Training Loop
with tf.Session() as sess:
#Initialize Variables
sess.run(tf.global_variables_initializer())
#For each epoch
for i in range(self._epoches):
#For each step
for start, end in zip(range(0, len(self._X), self._batchsize), range(self._batchsize, len(self._X), self._batchsize)):
#Run the training operation on the input data
sess.run(train_op, feed_dict={ _a[0]: self._X[start:end], y: self._Y[start:end]})
for j in range(len(self._sizes) + 1):
#Retrieve weights and biases
self.w_list[j] = sess.run(_w[j])
self.b_list[j] = sess.run(_b[j])
print ("Accuracy rating for epoch " + str(i) + ": " + str(np.mean(np.argmax(self._Y, axis=1) == sess.run(predict_op, feed_dict={_a[0]: self._X, y: self._Y}))))
print()
##
## execute the code
nNet = NN(RBM_hidden_sizes, trX, trY)
nNet.load_from_rbms(RBM_hidden_sizes,rbm_list)
nNet.train()
##
|
"""
Implement Binary Search: given a sorted array and a target, return the index of the target in the array, or None if the target is not in the array.
Compare the target to the middle element of the array.
If target equals the middle element, return the index of the middle element.
If the target is less than the middle element, then restrict to the lower half of the array
If the target is greater than the middle element, restrict to the upper half.
Repeat on the restricted portion.
If restricted array becomes empty, fail.
"""
def binary_search(array, target):
left = 0
right = len(array)-1
while left<=right:
mid = (left+right)//2
if target == array[mid]:
return mid
elif target<array[mid]:
right = mid-1
else:
left = mid+1
return None
a = [8, 13, 14, 26, 27, 28, 39, 50, 60, 61, 69, 73, 88, 93, 99]
print(binary_search(a, 8)) # 0
print(binary_search(a, 39)) # 6
print(binary_search(a, 99)) #14
print(binary_search(a, 10)) # None
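# For reference (not in the original): Python's standard bisect module performs
# the same search; bisect_left returns the insertion point, so an equality
# check recovers the index-or-None behavior implemented above.
from bisect import bisect_left
def binary_search_stdlib(array, target):
    i = bisect_left(array, target)
    return i if i < len(array) and array[i] == target else None
print(binary_search_stdlib(a, 39))  # 6
print(binary_search_stdlib(a, 10))  # None
|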
from django.test import TestCase
from .models import Meeting, Resource, Meetingminutes, Event
from .views import getResource, getMeeting
from django.urls import reverse
from django.contrib.auth.models import User
# Create your tests here.
class MeetingTest(TestCase):
def test_string(self):
type=Meeting(meetingtitle="environment")
self.assertEqual(str(type), type.meetingtitle)
def test_table(self):
self.assertEqual(str(Meeting._meta.db_table), 'Meeting')
class EventTest(TestCase):
def test_string(self):
type=Event(eventtitle="official")
self.assertEqual(str(type), type.eventtitle)
def test_table(self):
self.assertEqual(str(Event._meta.db_table), 'Event')
class MeetingminuteTest(TestCase):
def test_string(self):
type=Meetingminutes(minutestext="official")
self.assertEqual(str(type), type.minutestext)
def test_table(self):
self.assertEqual(str(Meetingminutes._meta.db_table), 'Meetingminutes')
class ResourceTest(TestCase):
def test_string(self):
type=Resource(resourcename="official")
self.assertEqual(str(type), type.resourcename)
def test_table(self):
self.assertEqual(str(Resource._meta.db_table), 'Resource')
class GetResourceTest(TestCase):
def test_view_url_accessible_by_name(self):
response = self.client.get(reverse('getresource'))
self.assertEqual(response.status_code, 200)
class GetMeetingTest(TestCase):
def setUp(self):
self.meet = Meeting.objects.create(meetingtitle='environment',
meetingtime=100,
location='Seattle',
meetingdate='2019-04-02',
agenda='ASD')
def test_view_url_accessible_by_name(self):
response = self.client.get(reverse('getmeeting'))
self.assertEqual(response.status_code, 200)
def test_meeting_detail_success(self):
response = self.client.get(reverse('meetingdetails', args=(self.meet.id,)))
# Assert that self.post is actually returned by the post_detail view
self.assertEqual(response.status_code, 200)
|
import datetime
from sqlalchemy import func
from model import User, Meal, mealMedia, mealType, connect_to_db, db
from model import app
def load_users(user_filename):
    for i, row in enumerate(open(user_filename)):
        row = row.rstrip()
        user_id, first_name, last_name, email, password = row.split("|")
        user = User(user_id=user_id,
                    first_name=first_name,
                    last_name=last_name,
                    email=email,
                    password=password)
        db.session.add(user)
    db.session.commit()
def load_meals(meal_filename):
    for i, row in enumerate(open(meal_filename)):
        row = row.rstrip()
        # parenthesize the unpacking so the statement can span two lines
        (meal_id, user_id, name, description, start_time, end_time, location,
         longitude, latitude, meal_type_id) = row.split("|")
        meal = Meal(meal_id=meal_id,
                    user_id=user_id,
                    name=name,
                    description=description,
                    start_time=start_time,
                    end_time=end_time,
                    location=location,
                    longitude=longitude,
                    latitude=latitude,
                    meal_type_id=meal_type_id)
        db.session.add(meal)
    db.session.commit()
def load_meal_media(meal_media_filename):
    for i, row in enumerate(open(meal_media_filename)):
        row = row.rstrip()
        meal_media_id, meal_id, media = row.split("|")
        meal_media = mealMedia(meal_media_id=meal_media_id,
                               meal_id=meal_id,
                               media=media)
        db.session.add(meal_media)
    db.session.commit()
def load_meal_type(meal_type_filename):
    for i, row in enumerate(open(meal_type_filename)):
        row = row.rstrip()
        meal_type_id, title = row.split("|")
        meal_type = mealType(meal_type_id=meal_type_id, title=title)
        db.session.add(meal_type)
    db.session.commit()
def set_val_user_id():
"""Set value for the next user_id after seeding database"""
# Get the Max user_id in the database
result = db.session.query(func.max(User.user_id)).one()
max_id = int(result[0])
# Set the value for the next user_id to be max_id + 1
query = "SELECT setval('users_user_id_seq', :new_id)"
db.session.execute(query, {'new_id': max_id + 1})
db.session.commit()
if __name__ == "__main__":
    connect_to_db(app)
    db.create_all()
    # seed file paths; only user_data appeared in the original, the rest are assumed
    user_filename = "seed_data/user_data"
    meal_filename = "seed_data/meal_data"
    meal_media_filename = "seed_data/meal_media_data"
    meal_type_filename = "seed_data/meal_type_data"
    load_users(user_filename)
    set_val_user_id()
    load_meals(meal_filename)
    load_meal_media(meal_media_filename)
    load_meal_type(meal_type_filename)
|
from fastapi import Depends, FastAPI
from fastapi_etag import add_exception_handler
from sqlalchemy.orm import Session
from . import models, schemas, deps
from .database import engine
from .routers import orders
models.Base.metadata.create_all(bind=engine)
app = FastAPI(
title="Restbucks",
description="An API for taking orders in a coffee shop"
)
add_exception_handler(app)
app.include_router(orders.router)
# Obviously, don't do this in real life
@app.post("/seed")
def seed_database(db: Session = Depends(deps.get_db)):
db_milks = [models.Milk(name=milk.value) for milk in schemas.MilkEnum]
db_sizes = [models.Size(name=size.value) for size in schemas.SizeEnum]
db_shots = [models.EspressoShot(name=shot.value)
for shot in schemas.EspressoShotEnum]
db_locations = [models.ConsumeLocation(
name=location.value) for location in schemas.ConsumeLocationEnum]
db_statuses = [models.OrderStatus(name=status.value)
for status in schemas.OrderStatusEnum]
db_products = [models.Product(name=product.value)
for product in schemas.ProductEnum]
db.add_all(db_milks)
db.add_all(db_sizes)
db.add_all(db_shots)
db.add_all(db_locations)
db.add_all(db_statuses)
db.add_all(db_products)
db.commit()
return True
|
import networkx as nx
from networkx.drawing.nx_pydot import read_dot
G = nx.fast_gnp_random_graph(20,2/20.,directed=True)
print(G)
print(list(G.nodes()))
#nx.relabel_nodes(G,{u:int(u)-1 for u in G.nodes()},copy=False)
print(list(G.nodes()))
from networkx.readwrite import json_graph
import simplejson as json
data1 = nx.node_link_data(G)
with open('data.json','w') as f:
json.dump(data1,f,iterable_as_array=True)
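# Round-trip sketch (not in the original): the node-link JSON written above can
# be loaded back into a graph with networkx's json_graph helpers.
with open('data.json') as f:
    G2 = json_graph.node_link_graph(json.load(f))
print(G2.number_of_nodes(), G2.number_of_edges())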
|
import signal
import torch
from factory import create_scheduler, create_callbacks, create_model, create_loss, create_optimizer, \
create_train_dataloader, create_val_dataloader, create_device, create_metrics
from callbacks import Callback, StopAtStep
import logging
from collections import OrderedDict
from itertools import chain
from utils.utils import set_determenistic, flatten_dict, loss_to_dict
from accelerate import Accelerator, GradScalerKwargs
import copy
logger = logging.getLogger(__name__)
class State:
def __init__(self):
self.step = 0
self.last_train_loss = None
self.validation_metrics = dict()
def get(self, attribute_name: str):
return getattr(self, attribute_name)
def load_state_dict(self, state_dict):
for k, v in state_dict.items():
setattr(self, k, v)
def state_dict(self):
return self.__dict__
def add_attribute(self, name, value):
if not hasattr(self, name):
setattr(self, name, value)
def add_validation_metric(self, name, value):
self.validation_metrics[name] = value
def reset(self):
self.step = 0
self.last_train_loss = None
def update(self, loss_dict=None):
self.step += 1
if loss_dict is not None:
self.last_train_loss = flatten_dict(loss_dict)
def log_train(self):
msg = f'Step - {self.step} '
for name, value in self.last_train_loss.items():
msg += f'{name} - {value:.7f} '
logger.info(msg)
def log_validation(self):
msg = 'Validation '
for name, value in self.validation_metrics.items():
msg += f'{name} - {value:.7f} '
logger.info(msg)
class Trainer:
def __init__(self, cfg):
signal.signal(signal.SIGINT, self._soft_exit)
set_determenistic()
self.train_dataloader_dict = create_train_dataloader(cfg)
self.val_dataloader_dict = create_val_dataloader(cfg)
self.state = State()
self.criterion = create_loss(cfg)
self.model = create_model(cfg)
self.optimizer = create_optimizer(cfg, self.model)
self.scheduler = create_scheduler(cfg, self.optimizer)
self.n_steps = cfg.n_steps
self.accumulation_steps = cfg.desired_bs // cfg.bs
self.stop_condition = StopAtStep(last_step=self.n_steps)
self.callbacks = OrderedDict()
self.metrics = create_metrics(cfg)
create_callbacks(cfg, self)
self._check_frequencies()
self.cfg = cfg
self.stop_validation = False
        self.grad_scaler_kwargs = GradScalerKwargs(init_scale=2048, enabled=cfg.amp)
        # pass the scaler settings through to Accelerator; previously the
        # GradScalerKwargs object was created but never used
        self.accelerator = Accelerator(cpu=bool(cfg.device == 'cpu'), fp16=cfg.amp,
                                       kwargs_handlers=[self.grad_scaler_kwargs])
self.model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer)
self.train_dataloader_dict, self.val_dataloader_dict = \
self.prepare_dataloader_dict(self.train_dataloader_dict, self.val_dataloader_dict)
def get_train_batch(self):
if not getattr(self, 'train_data_iter', False):
self.train_data_iter = chain.from_iterable(
iter(train_dataloader['dataloader']) for _, train_dataloader in self.train_dataloader_dict.items()
)
try:
batch = next(self.train_data_iter)
except StopIteration:
self.train_data_iter = chain.from_iterable(
iter(train_dataloader['dataloader']) for _, train_dataloader in self.train_dataloader_dict.items()
)
batch = next(self.train_data_iter)
return batch
def run_step(self, batch):
if self.accumulation_steps == 1 or (self.state.step + 1) % self.accumulation_steps == 1:
self.optimizer.zero_grad()
inputs = batch['input']
targets = batch['target']
outputs = self.model(inputs)
loss_dict = self.criterion(outputs, targets)
loss_dict = loss_to_dict(loss_dict)
loss = loss_dict['loss'] / self.accumulation_steps
self.accelerator.backward(loss)
if (self.state.step + 1) % self.accumulation_steps == 0:
self.optimizer.step()
if self.scheduler is not None:
self.scheduler.step()
loss_dict['loss'] = loss_dict['loss'].detach()
return loss_dict
def run_train(self, n_steps=None):
if n_steps is not None:
self.stop_condition = StopAtStep(last_step=n_steps)
self.state.reset()
self.model.train()
self._before_run_callbacks()
while not self.stop_condition(self.state):
batch = self.get_train_batch()
loss = self.run_step(batch)
self.state.update(loss)
self._run_callbacks()
self._after_run_callbacks()
logger.info('Done')
def evaluate(self, dataloader=None, metrics=None):
if dataloader is None:
dataloader = chain.from_iterable(
iter(dataloader['dataloader']) for _, dataloader in self.val_dataloader_dict.items()
)
if metrics is None:
metrics = self.metrics
previous_training_flag = self.model.training
self.model.eval()
for metric in metrics:
metric.reset()
with torch.no_grad():
for batch in dataloader:
if self.stop_validation:
break
input_tensor = batch['input']
target_tensor = batch['target']
outputs = self.model(input_tensor)
for metric in metrics:
metric.step(y=target_tensor, y_pred=outputs)
metrics_computed = {metric.name: metric.compute() for metric in metrics}
self.model.train(previous_training_flag)
return flatten_dict(metrics_computed)
def prepare_dataloader_dict(self, *args):
result = list()
for dataloader_dict in args:
d = copy.deepcopy(dataloader_dict)
for name in d:
d[name]['dataloader'] = self.accelerator.prepare(d[name]['dataloader'])
result.append(d)
return tuple(result)
def _check_frequencies(self):
if 'ValidationCallback' in self.callbacks:
assert self.callbacks['ValidationCallback'].frequency % self.accumulation_steps == 0, \
'accumulation_steps must be divisor of ValidationCallback frequency'
if 'TensorBoardCallback' in self.callbacks:
assert self.callbacks['TensorBoardCallback'].frequency % self.accumulation_steps == 0, \
'accumulation_steps must be divisor of TensorboardCallback frequency'
def register_callback(self, callback: Callback):
callback.set_trainer(self)
callback_name = callback.__class__.__name__
self.callbacks[callback_name] = callback
def _soft_exit(self, sig, frame):
logger.info('Soft exit... Currently running steps will be finished')
self.stop_condition = lambda state: True
self.stop_validation = True
def _before_run_callbacks(self):
for name, callback in self.callbacks.items():
callback.before_run(self)
def _after_run_callbacks(self):
for name, callback in self.callbacks.items():
callback.after_run(self)
def _run_callbacks(self):
for name, callback in self.callbacks.items():
freq = callback.frequency
if freq != 0 and self.state.step % freq == 0:
callback(self)
|
from __future__ import print_function
import os
import tensorflow as tf
import gym
from alg.PeterKovacs.ddpg import DDPG
import numpy as np
BASE_PATH = '../out/tests/'
RANDOM_SEED = 2016
def launch(proc, env_name, episodes=125000, steps=None, save_every_episodes=100, reuse_weights=False):
def func_name():
import traceback
return traceback.extract_stack(None, 4)[0][2]
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
env = gym.make(env_name)
if steps is None:
steps = env.spec.timestep_limit
np.random.seed(RANDOM_SEED)
tf.set_random_seed(RANDOM_SEED)
env.seed(RANDOM_SEED)
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
obs_box = [env.observation_space.low, env.observation_space.high]
act_box = [env.action_space.low, env.action_space.high]
path = os.path.join(BASE_PATH, func_name())
if not reuse_weights and os.path.exists(path):
import shutil
shutil.rmtree(path)
if not os.path.exists(path):
os.makedirs(path)
agent = DDPG(sess, env.spec.id, obs_dim, obs_box, act_dim, act_box, path)
if proc == 'train':
agent.train(env, episodes, steps, save_every_episodes)
elif proc == 'run':
agent.run(env, episodes, steps)
env.close()
def run(env_name, episodes=1000, steps=None):
launch("run", env_name, episodes, steps, save_every_episodes=0, reuse_weights=True)
def train(env_name, episodes=125000, steps=None, save_every_episodes=100, reuse_weights=False):
launch("train", env_name, episodes, steps, save_every_episodes, reuse_weights)
# =================================================================================================================
# PASSED
def Pendulum():
env = 'Pendulum-v0'
train(env, episodes=300, steps=100, save_every_episodes=50)
# run(env, steps=100)
# PASSED
def InvertedDoublePendulum():
env = 'InvertedDoublePendulum-v1'
# train(env)
run(env)
def Tentacle():
env = 'Tentacle-v0'
train(env)
# run(env)
# FAIL
def Reacher():
env = 'Reacher-v1'
train(env)
# run(env)
def BipedalWalker():
env = 'BipedalWalker-v2'
train(env)
# run(env)
def Swimmer():
env = 'Swimmer-v1'
train(env)
# run(env)
def Ant():
env = 'Ant-v1'
train(env, reuse_weights=True)
# run(env)
def HumanoidStandup():
env = 'HumanoidStandup-v1'
train(env)
# run(env)
# LunarLanderContinuous-v2
if __name__ == '__main__':
Pendulum()
# InvertedDoublePendulum()
# Tentacle()
# Swimmer()
# BipedalWalker()
# Reacher()
# Ant()
# HumanoidStandup()
|
GOOGLE_MAPS_KEY = 'AIzaSyB4tmlZXpBLzNF2x9Am6RjL5jOsIUwujd8'
|
from collections import namedtuple
import numpy as np
from colosseum.games import GameTracker
Move = namedtuple('Move', ['player', 'horizontal', 'row', 'col'])
class ImmutableArray:
"""
A wrapper for a numpy array that exposes accesses but not modifications.
There will be ways to circumvent this through __getitem__ if the array is
multidimensional. However, you can always just modify self._a since python
has no formal access control. The purpose of this class is simply to
*discourage* modifications.
"""
def __init__(self, a:np.ndarray):
self._a = a
def __getitem__(self, i):
return self._a[i]
def __iter__(self):
return iter(self._a)
def __len__(self):
return len(self._a)
def __str__(self):
return str(self._a)
@property
def shape(self):
return self._a.shape
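# Quick illustration (not in the original): reads work normally, while a naive
# write raises TypeError because __setitem__ is intentionally not defined.
def _demo_immutable_array():
    ia = ImmutableArray(np.zeros(3))
    assert ia[0] == 0.0 and len(ia) == 3
    try:
        ia[0] = 1.0
    except TypeError:
        pass  # expected: the wrapper does not support item assignment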
class DnBTracker(GameTracker):
    def __init__(self, playerid:int=-1, n:int=5, **kwargs):
        """
        params:
            n:int=5 - Side length of the board
            playerid:int=-1 - The id of the player that created this tracker.
                If the tracker was created by the host, the id is -1 (default).
        """
        super().__init__(n_players=2)
assert n>1, f'n must be greater than 1 (given {n})!'
self._n = n
self._hlines = np.zeros(shape=(self._n, self._n-1), dtype=np.int32)
self._vlines = np.zeros(shape=(self._n-1, self._n), dtype=np.int32)
self._boxes = -np.ones(shape=(self._n-1, self._n-1), dtype=np.int32)
self._moves = 0
self._playerid = playerid
self._latest_move = None
self._turn = 0
# Getters
@property
def n(self)->int:
return self._n
@property
def playerid(self)->int:
return self._playerid
@property
def latest_move(self)->Move:
return self._latest_move
@property
def moves(self)->int:
return self._moves
@property
def whose_turn(self)->int:
return self._turn
@property
    def is_done(self)->bool:
        # an n x n board of dots has n*(n-1) horizontal plus n*(n-1) vertical edges
        return self._moves >= 2*self._n*(self._n-1)
@property
def hlines(self)->ImmutableArray:
"""
Array containing the state of the horizontal lines (1 if filled in, 0
otherwise)
"""
return ImmutableArray(self._hlines)
@property
def vlines(self)->ImmutableArray:
"""
Array containing the state of the vertical lines (1 if filled in, 0
otherwise)
"""
return ImmutableArray(self._vlines)
@property
def boxes(self)->ImmutableArray:
"""
Array containing the state of the boxes --- captured or not. Each box
is -1 if it is uncaptured and if the box is captured, it will contain
the id of the player that captured it
"""
return ImmutableArray(self._boxes)
# Behavior
    def edges_left(self, row:int, col:int)->int:
        """
        Returns the number of edges left to capture box (row, col), or -1 if
        (row, col) is not a valid box.
        """
        # boxes form an (n-1) x (n-1) grid
        if not (0 <= row < self._n-1 and 0 <= col < self._n-1):
            return -1
        return 4 - (self._hlines[row][col] + self._hlines[row+1][col]
                    + self._vlines[row][col] + self._vlines[row][col+1])
    def check_move(self, horizontal:bool, row:int, col:int)->bool:
        """
        Returns True if the move is valid
        """
        # Check bounds: hlines has shape (n, n-1) and vlines has shape (n-1, n)
        if horizontal:
            if not (0 <= row < self._n) or not (0 <= col < self._n-1):
                return False
        else:
            if not (0 <= row < self._n-1) or not (0 <= col < self._n):
                return False
        a = self._hlines if horizontal else self._vlines
        return a[row][col] == 0
def make_move(self, horizontal:bool, row:int, col:int):
"""
params:
horizontal:bool - True if the move is a horizontal line
row:int, col:int - The position of the move
"""
assert self.check_move(horizontal, row, col), \
f'The move {"h" if horizontal else "v"}{row},{col} is invalid!'
return {'horizontal': horizontal, 'row':row, 'col':col}
def update(self, player:int, horizontal:bool, row:int, col:int):
"""
params:
player:int - The id of the player that made the move
horizontal:bool - True if the move was horizontal
row:int, col:int - The position of the move
"""
assert self.check_move(horizontal, row, col), \
f'The move {"h" if horizontal else "v"}{row} {col} is invalid!'
a = self._hlines if horizontal else self._vlines
a[row][col] = 1
pts_gained = 0
if horizontal:
otherrow = row-1
othercol = col
else:
otherrow = row
othercol = col-1
if self.edges_left(row, col) == 0:
pts_gained += 1
self._boxes[row, col] = player
if self.edges_left(otherrow, othercol) == 0:
pts_gained += 1
self._boxes[otherrow, othercol] = player
self.points[self._turn] += pts_gained
if not pts_gained:
self._turn = (self._turn + 1)%self._n_players
self._moves += 1
self._latest_move = Move(player, horizontal, row, col)
def render(self)->str:
"""
Renders a string representation of the board for printing
"""
# Start with the board
hline = '---'
vline = ' | '
dot = ' • '
blank = ' '
def v2l(val, line):
return line if val == 1 else blank
lines1 = []
lines2 = []
# Prepare all even rows with horizontal lines
for line in self._hlines:
lines1.append(dot + dot.join(v2l(c, hline) for c in line) + dot)
# Prepare all odd rows with the vertical lines
for line, boxes in zip(self._vlines, self._boxes):
s = ''
for col, box in zip(line, boxes):
s += v2l(col, vline) + (blank if box < 0 else f' {box} ')
lines2.append(s)
lines = [lines1[0],]
for l1, l2 in zip(lines1[1:], lines2):
lines.append(l2)
lines.append(l1)
board = '\n'.join(lines)
# Now for the status
status = f'| Score: {self.points[0]}-{self.points[1]} Player ' \
f'{self._turn}\'s turn |'
bars = f'+{"-"*(len(status)-2)}+'
        return '\n'.join([board, bars, status, bars])
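# Minimal usage sketch (not in the original; assumes the GameTracker base class
# initializes self.points and self._n_players): make_move validates and packages
# a move, and update applies it to the board.
if __name__ == '__main__':
    tracker = DnBTracker(n=3)
    move = tracker.make_move(horizontal=True, row=0, col=0)
    tracker.update(player=0, **move)
    print(tracker.render())
|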
# WRITTEN BY MILO HARTSOE (SOME CODE USED FROM STACKOVERFLOW)
from __future__ import print_function
import string
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import numpy as np
import matplotlib.image as mpimg
file_prefix = 'letter_data_'
width = 6
height = 8
def char_to_pixels(text, path='arialbd.ttf', fontsize=14):
"""
Based on https://stackoverflow.com/a/27753869/190597 (jsheperd)
"""
font = ImageFont.truetype(path, fontsize)
w, h = font.getsize(text)
h *= 2
image = Image.new('L', (w, h), 1)
draw = ImageDraw.Draw(image)
draw.text((0, 0), text, font=font)
arr = np.asarray(image)
arr = np.where(arr, 0, 1)
arr = arr[(arr != 0).any(axis=1)]
return arr
def display(arr):
result = np.where(arr, '#', ' ')
print('\n'.join([''.join(row) for row in result]))
def char_to_pixels_from_image(text):
    try:
        img = Image.open(str(ord(text)) + '.png')
        arr = np.zeros((height, width))
        for i in range(img.size[0]):
            for j in range(img.size[1]):
                # threshold the alpha channel to get a binary pixel
                arr[j, i] = (255 / 2) < img.getpixel((i, j))[3]
        return arr
    except IOError:
        # no bitmap available for this character code
        print()
        return [[]]
print(file_prefix + 'lookup_letter', end='')
for letter in range(128):
print((' ' * 37) + file_prefix + str(letter))
print()
print()
for letter in range(128):
# arr = char_to_pixels(
# c,
# path='/usr/share/fonts/truetype/liberation/LiberationSerif-Bold.ttf',
# fontsize=9)
# padded_arr = np.zeros((width, height))
# padded_arr[:arr.shape[0],:arr.shape[1]] = arr
# arr = padded_arr
# print(arr)
c = chr(letter)
print(file_prefix + str(letter), end='')
arr = char_to_pixels_from_image(c)
for row in arr:
for i in row:
print((' ' * 37) + str(int(i)))
|
#-*- coding:utf-8 -*-
import datetime
import time
import json
from celery import Task
from celery.task import task
from celery.task.sets import subtask
from django.conf import settings
from django.db.models import Sum,Max
from django.db import transaction
from django.db.models.query import QuerySet
from shopback import paramconfig as pcfg
from shopback.items.models import Item,Product,ProductSku,SkuProperty,\
ItemNumTaskLog,ProductDaySale
from shopback.fenxiao.models import FenxiaoProduct
from shopback.orders.models import Order, Trade
from shopback.trades.models import MergeOrder, MergeTrade, Refund
from shopback.users import Seller
from shopback.fenxiao.tasks import saveUserFenxiaoProductTask
from auth import apis
from common.utils import format_datetime,parse_datetime,get_yesterday_interval_time
import logging
logger = logging.getLogger('django.request')
PURCHASE_STOCK_PERCENT = 0.5
@task()
def updateUserItemsTask(user_id):
""" 更新淘宝线上商品信息入库 """
has_next = True
cur_page = 1
onsale_item_ids = []
    # update items currently on sale
try:
while has_next:
response_list = apis.taobao_items_onsale_get(page_no=cur_page, tb_user_id=user_id
, page_size=settings.TAOBAO_PAGE_SIZE, fields='num_iid,modified')
item_list = response_list['items_onsale_get_response']
if item_list['total_results'] > 0:
items = item_list['items']['item']
for item in items:
modified = parse_datetime(item['modified']) if item.get('modified', None) else None
item_obj, state = Item.objects.get_or_create(num_iid=item['num_iid'])
if modified != item_obj.modified:
response = apis.taobao_item_get(num_iid=item['num_iid'], tb_user_id=user_id)
item_dict = response['item_get_response']['item']
Item.save_item_through_dict(user_id, item_dict)
onsale_item_ids.append(item['num_iid'])
total_nums = item_list['total_results']
cur_nums = cur_page * settings.TAOBAO_PAGE_SIZE
has_next = cur_nums < total_nums
cur_page += 1
        # update items sitting in inventory
has_next = True
cur_page = 1
while has_next:
response_list = apis.taobao_items_inventory_get(page_no=cur_page, tb_user_id=user_id
, page_size=settings.TAOBAO_PAGE_SIZE, fields='num_iid,modified')
item_list = response_list['items_inventory_get_response']
if item_list['total_results'] > 0:
items = item_list['items']['item']
for item in item_list['items']['item']:
modified = parse_datetime(item['modified']) if item.get('modified', None) else None
item_obj, state = Item.objects.get_or_create(num_iid=item['num_iid'])
if modified != item_obj.modified:
response = apis.taobao_item_get(num_iid=item['num_iid'], tb_user_id=user_id)
item_dict = response['item_get_response']['item']
Item.save_item_through_dict(user_id, item_dict)
onsale_item_ids.append(item['num_iid'])
total_nums = item_list['total_results']
cur_nums = cur_page * settings.TAOBAO_PAGE_SIZE
has_next = cur_nums < total_nums
cur_page += 1
except:
logger.error('update user inventory items task error', exc_info=True)
else:
Item.objects.filter(user__visitor_id=user_id).exclude(num_iid__in=onsale_item_ids)\
.update(approve_status=pcfg.INSTOCK_STATUS,status=False)
return len(onsale_item_ids)
@task()
def updateAllUserItemsTask():
""" 更新所有用户商品信息任务 """
users = Seller.effect_users.all()
for user in users:
subtask(updateUserItemsTask).delay(user.visitor_id)
@task()
def updateUserProductSkuTask(user_id=None,outer_ids=None,force_update_num=False):
""" 更新用户商品SKU规格信息任务 """
user = Seller.getSellerByVisitorId(user_id)
items = user.items.filter(status=pcfg.NORMAL)
if outer_ids:
items = items.filter(outer_id__in=outer_ids)
num_iids = []
prop_dict = {}
for index, item in enumerate(items):
num_iids.append(item.num_iid)
prop_dict[int(item.num_iid)] = item.property_alias_dict
if len(num_iids) >= 40 or index + 1 == len(items):
sku_dict = {}
try:
num_iids_str = ','.join(num_iids)
response = apis.taobao_item_skus_get(num_iids=num_iids_str, tb_user_id=user_id)
                if 'skus' in response['item_skus_get_response']:
                    skus = response['item_skus_get_response']['skus']
                    for sku in skus.get('sku'):
                        if sku['num_iid'] in sku_dict:
                            sku_dict[sku['num_iid']].append(sku)
                        else:
                            sku_dict[sku['num_iid']] = [sku]
item = Item.objects.get(num_iid=sku['num_iid'])
sku_property = SkuProperty.save_or_update(sku.copy())
sku_outer_id = sku.get('outer_id', '').strip()
if (not item.user.is_primary or not item.product
or item.approve_status != pcfg.ONSALE_STATUS or
not sku_outer_id or sku['status'] != pcfg.NORMAL):
continue
sku_prop_dict = dict([('%s:%s' % (p.split(':')[0], p.split(':')[1]),
p.split(':')[3])
for p in sku['properties_name'].split(';') if p])
pskus = ProductSku.objects.filter(outer_id=sku_outer_id,
product=item.product)
if pskus.count() <= 0:
continue
psku = pskus[0]
psku.properties_name = psku.properties_name or sku['properties_name']
if force_update_num:
wait_post_num = psku.wait_post_num >= 0 and psku.wait_post_num or 0
psku.quantity = sku['quantity'] + wait_post_num
#psku.std_sale_price = float(sku['price'])
properties = ''
props = sku['properties'].split(';')
for prop in props:
if prop :
properties += (prop_dict[sku['num_iid']].get(prop, '')
or sku_prop_dict.get(prop,''))
psku.properties_name = properties
# psku.status = pcfg.NORMAL
psku.save()
except Exception, exc:
logger.error('update product sku error!', exc_info=True)
finally:
for num_iid, sku_list in sku_dict.items():
item = Item.objects.get(num_iid=num_iid)
item.skus = sku_list and json.dumps({'sku':sku_list}) or item.skus
item.save()
sku_ids = [sku['sku_id'] for sku in sku_list if sku]
if sku_ids:
SkuProperty.objects.filter(num_iid=num_iid)\
.exclude(sku_id__in=sku_ids).update(status=pcfg.DELETE)
num_iids = []
prop_dict = {}
@task()
def updateProductWaitPostNumTask():
""" 更新商品待发数任务 """
products = Product.objects.filter(status=pcfg.NORMAL)
for product in products:
Product.objects.updateProductWaitPostNum(product)
class CalcProductSaleTask(Task):
""" 更新商品销售数量任务 """
def getYesterdayDate(self):
dt = datetime.datetime.now() - datetime.timedelta(days=1)
return dt.date()
def getYesterdayStarttime(self,day_date):
return datetime.datetime(day_date.year,day_date.month,day_date.day,0,0,0)
def getYesterdayEndtime(self,day_date):
return datetime.datetime(day_date.year,day_date.month,day_date.day,23,59,59)
def getSourceList(self,yest_start,yest_end):
return set(MergeOrder.objects.filter(
pay_time__gte=yest_start,
pay_time__lte=yest_end)\
.values_list('outer_id','outer_sku_id'))
def getValidUser(self):
return Seller.effect_users.all()
def genPaymentQueryset(self,yest_start,yest_end):
return MergeOrder.objects.filter(
pay_time__gte=yest_start,
pay_time__lte=yest_end,
is_merge=False)\
.exclude(gift_type=pcfg.RETURN_GOODS_GIT_TYPE)\
.exclude(merge_trade__sys_status=pcfg.EMPTY_STATUS)\
.exclude(merge_trade__type=pcfg.EXCHANGE_TYPE,sys_status=pcfg.INVALID_STATUS)
def genRealQueryset(self,yest_start,yest_end):
return MergeOrder.objects.filter(
sys_status=pcfg.IN_EFFECT,
pay_time__gte=yest_start,
pay_time__lte=yest_end,
merge_trade__status__in=pcfg.ORDER_SUCCESS_STATUS)\
.exclude(gift_type=pcfg.RETURN_GOODS_GIT_TYPE)\
.exclude(merge_trade__sys_status__in=(pcfg.INVALID_STATUS,pcfg.ON_THE_FLY_STATUS))\
.exclude(merge_trade__sys_status=pcfg.FINISHED_STATUS,
merge_trade__is_express_print=False)
def sumQueryset(self,queryset,user,product,sku):
return queryset.filter(merge_trade__user=user,
outer_id=product.outer_id,
outer_sku_id=sku and sku.outer_id or '')
def getTotalRefundFee(self,order_qs):
effect_oids = [o[0] for o in order_qs.values_list('oid') if len(o[0]) > 6 ]
refunds = Refund.objects.filter(oid__in=effect_oids,status__in=(
pcfg.REFUND_WAIT_SELLER_AGREE,pcfg.REFUND_CONFIRM_GOODS,pcfg.REFUND_SUCCESS))
return refunds.aggregate(total_refund_fee=Sum('refund_fee')).get('total_refund_fee') or 0
def calcSaleByUserAndProduct(self,yest_start,yest_end,user,product,sku):
yest_date = yest_start.date()
queryset = self.genPaymentQueryset(yest_start, yest_end)
real_queryset = self.genRealQueryset(yest_start, yest_end)
sale_queryset = self.sumQueryset(queryset, user, product, sku)
sale_dict = sale_queryset.aggregate(sale_num=Sum('num'),sale_payment=Sum('payment'))
real_sale_queryset = self.sumQueryset(real_queryset, user, product, sku)
real_sale_dict = real_sale_queryset.aggregate(sale_num=Sum('num'),sale_payment=Sum('payment'))
refund_fee = self.getTotalRefundFee(real_sale_queryset)
if sale_dict['sale_num']:
pds,state = ProductDaySale.objects.get_or_create(
day_date=yest_date,
user_id=user.id,
product_id=product.id,
sku_id=sku and sku.id)
pds.sale_num = sale_dict['sale_num'] or 0
pds.sale_payment = sale_dict['sale_payment'] or 0
pds.sale_refund = sale_dict['sale_payment'] - (real_sale_dict['sale_payment'] or 0) + refund_fee
pds.confirm_num = real_sale_dict['sale_num'] or 0
pds.confirm_payment = (real_sale_dict['sale_payment'] or 0) - refund_fee
pds.save()
return sale_dict['sale_num'] or 0,sale_dict['sale_payment'] or 0
def run(self,yest_date=None,update_warn_num=False,*args,**kwargs):
yest_date = yest_date or self.getYesterdayDate()
yest_start = self.getYesterdayStarttime(yest_date)
yest_end = self.getYesterdayEndtime(yest_date)
sellers = self.getValidUser()
outer_tuple = self.getSourceList(yest_start,yest_end)
for outer_id,outer_sku_id in outer_tuple:
prod = Product.objects.getProductByOuterid(outer_id)
prod_sku = Product.objects.getProductSkuByOuterid(outer_id, outer_sku_id)
if prod_sku:
total_sale = 0
for user in sellers:
pds = self.calcSaleByUserAndProduct(yest_start,yest_end,user,prod,prod_sku)
total_sale += pds[0]
if update_warn_num:
prod_sku.warn_num = total_sale
prod_sku.save()
if not prod_sku and prod and prod.prod_skus.count() == 0:
total_sale = 0
for user in sellers:
pds = self.calcSaleByUserAndProduct(yest_start,yest_end,user,prod,None)
total_sale += pds[0]
if update_warn_num:
prod.warn_num = total_sale
prod.save()
        if update_warn_num:
            products = Product.objects.all()
            for p in products:
                for sku in p.prod_skus.all():
                    if (p.outer_id, sku.outer_id) not in outer_tuple:
                        sku.warn_num = 0
                        sku.save()
                # reset the warn count of SKU-less products with no sales yesterday
                if p.prod_skus.count() == 0 and (p.outer_id, "") not in outer_tuple:
                    p.warn_num = 0
                    p.save()
@task()
def updateAllUserProductSkuTask():
""" 更新所有用户SKU信息任务 """
users = Seller.effect_users.filter(is_primary=True)
for user in users:
subtask(updateUserProductSkuTask).delay(user.visitor_id)
@task()
def updateUserItemsEntityTask(user_id):
""" 更新用户商品及SKU信息任务 """
updateUserItemsTask(user_id)
subtask(updateUserProductSkuTask).delay(user_id)
@task()
def updateAllUserItemsEntityTask():
""" 更新所有用户商品及SKU信息任务 """
users = Seller.effect_users.all()
for user in users:
subtask(updateUserItemsEntityTask).delay(user.visitor_id)
@task()
def updateUserItemSkuFenxiaoProductTask(user_id):
""" 更新用户商品信息,SKU信息及分销商品信息任务 """
updateUserItemsTask(user_id)
updateUserProductSkuTask(user_id)
saveUserFenxiaoProductTask(user_id)
@task()
def gradCalcProductSaleTask():
""" 计算商品销售 """
dt = datetime.datetime.now()
    # recompute the figures from one month ago
if settings.DEBUG:
CalcProductSaleTask()(yest_date = dt - datetime.timedelta(days=30))
else:
subtask(CalcProductSaleTask()).delay(yest_date = dt - datetime.timedelta(days=30))
    # compute yesterday's figures
if settings.DEBUG:
CalcProductSaleTask()(yest_date = dt - datetime.timedelta(days=1),update_warn_num = True)
else:
subtask(CalcProductSaleTask()).delay(yest_date = dt - datetime.timedelta(days=1),
update_warn_num = True)
########################################################### Item stock management ########################################################
@transaction.commit_on_success
def updateItemNum(user_id,num_iid):
"""
taobao_item_quantity_update response:
{'iid': '21557036378',
'modified': '2012-12-26 12:51:16',
'num': 24,
'num_iid': 21557036378,
'skus': {'sku': ({'modified': <type 'str'>,
'quantity': <type 'int'>,
'sku_id': <type 'int'>},
{'modified': <type 'str'>,
'quantity': <type 'int'>,
'sku_id': <type 'int'>})}}
"""
item = Item.objects.get(num_iid=num_iid)
user = item.user
product = item.product
if not product or not item.sync_stock:
return
user_percent = user.stock_percent
p_outer_id = product.outer_id
skus = json.loads(item.skus) if item.skus else None
if skus:
for sku in skus.get('sku',[]):
try:
outer_sku_id = sku.get('outer_id','')
outer_id,outer_sku_id = Product.objects.trancecode(p_outer_id,outer_sku_id)
if p_outer_id != outer_id or sku['status'] != pcfg.NORMAL or not outer_sku_id:
continue
product_sku = product.prod_skus.get(outer_id=outer_sku_id)
order_nums = 0
wait_nums = max(product_sku.wait_post_num , 0)
remain_nums = product_sku.remain_num or 0
real_num = product_sku.quantity
sync_num = real_num - wait_nums - remain_nums
# if automatic stock sync is enabled and the computed stock differs from the online stock, update it
if sync_num>0 and user_percent>0:
sync_num = int(user_percent * sync_num)
elif sync_num >0 and sync_num <= product_sku.warn_num:
total_num,user_order_num = MergeOrder.get_yesterday_orders_totalnum(item.user.id,
outer_id,
outer_sku_id)
if total_num>0 and user_order_num>0:
sync_num = int(float(user_order_num)/float(total_num)*sync_num)
elif total_num == 0:
item_count = Item.objects.filter(outer_id=outer_id,
approve_status=pcfg.ONSALE_STATUS).count() or 1
sync_num = int(sync_num/item_count) or sync_num
else:
sync_num = (real_num - wait_nums)>10 and 2 or 0
elif sync_num > 0:
product_sku.is_assign = False
else:
sync_num = 0
# take whichever is larger: the computed sync value or the quantity already ordered online but not yet paid
sync_num = max(sync_num,sku.get('with_hold_quantity',0))
# # special case for product 3116BG7: testing whether showing a low online stock boosts sales
# if product.outer_id == '3116BG7':
# sync_num = product_sku.warn_num > 0 and min(sync_num,product_sku.warn_num+10) or min(sync_num,15)
# push only if the sync value is non-zero or the SKU stock is not manually assigned, the value differs from the online quantity, and shop, product and SKU all have stock sync enabled
if (not (sync_num == 0 and product_sku.is_assign)
and sync_num != sku['quantity']
and user.sync_stock
and product.sync_stock
and product_sku.sync_stock):
response = apis.taobao_item_quantity_update(num_iid=item.num_iid,
quantity=sync_num,
sku_id=sku['sku_id'],
tb_user_id=user_id)
item_dict = response['item_quantity_update_response']['item']
Item.objects.filter(num_iid=item_dict['num_iid']).update(modified=item_dict['modified'],
num=sync_num)
product_sku.save()
ItemNumTaskLog.objects.get_or_create(user_id=user_id,
outer_id=product.outer_id,
sku_outer_id= outer_sku_id,
num=sync_num,
start_at= item.last_num_updated,
end_at=datetime.datetime.now())
except Exception,exc:
logger.error('sync sku num error!', exc_info=True)
else:
order_nums = 0
outer_id,outer_sku_id = Product.objects.trancecode(p_outer_id,'')
wait_nums = max( product.wait_post_num , 0)
remain_nums = product.remain_num or 0
real_num = product.collect_num
sync_num = real_num - wait_nums - remain_nums
# if automatic stock sync is enabled and the computed stock differs from the online stock, update it
if sync_num>0 and user_percent>0:
sync_num = int(user_percent*sync_num)
elif sync_num >0 and sync_num <= product.warn_num:
total_num,user_order_num = MergeOrder.get_yesterday_orders_totalnum(
item.user.id,
outer_id,
outer_sku_id)
if total_num>0 and user_order_num>0:
sync_num = int(float(user_order_num)/float(total_num)*sync_num)
elif total_num == 0:
item_count = Item.objects.filter(outer_id=outer_id,
approve_status=pcfg.ONSALE_STATUS).count() or 1
sync_num = int(sync_num/item_count) or sync_num
else:
sync_num = (real_num - wait_nums)>10 and 2 or 0
elif sync_num > 0:
product.is_assign = False
else:
sync_num = 0
# take whichever is larger: the computed sync value or the quantity already ordered online but not yet paid
sync_num = max(sync_num,item.with_hold_quantity)
# push only if the sync value is non-zero or the product stock is not manually assigned, the value differs from the online quantity, and both shop and product have stock sync enabled
if (not (sync_num == 0 and product.is_assign)
and sync_num != item.num
and user.sync_stock
and product.sync_stock):
response = apis.taobao_item_update(num_iid=item.num_iid,
num=sync_num,
tb_user_id=user_id)
item_dict = response['item_update_response']['item']
Item.objects.filter(num_iid=item_dict['num_iid']).update(
modified=item_dict['modified'],
num=sync_num)
product.save()
ItemNumTaskLog.objects.get_or_create(user_id=user_id,
outer_id=product.outer_id,
num=sync_num,
start_at= item.last_num_updated,
end_at=datetime.datetime.now())
Item.objects.filter(num_iid=item.num_iid).update(last_num_updated=datetime.datetime.now())
def getPurchaseSkuNum(product,product_sku):
wait_nums = product_sku.wait_post_num>0 and product_sku.wait_post_num or 0
remain_nums = product_sku.remain_num or 0
real_num = product_sku.quantity
sync_num = real_num - wait_nums - remain_nums
if sync_num >0 and sync_num <= product_sku.warn_num:
sync_num = int(sync_num * PURCHASE_STOCK_PERCENT / 2)
elif sync_num > 0:
sync_num = PURCHASE_STOCK_PERCENT * sync_num
else:
sync_num = 0
return int(sync_num)
@transaction.commit_on_success
def updatePurchaseItemNum(user_id,pid):
"""
{"fenxiao_sku": [{"outer_id": "10410",
"name": "**",
"quota_quantity": 0,
"standard_price": "39.90",
"reserved_quantity": 0,
"dealer_cost_price": "78.32",
"id": 2259034511371,
"cost_price": "35.11",
"properties": "**",
"quantity": 110}]}
"""
item = FenxiaoProduct.objects.get(pid=pid)
user = item.user
try:
product = Product.objects.get(outer_id=item.outer_id)
except Product.DoesNotExist:
product = None
if not product or not product.sync_stock:
return
outer_id = product.outer_id
skus = json.loads(item.skus) if item.skus else None
if skus:
sku_tuple = []
for sku in skus.get('fenxiao_sku',[]):
outer_sku_id = sku.get('outer_id','')
try:
product_sku = product.prod_skus.get(outer_id=outer_sku_id)
except:
continue
sync_num = getPurchaseSkuNum(product,product_sku)
sku_tuple.append(('%d'%sku['id'],'%d'%sync_num,outer_sku_id))
# push only when there are SKUs to update and both shop and product have stock sync enabled
if ( sku_tuple and user.sync_stock and product.sync_stock):
response = apis.taobao_fenxiao_product_update(pid=pid,
sku_ids=','.join([s[0] for s in sku_tuple]),
sku_quantitys=','.join([s[1] for s in sku_tuple]),
tb_user_id=user_id)
item_dict = response['fenxiao_product_update_response']
FenxiaoProduct.objects.filter(pid=pid).update(modified=item_dict['modified'])
for index,sku in enumerate(sku_tuple):
ItemNumTaskLog.objects.get_or_create(user_id=user_id,
outer_id=outer_id,
sku_outer_id= 'fx%s'%sku[2],
num=sku[1],
start_at=item_dict['modified'],
end_at=item_dict['modified'])
else:
order_nums = 0
wait_nums = product.wait_post_num >0 and product.wait_post_num or 0
remain_nums = product.remain_num or 0
real_num = product.collect_num
sync_num = real_num - wait_nums - remain_nums
# if automatic stock sync is enabled and the computed stock differs from the online stock, update it
if sync_num >0 and sync_num <= product.warn_num:
sync_num = int(sync_num * PURCHASE_STOCK_PERCENT / 2)
elif sync_num > 0:
sync_num = PURCHASE_STOCK_PERCENT * sync_num
else:
sync_num = 0
sync_num = int(sync_num)
if (not (sync_num == 0 and product.is_assign)
and user.sync_stock and product.sync_stock):
response = apis.taobao_fenxiao_product_update(pid=pid,
quantity=sync_num,
tb_user_id=user_id)
item_dict = response['fenxiao_product_update_response']
FenxiaoProduct.objects.filter(pid=pid).update(
modified=item_dict['modified'])
ItemNumTaskLog.objects.get_or_create(user_id=user_id,
outer_id='',
num=sync_num,
start_at=item_dict['modified'],
end_at=item_dict['modified'])
@task()
def updateUserItemNumTask(user_id):
updateUserItemsTask(user_id)
updateUserProductSkuTask(user_id)
items = Item.objects.filter(user__visitor_id=user_id,approve_status=pcfg.ONSALE_STATUS)
for item in items:
try:
updateItemNum(user_id,item.num_iid)
except Exception,exc :
logger.error(u'Taobao stock update error: %s' % exc, exc_info=True)
@task()
def updateUserPurchaseItemNumTask(user_id):
saveUserFenxiaoProductTask(user_id)
purchase_items = FenxiaoProduct.objects.filter(user__visitor_id=user_id,
status=pcfg.UP_STATUS)
for item in purchase_items:
try:
updatePurchaseItemNum(user_id,item.pid)
except Exception,exc :
logger.error(u'Fenxiao stock update error: %s' % exc.message, exc_info=True)
@task()
def updateAllUserItemNumTask():
updateProductWaitPostNumTask()
for user in Seller.effect_users.TAOBAO:
updateUserItemNumTask(user.visitor_id)
@task()
def updateAllUserPurchaseItemNumTask():
updateProductWaitPostNumTask()
users = Seller.effect_users.TAOBAO.filter(has_fenxiao=True)
for user in users:
updateUserPurchaseItemNumTask(user.visitor_id)
|
import time  # import the time module
ticks = time.time()
print("Current timestamp:", ticks)
localtime = time.localtime(time.time())
print("Local time:", localtime)
localtime = time.asctime(time.localtime(time.time()))
print("Local time:", localtime)
print(time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()))
print(time.strftime("%a %b %c %H:%M:%S %y",time.localtime()))
|
from django.shortcuts import render,redirect
from django.http import HttpResponse
from .models import Post
from django.utils import timezone
from django.contrib.auth.decorators import login_required, user_passes_test
import requests
from selenium import webdriver
import time
from selenium.webdriver.common.keys import Keys
from spotdl.command_line.core import Spotdl
import os
from django.core.files import File
# Create your views here.
# def test(request):
# driver = webdriver.Chrome('chromedriver')
# driver.get("https://www.youtube.com/")
# time.sleep(3)
# # find the search box and store it in the search variable
# search = driver.find_element_by_xpath('//*[@id="search"]')
# # send the query text to the element stored in search
# search.send_keys('반원 코딩')
# time.sleep(1)
# # press Enter in the element stored in search
# search.send_keys(Keys.ENTER)
# HttpResponse
def index(request):
print('calling the dog API')
dog_api_response=requests.get('https://dog.ceo/api/breeds/image/random')
print (dog_api_response)
dog_api_response_dictionary = dog_api_response.json()
posts = Post.objects.all()
dog=None
if dog_api_response_dictionary['status']=='success':
dog = dog_api_response_dictionary['message']
context = {'posts': posts,
'dog': dog}
return render (request, 'musictest/index.html', context)
def detail(request, post_id):
post = Post.objects.get(id=post_id)
context = {'post':post}
return render(request, 'musictest/detail.html', context)
@login_required
def create(request):
user = request.user
image = None
if 'image' in request.FILES:
image = request.FILES['image']
music = request.POST['music']  # title of the track to search for
singer = request.POST['singer']  # artist to search for
args = { "no_encode":False, 'output_ext': 'mp3', 'output_file': f'{singer}-{music}.mp3', 'quality': 'best',
}  # search options: download as mp3, file named "singer-title.mp3"
spotdl_handler = Spotdl(args)  # create the Spotdl handler with these search options
spotdl_handler.download_track(f'{singer} {music}')  # search for "singer title" and download the track
tag = request.POST['tag']
body = request.POST['body']
song = f'/Musiary/{singer}-{music}.mp3'  # point song at the downloaded file
post = Post(user=user, song=song, image=image,music=music, singer=singer, tag=tag, body=body, created_at=timezone.now())  # build the Post
post.save()
# os.remove(f'{singer} - {music}.mp3')
return redirect('musictest:detail', post_id=post.id)
# @login_required
# def search(request):
# keyword = request.GET['song']
# args = { "no_encode":False, 'output_ext': 'mp3', 'output_file': '{artist} - {track-name}.{output-ext}', }
# spotdl_handler = Spotdl(args)
# song = spotdl_handler.download_track(keyword)
# post = Post(song = song)
# post.save()
# return redirect('musictest:new')
@login_required
def new(request):
# if not request.user.is_authenticated:
# return redirect('accounts:login')
return render(request, 'musictest/new.html')
@login_required
def edit(request, post_id):
try:
post = Post.objects.get(id=post_id, user=request.user)
except Post.DoesNotExist:
return redirect('posts:index')
context={'post':post}
return render(request, 'musictest/edit.html', context)
@login_required
def update(request, post_id):
try:
post = Post.objects.get(id=post_id, user=request.user)
except Post.DoesNotExist:
return redirect('posts:index')
post.user = request.user
if 'image' in request.FILES:
post.image = request.FILES['image']
post.music = request.POST['music']
post.singer = request.POST['singer']
post.tag = request.POST['tag']
post.body = request.POST['body']
post.save()
return redirect('musictest:detail', post_id=post.id)
@login_required
def delete(request, post_id):
try:
post = Post.objects.get(id=post_id, user=request.user)
except Post.DoesNotExist:
return redirect('musictest:index')
context={'post': post}
return render(request, 'musictest/delete.html', context)
@login_required
def realdelete(request, post_id):
try:
post = Post.objects.get(id=post_id, user=request.user)
except Post.DoesNotExist:
return redirect('musictest:index')
post.delete()
return redirect('musictest:index')
@login_required
def like(request, post_id):
if request.method == 'POST':
try:
post = Post.objects.get(id=post_id)
if request.user in post.liked_users.all():
post.liked_users.remove(request.user)
else:
post.liked_users.add(request.user)
return redirect('musictest:detail',post.id)
except Post.DoesNotExist:
pass
return redirect('musictest:index')
|
__author__ = 'natalie'
from bot.config import config
def error(message):
return 'Error: {}'.format(message)
def not_valid_args(args, message=None):
err = 'arguments not valid'
if args:
err += ': {}'.format(args)
if message:
err += '\n{}'.format(message)
return error(err)
def get_transition(transitions, status):
# O(n)
for t in transitions:
if unicode(status) == t.get('to', {}).get('name', ''):
return t.get('id', None)
return None
def check_project(jira, project_key):
return project_key in [p.key for p in jira.projects()]
def project_info(project):
return '{}: {}'.format(project.key, project.name)
def issue_info(issue):
issue_summary = issue.fields.summary
issue_description = issue.fields.description or 'No description'
issue_key = issue.key
issue_labels = ','.join(issue.fields.labels or ['no labels', ])
issue_type = issue.fields.issuetype
issue_status = issue.fields.status
issue_link = '{}/browse/{}'.format(config.get('jira_server'), issue_key)
assignee = issue.fields.assignee
if assignee:
assignee = '@' + user_info(assignee)
else:
assignee = 'not assigned'
return '{}\n{}\n{}|{}|{}|{}|{}\n{}'.format(issue_summary,
issue_description,
issue_key,
issue_labels,
issue_type,
issue_status,
assignee,
issue_link)
def user_info(user):
return '{}: {}'.format(user.key, user.displayName)
|
from flask import Blueprint
notification = Blueprint('notification', __name__)
from . import logic |
# log/urls.py
from django.urls import path
from . import views
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
# 127.0.0.1:8000/log/
path('', views.index, name='index'),
# Signup/Account Creation
url(r'^signup/$', views.signup, name='signup'),
# Profile View -- OLD
#url(r'^profile/$', views.profile, name='profile'),
# Profile View with Entry List
path('profile/', views.profile.as_view(), name='profile'),
# Add Car
url(r'^createCar/$', views.createCar, name='createCar'),
# My Garage
path('myGarage/', views.myGarage.as_view(), name='myGarage'),
# Car Detail Page .. Needs to have list of recent log entries, recent first
path('car/<int:pk>', views.carDetail.as_view(), name='carDetail'),
# Edit Car Details .. Everything Editable except Owner
path('car/<int:pk>/update/', views.carUpdate.as_view(), name='carUpdate'),
# Delete Car .. OR a method for deleting a car in a garage
path('car/<int:pk>/delete/', views.carDelete.as_view(), name='carDelete'),
# New Entry .. Add a Log Entry Associated with the Car
url(r'^createEntry/$', views.createEntry, name='createEntry'),
# Edit Entry
path('entry/<int:pk>/update/', views.entryUpdate.as_view(), name='entryUpdate'),
# Entry Detail View
path('entry/<int:pk>', views.entryDetail.as_view(), name='entryDetail'),
# Redirect to simple html: Activation sent
path('account_activation_sent/', views.account_activation_sent, name='account_activation_sent'),
#url(r'^activate/(?P<uidb64>.+)/(?P<token>.+)/$', views.activate, name='activate'),
url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>.+)/$', views.activate, name='activate'),
# Display search Results from Profile Page
path('search/', views.searchResults, name='searchResults'),
# Display search Results from Car Detail Page
#path('carsearch/<int:pk>', views.carSearchResults, name='carSearchResults'),
# Create Reminder
url(r'^createReminder/$', views.createReminder, name='createReminder'),
# Reminder Detail
path('reminder/<int:pk>', views.reminderDetail.as_view(), name='reminderDetail'),
# Reminder Update
path('reminder/<int:pk>/update/', views.reminderUpdate.as_view(), name='reminderUpdate'),
# Reminder Delete
path('reminder/<int:pk>/delete/', views.reminderDelete.as_view(), name='reminderDelete'),
# Administrative Data Page
path('diag_data/', views.diagnosticData, name='diag_data'),
# Administrative List Pages:
path('user_list/', views.userList.as_view(), name='userList'),
path('car_list/', views.carList.as_view(), name='carList'),
path('entry_list/', views.entryList.as_view(), name='entryList'),
path('reminder_list/', views.reminderList.as_view(), name='reminderList'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT) |
print('welcome to the Gingerbread_Checkers launcher')
print('would you like to start a new game?')
wu = input('>>>')
yes = 'yes'
Yes = 'Yes'
no = 'no'
No = 'No'
def y():
    print('okay, starting new game')
def x():
    print('okay then')
if wu == yes or wu == Yes:
    y()
elif wu == no or wu == No:
    x()
else:
    print('error code 000: invalid response received')
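# A minimal alternative sketch (illustrative, not part of the launcher above): lower-casing
# the reply once removes the need for a separate branch per capitalisation.
def ask_new_game():
    answer = input('>>>').strip().lower()
    if answer == 'yes':
        y()
    elif answer == 'no':
        x()
    else:
        print('error code 000: invalid response received')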
|
import nltk
text="Dan's parents were overweight.,Dan was overweight as well.,The doctors told his parents it was unhealthy.,His parents understood and decided to make a change.,They got themselves and Dan on a diet.".split(',')
print [sen.lower() for sen in text]
print [nltk.word_tokenize(sen) for sen in text]
wnl=nltk.WordNetLemmatizer()
print [wnl.lemmatize(sen) for sen in text]
from nltk.stem.lancaster import LancasterStemmer
lancaster_stemmer = LancasterStemmer()
lancaster_stemmer.stem('presumably')
from nltk.stem.porter import PorterStemmer
porter_stemmer = PorterStemmer()
porter_stemmer.stem('presumably')
from nltk.stem import SnowballStemmer
snowball_stemmer = SnowballStemmer('english')
snowball_stemmer.stem('presumably')
# Typical edge cases: "Excited", "Lying". For now snowball_stemmer is probably the best fit.
>>> print [lancaster_stemmer.stem(sen) for sen in text] # lowercases the text
["dan's parents were overweight.", 'dan was overweight as well.', 'the doctors told his parents it was unhealthy.', 'his parents understood and decided to make a change.', 'they got themselves and dan on a diet.']
>>> print [porter_stemmer.stem(sen) for sen in text] # does not lowercase
[u"Dan's parents were overweight.", u'Dan was overweight as well.', u'The doctors told his parents it was unhealthy.', u'His parents understood and decided to make a change.', u'They got themselves and Dan on a diet.']
>>> print [snowball_stemmer.stem(sen) for sen in text] # lowercases the text
[u"dan's parents were overweight.", u'dan was overweight as well.', u'the doctors told his parents it was unhealthy.', u'his parents understood and decided to make a change.', u'they got themselves and dan on a diet.']
#nltk version
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.stem import SnowballStemmer
snowball_stemmer = SnowballStemmer('english')
wordnet_lemmatizer = WordNetLemmatizer()
text="Dan's parents were overweight.,Dan was overweight as well.,The doctors told his parents it was unhealthy.,His parents understood and decided to make a change.,They got themselves and Dan on a diet.".split(',')
for sen in text:
token_list=nltk.word_tokenize(sen[:-1])
tagged_sen=nltk.pos_tag(token_list)
new_sen=[]
for (word,tag) in tagged_sen:
if tag[0]=='V':
lemma_word=wordnet_lemmatizer.lemmatize(word,pos='v')
else:
lemma_word=wordnet_lemmatizer.lemmatize(word)
stem_word=snowball_stemmer.stem(lemma_word)
new_sen.append(stem_word)
print " ".join(new_sen)
# stanford version
import nltk
from nltk.tag import StanfordPOSTagger
from nltk.tokenize import StanfordTokenizer
from nltk.stem import WordNetLemmatizer
from nltk.stem import SnowballStemmer
snowball_stemmer = SnowballStemmer('english')
wordnet_lemmatizer = WordNetLemmatizer()
tokenizer = StanfordTokenizer()
eng_tagger = StanfordPOSTagger('english-bidirectional-distsim.tagger')
text="Dan's parents were overweight.,Dan was overweight as well.,The doctors told his parents it was unhealthy.,His parents understood and decided to make a change.,They got themselves and Dan on a diet.".split(',')
for sen in text:
token_list=tokenizer.tokenize(sen[:-1])
tagged_sen=eng_tagger.tag(token_list)
new_sen=[]
for (word,tag) in tagged_sen:
# print word,tag
if tag[0]=='V':
lemma_word=wordnet_lemmatizer.lemmatize(word,pos='v')
else:
lemma_word=wordnet_lemmatizer.lemmatize(word)
stem_word=snowball_stemmer.stem(lemma_word)
new_sen.append(stem_word)
print " ".join(new_sen)
|
"""
Object
The Object is the "naked" base class for things in the game world.
Note that the default Character, Room and Exit does not inherit from
this Object, but from their respective default implementations in the
evennia library. If you want to use this class as a parent to change
the other types, you can do so by adding this as a multiple
inheritance.
"""
from objects import Object
class Consumable(Object):
    def at_object_creation(self):
        # evennia hook run once when the object is first created; give it a starting health value
        self.db.health = 100 |
from typing import List
class Solution:
    def nextPermutation(self, nums: List[int]) -> None:
        """
        Do not return anything, modify nums in-place instead.
        """
        n = len(nums)
        # find the rightmost index i with nums[i] < nums[i + 1]
        i = n - 2
        while i >= 0 and nums[i] >= nums[i + 1]:
            i -= 1
        if i >= 0:
            # find the rightmost element larger than nums[i] and swap the two
            j = n - 1
            while nums[j] <= nums[i]:
                j -= 1
            nums[i], nums[j] = nums[j], nums[i]
        # reverse the suffix so it becomes the smallest possible arrangement
        nums[i + 1:] = nums[i + 1:][::-1]

nums = [1, 3, 2]
Solution().nextPermutation(nums)
print(nums) |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 20 19:08:59 2021
@author: Gustavo
@mail: gustavogodoy85@gmail.com
"""
#%% Exercise 2.18: Balances
import csv
#from pprint import pprint
def leer_camion(ruta_archivo):
camion = []
with open(ruta_archivo, 'rt') as f:
rows = csv.reader(f)
header = next(rows)
for row in rows:
product_dictionary = {}
try:
product_dictionary[header[0]] = row[0]
product_dictionary[header[1]] = int(row[1])
product_dictionary[header[2]] = float(row[2])
except ValueError:
print('Warning: could not parse row', row)
camion.append(product_dictionary)
return camion
# camion = leer_camion('./Data/camion.csv')
# pprint(camion)
def leer_precios(ruta_archivo):
#check = 0
precios = {}
with open(ruta_archivo, 'rt', encoding=('utf8')) as f:
file = csv.reader(f)
for line in file:
try:
precios[line[0]] = line[1]
except IndexError:
# print('Warning: some error in leer_precios')
pass
return precios
# precios = leer_precios('./Data/precios.csv')
# pprint(precios)
def balance(precios_venta, costo_camion):
costo = 0
ventas = 0
camion = leer_camion(costo_camion)
precios = leer_precios(precios_venta)
for prod in camion:
costo += prod['cajones']*prod['precio']  # cost of the truck load
ventas += prod['cajones']*float(precios[prod['nombre']])
saldo = round(ventas - costo, 2)
print(f'Sales came to {ventas}')
print(f'The cost of the goods was {costo}')
print(f'The balance is {saldo}.')
datos = {'saldo' : saldo, 'ventas' : ventas, 'costo' : costo}
return datos  # in case the caller wants to use the data
# First approach
def hacer_informe(precios, camion):
dicc_precios = leer_precios(precios)
lista_camion = leer_camion(camion)
header = ('Nombre', 'Cajones', 'Precio', 'Cambio')
informe = []
for j in lista_camion:
if j['nombre'] in dicc_precios.keys():
e = (j['nombre'], j['cajones'], j['precio'], round(float(dicc_precios[j['nombre']])-float(j['precio']), 2) )
informe.append(e)
#print('%10s %10d %10.2f %10.2f' % e)
print('%10s %10s %10s %10s' % header)
#print(f'{header[0]:>10s} {header[1]:>10s} {header[2]:>10s} {header[3]:>10s}')  # another way to print the header
print(f'{"":->10s} {"":->10s} {"":->10s} {"":->10s}')
for nombre, cajones, precio, cambio in informe:
# print(f'{nombre:>10s} {cajones:>10d} {precio:>10.2f} {cambio:>10.2f}')
# print('{:>10s} {:>10d} {:>10s} {:>10.2f}'.format(nombre, cajones, '$'+'{:.2f}'.format(precio), cambio))
precio_ = '$'+ '{:.2f}'.format(precio)
print(f'{nombre:>10s} {cajones:>10d} {precio_:>10s} {cambio:>10.2f}')
# Second approach
# def hacer_informe(precios, camion):
# dicc_precios = leer_precios(precios)
# lista_camion = leer_camion(camion)
# for j in lista_camion:
# if j['nombre'] in dicc_precios.keys():
# dicc_informe = {
# 'nombre' : j['nombre'],
# 'cajones' : j['cajones'],
# 'precio' : j['precio'],
# 'cambio' : round(float(dicc_precios[j['nombre']]) - float(j['precio']))
# }
# print('{nombre:>10s} {cajones:>10d} {precio:>10.2f} {cambio:>10.2f}'.format_map(dicc_informe))
# Third approach
# def hacer_informe(precios, camion):
# dicc_precios = leer_precios(precios)
# lista_camion = leer_camion(camion)
# header = ('Nombre', 'Cajones', 'Precio', 'Cambio')
# for j in lista_camion:
# if j['nombre'] in dicc_precios.keys():
# nombre = j['nombre']
# cajones = j['cajones']
# precio = j['precio']
# cambio = round(float(dicc_precios[j['nombre']]) - float(j['precio']))
# print(f'{nombre:>10s} {cajones:>10d} {precio:>10.2f} {cambio:>10.2f}')
#g = balance('../Data/precios.csv', '../Data/camion.csv')
h = hacer_informe('../Data/precios.csv', '../Data/camion.csv')
|
import pprint
import xmltodict
import yaml
from typing import Dict, List, Union
from convertlib import is_null, simplify_attr_list, ensure_list
with open('vos.xml', 'r') as vo_xml_file:
# Use dict_constructor=dict so we don't get ordered dicts; we don't really care about ordering
parsed = xmltodict.parse(vo_xml_file.read(), dict_constructor=dict)
def is_true_str(a_str: Union[str, None]) -> bool:
return a_str and a_str.strip("'\" ").lower() in ["1", "on", "true"]
# Represent multiline strings with block style so they look nicer in the YAML output
def str_presenter(dumper, data):
if len(data.splitlines()) > 1: # check for multiline string
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
return dumper.represent_scalar('tag:yaml.org,2002:str', data)
yaml.add_representer(str, str_presenter)
def simplify_contacttypes(contacttypes):
"""Simplify ContactTypes attribute
Turn e.g.
{"ContactType":
[{"Contacts":
{"Contact": [{"Name": "Steve Timm"},
{"Name": "Joe Boyd"}]},
"Type": "Miscellaneous Contact"}
]
}
into
{"Miscellanous Contact":
[ "Steve Timm", "Joe Boyd" ]
}
"""
if is_null(contacttypes, "ContactType"):
return None
new_contacttypes = {}
for ct in ensure_list(contacttypes["ContactType"]):
if is_null(ct, "Contacts", "Contact"):
continue
type_ = ct["Type"]
# Remove duplicates but keep ordering
contacts = []
for c in ensure_list(ct["Contacts"]["Contact"]):
if c["Name"] not in contacts:
contacts.append(c["Name"])
new_contacttypes[type_] = contacts
return new_contacttypes
def simplify_reportinggroups(reportinggroups):
"""Simplify ReportingGroups attributes
Turn e.g.
{"ReportingGroup": [{"Contacts": {"Contact": [{"Name": "a"},
{"Name": "b"}
},
"FQANs": {"FQAN": [{"GroupName": "XXX",
"Role": "YYY"}]
}
"Name": "ZZZ"
}]
}
into
{"ZZZ": {"Contacts": ["a", "b"],
"FQANs": [{"GroupName": "XXX", "Role": "YYY"}]
}
}
"""
if is_null(reportinggroups, "ReportingGroup"):
return None
# [{"Name": "XXX", <...>}, {"Name": "YYY", <...>}] becomes
# {"XXX": {<...>}, "YYY": {<...>}>
new_reportinggroups = simplify_attr_list(reportinggroups["ReportingGroup"], "Name")
if not new_reportinggroups: # only null entries found
return None
for rgname, rgdata in new_reportinggroups.items():
if not is_null(rgdata["Contacts"], "Contact"):
# {"Contacts": {"Contact": [{"Name": "a"}, {"Name": "b"}]}} becomes
# {"Contacts": ["a", "b"]}
new_contacts = []
for c in ensure_list(rgdata["Contacts"]["Contact"]):
if not is_null(c, "Name") and c["Name"] not in new_contacts:
new_contacts.append(c["Name"])
rgdata["Contacts"] = new_contacts
if not is_null(rgdata["FQANs"], "FQAN"):
rgdata["FQANs"] = ensure_list(rgdata["FQANs"]["FQAN"])
return new_reportinggroups
def simplify_oasis_managers(managers):
"""Simplify OASIS/Managers attributes
Turn
{"Manager": [{"Name": "a", "DNs": {"DN": [...]}}]}
into
{"a": {"DNs": [...]}}
"""
if is_null(managers, "Manager"):
return None
new_managers = simplify_attr_list(managers["Manager"], "Name")
for manager, data in new_managers.items():
if not is_null(data, "DNs"):
data["DNs"] = data["DNs"]["DN"]
if not is_null(data, "ContactID"):
data["ContactID"] = int(data["ContactID"])
return new_managers
def simplify_fields_of_science(fos: Dict) -> Union[Dict, None]:
"""Turn
{"PrimaryFields": {"Field": ["P1", "P2", ...]},
"SecondaryFields": {"Field": ["S1", "S2", ...]}}
into
{"PrimaryFields": ["P1", "P2", ...],
"SecondaryFields": ["S1", "S2", ...]}
"""
if is_null(fos, "PrimaryFields") or is_null(fos["PrimaryFields"], "Field"):
return None
new_fields = {"PrimaryFields": ensure_list(fos["PrimaryFields"]["Field"])}
if not is_null(fos, "SecondaryFields", "Field"):
new_fields["SecondaryFields"] = ensure_list(fos["SecondaryFields"]["Field"])
return new_fields
reportinggroup_data = {}
for vo in parsed['VOSummary']['VO']:
name = vo["Name"]
if "/" in name: continue # bad name
if "ID" in vo:
vo["ID"] = int(vo["ID"])
vo["Active"] = is_true_str(vo.get("Active", ""))
vo["CertificateOnly"] = is_true_str(vo.get("CertificateOnly", ""))
vo["Disable"] = is_true_str(vo.get("Disable", ""))
if "ContactTypes" in vo:
vo["Contacts"] = simplify_contacttypes(vo["ContactTypes"])
del vo["ContactTypes"]
if "ReportingGroups" in vo:
rgs = simplify_reportinggroups(vo["ReportingGroups"])
if rgs is not None:
vo["ReportingGroups"] = sorted(set(rgs.keys()))
reportinggroup_data.update(rgs)
if "OASIS" in vo:
if not is_null(vo["OASIS"], "Managers"):
vo["OASIS"]["Managers"] = simplify_oasis_managers(vo["OASIS"]["Managers"])
else:
vo["OASIS"].pop("Managers", None)
if not is_null(vo["OASIS"], "OASISRepoURLs", "URL"):
vo["OASIS"]["OASISRepoURLs"] = ensure_list(vo["OASIS"]["OASISRepoURLs"]["URL"])
else:
vo["OASIS"].pop("OASISRepoURLs")
vo["OASIS"]["UseOASIS"] = is_true_str(vo["OASIS"].get("UseOASIS", ""))
if not is_null(vo, "FieldsOfScience"):
vo["FieldsOfScience"] = simplify_fields_of_science(vo["FieldsOfScience"])
if not is_null(vo, "ParentVO"):
vo["ParentVO"]["ID"] = int(vo["ParentVO"]["ID"])
vo.pop("MemeberResources", None) # will recreate MemeberResources [sic] from RG data
# delete empty fields
for key in ["Contacts", "MembershipServicesURL", "ParentVO", "PrimaryURL", "PurposeURL", "ReportingGroups", "SupportURL"]:
if is_null(vo, key):
vo.pop(key, None)
serialized = yaml.safe_dump(vo, encoding='utf-8', default_flow_style=False)
print(serialized.decode())
with open("virtual-organizations/{0}.yaml".format(name), 'w') as f:
f.write(serialized.decode())
with open("virtual-organizations/REPORTING_GROUPS.yaml", "w") as f:
f.write(yaml.safe_dump(reportinggroup_data, encoding="utf-8").decode())
|
# search
@api.route('/search', methods=['POST','GET'])
@allow_cross_domain
def search_api():
rd = request.get_data().decode()
print(rd)
rd = json.loads(rd)
searched = search.search_api(rd['sentence'])
return jsonify(searched)
# return jsonify(searched)
@api.route('/search/<s>', methods=['GET'])
@allow_cross_domain
def get_search_api(s):
searched = search.search_api(s)
return jsonify(searched)
|
from abc import ABCMeta, abstractmethod
class AbsProxySensorTemperatura(metaclass=ABCMeta):
@abstractmethod
def leer_temperatura(self):
pass
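# Hedged sketch of a concrete implementation (illustrative only; the class name and the
# fixed reading below are made up, not part of the original module):
class SensorTemperaturaFija(AbsProxySensorTemperatura):
    def leer_temperatura(self):
        # return a constant reading so the abstract interface can be exercised in tests
        return 21.5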
|
# Do the following commands in the mongo shell
"""
use nlp100
db.artists.find({name: "Queen"})
"""
from pymongo import MongoClient
client = MongoClient("localhost")
db = client.nlp100
collection = db.artists
for idx, data in enumerate(collection.find({"name": "Queen"})):
assert isinstance(data, dict)
print(idx, data)
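# Optional follow-up (hedged: count_documents requires pymongo 3.7 or newer; adjust if an
# older driver is in use):
print(collection.count_documents({"name": "Queen"}))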
|
import time
import threading
from datetime import datetime
from config import basedir, LIGHT_START_TIME, LIGHT_ON_TIME, REQUIRED_FEED_LAPSE, REQUIRED_AUTOFEED_LAPSE
class Aquarium(object):
def __init__(self, lightStartTime=LIGHT_START_TIME, lightOnTime=LIGHT_ON_TIME):
self.lightOnTime = lightOnTime
self.lightStartTime = lightStartTime
self._lightManualOn = False
self._lightOn = False
self._aquariumOn = True
try:
print basedir + "/app/lastFeed"
f = open(basedir + "/app/lastFeed", "r")
self._lastFeed = datetime.strptime(f.read(),"%Y-%m-%d %H:%M")
f.close()
except:
print 'cant read feed log'
self._food()
self.lightThread = threading.Thread(target=self._autoController)
self.lightThread.setDaemon(True)
self.lightThread.start()
def tryFeed(self):
currentTime = datetime.now()
if (currentTime-self._lastFeed).seconds >= REQUIRED_FEED_LAPSE*60*60:
if self._food():
return {'success':True}
else:
return {'success':False, 'last':(currentTime-self._lastFeed).seconds}
else:
return {'success':False, 'last':(currentTime-self._lastFeed).seconds}
def _food(self):
try:
print 'Feeding fish'
f = open("/sys/class/gpio/gpio18/value", "w")
f.write("0")
f.seek(0)
time.sleep(5)
f.write("1")
f.close()
self._lastFeed = datetime.now()
f = open(basedir + "/app/lastFeed", "w")
f.write(self._lastFeed.strftime("%Y-%m-%d %H:%M"))
f.close()
print 'Fish fed'
return True
except:
return False
def _autoController(self):
while self._aquariumOn:
try:
currentHour = datetime.now().hour
currentTime = datetime.now()
if (currentHour >= self.lightStartTime) and (currentHour < self.lightStartTime + self.lightOnTime):
f = open("/sys/class/gpio/gpio2/value", "w")
f.write("1")
f.close()
self._lightOn = True
else:
if self._lightManualOn != True:
f = open("/sys/class/gpio/gpio2/value", "w")
f.write("0")
f.close()
self._lightOn = False
else:
print 'Light turned on by user'
if (currentTime-self._lastFeed).seconds >= REQUIRED_AUTOFEED_LAPSE*60*60:
self._food()
time.sleep(30)
self._food()
time.sleep(60)
except:
time.sleep(10)
print "Error in light control"
print "Light controller off"
def _lightOneMinute(self):
if self._lightOn == False:
self._lightManualOn = True
self._lightOn = True
f2 = open("/sys/class/gpio/gpio2/value", "w")
f2.write("1")
f2.seek(0)
time.sleep(60)
f2.write("0")
f2.close()
self._lightManualOn = False
self._lightOn = False
print "Done"
else:
print "Light is on"
def turnLightOn(self):
try:
if self._lightOn == True:
return False
lightThread = threading.Thread(target=self._lightOneMinute)
lightThread.start()
return True
except:
print "Error starting light thread"
return False
def status(self):
if (datetime.now()-self._lastFeed).seconds >= REQUIRED_FEED_LAPSE*60*60:
need = True
else:
need = False
return {'light':{'status':self._lightOn,
'manual':self._lightManualOn
},
'food':{'need':need,
'last':(datetime.now()-self._lastFeed).seconds
}}
|
import sys
import copy
import rospy
import StringIO
from std_msgs.msg import String
from std_msgs.msg import Header
from std_msgs.msg import Int64
from StringIO import StringIO
import moveit_commander
import moveit_msgs.msg
from moveit_msgs.msg import PositionIKRequest, RobotState
from moveit_msgs.msg import RobotTrajectory
from moveit_msgs.srv import GetPositionIK, GetPositionIKRequest
import geometry_msgs.msg
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion
from sensor_msgs.msg import JointState
from goal_pos_generate import generate_goal_points, generate_goal_joints
def Get_current_state(group):
return JointState(
name=group.get_joints()[:7],
position=group.get_current_joint_values(),
);
def find_IK_solution(ik, target, seed, group_name):
response = ik( GetPositionIKRequest(ik_request = PositionIKRequest( group_name = group_name,
pose_stamped = PoseStamped( header = Header(frame_id="/base_link"),
pose = target),
robot_state = RobotState(joint_state=seed))
) )
return response
def copy_joint_value(group_name, joint_values):
count = 0
target_joint_value = []
target_joint_value.append(copy.deepcopy(joint_values[26]))
for count in range(1,27):
if group_name == "arm_left_torso":
if count > 1 and count < 9:
target_joint_value.append(copy.deepcopy(joint_values[count-1]))
elif group_name == "arm_right_torso":
if count > 19 and count < 27:
target_joint_value.append(copy.deepcopy(joint_values[count-1]))
return target_joint_value
def Save_traj(goal_jnt_value,plan):
file_name = "Traj/bin "+ str(goal_jnt_value.bin_num) + goal_jnt_value.traj_property;
print "saving bin.",goal_jnt_value.bin_num,"trajectory to file",file_name;
buf = StringIO();
plan.serialize(buf);
f = open(file_name,"w");
f.write(buf.getvalue());
f.close();
def plan_trajectory(group_handle,pose_target):
ArmJointValue = [];
target = geometry_msgs.msg.Pose();
target.position.x = pose_target.x;
target.position.y = pose_target.y;
target.position.z = pose_target.z;
target.orientation.x = pose_target.qx;
target.orientation.y = pose_target.qy;
target.orientation.z = pose_target.qz;
target.orientation.w = pose_target.qw;
print "EE Target position: ";
print target;
if pose_target.property == "init_pos":
print ">>>>>> Go to bin", pose_target.bin_num, "start pos >>>>>>"
ArmJointValue = left_arm_init_joint_value;
else:
if pose_target.property == "test_pos":
print ">>>>>> Go to bin", pose_target.bin_num, "test pos >>>>>>"
else:
print ">>>>>> Go to bin", pose_target.bin_num, "drop pos >>>>>>"
current_state = Get_current_state(group_handle);
result = find_IK_solution(ik, target, current_state, group_handle.get_name());
if result.error_code.val != 1:
attempt = 0;
Success = False;
while attempt < 100:
attempt += 1;
result = find_IK_solution(ik, target, current_state, group_handle.get_name());
if result.error_code.val == 1:
Success = True;
break;
if Success is not True:
print "Can't find IK solution for Bin ", pose_target.bin_num, pose_target.pnt_property;
return 1;
ArmJointValue = copy_joint_value(group_handle.get_name(),result.solution.joint_state.position);
group_handle.set_start_state_to_current_state();
group_handle.set_joint_value_target(ArmJointValue);
plan = group_handle.plan();
# Plan is valid, Save trajectory to file
if len(plan.joint_trajectory.points):
Save_traj(pose_target,plan);
print "Executing trajectory";
group_handle.go();
rospy.sleep(5);
return 0;
else:
print "Planning failed!";
return 1;
def pos_test(pose_targets, group_handle, IK_handle, animate_result = False):
test_number = len(pose_targets);
group = group_handle;
if IK_handle is not None:
ik = IK_handle;
else:
print "No IK solver assigned! Exit!";
return False;
# We have valid targets assigned
if len(pose_targets):
count = 1;
success_number = 0;
for pose_target in pose_targets:
if plan_trajectory(group, pose_target):
print "--------------- Attempts on bin ", pose_target.bin_num," failed---------------";
#group = moveit_commander.MoveGroupCommander("arm_left");
#group.set_planner_id("RRTConnectkConfigDefault");
#group.allow_replanning(True);
#group.set_planning_time(20);
#print "Attempts 2 on bin", pose_target.bin_num;
#if plan_trajectory(group, pose_target):
# print "Test on bin", pose_target.bin_num, "failed!";
# group = moveit_commander.MoveGroupCommander("arm_left_with_torso");
# group.set_planner_id("RRTConnectkConfigDefault");
# group.allow_replanning(True);
# group.set_planning_time(20);
else:
success_number += 1;
if success_number == test_number:
print "Available for all position!";
return True;
else:
print "Can't find IK solution for all target position!"
print "Success ratio:", success_number, "/",test_number;
return False;
else:
print "No target Assigned, Exit!";
return False;
def goal_jnt_val_test(goal_jnt_value_set, group_handle, animate_result = False):
success_num = 0;
for goal_jnt_value in goal_jnt_value_set:
if len(goal_jnt_value.value):
group_handle.set_start_state_to_current_state();
group_handle.set_joint_value_target(goal_jnt_value.value);
plan = group_handle.plan();
count = 0;
while len(plan.joint_trajectory.points) == 0:
plan = group_handle.plan();
count += 1;
if count > 100:
break;
if len(plan.joint_trajectory.points):
print "Executing trajectory",goal_jnt_value.bin_num;
group_handle.execute(plan);
#for point in plan.joint_trajectory.points:
# point.velocities = [0]*len(point.velocities);
# point.accelerations = [0]*len(point.accelerations);
#Save_traj(goal_jnt_value,plan);
rospy.sleep(5);
success_num += 1;
else:
print "Planning failed!";
else:
print "Joint value is empty!";
print "Success number:", success_num;
if __name__=='__main__':
try:
print ">>>> Initializing... >>>>"
moveit_commander.roscpp_initialize(sys.argv);
rospy.init_node('IK_Solution_Test', anonymous=True);
#robot = moveit_commander.RobotCommander();
scene = moveit_commander.PlanningSceneInterface();
print ">>>> Import Bin model, Generate Testing Targets >>>>"
if len(sys.argv)>1:
X_pos = float(sys.argv[1]);
Y_pos = float(sys.argv[2]);
Z_pos = float(sys.argv[3]);
else:
print "No distance assigned, using default parameters"
X_pos = 1.35;
Y_pos = 0;
Z_pos = 0;
#Goal_points = generate_goal_points(Bin_base_x = X_pos, Bin_base_y = Y_pos, Bin_base_z = Z_pos);
#print "Total", len(Goal_points), "targets need to be test";
Goal_jnt_val_set = generate_goal_joints()
print "Total", len(Goal_jnt_val_set), "traj need to be test";
bin_pose = PoseStamped();
bin_pose.pose.position.x = X_pos;
bin_pose.pose.position.y = Y_pos;
bin_pose.pose.position.z = Z_pos;
bin_pose.pose.orientation.x = 0.5;
bin_pose.pose.orientation.y = 0.5;
bin_pose.pose.orientation.z = 0.5;
bin_pose.pose.orientation.w = 0.5;
scene.attach_mesh(link = "base_link",
name = "kiva_pod",
pose = bin_pose,
filename = "Model/pod_lowres.stl");
print ">>>> Set Init Position >>>>"
arm_left_group = moveit_commander.MoveGroupCommander("arm_left");
#arm_left_group.set_planner_id("RRTstarkConfigDefault");
arm_left_group.set_planner_id("RRTConnectkConfigDefault");
#arm_left_group.set_planner_id("RRTkConfigDefault");
arm_left_group.allow_replanning(True);
arm_left_group.set_planning_time(30);
arm_right_group = moveit_commander.MoveGroupCommander("arm_right_torso");
#arm_right_group.set_planner_id("RRTstarkConfigDefault");
arm_right_group.set_planner_id("RRTConnectkConfigDefault");
#arm_right_group.set_planner_id("RRTkConfigDefault");
arm_right_group.allow_replanning(True);
arm_right_group.set_planning_time(30);
# pos_init(arm_left_group, arm_right_group);
print ">>>> Waiting for service `compute_ik` >>>>";
rospy.wait_for_service('compute_ik');
ik = rospy.ServiceProxy("compute_ik", GetPositionIK);
print ">>>> Start Testing >>>>"
#pos_test(Goal_points,arm_left_group, ik, animate_result = True)
goal_jnt_val_test(Goal_jnt_val_set,arm_right_group, animate_result = True)
print "**** Test End ****"
moveit_commander.roscpp_shutdown()
except rospy.ROSInterruptException:
pass |
import os
import os.path as op
import sys
import shutil
import argparse
import subprocess
# Directories/paths
this_dir = os.path.dirname(os.path.realpath(__file__))
template_origin_path = os.path.join(this_dir, 'xlwings_template.xltm')
if sys.platform.startswith('win'):
win_template_path = op.join(os.getenv('APPDATA'), 'Microsoft', 'Templates', 'xlwings_template.xltm')
else:
# Mac 2011 and 2016 use different directories
from appscript import k, app
from xlwings._xlmac import hfs_to_posix_path
mac_template_dirs = set((op.realpath(op.join(op.expanduser("~"), 'Library', 'Application Support', 'Microsoft',
'Office', 'User Templates', 'My Templates')),
hfs_to_posix_path(app('Microsoft Excel').properties().get(k.templates_path))))
if sys.platform.startswith('win'):
addin_path = os.path.join(os.getenv('APPDATA'), 'Microsoft', 'Excel', 'XLSTART', 'xlwings.xlam')
def addin_install(args):
if not sys.platform.startswith('win'):
print('Error: This command is only available on Windows right now.')
else:
try:
shutil.copyfile(os.path.join(this_dir, 'xlwings.xlam'), addin_path)
print('Successfully installed the xlwings add-in! Please restart Excel.')
except IOError as e:
if e.args[0] == 13:
print('Error: Failed to install the add-in: If Excel is running, quit Excel and try again.')
else:
print(str(e))
except Exception as e:
print(str(e))
def addin_remove(args):
if not sys.platform.startswith('win'):
print('Error: This command is only available on Windows right now.')
else:
try:
os.remove(addin_path)
print('Successfully removed the xlwings add-in!')
except WindowsError as e:
if e.args[0] == 32:
print('Error: Failed to remove the add-in: If Excel is running, quit Excel and try again.')
elif e.args[0] == 2:
print("Error: Could not remove the xlwings add-in. The add-in doesn't seem to be installed.")
else:
print(str(e))
except Exception as e:
print(str(e))
def addin_status(args):
if not sys.platform.startswith('win'):
print('Error: This command is only available on Windows right now.')
else:
if os.path.isfile(addin_path):
print('The add-in is installed at {}'.format(addin_path))
print('Use "xlwings addin remove" to uninstall it.')
else:
print('The add-in is not installed.')
print('"xlwings addin install" will install it at: {}'.format(addin_path))
def template_open(args):
if sys.platform.startswith('win'):
subprocess.Popen('start {0}'.format(template_origin_path), shell=True)
else:
subprocess.Popen('open {0}'.format(template_origin_path), shell=True)
def template_install(args):
if sys.platform.startswith('win'):
try:
shutil.copyfile(template_origin_path, win_template_path)
print('Successfully installed the xlwings template')
except Exception as e:
print(str(e))
else:
for dir in mac_template_dirs:
try:
if os.path.isdir(dir):
path = op.realpath(op.join(dir, 'xlwings_template.xltm'))
shutil.copyfile(template_origin_path, path)
print('Successfully installed the xlwings template to {}'.format(path))
except Exception as e:
print('Error installing template to {}. {}'.format(path, str(e)))
def template_remove(args):
if sys.platform.startswith('win'):
try:
os.remove(win_template_path)
print('Successfully removed the xlwings template!')
except WindowsError as e:
print("Error: Could not remove the xlwings template. The template doesn't seem to be installed.")
except Exception as e:
print(str(e))
else:
for dir in mac_template_dirs:
try:
if os.path.isdir(dir):
path = op.realpath(op.join(dir, 'xlwings_template.xltm'))
os.remove(path)
print('Successfully removed the xlwings template from {}'.format(path))
except OSError as e:
print("Error: Could not remove the xlwings template. "
"The template doesn't seem to be installed at {}.".format(path))
except Exception as e:
print('Error removing template from {}. {}'.format(path, str(e)))
def template_status(args):
if sys.platform.startswith('win'):
if os.path.isfile(win_template_path):
print('The template is installed at: {}'.format(win_template_path))
print ('Use "xlwings template remove" to uninstall it.')
else:
print('The template can be installed at {}'.format(win_template_path))
print('Use "xlwings template install" to install it or '
'"xlwings template open" to open it without installing.')
else:
is_installed = False
can_be_installed = False
for dir in mac_template_dirs:
path = op.realpath(op.join(dir, 'xlwings_template.xltm'))
if os.path.isfile(path):
is_installed = True
print('The template is installed at: {}'.format(path))
else:
if os.path.isdir(dir):
can_be_installed = True
print('The template can be installed at: {}'.format(dir))
if can_be_installed:
print('Use "xlwings template install" to install it or '
'"xlwings template open" to open it without installing.')
if is_installed:
print('Use "xlwings template remove" to uninstall it from all locations.')
def quickstart(args):
project_name = args.project_name
cwd = os.getcwd()
# Project dir
project_path = os.path.join(cwd, project_name)
if not os.path.exists(project_path):
os.makedirs(project_path)
else:
sys.exit('Error: Directory already exists.')
# Python file
with open(os.path.join(project_path, project_name + '.py'), 'w'):
pass
# Excel file
shutil.copyfile(os.path.join(this_dir, 'quickstart.xlsm'),
os.path.join(project_path, project_name + '.xlsm'))
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='command')
subparsers.required = True
# Add-in
addin_parser = subparsers.add_parser('addin', help='xlwings Excel Add-in')
addin_subparsers = addin_parser.add_subparsers(dest='subcommand')
addin_subparsers.required = True
addin_install_parser = addin_subparsers.add_parser('install')
addin_install_parser.set_defaults(func=addin_install)
addin_update_parser = addin_subparsers.add_parser('update')
addin_update_parser.set_defaults(func=addin_install)
addin_upgrade_parser = addin_subparsers.add_parser('upgrade')
addin_upgrade_parser.set_defaults(func=addin_install)
addin_remove_parser = addin_subparsers.add_parser('remove')
addin_remove_parser.set_defaults(func=addin_remove)
addin_uninstall_parser = addin_subparsers.add_parser('uninstall')
addin_uninstall_parser.set_defaults(func=addin_remove)
addin_status_parser = addin_subparsers.add_parser('status')
addin_status_parser.set_defaults(func=addin_status)
# Template
template_parser = subparsers.add_parser('template', help='xlwings Excel template')
template_subparsers = template_parser.add_subparsers(dest='subcommand')
template_subparsers.required = True
template_open_parser = template_subparsers.add_parser('open')
template_open_parser.set_defaults(func=template_open)
template_install_parser = template_subparsers.add_parser('install')
template_install_parser.set_defaults(func=template_install)
template_update_parser = template_subparsers.add_parser('update')
template_update_parser.set_defaults(func=template_install)
template_remove_parser = template_subparsers.add_parser('remove')
template_remove_parser.set_defaults(func=template_remove)
template_uninstall_parser = template_subparsers.add_parser('uninstall')
template_uninstall_parser.set_defaults(func=template_remove)
template_status_parser = template_subparsers.add_parser('status')
template_status_parser.set_defaults(func=template_status)
# Quickstart
quickstart_parser = subparsers.add_parser('quickstart', help='xlwings quickstart')
quickstart_parser.add_argument("project_name")
quickstart_parser.set_defaults(func=quickstart)
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
main()
|
#!/usr/bin/python -tt
#Derek Ruiz, csce470-500
"""
NOTES:
program scores documents by the summation of their tf-idf scores
------------------------------------------------------------------------------------------
This program returns a list of how many iterations it takes for convergence before
returning clustering results in the form:
(number of items in cluster) ITEMS IN CLUSTER <(cluster key)> ARE:
(items in cluster)
-------------------------------------------------------------------------------------------
to see a summary of the program results without seeing all the items in each
cluster comment out "print clustt[key]" in line 273
--------------------------------------------------------------------------------------------
Sources Cited: https://datasciencelab.wordpress.com/2013/12/12/clustering-with-k-means-in-python/
"""
from __future__ import division
from random import randint
from flask import Flask
from flask import render_template
from flask import request
import numpy as np
import random
import sys
import os
import re
import math
import time
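# Hedged mini-example of the scoring scheme described in the NOTES above (illustrative only,
# kept separate from the program's own create_tfidf / find_norm pipeline). A term's weight is
# tf-idf = (1 + log10(tf)) * log10(N / df), and a document's raw score is the sum of its weights.
def _tfidf_demo(tf, df, n_docs):
    return (1 + math.log10(tf)) * math.log10(n_docs / float(df))
# e.g. _tfidf_demo(3, 2, 10) ~= 1.477 * 0.699 ~= 1.03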
#configuration (used for flask)
DEBUG = True
app = Flask(__name__)
app.config.from_object(__name__)
if __name__ == '__main__':
app.run()
@app.route('/',methods=['POST','GET'])
def process_form():
if request.method == 'POST':
form_input = request.form['name']
return render_template('index.html',name=form_input)
else:
return render_template('index.html')
def word_count_dict(folder_path, review_list, index):
word_count = {}
#check_q = get_query(q)
pathname = os.path.join(folder_path, review_list[index])
input_file = open(pathname, 'r')
doc = input_file.read()
#d_l = doc.split('\n')
#print "title: " + d_l[0]
#print "description: " + d_l[1]
text = doc #d_l[1]
input_file.close()
words = re.split('\W+', text)
for word in words:
word = word.lower()
if len(word) >= 3:
if word.isalpha():
#if word in check_q:
if not word in word_count:
word_count[word] = 1
else:
word_count[word] = word_count[word] + 1
return word_count
def create_tfidf(folder_path):
wcd_list = []
#check_q = get_query(q)
idf_list = []
review_list=os.listdir(folder_path)
for index in range(len(review_list)):
wcd_list.append(word_count_dict(folder_path, review_list, index))
df_dic = {}
#for word in check_q: #set df vals to zero
# df_dic[word] = 0
for dict in wcd_list: #get df values
for key in dict:
if key not in df_dic:
df_dic[key] = 1 #first document occurence
else:
df_dic[key] = df_dic[key] + 1
for dict in wcd_list:
idf_dic = {}
for key in dict:
#if key in df_dic:
idf_dic[key] = df_dic[key]
idf_list.append(idf_dic)
#get tf component
for dict in wcd_list:
for key in dict:
dict[key] = 1 + math.log10(dict[key])
for dict in idf_list: #get idf component
for key in dict:
dict[key] = math.log10(len(review_list) / dict[key])
#get tfidf values using wcd_list and idf_list
#sort dicts to get list components aligned
list_tfidf = []
for dict in idf_list: #get idf values in
tfidf_dict = {}
for key in sorted(dict):
tfidf_dict[key] = dict[key]
list_tfidf.append(tfidf_dict)
v = 0
for dict in wcd_list: # multiply tf values to the idf values
for key in sorted(dict):
list_tfidf[v][key] = list_tfidf[v][key] * dict[key]
v = v + 1
#get tfidf values
return list_tfidf
def find_norm(list_tfidf):
# norm for doc = sqrt( summation( tfidf_vals^2 ) )
use_tfd = list_tfidf
sums_list = []
fd_list = []
for dict in use_tfd:
sum_d = 0
for key in sorted(dict):
sum_d = sum_d + math.pow(dict[key] , 2)
sum_d = math.sqrt(sum_d)
sums_list.append(sum_d)
i = 0
for dict in use_tfd:
for key in sorted(dict):
if dict[key] != 0:
dict[key] = dict[key] / sums_list[i]
i = i + 1
return use_tfd #returns normalized tfidf values
def norm_q(q):
use_q = get_query(q)
sum = 0
for key in use_q:
sum = sum + math.pow(use_q[key] , 2)
norm_factor = math.sqrt(sum)
for key in use_q:
use_q[key] = use_q[key] / norm_factor
return use_q # get normalized query values
def get_count(tup): #helper for sorting
return tup[1]
def find_doc_tf_score(tfidf_dict_list, folder_path):
scores = tfidf_dict_list
#for dict in scores:
# for key in dict:
# for key2 in normed_q_list:
# if key == key2 :
# dict[key] = dict[key] * normed_q_list[key2]
score_list = []
for dict in scores:
doc_score = 0
for key in dict:
#for key2 in normed_q_list:
# if key == key2:
doc_score = doc_score + dict[key]
score_list.append(doc_score)
#relate files to scores
file_dict = {}
review_list=os.listdir(folder_path)
for index in range(len(review_list)):
#pathname = os.path.join(folder_path, review_list[index])
#input_file = open(pathname, 'r')
#doc = input_file.read()
#d_l = doc.split('\n')
#print "title: " + d_l[0]
#print "description: " + d_l[1]
title = review_list[index]#d_l[0]
#input_file.close()
file_dict[title] = score_list[index]
#sort file scores
items = sorted(file_dict.items(), key=get_count, reverse=True)
#print "The top five scoring files (highest to lowest) based on doc tfidf are: "
#num = 1
#for item in items[:5]:
# print str(num)+". ", "Title: ", item[0], " Score: ", item[1] #print results
# num = num + 1
#for item in items:
# print item[0]
# print item[1]
return items
def find_q_score(tfidf_dict_list, folder_path, normed_q_list):
scores = tfidf_dict_list
for dict in scores:
for key in dict:
for key2 in normed_q_list:
if key == key2 :
dict[key] = dict[key] * normed_q_list[key2]
score_list = []
for dict in scores:
doc_score = 0
for key in dict:
for key2 in normed_q_list:
if key == key2:
doc_score = doc_score + dict[key]
score_list.append(doc_score)
#relate files to scores
file_dict = {}
review_list=os.listdir(folder_path)
for index in range(len(review_list)):
#pathname = os.path.join(folder_path, review_list[index])
#input_file = open(pathname, 'r')
#doc = input_file.read()
#d_l = doc.split('\n')
#print "title: " + d_l[0]
#print "description: " + d_l[1]
title = review_list[index]#d_l[0]
#input_file.close()
file_dict[title] = score_list[index]
#sort file scores
items = sorted(file_dict.items(), key=get_count, reverse=True)
use_cents = []
items_cents = []
aw = "The top twenty scoring books (highest to lowest) based on your query are: "
print aw
out_file = open("output.txt", "a")
out_file.write(aw)
num = 1
for item in items[:20]:
print str(num)+". ", "Title: ", item[0], " Score: ", item[1] #print results
out_file.write("\n" + str(num)+". Title: "+ str(item[0]) + " Score: "+ str(item[1]))
num = num + 1
use_cents.append(item[0])
out_file.close()
#for item in items:
# print item[0]
# print item[1]
items_cents.append(items)
items_cents.append(use_cents)
return items_cents
def get_query(q):
q_wordcount = {}
input_query = open(q, 'r')
text = input_query.read()
input_query.close()
words = re.split('\W+', text)
for word in words:
word = word.lower()
if len(word) >= 3:
if word.isalpha():
if not word in q_wordcount:
q_wordcount[word] = 1
else:
q_wordcount[word] = q_wordcount[word] + 1
return q_wordcount
def part_1(folder_path, q):
tfidf_dict_list = find_norm(create_tfidf(folder_path))
normed_q_list = norm_q(q)
score_list_tf = find_doc_tf_score(tfidf_dict_list, folder_path)
items_cen = find_q_score(tfidf_dict_list, folder_path, normed_q_list)
score_list_q = items_cen[0]
tf_q_cents_list = []
tf_q_cents_list.append(score_list_tf)
tf_q_cents_list.append(score_list_q)
tf_q_cents_list.append(items_cen[1])
return tf_q_cents_list
def get_centss(names_list, tf_items):
cents = []
for item in tf_items:
for name in names_list:
if item[0] == name:
cents.append(item[1])
return cents
def cluster(score_list, cur):
clusters = {}
for item in score_list:
#find which centroid is closest
min_dist = 0
centroid = 0
for centroid in cur:
test_dist = abs(item[1] - centroid)
if min_dist == 0:
min_dist = test_dist
c_c = centroid
else:
if test_dist < min_dist:
min_dist = test_dist
c_c = centroid
#print c_c, item
if clusters.has_key(c_c):
clusters[c_c].append(item)
else:
clusters[c_c] = [item]
return clusters
def update_cents(clusters):
new = []
clusters_ud = {}
cluster_keys = sorted(clusters.keys())
cluster_vals = []
for key in clusters:
cluster_v = []
for item in clusters[key]:
cluster_v.append(item[1])
cluster_vals.append(cluster_v)
for list in cluster_vals:
a = np.array(list)
new.append(np.mean(a, axis = 0 ))
return new
def convergence(cur, old):
ans = cmp(old, cur)
#ans = (set([tuple(a) for a in cur_points]) == set([tuple(a) for a in old_points])
return ans
def k_means(score_list, k, cent_list):
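# 1-D k-means: assign each scored item to its nearest centroid, recompute centroids, repeat until they stop changing.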
#old = random.sample(score_list, k)
#old = cent_list
#cur = random.sample(score_list, k)
#cur = cent_list
cur_points = cent_list
#for item in cur:
# cur_points.append(item[1])
old_points = cent_list
#for item in old:
# old_points.append(item)
clusters = cluster(score_list, cur_points)
cur_points = update_cents(clusters)
here = 1
while convergence(cur_points, old_points) != 0:
print "iteration: " + str(here)
old_points = cur_points
#assign items in score_list to clusters
clusters = cluster(score_list, cur_points)
#reevaluate centroids
cur_points = update_cents(clusters)
here = here + 1
#print "OLD CENTS: " , old_points, "NEW CENTS", cur_points
if convergence(cur_points, old_points) == 0:
print "CONVERGENCE REACHED"
return clusters
@app.route('/',methods=['POST', 'GET'])
def main():
if len(sys.argv) != 2:
print 'usage: part22.py folder_path_to_books'
sys.exit(1)
folder_path = sys.argv[1]
q_file = open("query.txt", "w")
user_query = ''
if request.method == 'POST':
user_query = request.form['query']
#user_query = raw_input("PLEASE ENTER YOUR QUERY RELATED TO AUTHOR / TITLE / BOOK CONTENT: ")
q_file.write(user_query)
q_file.close()
#time.sleep(5)
q = "query.txt"
out_file = open("output.txt", "w")
out_file.close()
tf_q_cents_list = part_1(folder_path, q)
#get seed centroids
#initialize clusters with one point
#clusters (K = 20)
out_file = open("output.txt", "a")
out_file.write("\n" + "The following selections are grouped based off the top results in respective order:")
print "The following selections are grouped based off the top results in respective order:"
cent_list = get_centss(tf_q_cents_list[2], tf_q_cents_list[0])
clustt = k_means(tf_q_cents_list[0], 20, cent_list)
place = 1
for key in clustt:
print "\n"+ str(len(clustt[key])) +" ITEMS IN GROUP FOR RESULT #"+str(place)+ " ARE: "
out_file.write("\n"+ "\n" + str(len(clustt[key])) +" ITEMS IN GROUP FOR RESULT #"+str(place)+ " ARE: ")
for item in clustt[key]:
print item[0]
out_file.write("\n" + str(item[0]))
place = place + 1
out_file.close()
return render_template('index.html', query=user_query)
# if __name__ == '__main__':
# main()
# #app.run() |
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import time
import matplotlib.pyplot as plt
import os
class Config:
def __init__(self):
self.img_shape = [28,28,1]
self.filters = 16
self.z_dim = 20
self.sample_num = 49
self.batch_size = 4096
self.buffer_size = 10000
self.lr = 1e-4
self.epochs = 500
self.log_dir = '../logs/VAE/'
self.img_save_path = '../imgs/VAE/'
self.get_gpus()
def get_gpus(self):
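# Restrict TensorFlow to the first visible GPU and enable memory growth on it, if any GPU is present.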
gpus = tf.config.list_physical_devices(device_type='GPU')
if len(gpus) > 0:
tf.config.set_visible_devices(devices=gpus[0], device_type='GPU')
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
tf.config.experimental.set_memory_growth(device=gpus[0], enable=True)
class CVAE(tf.keras.Model):
"""
Convolutional Variational Autoencoder (VAE)
sub-class of tf.keras.Model
code modified from TF2 CVAE tutorial:
https://www.tensorflow.org/alpha/tutorials/generative/cvae
"""
def __init__(self, cfg=Config()):
super(CVAE, self).__init__()
self.cfg = cfg
self.width = cfg.img_shape[0]
self.height = cfg.img_shape[1]
self.inference_net = tf.keras.Sequential([
layers.InputLayer(input_shape=cfg.img_shape),
layers.Conv2D(cfg.filters, 3, 2, 'same', activation='relu'),
layers.Conv2D(cfg.filters*2, 3, 2, 'same', activation='relu'),
layers.Flatten(),
layers.Dense(cfg.z_dim+cfg.z_dim)
])
self.generative_net = tf.keras.Sequential([
layers.InputLayer(input_shape=(cfg.z_dim,)),
layers.Dense(self.width//4*self.height//4*cfg.filters*2, activation='relu'),
layers.Reshape([self.width//4, self.height//4, cfg.filters*2]),
layers.Conv2DTranspose(cfg.filters*2, 3, 2, 'same', activation='relu'),
layers.Conv2DTranspose(cfg.filters, 3, 2, 'same', activation='relu'),
layers.Conv2DTranspose(1, 3, 1, 'same')
])
@tf.function
def sample(self, eps=None):
if eps is None:
eps = tf.random.normal([100, self.cfg.z_dim])
return self.decode(eps, apply_sigmoid=True)
def encode(self, x):
mean, logvar = tf.split(self.inference_net(x), num_or_size_splits=2, axis=1)
return mean, logvar
def decode(self, z, apply_sigmoid=False):
logits = self.generative_net(z)
if apply_sigmoid:
probs = tf.sigmoid(logits)
return probs
return logits
def reparameterize(self, mean, logvar):
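# Reparameterization trick: z = mean + eps * exp(logvar / 2), with eps ~ N(0, I).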
eps = tf.random.normal(mean.shape)
return eps * tf.exp(logvar/2) + mean
def dataLoader(cfg=Config()):
# load data
mnist = tf.keras.datasets.mnist
(train_data, train_label), (test_data, test_label) = mnist.load_data()
train_label = tf.one_hot(train_label, depth=10).numpy()
# Normalization
train_data = np.expand_dims(train_data.astype(np.float32)/255.0, axis=-1)
test_data = np.expand_dims(test_data.astype(np.float32) / 255.0, axis=-1)
#
train_data = tf.data.Dataset.from_tensor_slices((train_data, train_label))
train_batch = train_data.shuffle(cfg.buffer_size) \
.batch(cfg.batch_size) \
.prefetch(tf.data.experimental.AUTOTUNE)
test_data = tf.data.Dataset.from_tensor_slices((test_data, test_label))
test_batch = test_data.batch(cfg.batch_size).prefetch(tf.data.experimental.AUTOTUNE)
return train_batch, test_batch
class App:
def __init__(self, model=CVAE(), img_name='minst_cvae', cfg=Config()):
self.train_db, self.test_db = dataLoader()
self.cfg = cfg
self.img_name = img_name
self.model = model
self.optimizer = tf.keras.optimizers.Adam(learning_rate=cfg.lr)
self.train_loss = tf.keras.metrics.Mean(name='train_loss')
self.test_loss = tf.keras.metrics.Mean(name='test_loss')
self.random_vector = tf.random.normal([cfg.sample_num, cfg.z_dim])
def train(self):
summary_writer = tf.summary.create_file_writer(self.cfg.log_dir)
for epoch in tf.range(1,self.cfg.epochs+1):
start_time = time.time()
for data_batch in self.train_db:
self.train_step(data_batch)
end_time = time.time()
for test_batch in self.test_db:
x, labels = test_batch
loss = self.compute_loss(x)
self.test_loss.update_state(loss)
if epoch % 10 == 0:
print('Epoch: %d|%d, train ELBO=%f, test ELBO=%f' %(
epoch, self.cfg.epochs, -self.train_loss.result(), -self.test_loss.result()))
self.gen_plot(epoch, self.random_vector)
with summary_writer.as_default():
tf.summary.scalar('train_loss', self.train_loss.result(), step=epoch.numpy())
tf.summary.scalar('test_loss', self.test_loss.result(), step=epoch.numpy())
self.train_loss.reset_states()
self.test_loss.reset_states()
@tf.function
def train_step(self, data_batch):
with tf.GradientTape() as tape:
x, labels = data_batch
loss = self.compute_loss(x)
grads = tape.gradient(loss, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
self.train_loss.update_state(loss)
@tf.function
def compute_loss(self, x):
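# Negative ELBO: reconstruction cross-entropy plus the analytic KL divergence of the approximate posterior from N(0, I).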
mean, logvar = self.model.encode(x)
z = self.model.reparameterize(mean, logvar)
x_logit = self.model.decode(z)
cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=x)
cross_ent = tf.reduce_sum(tf.reduce_mean(cross_ent, axis=0))
kl_div = -0.5 * (logvar + 1 - mean**2 - tf.exp(logvar))
kl_div = tf.reduce_sum(tf.reduce_mean(kl_div, axis=0))
return cross_ent + kl_div
def log_normal_pdf(self, sample, mean, logvar, raxis=1):
log2pi = tf.math.log(2. * np.pi)
return tf.reduce_sum(
-0.5 * ((sample - mean) ** 2. * tf.exp(-logvar) + logvar + log2pi), axis=raxis)
def gen_plot(self, epoch, test_input):
predictions = self.model.sample(test_input)
predictions = self.reshape(predictions)
fig= plt.figure(figsize=(5,5), constrained_layout=True, facecolor='k')
plt.title('epoch ' + str(epoch.numpy()))
plt.imshow(predictions, cmap='gray')
plt.axis('off')
plt.savefig(self.cfg.img_save_path+self.img_name+"_%04d.png" % epoch)
def reshape(self, x, cols=7):
x = tf.squeeze(x, axis=-1)
x = tf.transpose(x, (1,0,2))
x = tf.reshape(x, (28, -1, 28*cols))
x = tf.transpose(x, (1,0,2))
x = tf.reshape(x, (-1, 28*cols))
return x
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
cfg = Config()
model, img_name = CVAE(), 'minst_cvae'
# model, img_name = CVAECNN(), 'minst_cnn'
app = App(model, img_name)
app.train()
# generate gif
from gen_gif import *
minst_cnn = ['../imgs/', 'VAE/*minst_cvae*.png','minst_cvae']
gen_gif(*minst_cnn)
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# import time module
# Author: Eason
import time
local_time = time.localtime(time.time())
epoch_seconds = time.time()  # avoid shadowing the time module
print "=" * 24
print "Total seconds elapsed since the Unix epoch (1970):"
print epoch_seconds
print "=" * 24
print "Local time is:", local_time
print "=" * 24
|
import hug
@hug.get()
def hello_world():
return "Hello world!"
|
from topology import *
from util import *
def make_rout_xml(T, out_f):
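# Emit each switch's static forwarding database (destination MAC -> output port) as XML.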
with open(out_f, 'w') as of:
print('<filteringDatabases>', file=of)
for i in range(T.node_n):
print('\t<filteringDatabase id="switch{}">'.format(i), file=of)
print('\t\t<static>', file=of)
print('\t\t\t<forward>', file=of)
# print(T.switch_list[i].connect_device)  # debug output
for j in range(len(T.switch_list[i].connect_device)):
if T.switch_list[i].output_port[j] :
dst = T.switch_list[i].connect_device[j]
print('\t\t\t\t<individualAddress macAddress=\"{}\" port=\"{}\" />'.format(T.host_list[dst].rout_addr, j), file=of)
print('\t\t\t</forward>', file=of)
print('\t\t</static>', file=of)
print('\t</filteringDatabase>', file=of)
print('</filteringDatabases>', file=of)
def make_flow_xml(T, rout, index_of_type2, out_f):
with open(out_f, 'w') as of:
print('<?xml version=\"1.0\" ?>', file=of)
print('<schedules>', file=of)
print('\t<defaultcycle>120us</defaultcycle>', file=of)
if index_of_type2 < len(rout.type2):
# print("(make_flow_xml)", rout.type2[index_of_type2])
rout_now = rout.type1 + (rout.type2[index_of_type2])
for stream in rout_now:
# print(T.host_list[int(stream.src)].name, ", ", T.host_list[int(stream.dst)].name)
print('\t<host name=\"{}\">'.format(T.host_list[int(stream.src)].name), file=of)
print('\t\t<cycle>10us</cycle>', file=of)
print('\t\t<entry>', file=of)
print('\t\t\t<start>0us</start>', file=of)
print('\t\t\t<queue>7</queue>', file=of)
print('\t\t\t<dest>{}</dest>'.format(T.host_list[int(stream.dst)].sche_addr), file=of)
print('\t\t\t<size>{}B</size>'.format(int(float(stream.util)*1250)), file=of)
print('\t\t\t<flowId>1</flowId>', file=of)
print('\t\t</entry>', file=of)
print('\t</host>', file=of)
# print(stream.src, " ", stream.dst)
print('</schedules>', file=of)
if __name__ == "__main__":
T, _ = parse_topology_file("./5.in")
make_rout_xml(T, "./test_rout.xml") |
#!/usr/bin/env python
import sys
"""def hello():
print "Hello, World!"
"""
def usage():
print >> sys.stderr, "Usage python %s <filename>" % (sys.argv[0])
def main():
#print "Program arguments are: ", sys.argv
#print "No of arg is: ", len(sys.argv)
if len(sys.argv) != 2:
usage()
sys.exit(1)
try:
fp = open(sys.argv[1])
except IOError, e:
print >> sys.stderr, "Argument is not a valid name"
sys.exit(2)
#print len(list(fp))
#print "No of lines in file", len(fp.readlines())
min = 11
max = -1
maxname = ""
maxfstname = ""
minname = ""
minfstname = ""
minperson = []
maxperson = []
for idx, line in enumerate(list(fp)):
unit = line.split("\t")
grade = int(unit[3])
# print unit
# print "grade = ", grade
if min == grade:
minperson.append(' '.join([unit[0], unit[1]]))
if min > grade:
minname = unit[0]
minfstname = unit[1]
min = grade
minperson = [' '.join([unit[0], unit[1]])]
if max == grade:
maxperson.append(' '.join([unit[0], unit[1]]))
if max < grade:
maxname = unit[0]
maxfstname = unit[1]
max = grade
maxperson = [' '.join([unit[0], unit[1]])]
print "Cel cu nota cea mai mare este: ", maxperson
print "Cel cu nota cea mai mica este: ", minperson
if __name__ == "__main__":
sys.exit(main())
|
#coding: utf-8
#Generator that yields the rows of Pascal's (Yang Hui's) triangle
def triangle(n) :
b = [1]
yield(b)
t = 1
while t < n :
b = [1] + [ b[i] + b[i+1] for i in range(len(b)-1)] + [1]
t += 1
yield(b)
n = input()
for t in triangle(n) :
print t
|
#!/usr/bin/env python3
import os
# Specify the locations of the Monte Carlo simulations and data directories here.
# Edit these variables to point to the right place for you.
data_dir = "/Users/thomasedwards/Dropbox/Work/DM/Indirect/AMCs/axion-miniclusters/Andromeda_data/"
montecarlo_dir = "/Users/thomasedwards/Dropbox/Work/DM/Indirect/AMCs/axion-miniclusters/AMC_montecarlo_data/"
# if os.environ["HOME"] == "/home/kavanagh":
# data_dir = "/home/kavanagh/AMC/data/"
# montecarlo_dir = "/home/kavanagh/AMC/AMC_montecarlo_data/"
|
from flask import Flask, jsonify, request, render_template
from flask_restful import Api, Resource
import numpy as np
import torch
import json
import sentencepiece
from sentence_transformers import SentenceTransformer
from transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config
app =Flask(__name__)
api = Api(app)
import gensim.downloader as gd
path = gd.load("word2vec-google-news-300", return_path=True)
model_path = path
from gensim.models.keyedvectors import KeyedVectors
w2v_model = KeyedVectors.load_word2vec_format(model_path, binary=True)
stopwords_path="D:/Accioibis_stuff/stopwords.txt"
with open(stopwords_path, 'r') as fh:
stopwords = fh.read().split(",")
model = T5ForConditionalGeneration.from_pretrained('t5-small')
tokenizer = T5Tokenizer.from_pretrained('t5-small')
import re
import pandas as pd
from docx import Document
import warnings
warnings.filterwarnings("ignore")
def countX(lst, x):
return lst.count(x)
from scipy import spatial
def _cosine_sim(vecA, vecB):
"""Find the cosine similarity distance between two vectors."""
try:
c_sim = 1-spatial.distance.cosine(vecA,vecB)
except:
c_sim=0
return c_sim
class DocSim:
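# Computes document similarity: each document is the mean of its word2vec vectors, compared by cosine similarity.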
def __init__(self, w2v_model, stopwords=None):
self.w2v_model = w2v_model
self.stopwords = stopwords if stopwords is not None else []
def vectorize(self, doc: str) -> np.ndarray:
"""
Identify the vector values for each word in the given document
:param doc:
:return:
"""
doc = doc.lower()
words = [w for w in doc.split(" ") if w not in self.stopwords]
word_vecs = []
for word in words:
try:
vec = self.w2v_model[word]
word_vecs.append(vec)
except KeyError:
# Ignore, if the word doesn't exist in the vocabulary
pass
# Assuming that document vector is the mean of all the word vectors
# PS: There are other & better ways to do it.
vector = np.mean(word_vecs, axis=0)
return vector
def _cosine_sim(self, vecA, vecB):
"""Find the cosine similarity distance between two vectors."""
csim = np.dot(vecA, vecB) / (np.linalg.norm(vecA) * np.linalg.norm(vecB))
if np.isnan(np.sum(csim)):
return 0
return csim
def calculate_similarity(self, source_doc, target_docs=None, threshold=0):
"""Calculates & returns similarity scores between given source document & all
the target documents."""
if not target_docs:
return []
if isinstance(target_docs, str):
target_docs = [target_docs]
source_vec = self.vectorize(source_doc)
results = []
for doc in target_docs:
target_vec = self.vectorize(doc)
sim_score = self._cosine_sim(source_vec, target_vec)
if sim_score > threshold:
results.append( sim_score)
# Sort results by score in desc order
#results.sort(key=lambda k: k["score"], reverse=True)
return results
ds = DocSim(w2v_model,stopwords=stopwords)
@app.route('/')
def my_form():
return render_template("input_form.html")
@app.route('/', methods=['POST'])
def Get_grades_org():
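# Pipeline: split the essay into paragraphs, summarize each paragraph and the question with T5, score similarities between summaries, and count linking/reference/conclusion words loaded from .docx lists.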
paragraph = request.form["essay"]
prob = request.form["question"]
paar=paragraph.replace("\n\n","\n")
para=paar.split("\n")
while '' in para:para.remove('')
#para.remove('')
#para.remove(' ')
#except Exception:
#abc=len(para)
abc=len(para)
#print(para)
#print(abc)
#print(prob)
lines=[]
lines_res=[]
for j in range(0,abc):
z=para[j]
y=z.split(".")
ee=y[0]
#xe=y[1].join(y[:])
lines.append(ee)
lines_res.append(''.join(y[1:]))
#print(lines_res)
summarys=[]
for i in para:
text=i
preprocess_text = text.strip().replace("\n","")
t5_prepared_Text = "summarize: "+preprocess_text
#print ("original text preprocessed: \n", preprocess_text)
tokenized_text = tokenizer.encode(t5_prepared_Text, return_tensors="pt")
# summarize
summary_ids = model.generate(tokenized_text,
num_beams=4,
no_repeat_ngram_size=2,
min_length=30,
max_length=100,
early_stopping=True)
output = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
summarys.append(output)
texts=prob
preprocess_texts = texts.strip().replace("\n","")
t5_prepared_Texts = "summarize: "+preprocess_texts
#print ("original text preprocessed: \n", preprocess_text)
tokenized_texts = tokenizer.encode(t5_prepared_Texts, return_tensors="pt")
# summarize
summary_idss = model.generate(tokenized_texts,
num_beams=4,
no_repeat_ngram_size=2,
min_length=30,
max_length=100,
early_stopping=True)
outputs = tokenizer.decode(summary_idss[0], skip_special_tokens=True)
#print(outputs)
#first line of para1
texts_first=lines[0]
preprocess_texts_first = texts_first.strip().replace("\n","")
t5_prepared_Texts_first = "summarize: "+preprocess_texts_first
#print ("original text preprocessed: \n", preprocess_text)
tokenized_texts_first = tokenizer.encode(t5_prepared_Texts_first, return_tensors="pt")
# summarize
summary_idsss = model.generate(tokenized_texts_first,
num_beams=4,
no_repeat_ngram_size=2,
min_length=30,
max_length=100,
early_stopping=True)
outputs_firstline = tokenizer.decode(summary_idsss[0], skip_special_tokens=True)
#print(outputs_firstline)
#remaining line of para1
texts_rest=lines_res[0]
preprocess_texts_rest = texts_rest.strip().replace("\n","")
t5_prepared_Texts_rest = "summarize: "+preprocess_texts_rest
#print ("original text preprocessed: \n", preprocess_text)
tokenized_texts_rest = tokenizer.encode(t5_prepared_Texts_rest, return_tensors="pt")
# summarize
summary_idssss = model.generate(tokenized_texts_rest,
num_beams=4,
no_repeat_ngram_size=2,
min_length=30,
max_length=100,
early_stopping=True)
outputs_rest = tokenizer.decode(summary_idssss[0], skip_special_tokens=True)
#print(lines_res[0])
#print(outputs_rest)
lines_sum=[]
lines_res_sum=[]
for c in range(0,abc):
w=summarys[c]
yy=w.split(".")
eee=yy[0]
#xe=y[1].join(y[:])
lines_sum.append(eee)
lines_res_sum.append(''.join(yy[1:]))
#print(summarys[0])
# print(lines_sum)
#print(lines_res_sum)
#while '' in lines_sum:lines_sum.remove('')
#while '' in lines_res_sum:lines_res_sum.remove('')
sim_scores1=ds.calculate_similarity(outputs, outputs_firstline)
sim_scores2=ds.calculate_similarity(outputs, outputs_rest)
try:
sim_scores3=ds.calculate_similarity(outputs, summarys[1])
except Exception as e:
sim_scores3=0
try:
sim_scores4=ds.calculate_similarity(outputs, summarys[2])
except Exception as e:
sim_scores4=0
try:
sim_scores5=ds.calculate_similarity(summarys[1], summarys[2])
except Exception as e:
sim_scores5=0
##linking words
document1 = Document('D:/Accioibis_stuff/link.docx')
link=[]
hello=['','\xa0']
for paras in document1.paragraphs:
if paras.text not in hello:
#if paras.text != '':
link.append(paras.text)
length=len(para)
words=[]
parass=[]
for ss in range(0,length):
essay=para[ss]
for j in link:
if re.search(j,essay,re.IGNORECASE):
words.append(j)
parass.append(ss)
#valu=Counter(parass)
coo=[]
for co in range(0,length):
coo.append(countX(parass,co))
a_list=zip(words,parass)
final_list=list(a_list)
df=pd.DataFrame()
df['Words']=words
df['Para_number']=parass
tot_num=df.Para_number.count()
#results1=[]
#results2=[]
try:
results1=[]
for me in coo:
#res_avg=(me/tot_num)
res1=(me/tot_num)**2
results1.append(res1)
#results2.append(res_avg)
total=sum(results1)
total_avg= tot_num/(length)
except Exception as e:
total=0
total_avg=0
##reference words
document2 = Document('D:/Djangoprojs/reference.docx')
reference=[]
hello=['','\xa0']
for paras in document2.paragraphs:
if paras.text not in hello:
#if paras.text != '':
reference.append(paras.text)
length=len(para)
wordsr=[]
parassr=[]
for ssr in range(0,length):
essayr=para[ssr]
for jr in reference:
if re.search(jr,essayr,re.IGNORECASE):
wordsr.append(jr)
parassr.append(ssr)
#valu=Counter(parass)
coor=[]
for cor in range(0,length):
coor.append(countX(parassr,cor))
a_listr=zip(wordsr,parassr)
final_listr=list(a_listr)
dfr=pd.DataFrame()
dfr['Words']=wordsr
dfr['Para_number']=parassr
tot_numr=dfr.Para_number.count()
try:
results2=[]
for mer in coor:
res2=(mer/tot_numr)**2
results2.append(res2)
totalr=sum(results2)
avg_ref= tot_numr/(length)
countr=dfr.groupby(['Para_number']).size().reset_index(name='counts')
except Exception as e:
totalr=0
avg_ref=0
countr=0
#conclusion
document3 = Document('D:/Djangoprojs/conclusion.docx')
concl=[]
hello=['','\xa0']
for paras in document3.paragraphs:
if paras.text not in hello:
#if paras.text != '':
concl.append(paras.text)
try:
sim_scores6 = ds.calculate_similarity(summarys[abc-1],outputs_rest)
except Exception as e:
sim_scores6=0
#length=len(para)
wordsc=[]
parassc=[]
essayc=para[-1]
#no=essayc.split()
#jj=len(no)
try:
for jc in concl:
if re.search(jc,essayc,re.IGNORECASE):
wordsc.append(jc)
#parassc.append(ssc)
#print(wordsc)
no_concl=len(wordsc)
except Exception as e:
no_concl=0
#print(para[0])
#print(lines[0])
#print(lines_res[0])
#print(outputs_firstline)
#print(outputs_rest)
#return exe
results=[abc,str(sim_scores1),str(sim_scores2),str(sim_scores3),str(sim_scores4),str(sim_scores5),str(no_concl),str(sim_scores6),str(total_avg),str(total),str(avg_ref),str(totalr)]
#results=[abc,a1,b1,c1,d1,e1,str(no_concl),f1,str(total_avg),str(total),str(avg_ref),str(totalr)]
output = f'len = {str(abc)} , sim_scores1 = {str(sim_scores1)} , sim_scores2 = {str(sim_scores2)} , sim_scores3 = {str(sim_scores3)} , sim_scores4 = {str(sim_scores4)} , sim_scores5 = {str(sim_scores5)} , sim_scores6 = {str(sim_scores6)} , total_avg , total , avg_ref , totalr = {str(total_avg)} , {str(total)} , {str(avg_ref)} , {str(totalr)} '
return output
if __name__ == "__main__":
app.run()
|
import numpy as np
from sklearn.datasets import load_diabetes
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.metrics import accuracy_score, r2_score
# from sklearn.svm import LinearSVC, SVC
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
# 1. Data
dataset = load_diabetes()
x = dataset.data
y = dataset.target
print(x.shape, y.shape) # (442, 10) (442,)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, shuffle=True, random_state=45)
kfold = KFold(n_splits=5, shuffle=True)
# 2. Model
for i in [LinearRegression, KNeighborsRegressor, DecisionTreeRegressor, RandomForestRegressor]:
print()
model = i()
# Training: 5-fold cross-validation
scores = cross_val_score(model, x_train, y_train, cv=kfold)
print(i.__name__ + '\'s score(R2) :', scores)
'''
LinearRegression's score(R2) : [0.43927814 0.55264136 0.36280019 0.50007606 0.35012461]
KNeighborsRegressor's score(R2) : [0.47517088 0.46357035 0.41234169 0.30249363 0.23373401]
DecisionTreeRegressor's score(R2) : [-0.41724222 0.20656622 -0.00241595 -0.12564012 -0.04489954]
RandomForestRegressor's score(R2) : [0.3627855 0.37858172 0.37526086 0.48824071 0.47607181]
''' |
# -*- coding: utf-8 -*-
import telebot
import os
import requests
import time
import random
from yobit import get_btc
from yobit import get_money
from telebot import types
from flask import Flask, request
from flask_sslify import SSLify
token = os.environ['TELEGRAM_TOKEN']
bot = telebot.TeleBot(token, threaded=False)
bot.remove_webhook()
time.sleep(1)
bot.set_webhook(url='https://telegram-webhook.herokuapp.com' + '/' + token)
app = Flask(__name__)
sslify = SSLify(app)
@app.route('/')
def base():
return 'bot'
@app.route('/' + token, methods=["POST"])
def webhook():
bot.process_new_updates([telebot.types.Update.de_json(request.stream.read().decode("utf-8"))])
return "ok", 200
@bot.message_handler(commands=['help'])
def helpcommand(message):
bot.send_message(message.chat.id, 'Hello *' + message.from_user.first_name + '*!', parse_mode='Markdown')
@bot.message_handler(commands=['money'])
def moneyCommand(message):
try:
text = message.text
texts = text.split(' ')
# texts[0] -> /money
# texts[1] -> asdf
# texts[2] -> 1234
# do something you want to do.
bot.send_message(message.chat.id, 'Hello *' + message.from_user.first_name + '*, the current rate of ' + '*' + texts[1] + ' *' + 'is: ' + '*' + get_money(texts[2], texts[1]) + '*', parse_mode='Markdown')
except Exception as e:
print(e)
if __name__ == '__main__':
app.run()
|
# -*- coding: utf-8 -*-
import numpy as np
import torch
from torch import Tensor
import matplotlib.pyplot as plt
################ Generate data ################
def generate_disc_set(nb):
"""Generate dataset
INPUT
nb: number of points to generate
OUTPUT:
data
labels with one hot encoding
"""
# Create nb samples between 0 and 1
data = Tensor(nb, 2).uniform_(0, 1)
# Points inside the circle with radius 1/2 centered in 0 have label 1, otherwise 0
label = ((data - .5) ** 2).sum(1) <= 1 / (2 * np.pi)
return data, convert_to_one_hot_labels(data, label.long())
def convert_to_one_hot_labels(input_, target):
"""Convert labels to one-hot encoding
Function taken from the course prologue
"""
tmp = input_.new(target.size(0), max(0, target.max()) + 1).fill_(0)
tmp.scatter_(1, target.view(-1, 1), 1.0)
return tmp.long()
################ Auxiliary functions ################
def plot_points(input_, target, pred=None, alpha=.5, highlight_errors=True,
errors_color="red", title=None):
"""Scatter plot of the classes in dataset
INPUT
input_: data
target: labels
pred: estimated classes of datapoints
alpha: transparency of points in plot
highlight_errors: plot the errors in prediction
errors_color: color for the errors in prediction
"""
plt.figure()
if title is not None:
plt.title(title)
# Cannot plot errors without prediction
if highlight_errors:
assert pred is not None
# Samples from each class
input_0 = input_[target[:,0].nonzero(),:].view(-1,2)
input_1 = input_[target[:,1].nonzero(),:].view(-1,2)
# Plot
plt.scatter(input_0[:,0], input_0[:,1], c="gray", alpha=1, label = "Class 0")
plt.scatter(input_1[:,0], input_1[:,1], c="lightgray", alpha=1, label = "Class 1")
# Show errors
if highlight_errors:
# Indexes of incorrect labeled points
idx = (pred != target[:,0]).nonzero()
# Plot if there are errors
if len(idx):
errors = input_[idx,:].view(-1,2)
plt.scatter(errors[:,0], errors[:,1], c=errors_color, alpha=alpha, label="Errors")
plt.legend()
plt.show()
def compute_labels(predicted):
"""Compute the labels of the prediction
INPUT:
predicted: prediction with one-hot encoding
OUTPUT
predicted labels with one hot encoding
"""
# Class with biggest probability is the predicted
res = torch.max(predicted, 1, keepdim=False, out=None)[1]
# Convert to one hot labels
lbl = convert_to_one_hot_labels(Tensor(), res)
return lbl
def train_test_split(X,y,split=0.2):
'''
Divide data in train and test sets
INPUT:
X - train input
y - train target
split - proportion of test samples
OUTPUT:
trainX, testX, trainY, testY
'''
#Permute data and labels with a single shared permutation so (X, y) pairs stay aligned
perm = torch.randperm(len(X))
X = X[perm]
y = y[perm]
#Number of test samples
test_samples=int(split*(len(X)))
testX, testY = X[:test_samples],y[:test_samples]
trainX, trainY = X[test_samples:],y[test_samples:]
return trainX,testX,trainY,testY
################ Training and testing functions ################
def test(model, loss, test_input, test_target, verbose = True):
"""Test the model
INPUT
model
loss: loss function
test_input: input
test_target: labels
verbose: if True, write accuracy
OUTPUT
accuracy
"""
model.train = False
# Get output and loss
output = model.forward(test_input)
L = loss.forward(output, test_target)
# Get predicted labels
labels = compute_labels(output)[:,0]
# Compute accuracy
errors = (test_target[:,0] != labels).sum()
accuracy = (len(test_target) - errors) / len(test_target)
if verbose:
print(" >>> Test: Loss {:.08f} Accuracy {:.02f} Errors {}".format(
L, accuracy, errors))
return accuracy, labels
def predict(model, input_):
"""Get prediction
INPUT
model
input_
OUTPUT
output of the model given the input
"""
# Get output
output = model.forward(input_)
# Get predicted labels
labels = compute_labels(output)[:,0]
return labels
def train(optimizer, model, loss, n_epochs, mini_batch_size,
train_input, train_target, test_input, test_target, verbose=True):
"""Train the model without early stopping
INPUT
optimizer
model
loss: loss function
n_epochs: number of epoch to run
mini_batch_size
train_input: input
train_target: labels
verbose: if True, write accuracy and loss per epoch
OUTPUT
accuracy per epoch and final
"""
output_vals = Tensor()
max_range = None
# If mini_batch_size is a multiple of the number of samples, use them all
if train_input.size(0) % mini_batch_size == 0:
max_range = train_input.size(0)
# If not, last samples are not used
else:
max_range = train_input.size(0) - mini_batch_size
acc_train = []
acc_test = []
#Iterate through epochs
for e in range(n_epochs):
model.train = True
# Variables for loss, number of errors and prediction
L_tot = 0
errors_tot = 0
pred_acc = Tensor().long()
#Iterate through minibatches
for b in range(0, max_range, mini_batch_size):
#Input data
d = train_input.narrow(0, b, mini_batch_size)
#Labels
l = train_target.narrow(0, b, mini_batch_size)
# Forward pass
output = model.forward(d)
L = loss.forward(output, l)
# Backward pass
grad = loss.backward()
model.backward(grad)
#Step
optimizer.step(model, loss)
# Compute total loss
L_tot += L
# Compute metrics
r = compute_labels(output)[:,0]
pred_acc = torch.cat([pred_acc, r])
errors = (l[:,0] != r).sum()
errors_tot += errors
# Total accuracy
accuracy = (len(train_target) - errors_tot) / len(train_target)
if verbose:
print("Train: Epoch {:d} Loss {:.08f} Accuracy {:.02f} Errors {}".format(
e, L_tot, accuracy, errors_tot))
acc_test.append(test(model, loss, test_input, test_target, verbose = verbose)[0])
acc_train.append(accuracy)
return accuracy, pred_acc, acc_train, acc_test
|
import os
import sys
import pdb
# You might need to run this in the query images folder:
# sips -r -90 *.JPG && sips -r 90 *.JPG or sips -r 270 *.JPG
build_retrieval_database = sys.argv[1]
create_correspondences = sys.argv[2]
run_direct_matching_3D_points_feature_builder = sys.argv[3]
query_image_arg = sys.argv[4] # i.e IMG_7932.JPG
data_dir = sys.argv[5] # i.e data/coop3
intrinsics_matrix_path = sys.argv[6]
extract_3D_points_from_sparse = sys.argv[7]
benchmarking = sys.argv[8] # use this if you are testing an images already in the dataset
query_image_arg_no_ext = query_image_arg.split(".")[0]
os.system("mkdir results/")
os.system("mkdir results/"+query_image_arg_no_ext)
if(build_retrieval_database == '1'):
print "Creating retrieval database"
print "python2.7 image_retrieval_database_creation.py "+data_dir+"/model_images"
os.system("rm -rf "+data_dir+"/model_images_vocab_out")
os.system("python2.7 image_retrieval_database_creation.py "+data_dir+"/model_images")
if(create_correspondences == '1'):
print "Creating correspondences"
print "python2.7 correspondences_builder.py "+data_dir+"/model_images_database "+data_dir+"/points_correspondences"
os.system("python2.7 correspondences_builder.py "+data_dir+"/model_images_database "+data_dir+"/points_correspondences")
if(run_direct_matching_3D_points_feature_builder == '1'):
print "Creating Direct Matching 3D features matches"
print "python2.7 direct_matching_3D_points_feature_builder.py "+data_dir
os.system("python2.7 direct_matching_3D_points_feature_builder.py "+data_dir)
print "Running script 1/4"
print "python2.7 image_retrieval_query_image.py "+data_dir+"/query_images/"+query_image_arg
os.system("python2.7 image_retrieval_query_image.py "+data_dir+"/query_images/"+query_image_arg)
print "Running script 2/4"
print "python2.7 query_image_feature_extraction.py "+data_dir+" "+query_image_arg
os.system("python2.7 query_image_feature_extraction.py "+data_dir+" "+query_image_arg)
print "Running script 3/4"
print "python2.7 query_matcher_improved_ransac.py "+data_dir+" "+query_image_arg+" "+benchmarking + " " + intrinsics_matrix_path
os.system("python2.7 query_matcher_improved_ransac.py "+data_dir+" "+query_image_arg+" "+benchmarking + " " + intrinsics_matrix_path)
print "Running script 4/4"
print "python2.7 visualizer.py "+data_dir+"/query_images/"+query_image_arg + " " + benchmarking
os.system("python2.7 visualizer.py "+data_dir+"/query_images/"+query_image_arg + " " + benchmarking)
if(extract_3D_points_from_sparse == "1"):
print "Extracting 3D points from sparse model folder.."
print "python2.7 model_points3D_extractor.py "+data_dir
os.system("python2.7 model_points3D_extractor.py "+data_dir)
|
#!/usr/bin/env python
from findblobsXGC import findblobsXGC
from trackblobsXGC import trackblobsXGC
import adios as ad
import numpy as np
from matplotlib.tri import Triangulation,LinearTriInterpolator
from IPython.parallel import Client
rc = Client()
dview = rc[:]
with dview.sync_imports(): #these required by findblobsXGC
import matplotlib.pyplot as plt
import numpy as np
from findblobsXGC import findblobsXGC
#get data from f3d
fileDir = '/ccs/home/rchurchi/scratch/ti252_ITER_new_profile/'
#mesh
fm = ad.file(fileDir + 'xgc.mesh.bp')
RZ = fm['/coordinates/values'][...]
tri = fm['nd_connect_list'][...]
psi = fm['psi'][...]
psi_x= 11.10093394162000
psin = psi/psi_x
eq_x_z= -3.442893939000000
fm.close()
spaceinds = (psin>0.95) & (psin<1.05) & ( (RZ[:,1]>=eq_x_z) | (psin>=1) )
tmp=spaceinds[tri] #rzspaceinds T/F array, same size as R
goodTri=np.all(tmp,axis=1) #only use triangles who have all vertices in rzInds
tri=tri[goodTri,:]
#remap indices in triangulation
indices=np.where(spaceinds)[0]
#for i in range(len(indices)):
# tri[tri==indices[i]]=i
imap = np.empty((indices.max()+1))
imap[indices] = np.arange(0,indices.size)
triGrid = imap[tri]
Rgrid = RZ[spaceinds,0]
Zgrid = RZ[spaceinds,1]
psinGrid = psin[spaceinds]
print 'Mesh loaded'
#bfield
fb = ad.file(fileDir + 'xgc.bfield.bp')
bfield = fb['/node_data[0]/values'][spaceinds,:]
fb.close()
#tindex
f1d = ad.file(fileDir+'xgc.oneddiag.bp')
time = np.unique(f1d['time'][:])[50:]
tindex = np.unique(f1d['tindex'][:])[50:] #remove first 50
f1d.close()
Ntimes = tindex.size
Nplanes = 32
triObj = Triangulation(Rgrid,Zgrid,triGrid)
fBR = LinearTriInterpolator(triObj,bfield[:,0])
fBZ = LinearTriInterpolator(triObj,bfield[:,1])
#put the required things into the parallel workers
#dview.push(dict(Rgrid=Rgrid,Zgrid=Zgrid,triGrid=triGrid))
## find blobs in each plane, time
#blobInds = np.empty((Nplanes,Ntimes),dtype=object)
#blobPaths = np.empty((Nplanes,Ntimes),dtype=object)
#blobParams = np.empty((Nplanes,Ntimes),dtype=object)
#holeInds = np.empty((Nplanes,Ntimes),dtype=object)
#holePaths = np.empty((Nplanes,Ntimes),dtype=object)
#holeParams = np.empty((Nplanes,Ntimes),dtype=object)
#for (it,t) in enumerate(tindex):
# print 'Starting time ind '+str(t)
# try:
# f3d = ad.file(fileDir+'xgc.f3d.'+str(t).zfill(5)+'.bp')
# except Exception as e:
# print e
# continue
# ne = f3d['e_den'][spaceinds,:]
# f3d.close()
#
# ne0 = np.mean(ne,axis=1)
# data = ne/ne0[:,np.newaxis]
# #out = dview.map_sync(lambda d: findblobsXGC(Rgrid,Zgrid,triGrid,d,blobHt=1.02,holeHt=0.98),np.rollaxis(data,-1))
# out = dview.map_sync(lambda d: findblobsXGC(Rgrid,Zgrid,triGrid,d),np.rollaxis(data,-1))
# out = np.array(out)
# blobInds[:,it] = out[:,0]
# blobPaths[:,it] = out[:,1]
# blobParams[:,it] = out[:,2]
# holeInds[:,it] = out[:,3]
# holePaths[:,it] = out[:,4]
# holeParams[:,it] = out[:,5]
# # for p in range(Nplanes):
# # data = neOverne0[:,p,t]
# # blobInds[p,t],blobPaths[p,t],blobParams[p,t],\
# # holeInds[p,t],holePaths[p,t],holeParams[p,t] = findblobsXGC(Rgrid,Zgrid,triGrid,data)
#
#np.savez('trackBlobs_example_preTracking.npz',Rgrid=Rgrid,Zgrid=Zgrid,triGrid=triGrid,psinGrid=psinGrid,spaceinds=spaceinds,\
# tindex=tindex,\
# blobInds=blobInds,blobPaths=blobPaths,blobParams=blobParams,\
# holeInds=holeInds,holePaths=holePaths,holeParams=holeParams)
f = np.load('trackBlobs_example_preTracking.npz')
blobParams = f['blobParams']
holeParams = f['holeParams']
## populate the thetaHat unit vector
for t in range(Ntimes):
for p in range(Nplanes):
if blobParams[p,t] is not None:
BR = fBR(blobParams[p,t]['R0'],blobParams[p,t]['Z0'])
BZ = fBZ(blobParams[p,t]['R0'],blobParams[p,t]['Z0'])
Bpol = np.vstack((BR,BZ)).T
blobParams[p,t]['thetaHat'] = Bpol / np.sqrt(np.sum(Bpol**2.,axis=1)[:,np.newaxis])
dview.push(dict(time=time,blobParams=blobParams,holeParams=holeParams))
with dview.sync_imports(): #these required by findblobsXGC
from trackblobsXGC import trackblobsXGC
out = dview.map_sync(lambda d: trackblobsXGC(time,d,isTwoSpeed=True),blobParams)
blobParams = np.array(out)
out = dview.map_sync(lambda d: trackblobsXGC(time,d,isTwoSpeed=True),holeParams)
holeParams = np.array(out)
#for p in range(Nplanes):
# blobParams[p,:] = trackblobsXGC(time,blobParams[p,:],isTwoSpeed=True)
# holeParams[p,:] = trackblobsXGC(time,holeParams[p,:],isTwoSpeed=True)
np.savez('trackBlobs_example_finalParams.npz',blobParams=blobParams,holeParams=holeParams)
|
import torch
from torch import nn
from einops import rearrange, repeat
##################################
# Linformer
##################################
def get_EF(input_size, dim, method="learnable", head_dim=None, bias=True):
"""
Returns the E or F matrix, initialized via xavier initialization.
This is the recommended way to do it according to the authors of the paper.
Includes a method for convolution, as well as a method for no additional params.
"""
assert method == "learnable" or method == "convolution" or method == "no_params", "The method flag needs to be either 'learnable', 'convolution', or 'no_params'!"
if method == "convolution":
conv = nn.Conv1d(head_dim, head_dim, kernel_size=int(input_size/dim), stride=int(input_size/dim))
return conv
if method == "no_params":
mat = torch.zeros((input_size, dim))
torch.nn.init.normal_(mat, mean=0.0, std=1/dim)
return mat
lin = nn.Linear(input_size, dim, bias)
torch.nn.init.xavier_normal_(lin.weight)
return lin
class linformerAttention(nn.Module):
def __init__(
self,
dim,
dropout,
input_size,
dim_k = 20, # Probably 20? The dimension we want K and V to be
full_attention = False, # If False it will use the linformer implementation
parameter_sharing = None, # The `parameter_sharing` flag has to be either 'none', 'headwise', 'kv', or 'layerwise'.
):
super().__init__()
self.dim = dim
self.dropout = nn.Dropout(dropout)
self.dim_k = dim_k
self.full_attention = full_attention
self.input_size = input_size
self.print_dim = False
self.E = get_EF(input_size, dim = self.dim_k, method = "learnable", head_dim = self.dim)
self.F = get_EF(input_size, dim = self.dim_k, method = "learnable", head_dim = self.dim) if parameter_sharing == "none" or parameter_sharing == "headwise" else self.E
self.is_proj_tensor = isinstance(self.E, torch.Tensor)
def forward(self, q, k, v):
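# Linformer attention: project K (and later V) from sequence length down to dim_k via E/F, then do scaled dot-product attention.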
if self.print_dim:
print("matmul(k, e)")
print("k:"+str(k.shape))
print("E:"+str(self.input_size)+", "+str(self.dim_k))
if not self.full_attention:
if self.is_proj_tensor:
# Always go to else
self.E = self.E.to(k.device)
#k = torch.matmul(k, self.E)
b, h, *_ = q.shape
projection_E = repeat(self.E, 'j d -> b h j d', b = b, h = h)
k = torch.einsum('...di,...dj->...ij', k, projection_E)
else:
k = torch.einsum('...ij->...ji', k)
k = self.E(k)
if self.print_dim:
print("matmul(q, k)")
print("q:"+str(q.shape))
print("K:"+str(k.shape))
q = torch.einsum('...id,...dj->...ij', q, k)
P_bar = q/torch.sqrt(torch.tensor(self.dim_k).type(q.type())).to(q.device)
P_bar = P_bar.softmax(dim=-1)
P_bar = self.dropout(P_bar)
if not self.full_attention:
if self.is_proj_tensor:
# WRONG!
self.F = self.F.to(v.device)
v = torch.matmul(v, self.F)
else:
v = torch.einsum('...ij->...ji', v)
v = self.F(v)
out = torch.einsum('...id,...jd->...ij', P_bar, v)
return out
|
import copy, os, sys
from RootTools.core.Sample import Sample
import ROOT
# Logging
import logging
logger = logging.getLogger(__name__)
from TopEFT.samples.color import color
# Data directory
try:
data_directory = sys.modules['__main__'].data_directory
except:
#from TopEFT.Tools.user import data_directory as user_data_directory
data_directory = '/afs/hephy.at/data/rschoefbeck01/cmgTuples/'
# Take post processing directory if defined in main module
try:
import sys
postProcessing_directory = sys.modules['__main__'].postProcessing_directory
except:
postProcessing_directory = "TopEFT_PP_v14/dilep/"
logger.info("Loading MC samples from directory %s", os.path.join(data_directory, postProcessing_directory))
dirs = {}
dirs['ttGamma0j_ll'] = ['ttGamma0j_ll']
dirs['ttGamma0j_ll_DAG_0p176700_DVG_0p176700'] = ['ttGamma0j_ll_DAG_0p176700_DVG_0p176700']
dirs['ttGamma0j_ll_DAG_0p176700_DVG_m0p176700'] = ['ttGamma0j_ll_DAG_0p176700_DVG_m0p176700']
dirs['ttGamma0j_ll_DAG_0p250000'] = ['ttGamma0j_ll_DAG_0p250000']
dirs['ttGamma0j_ll_DAG_0p500000'] = ['ttGamma0j_ll_DAG_0p500000']
dirs['ttGamma0j_ll_DAG_m0p176700_DVG_0p176700'] = ['ttGamma0j_ll_DAG_m0p176700_DVG_0p176700']
dirs['ttGamma0j_ll_DAG_m0p176700_DVG_m0p176700']= ['ttGamma0j_ll_DAG_m0p176700_DVG_m0p176700']
dirs['ttGamma0j_ll_DAG_m0p250000'] = ['ttGamma0j_ll_DAG_m0p250000']
dirs['ttGamma0j_ll_DAG_m0p500000'] = ['ttGamma0j_ll_DAG_m0p500000']
dirs['ttGamma0j_ll_DVG_0p250000'] = ['ttGamma0j_ll_DVG_0p250000']
dirs['ttGamma0j_ll_DVG_0p500000'] = ['ttGamma0j_ll_DVG_0p500000']
dirs['ttGamma0j_ll_DVG_m0p250000'] = ['ttGamma0j_ll_DVG_m0p250000']
dirs['ttGamma0j_ll_DVG_m0p500000'] = ['ttGamma0j_ll_DVG_m0p500000']
directories = { key : [ os.path.join( data_directory, postProcessing_directory, dir) for dir in dirs[key]] for key in dirs.keys()}
#ewkDMGZ
ttGamma0j_ll = Sample.fromDirectory(name="ttGamma0j_ll", treeName="Events", isData=False, color=1, texName="SM", directory=directories['ttGamma0j_ll'])
ttGamma0j_ll_DAG_0p176700_DVG_0p176700 = Sample.fromDirectory(name="ttGamma0j_ll_DAG_0p176700_DVG_0p176700", treeName="Events", isData=False, color=1, texName="C_{2,A} = C_{2,V} = 0.1767", directory=directories['ttGamma0j_ll_DAG_0p176700_DVG_0p176700'])
ttGamma0j_ll_DAG_0p176700_DVG_m0p176700 = Sample.fromDirectory(name="ttGamma0j_ll_DAG_0p176700_DVG_m0p176700", treeName="Events", isData=False, color=1, texName="C_{2,A} = -C_{2,V} = 0.1767", directory=directories['ttGamma0j_ll_DAG_0p176700_DVG_m0p176700'])
ttGamma0j_ll_DAG_0p250000 = Sample.fromDirectory(name="ttGamma0j_ll_DAG_0p250000", treeName="Events", isData=False, color=1, texName="C_{2,A} = 0.25", directory=directories['ttGamma0j_ll_DAG_0p250000'])
ttGamma0j_ll_DAG_0p500000 = Sample.fromDirectory(name="ttGamma0j_ll_DAG_0p500000", treeName="Events", isData=False, color=1, texName="C_{2,A} = 0.5", directory=directories['ttGamma0j_ll_DAG_0p500000'])
ttGamma0j_ll_DAG_m0p176700_DVG_0p176700 = Sample.fromDirectory(name="ttGamma0j_ll_DAG_m0p176700_DVG_0p176700", treeName="Events", isData=False, color=1, texName="C_{2,A} = -C_{2,V} = -0.1767", directory=directories['ttGamma0j_ll_DAG_m0p176700_DVG_0p176700'])
ttGamma0j_ll_DAG_m0p176700_DVG_m0p176700= Sample.fromDirectory(name="ttGamma0j_ll_DAG_m0p176700_DVG_m0p176700", treeName="Events", isData=False, color=1, texName="C_{2,A} = C_{2,V} = -0.1767", directory=directories['ttGamma0j_ll_DAG_m0p176700_DVG_m0p176700'])
ttGamma0j_ll_DAG_m0p250000 = Sample.fromDirectory(name="ttGamma0j_ll_DAG_m0p250000", treeName="Events", isData=False, color=1, texName="C_{2,A} = -0.25", directory=directories['ttGamma0j_ll_DAG_m0p250000'])
ttGamma0j_ll_DAG_m0p500000 = Sample.fromDirectory(name="ttGamma0j_ll_DAG_m0p500000", treeName="Events", isData=False, color=1, texName="C_{2,A} = -0.5", directory=directories['ttGamma0j_ll_DAG_m0p500000'])
ttGamma0j_ll_DVG_0p250000 = Sample.fromDirectory(name="ttGamma0j_ll_DVG_0p250000", treeName="Events", isData=False, color=1, texName="C_{2,V} = 0.25", directory=directories['ttGamma0j_ll_DVG_0p250000'])
ttGamma0j_ll_DVG_0p500000 = Sample.fromDirectory(name="ttGamma0j_ll_DVG_0p500000", treeName="Events", isData=False, color=1, texName="C_{2,V} = 0.5", directory=directories['ttGamma0j_ll_DVG_0p500000'])
ttGamma0j_ll_DVG_m0p250000 = Sample.fromDirectory(name="ttGamma0j_ll_DVG_m0p250000", treeName="Events", isData=False, color=1, texName="C_{2,V} = -0.25", directory=directories['ttGamma0j_ll_DVG_m0p250000'])
ttGamma0j_ll_DVG_m0p500000 = Sample.fromDirectory(name="ttGamma0j_ll_DVG_m0p500000", treeName="Events", isData=False, color=1, texName="C_{2,V} = -0.5", directory=directories['ttGamma0j_ll_DVG_m0p500000'])
allSignals = [\
ttGamma0j_ll,
ttGamma0j_ll_DAG_0p176700_DVG_0p176700,
ttGamma0j_ll_DAG_0p176700_DVG_m0p176700,
ttGamma0j_ll_DAG_0p250000,
ttGamma0j_ll_DAG_0p500000,
ttGamma0j_ll_DAG_m0p176700_DVG_0p176700,
ttGamma0j_ll_DAG_m0p176700_DVG_m0p176700,
ttGamma0j_ll_DAG_m0p250000,
ttGamma0j_ll_DAG_m0p500000,
ttGamma0j_ll_DVG_0p250000,
ttGamma0j_ll_DVG_0p500000,
ttGamma0j_ll_DVG_m0p250000,
ttGamma0j_ll_DVG_m0p500000,
]
|
from django.apps import AppConfig
class RecommendAppConfig(AppConfig):
name = 'recommend_app'
|
from rest_framework import serializers
from kratos.apps.pipeline.models import Pipeline
from kratos.apps.task.serializers import TaskSerializer as TaskField
from kratos.apps.app.serializers import AppSerializer as AppField
class PipelineListSerializer(serializers.ModelSerializer):
class Meta:
model = Pipeline
fields = ('id', 'name', 'description', 'created_at', 'updated_at')
class PipelineSerializer(serializers.ModelSerializer):
appinfo = AppField(read_only=True, source='app')
tasks = serializers.SerializerMethodField()
def get_tasks(self, instance):
tasks = instance.task.all().order_by('stage', 'seq')
return TaskField(tasks, many=True).data
class Meta:
model = Pipeline
fields = ('id', 'name', 'app', 'tasks', 'description', 'appinfo')
extra_kwargs = {'app': {'write_only': True, 'required': False}}
|
'''from math import sqrt
n = int(input('maximal number'))
for a in range(1,n+1):
for b in range(a,n):
c_square = a**2 + b**2
c = int(sqrt(c_square))
if ((c_square - c**2)) == 0:
print(a, b, c)'''
#example of nested for loops (the commented-out block above)
#program for printing Pythagorean triples
travelling = input('yes, no')
while travelling == ('yes'):
num = int(input('enter the number of people travelling:'))
for num in range(1, num + 1):
name = input('name:')
age = input('age:')
gender = input('male or female:')
print(name)
print(age)
print(gender)
travelling = input('forgot someone!!!')
|
import cv2
import numpy as np
import argparse
from kernels import kernels
import time
def filter(image2D,kernel,norm=True):
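# Sliding-window 2D filter with zero padding; the result is clipped to [0, 255] and cast to uint8 unless norm is False.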
padding = int((kernel.shape[0]-1)/2)
image2D_pad = np.pad( image2D, padding )
new_image = np.zeros( image2D.shape )
for x in range( image2D.shape[0] - padding ):
for y in range ( image2D.shape[1] - padding):
window = image2D_pad[ x : x + kernel.shape[0] , y : y + kernel.shape[0] ]
new_image[x][y] = ( window * kernel ).sum()
if not norm: return new_image
new_image = np.where(new_image>255,255,new_image)
new_image = np.where(new_image<0,0,new_image)
return new_image.astype(np.uint8)
def combine(image2D,k1,k2):
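# Combine two directional filter responses (e.g., a Sobel x/y pair) into a gradient magnitude: sqrt(f1^2 + f2^2).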
f1 = filter(image2D,k1,norm=False)
f2 = filter(image2D,k2,norm=False)
new_image = np.sqrt(f1**2 + f2**2)
new_image = np.where(new_image>255,255,new_image)
new_image = np.where(new_image<0,0,new_image)
return new_image.astype(np.uint8)
if __name__ == "__main__" :
parser = argparse.ArgumentParser(description="Filter 2D Gray Image Processing - UNICAMP")
parser.add_argument("file", help="input file image .png" )
parser.add_argument("-k","--kernel", help="Choose number of kernel to apply", type=str, choices=["1","2","3","4","5","6","7","8","9","10"] , default=0)
parser.add_argument("-s","--save", help="Save Image", type=str )
args = parser.parse_args()
# print( "Kernel: " )
# print( kernels[args.kernel] )
print( "File: ",args.file )
image = cv2.imread( args.file ,0)
t1 = time.time()
# print("asd",args.kernel)
if(args.kernel == "10"):
image_new = combine(image,kernels["1"],kernels["2"])
else:
image_new = filter(image,kernels[args.kernel])
t2 = time.time()
print("Time: ", t2-t1 ,"s")
if ( args.save ):
cv2.imwrite(args.save, image_new)
else:
cv2.imshow(args.file,image_new)
print("Press any key to exit")
cv2.waitKey()
|
# Testing Local Webpages
##Localhost Test
* Goto "http://bs-local.com:45691/check"
* Page title
|
print("{}, {}".format("Hello","World"))
print("{0}, {1}, {0}".format("Hello", "World"))
print("{first}, {last}".format(first="Hello", last="World"))
# Since 3.6
var = 8
print(f"{var}")
# < align to left, ^ center, > right
print('{a:<10}|{a:^10}|{a:>10}'.format(a='test'))
# padding
print('{a:*<10}|{a:*^10}|{a:*>10}'.format(a='test'))
data = range(100)
print("{d[0]}...{d[99]}".format(d=data))
print("normal:{num:d}".format(num=33))
print("normal:{num:f}".format(num=33))
print("binary:{num:b}".format(num=33))
print("binary:{num:08b}".format(num=33))
print("hex:{num:x}".format(num=33))
print("hex:0x{num:0<4x}".format(num=33))
print("hex:0x{num:0>4x}".format(num=33))
print("octal:{num:o}".format(num=33))
print("{num:f}".format(num=22/7))
print("{num:0.2f}".format(num=22/7))
print("{num:.2e}".format(num=22/7))
print("{num:.1%}".format(num=22/7))
print("{num:g}".format(num=5.1200001))
|
import bleach
import psycopg2
import datetime
def most_popular_articles():
"""Return the most popular three articles of all time from 'news' , most viewed first."""
db = psycopg2.connect(database="news")
c = db.cursor()
c.execute("SELECT path, count(*) AS num FROM log "
+"WHERE path = path AND status = '200 OK' AND path !='/'"
+"GROUP BY path ORDER BY num DESC LIMIT 3")
q_result = c.fetchall()
posts = ""
for post,view in q_result:
posts += ("\""+post[9:].replace('-',' ').title()+"\""+" — "
+str(view)+" views"+"\n")
db.close()
return posts
def most_popular_authors():
"""Return the most popular article authors of all time from the 'news', most viewed first."""
db = psycopg2.connect(database="news")
c = db.cursor()
c.execute("SELECT name, count(*) AS num "
+"FROM articles AS a, authors AS au, log AS l "
+"WHERE a.author = au.id "
+"AND l.path = CONCAT('/article/',a.slug) "
+"AND status = '200 OK' AND path !='/' "
+"GROUP BY au.name ORDER BY num DESC")
q_result = c.fetchall()
result = ""
for auth,view in q_result:
result += ("\""+auth.replace('-',' ').title()+"\""+" — "
+str(view)+" views"+"\n")
db.close()
return result
def errors_percentage():
"""Return the days on which more than 1% of requests to 'news' resulted in errors."""
db = psycopg2.connect(database="news")
c = db.cursor()
""" CREATE VIEW date_by_days AS SELECT CAST(time AS DATE) FROM log "
+"WHERE status != '200 OK' ;"""
c.execute("SELECT time,(COUNT(time)* 100.00 / (SELECT COUNT(*) FROM date_by_days)) as num "
+"FROM date_by_days "
+"GROUP BY time "
+"HAVING (COUNT(time)* 100.00 / (SELECT COUNT(*) FROM date_by_days)) > 1 "
+"ORDER BY num DESC")
q_result = c.fetchall()
result=""
for tim,err in q_result:
time_re = str(tim)
dt = datetime.datetime(int(time_re[:4]), int(time_re[5:7]), int(time_re[8:10]))
result += dt.strftime('%b %d, %Y')+" - "+str(err)[0:3]+"% errors\n"
db.close()
return result
if __name__ == '__main__':
print("The most popular three articles of all time:\n"
+most_popular_articles())
print("The most popular article authors of all time:\n"
+most_popular_authors())
print("The days where more than 1% of requests lead to errors:\n"
+str(errors_percentage()))
|
from tabulate import tabulate
def print_ip_table(reach_ip, unreach_ip):
table = {"Reachable": reach_ip, "Unreachable": unreach_ip}
print(tabulate(table, headers="keys"))
if __name__ == "__main__":
reach_ip = ["10.1.1.1", "10.1.1.2"]
unreach_ip = ["10.1.1.7", "10.1.1.8", "10.1.1.9"]
print_ip_table(reach_ip, unreach_ip) |
import requests
import jwt
# TODO: rewrite tests with Flask, properly
r = requests.get(
'http://188.120.249.89/',
headers={
'X-Auth-Token': jwt.encode(
{'user_id': -1}, key='_1R*Ng_K3Y', algorithm='HS256'
).decode('utf-8')
},
params={'url': 'https://github.com/linuxwacom/input-wacom'},
)
print(r.content)
|
from decimal import Decimal
import boto3
dynamodb = boto3.resource('dynamodb',region_name='eu-west-2')
table = dynamodb.Table('Samples')
table.put_item(
Item={
'timestamp': int(1),
'values': [Decimal('1.2'), Decimal('1.3'), Decimal('2.4'),
Decimal('0.0'), Decimal('0.4'), Decimal('0.5'),
Decimal('0.7'), Decimal('0.9')],
'read': False,
}
) |
import cv2
# importing the Coco Classname
classNames = []
classFile = 'coco.names'
with open(classFile, 'rt') as cocoNames:
classNames = cocoNames.read().rstrip('\n').split('\n')
# importing the configuration files
configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightsPath = 'frozen_inference_graph.pb'
# Required Settings
net = cv2.dnn_DetectionModel(weightsPath, configPath)
net.setInputSize(320, 320)
net.setInputScale(1.0/127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)
# function that turns detected objects into white rectangles
def getFrame(videoCapture, visible):
success, img = videoCapture.read()
img = cv2.flip(img, 1)
if visible:
classIds, confs, bbox = net.detect(img, confThreshold=0.7)
if len(classIds):
for classId, conf, box in zip(classIds.flatten(), confs.flatten(), bbox):
if classId != 1:
cv2.rectangle(img, box, (255, 255, 255), -1)
cv2.putText(img, classNames[classId-1], (box[0] + (box[0]//2), box[1] + (box[1]//2)), cv2.FONT_HERSHEY_COMPLEX,
0.5, (0, 0, 0), 1)
return success, img
|
def story_intro():
"""
Introduction to the game and game name
"""
print(" *****************************************")
print()
print("Adventure game that brings you through a Wonderland")
print("with Alice. Meet fantastic creatures along the way")
print("and help restore balance to Wonderland by helping")
print("Alice to slay the Jabberwocky!")
print()
print("******************************************************")
print("* *")
print("* * * *")
print("* Welcome * * * * *")
print("* to * * * * *")
print("* the * * * * *")
print("* Adventures of Alice * * * * *")
print("* * *''''* * *")
print("* * * *")
print("******************************************************")
print()
|
#!/usr/bin/env python3
from control import *
inst = 'cpu-cycles,instructions'
cache_l0 = 'raw-l1-dcache,raw-l1-icache'
cache_l1d = 'raw-l1-dcache,raw-l1-dcache-refill,raw-l1-dcache-wb'
cache_l1i = 'raw-l1-icache,raw-l1-icache-refill'
cache_l1 = 'raw-l1-dcache-refill,raw-l1-icache-refill,raw-l1-dcache-wb'
cache_l2 = 'raw-l2-dcache,raw-l2-dcache-refill,raw-l2-dcache-wb'
cache_l2w = 'raw-l2-dcache-wb,raw-l2-dcache-allocate'
cache_l3 = 'raw-l3-dcache,raw-l3-dcache-refill,raw-l3-dcache-wb'
tlb = 'raw-l1-dtlb,raw-l1-dtlb-refill,raw-l1-itlb,raw-l1-itlb-refill,raw-l2-dtlb,raw-l2-dtlb-refill'
#cache = ','.join([inst,cache_l1d, cache_l1i, cache_l2, cache_l3,tlb])
cache = ','.join([cache_l0,cache_l1, cache_l2,cache_l3, tlb])
#cache = ','.join([cache_l2, cache_l2w, cache_l3])
#time = '--duration 10 --interval 1000'
time = '--duration 1'
out = adb_dut('su -c simpleperf stat -a -e %s %s'%(cache,time))
print('\n'.join(out))
|
def bank(dec1, dec2):
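# Decorator factory: captures the first bank choice (dec1, dec2) and prints both choices before calling the wrapped function.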
def Bank(func):
def wrapper(func1, func2):
print('This is my first choice of {} :{} at {} and my second choice is {} at {} '.format(func.__name__, dec1, dec2, func1, func2))
return func(func1, func2)
return wrapper
return Bank
#This will collect my first choice of bank and the location
dec1 = input('Enter your first choice of bank here: ')
dec2 = input('Enter the location here: ')
@bank(dec1 , dec2)
def bank_list2(func1, func2):
print('My bank is {} at {}'.format(func1, func2))
#This will collect my second choice of bank
func1 = input('Enter your second choice of bank here: ')
func2 = input('Enter your location here: ')
bank_list2(func1, func2) |
# -*- encoding: utf-8 -*-
# Sensitive words are listed in filtered_words.txt; when user input contains one of them, replace it with '*' characters.
__author__ = 'Administrator'
import re
def initWords():
words = list()
with open("filtered_words.txt", "r") as f:
for line in f:
aline = re.findall(r'\w+', line)
for senceWords in aline:
wList = senceWords.strip().split()
for _w in wList:
words.append(_w)
return words
if __name__ == "__main__":
text = input("请输入语句:")
words = initWords()
flag = 1
for word in words:
if (word in text):
print("有敏感哦!")
text = text.replace(word, (len(word) * "*"))
flag = 0
if (flag):
print("没有敏感词!")
else:
print("处理后的语句:" + text)
|
from io import BytesIO
from PIL import Image
from django.core.files.storage import default_storage
FORMATS = {
"jpeg": "JPEG",
"png": "PNG",
"webp": "WebP",
"bmp": "BMP",
"tiff": "TIFF",
}
CONVERTIBLE_FORMATS = {
"jpeg": ["png", "webp", "bmp", "tiff"],
"png": ["jpeg", "webp", "bmp", "tiff"],
"webp": ["jpeg", "png", "bmp", "tiff"],
"bmp": ["jpeg", "png", "webp", "tiff"],
"tiff": ["jpeg", "png", "webp", "bmp"],
}
def convert_image_format(i, source_format, target_format):
"""Convert image file :i: from :source_format: to :target_format:."""
with Image.open(i) as image:
# JPEG and BMP cannot store an alpha channel, so flatten to RGB first
if target_format in ("jpeg", "bmp") and image.mode in ("RGBA", "P"):
image = image.convert("RGB")
# In-memory bytes buffer for the converted image
im_bytes = BytesIO()
# Convert the file format, using Pillow's canonical format name
image.save(im_bytes, format=FORMATS[target_format])
# Rewind so the storage backend reads from the start of the buffer
im_bytes.seek(0)
# File name without the extension (rsplit keeps dots inside the name)
name = i.name.rsplit(".", 1)[0]
# Store the converted image in the default file storage
storage_name = default_storage.save(f"images/{name}.{target_format}", im_bytes)
return storage_name
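# A minimal usage sketch (the file name is hypothetical); any file-like
# object with a .name attribute works, e.g. an open file or an UploadedFile:
#   with open("photo.png", "rb") as f:
#       stored = convert_image_format(f, "png", "jpeg")
#   # stored is the storage path, e.g. "images/photo.jpeg"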
|
#!/usr/bin/env python3
# Copyright (c) Moises Martinez by Fictizia. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import json
from database.people_table import People
from database import connector
from datetime import datetime
def get_record(person):
tmp = dict()
tmp['id'] = person.id
tmp['name'] = person.name
tmp['height'] = person.height
tmp['mass'] = person.mass
tmp['hair_color'] = person.hair_color
tmp['skin_color'] = person.skin_color
tmp['eye_color'] = person.eye_color
tmp['birth_year'] = person.birth_year
tmp['gender'] = person.gender
tmp['planet_id'] = person.planet_id
tmp['created'] = person.created
tmp['edited'] = person.edited
return tmp
def get_all():
result = dict()
for person in People.query.all():
result[person.id] = get_record(person)
return result, 200
def add_person(name,
height,
mass,
hair_color,
skin_color,
eye_color,
birth_year,
gender,
planet_id):
person = People()
person.name = name
person.height = height
person.mass = mass
person.hair_color = hair_color
person.skin_color = skin_color
person.eye_color = eye_color
person.birth_year = birth_year
person.gender = gender
person.planet_id = planet_id
person.created = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
connector.db_session.add(person)
# session.commit() returns None, so failures surface as exceptions
try:
connector.db_session.commit()
return get_record(person), 200
except Exception:
connector.db_session.rollback()
return {'message': 'Could not create the new person ' + str(name)}, 404
def get_person(id):
person = People.query.filter(People.id == id).first()
if person is None:
return {'message': 'No person exists with id ' + str(id)}, 404
else:
return get_record(person), 200
def delete_person(id):
# delete() returns the number of deleted rows; commit() always returns None
deleted = People.query.filter(People.id == id).delete()
connector.db_session.commit()
if deleted:
return {'message': 'Deleted the person with id ' + str(id)}, 200
else:
return {'message': 'No person exists with id ' + str(id)}, 404
def update_person(id,
name=None,
height=None,
mass=None,
hair_color=None,
skin_color=None,
eye_color=None,
birth_year=None,
gender=None,
planet_id=None):
person = People.query.filter(People.id == id).first()
if person is not None:
if name is not None:
person.name = name
if height is not None:
person.height = height
if mass is not None:
person.mass = mass
if hair_color is not None:
person.hair_color = hair_color
if skin_color is not None:
person.skin_color = skin_color
if eye_color is not None:
person.eye_color = eye_color
if birth_year is not None:
person.birth_year = birth_year
if gender is not None:
person.gender = gender
if planet_id is not None:
person.planet_id = planet_id
person.edited = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")  # get_record exposes this field as 'edited'
connector.db_session.commit()
return get_record(person), 200
else:
return {'message': 'No person exists with id ' + str(id)}, 404
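# A minimal sketch (not part of the original module) of exposing these
# handlers over HTTP. The Flask app and route shapes are assumptions for
# illustration only; the (body, status) return pairs suggest a REST API.
if __name__ == '__main__':
    from flask import Flask, jsonify

    app = Flask(__name__)

    @app.route('/people', methods=['GET'])
    def people_list():
        body, status = get_all()
        return jsonify(body), status

    @app.route('/people/<int:person_id>', methods=['GET'])
    def people_get(person_id):
        body, status = get_person(person_id)
        return jsonify(body), status

    app.run()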
|
'''
Global configuration file
'''
target_list = ["172.17.{}.1:8080".format(i) for i in range(1,5)]
self_host = "172.17.5.1"
# shell_pass = "4rk1i9ht" |
"""python Mini Project #1 | Python Tutorials For Absolute Beginners In Hindi #71
As we have nearly completed our Python object-oriented programming concepts, now it is time to do a mini-project.
Statement:-
The task is to create an “Online Library Management System”. For this, you have to create a library class that includes the following methods:
Displaybook() : To display the available books
Lendbook(): To lend a book to a user
Addbook(): To add a book to the library
Returnbook(): To return the book to the library.
As you have created a library class, now you will create an object and pass the following parameters in the constructor.
HarryLibrary=Library(listofbooks, library_name)
After that, create a main function and run an infinite while loop that asks the users for their input that whether they want to display, lend, add or return a book.
Optional:-
Maintain a dictionary for the users who own a book. The dictionary should take the book name as its key and the name of the person as its value. Whenever you lend a book to a user, update this dictionary.
Code as described/written in the video
# Create a library class
# display book
# lend book - (who owns the book if not present)
# add book
# return book
# HarryLibrary = Library(listofbooks, library_name)
#dictionary (books-nameofperson)
# create a main function and run an infinite while loop asking
# users for their input
"""
class Library:
def __init__(self, list_of_books, name_of_library):
self.list_of_books = list_of_books
self.name_of_library = name_of_library
self.lended_book = []
def displaybook(self):
for items in self.list_of_books:
print(items)
return f"this books available in library "
def add_book(self, new_book):
self.list_of_books.append(new_book.capitalize())
def lend_book(self, books):
if books in self.list_of_books:
self.lended_book.append(books)
self.list_of_books.remove(books)
print("Book lending permission approved...")
elif books in self.lended_book:
print("Sorry, the book has already been lent out\n"
"Currently we only have these:- ")
for items in self.list_of_books:
print(items)
else:
print("Book's name entered is not available or name is wrong "
"and the available books are:- ")
for items in self.list_of_books:
print(items)
def returnbook(self, books):
if books in self.lended_book:
self.lended_book.remove(books)
self.list_of_books.append(books)
print("Book returned, thank you!")
else:
print("That book was not lent out from this library.")
def system():
try:
value=True
gnlin = Library(["marry2","kharibis","pavelretur","mazprem"],"gnlib")
#print(gnlin.displaybook())
while value == True:
username = input("enter Your name")
userInput = int(input(f"Welcome {username} to {gnlin.name_of_library} Enter following number\n"
f"1:display books\n"
f"2:add books\n"
f"3:Lend books\n"
f"4:return book\n"
f"5:exit:\n"))
if userInput == 1:
print(gnlin.displaybook())
elif userInput == 2:
new_book = input("Enter name of book you want to donate:- ")
gnlin.add_book(new_book.capitalize())
print("Book has been added thank you for your donation")
elif userInput == 3:
books = input("Enter the name of book you want to lend:- ")
gnlin.lend_book(books.capitalize())
elif userInput == 4:
books = input("Enter the name of book you want to return:- ")
gnlin.returnbook(books.capitalize())
elif userInput == 5:
value = False
else:
print("You entered a wrong keyword")
except Exception:
print("error..")
system()
import json
from datetime import datetime
from decimal import Decimal
from ..exceptions import NoRawTXData
from unittest import TestCase
from unittest.mock import MagicMock, patch
from xml.dom.minidom import parseString
from dicttoxml import dicttoxml
from apprisetransactions import settings
from apprisetransactions.transactions import MoneroTransaction
class TestCase_Notify(TestCase):
raw_data = {
"transfer": {
"address": "599BXkvzAPeD6EojXjW99gGeE19rxuS4g99o5xevp3jTFQYTNtSyYtqWAt2jg9MTP8aLTJKiuCJXg1Ro6xLtEUEm9rWHL7V",
"amount": 100000000,
"amounts": [100000000],
"confirmations": 21,
"double_spend_seen": False,
"fee": 1006740000,
"height": 597061,
"locked": False,
"note": "",
"payment_id": "0000000000000000",
"subaddr_index": {"major": 0, "minor": 0},
"subaddr_indices": [{"major": 0, "minor": 0}],
"suggested_confirmations_threshold": 1,
"timestamp": 1591546700,
"txid": "d0fb667f2975f79495e973d1437200f7b6c464956d33cd89289411e07a8c0b3a",
"type": "in",
"unlock_time": 0,
},
"transfers": [
{
"address": "599BXkvzAPeD6EojXjW99gGeE19rxuS4g99o5xevp3jTFQYTNtSyYtqWAt2jg9MTP8aLTJKiuCJXg1Ro6xLtEUEm9rWHL7V",
"amount": 100000000,
"amounts": [100000000],
"confirmations": 21,
"double_spend_seen": False,
"fee": 1006740000,
"height": 597061,
"locked": False,
"note": "",
"payment_id": "0000000000000000",
"subaddr_index": {"major": 0, "minor": 0},
"subaddr_indices": [{"major": 0, "minor": 0}],
"suggested_confirmations_threshold": 1,
"timestamp": 1591546700,
"txid": "d0fb667f2975f79495e973d1437200f7b6c464956d33cd89289411e07a8c0b3a",
"type": "in",
"unlock_time": 0,
}
],
}
def setUp(self):
settings.init()
self.transaction = MoneroTransaction(
tx_id="4ea70add5d0c7db33557551b15cd174972fcfc73bf0f6a6b47b7837564b708d3",
payment_provider="Monero",
amount=Decimal("4.000000000000"),
fee=Decimal("0.000962550000"),
note="",
recipient="9tQoHWyZ4yXUgbz9nvMcFZUfDy5hxcdZabQCxmNCUukKYicXegsDL7nQpcUa3A1pF6K3fhq3scsyY88tdB1MqucULcKzWZC",
timestamp=datetime(2018, 1, 29, 13, 17, 18),
confirmations=1,
_raw_data=self.raw_data,
)
@patch("apprisetransactions.transactions.transaction.Apprise")
def test_notify_json(self, mock_apprise):
def notify_json_sideeffect(json_body):
self.assertEqual(json_body, json.dumps(self.raw_data))
return True
mock_apprise.notify = MagicMock(side_effect=notify_json_sideeffect)
mock_apprise.return_value = mock_apprise
self.assertTrue(self.transaction.notify(urls=["json://blah", "sns://blah"]))
self.assertEqual(mock_apprise.notify.call_count, 1)
def test_notify_exception(self):
with self.assertRaises(NoRawTXData):
MoneroTransaction(tx_id="txexample").notify(
urls=["json://blah", "sns://blah"]
)
with self.assertRaises(NoRawTXData):
MoneroTransaction(tx_id="txexample").notify(urls=["xml://blah"])
@patch("apprisetransactions.transactions.transaction.Apprise")
def test_notify_xml(self, mock_apprise):
def notify_xml_sideeffect(xml_body):
self.assertEqual(
xml_body, parseString(dicttoxml(self.raw_data)).toprettyxml()
)
return True
mock_apprise.notify = MagicMock(side_effect=notify_xml_sideeffect)
mock_apprise.return_value = mock_apprise
self.assertTrue(self.transaction.notify(urls=["xml://blah"]))
self.assertEqual(mock_apprise.notify.call_count, 1)
@patch("apprisetransactions.transactions.transaction.Apprise")
def test_notify_other(self, mock_apprise):
def notify_sideeffect(body, title, attach):
self.assertEqual(body, "New XMR received")
self.assertEqual(title, "Private Notification")
return True
mock_apprise.notify = MagicMock(side_effect=notify_sideeffect)
mock_apprise.return_value = mock_apprise
self.assertTrue(
self.transaction.notify(
urls=["pbul://asdfasdf"],
body="New {currency} received",
title="Private Notification",
)
)
self.assertEqual(mock_apprise.notify.call_count, 1)
@patch("apprisetransactions.transactions.transaction.parse_placeholders")
@patch("apprisetransactions.transactions.transaction.Apprise")
def test_znotify_other(self, mock_apprise, mock_parse_placeholders):
def notify_sideeffect(body, title, attach):
self.assertEqual(body, "New XMR received")
self.assertEqual(title, "Private Notification")
return True
# parse_placeholders is tested in test_utils
mock_parse_placeholders.return_value = (
"New XMR received",
"Private Notification",
)
mock_apprise.notify = MagicMock(side_effect=notify_sideeffect)
mock_apprise.return_value = mock_apprise
self.assertTrue(
self.transaction.notify(
urls=["pbul://asdfasdf"],
body="New {currency} received",
title="Private Notification",
)
)
self.assertEqual(mock_apprise.notify.call_count, 1)
self.assertEqual(mock_parse_placeholders.call_count, 1)
|
import unittest
from app import database_connector, user
import sqlite3
class DatabaseTest(unittest.TestCase):
def test_access(self):
filename = 'test_database.db'
db = database_connector.DatabaseConnector(filename)
self.assertEqual(type(db.c), sqlite3.Cursor)
db.close()
def test_execute(self):
filename = 'test_database.db'
db = database_connector.DatabaseConnector(filename)
try:
db.execute_statement("drop table test")
except sqlite3.OperationalError:
pass
db.execute_statement("create table test (cola text, colb text, colc text)")
db.execute_statement("insert into test values ('hi', 'hello', 'goodbye')")
rows = db.execute_query("select * from test")
self.assertEqual(len(rows), 1)
for row in rows:
self.assertEqual(row[0], "hi")
self.assertEqual(row[1], "hello")
self.assertEqual(row[2], "goodbye")
db.execute_statement("drop table test")
db.close()
def test_script(self):
filename = 'test_database.db'
db = database_connector.DatabaseConnector(filename)
db.run_script('schema.sql')
db.execute_statement("INSERT INTO users VALUES ('dr', 'bob', 'smith', 'b@dr.com', '10-10-2017', 'hospital', 'US', 'abc123', NULL )")
rows = db.execute_query("SELECT * FROM users WHERE(title=?)", "dr")
self.assertEqual(len(rows), 1)
for row in rows:
self.assertEqual(row[0], "dr")
self.assertEqual(row[2], "smith")
db.close()
def test_get_user_by_id(self):
filename = 'test_database.db'
db = database_connector.DatabaseConnector(filename)
db.run_script('schema.sql')
db.run_script('populate_database.sql')
user = db.get_user_by_id(1)
self.assertEqual(user.user_id, 1)
self.assertEqual(user.fname, "Bob")
self.assertEqual(user.email, "bsmith@dr.com")
self.assertEqual(user.title, "Dr")
db.close()
def test_log_user_in(self):
filename = 'test_database.db'
db = database_connector.DatabaseConnector(filename)
db.run_script('schema.sql')
db.run_script('populate_database.sql')
user_email = "bsmith@dr.com"
user_pword = "abc123"
self.assertTrue(db.log_user_in(user_email, user_pword))
self.assertFalse(db.log_user_in(user_email, "abc1234"))
if __name__ == '__main__':
unittest.main() |
import matplotlib.pyplot as plt
import numpy as np
inputFolder = "../Data/"
outputFolder = "../Plots/"
showImage = True
#TODO: decide which plots should start at 0
def endPlot():
if showImage:
plt.show()
else:
plt.close()
def makePerfAnalysis(filePath, title, outputName):
fig = plt.figure() # an empty figure with no axes
#load:
(branchFactor, leafSize, subdivision, primaryNodeIntersections, primaryLeafIntersections, secondaryNodeIntersections,
secondaryLeafIntersections, averageLeafDepth, primaryAabb, primaryPrimitive, nodeSah, leafSah, nodeEpo, leafEpo,
leafVolume, leafSurfaceArea, nodeFullness, primaryAabbSuccessRatio, primaryTriangleSuccessRatio, secondaryAabbSuccessRatio,
secondaryTriangleSuccessRatio, secondaryAabb, secondaryPrimitive, primaryWasteFactor, secondaryWasteFactor, primaryNodeCachelines,
secondaryNodeCachelines, totalTime, nodeTime, leafTime, perNodeCost, perLeafCost, sahNodeFactor) = np.loadtxt(filePath, delimiter=',', unpack=True, skiprows=1)
#Total time by branch factor.
plt.title(title)
for i in range(4,17,4):
filter2 = totalTime[leafSize == i]
filter1 = branchFactor[leafSize == i]
plt.plot(filter1, filter2, label='L' + str(i))
plt.xlabel('Node size')
plt.ylabel('Render time')
plt.legend()
#save to file
plt.savefig(outputFolder + outputName + "RenderTimePerBranch.pdf")
plt.savefig(outputFolder + outputName + "RenderTimePerBranch.pgf")
endPlot()
#total time by leaf size
plt.title(title)
for i in range(4,17,4):
filter2 = totalTime[branchFactor == i]
filter1 = leafSize[branchFactor == i]
plt.plot(filter1, filter2, label='N' + str(i))
plt.xlabel('Leaf size')
plt.ylabel('Render time')
plt.legend()
#save to file
plt.savefig(outputFolder + outputName + "RenderTimePerLeaf.pdf")
plt.savefig(outputFolder + outputName + "RenderTimePerLeaf.pgf")
endPlot()
#Total time by branch factor.
plt.title(title)
for i in range(4,17,4):
filter2 = nodeTime[leafSize == i]
filter1 = branchFactor[leafSize == i]
plt.plot(filter1, filter2, label='L' + str(i))
plt.xlabel('Node size')
plt.ylabel('Node render time')
plt.legend()
#save to file
plt.savefig(outputFolder + outputName + "NodeRenderTime.pdf")
plt.savefig(outputFolder + outputName + "NodeRenderTime.pgf")
endPlot()
#total time by leaf size
plt.title(title)
for i in range(4,17,4):
filter2 = leafTime[branchFactor == i]
filter1 = leafSize[branchFactor == i]
plt.plot(filter1, filter2, label='N' + str(i))
plt.xlabel('Leaf size')
plt.ylabel('Leaf render time')
plt.legend()
#save to file
plt.savefig(outputFolder + outputName + "LeafRenderTime.pdf")
plt.savefig(outputFolder + outputName + "LeafRenderTime.pgf")
endPlot()
#Node factor
plt.title(title)
for i in range(4,17,4):
filter2 = sahNodeFactor[leafSize == i]
filter1 = branchFactor[leafSize == i]
plt.plot(filter1, filter2, label='L' + str(i))
plt.xlabel('Node size')
plt.ylabel('Sah Node Factor')
plt.legend()
#save to file
plt.savefig(outputFolder + outputName + "NodeFactorBranch.pdf")
plt.savefig(outputFolder + outputName + "NodeFactorBranch.pgf")
endPlot()
#Node factor
plt.title(title)
for i in range(4,17,4):
filter2 = sahNodeFactor[branchFactor == i]
filter1 = leafSize[branchFactor == i]
plt.plot(filter1, filter2, label='N' + str(i))
plt.xlabel('Leaf size')
plt.ylabel('Sah Node Factor')
plt.legend()
#save to file
plt.savefig(outputFolder + outputName + "NodeFactorLeaf.pdf")
plt.savefig(outputFolder + outputName + "NodeFactorLeaf.pgf")
endPlot()
def makeIntersectionAnalysis(filePath, title, outputName):
#load the workload file and visualize it.
#load:
(branchFactor, leafSize, subdivision, primaryNodeIntersections, primaryLeafIntersections, secondaryNodeIntersections,
secondaryLeafIntersections, averageLeafDepth, primaryAabb, primaryPrimitive, nodeSah, leafSah, nodeEpo, leafEpo,
leafVolume, leafSurfaceArea, nodeFullness, primaryAabbSuccessRatio, primaryTriangleSuccessRatio, secondaryAabbSuccessRatio,
secondaryTriangleSuccessRatio, secondaryAabb, secondaryPrimitive, primaryWasteFactor, secondaryWasteFactor, primaryNodeCachelines,
secondaryNodeCachelines, totalTime, nodeTime, leafTime, perNodeCost, perLeafCost, sahNodeFactor) = np.loadtxt(filePath, delimiter=',', unpack=True, skiprows=1)
x = np.arange(branchFactor.size)
#Node intersections by branching factor.
plt.title(title)
filter2 = primaryNodeIntersections[leafSize == 1]
filter1 = branchFactor[leafSize == 1]
plt.plot(filter1, filter2, label='L1')
for i in range(4,17,4):
filter2 = primaryNodeIntersections[leafSize == i]
filter1 = branchFactor[leafSize == i]
plt.plot(filter1, filter2, label='L' + str(i))
plt.xlabel('Node size')
plt.ylabel('Primary Node intersections')
plt.legend()
#save to file
plt.savefig(outputFolder + outputName + "PrimaryNodeIntersection.pdf")
plt.savefig(outputFolder + outputName + "PrimaryNodeIntersection.pgf")
endPlot()
#aabb intersections by branching factor.
plt.title(title)
for i in range(4,17,4):
filter2 = primaryAabb[leafSize == i]
filter1 = branchFactor[leafSize == i]
plt.plot(filter1, filter2, label='L' + str(i))
plt.xlabel('Node size')
plt.ylabel('Primary Aabb intersections')
plt.legend()
#save to file
plt.savefig(outputFolder + outputName + "PrimaryAabbIntersection.pdf")
plt.savefig(outputFolder + outputName + "PrimaryAabbIntersection.pgf")
endPlot()
#Leaf intersections by Leaf size.
plt.title(title)
for i in range(4,17,4):
filter2 = primaryLeafIntersections[branchFactor == i]
filter1 = leafSize[branchFactor == i]
plt.plot(filter1, filter2, label='N' + str(i))
plt.xlabel('Leaf size')
plt.ylabel('Primary Leaf intersections')
plt.legend()
#save to file
plt.savefig(outputFolder + outputName + "PrimaryLeafIntersection.pdf")
plt.savefig(outputFolder + outputName + "PrimaryLeafIntersection.pgf")
endPlot()
#Leaf intersections by Leaf size.
plt.title(title)
for i in range(4,17,4):
filter2 = primaryPrimitive[branchFactor == i]
filter1 = leafSize[branchFactor == i]
plt.plot(filter1, filter2, label='N' + str(i))
plt.xlabel('Leaf size')
plt.ylabel('Primary Triangle intersections')
plt.legend()
#save to file
plt.savefig(outputFolder + outputName + "PrimaryTriIntersection.pdf")
plt.savefig(outputFolder + outputName + "PrimaryTriIntersection.pgf")
endPlot()
def makeWorkGroupWiskerPlots(filePath, title, workGroupSize, outputName):
filePath = filePath[0] + str(workGroupSize) + filePath[1]
title = title + str(workGroupSize)
#load: the work-group file has 19 columns, so the old 5-column unpack
#(median/min/max/mean +- sd) cannot work; approximate the whisker bands
#from the per-step mean/min/max columns that are actually available.
(stepId, avgPrimaryNodeWork, avgPrimaryNodeUnique, avgPrimaryLeafWork, avgPrimaryLeafUnique,
avgPrimaryRayTermination, primaryNodeWorkMax, primaryNodeWorkMin, primaryLeafWorkMax,
primaryLeafWorkMin, avgSecondaryNodeWork, avgSecondaryNodeUnique, avgSecondaryLeafWork,
avgSecondaryLeafUnique, avgSecondaryRayTermination, secondaryNodeWorkMax, secondaryNodeWorkMin,
secondaryLeafWorkMax, secondaryLeafWorkMin) = np.loadtxt(filePath, delimiter=',', unpack=True, skiprows=1)
y = avgPrimaryNodeWork   # mean, plotted in place of the median
z = primaryNodeWorkMin   # lower whisker
a = primaryNodeWorkMax   # upper whisker
b, c = primaryNodeWorkMin, primaryNodeWorkMax  # band limits (no sd column available)
x = np.arange(y.size)
plt.plot(x, z, label='min', zorder=1)
plt.plot(x, a, label='max', zorder=2)
plt.fill_between(x, b, c, label="min - max band", color='m', zorder=3)
plt.plot(x, y, label='mean', linewidth=2, color='k', zorder=10)
plt.xlabel('Step Id')
plt.ylabel('Primary node work')
plt.legend()
#save to file
plt.savefig(outputFolder + outputName + '.pdf')
plt.savefig(outputFolder + outputName + '.pgf')
endPlot()
def mask(array):
#prepare for masking arrays - 'conventional' arrays won't do it
maskArray = np.ma.array(array)
#mask values below a certain threshold, but i want the axis to touch 0
#array = np.ma.masked_where(array <= 0 , array)
for i in range(len(array)-2):
index = i+1
if(array[index-1] == 0 and array[index] == 0 and array[index+1] == 0):
maskArray[index] = np.ma.masked
if array[0] == 0 and array[1] == 0:
maskArray [0] = np.ma.masked
if array[-1] == 0 and array[-2] == 0:
maskArray [-1] = np.ma.masked
return maskArray
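# Example: mask(np.array([0, 0, 0, 0, 5])) masks indices 0-2 but keeps the
# zero at index 3, so a line plot skips long all-zero stretches while still
# dropping to the axis right next to real data.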
def makeWorkGroupAnalysis(filePath, workGroupSize, outputName):
filePath = filePath[0] + str(workGroupSize) + filePath[1]
outputName = outputName[0] + str(workGroupSize) + outputName[1]
(stepId, avgPrimaryNodeWork, avgPrimaryNodeUnique, avgPrimaryLeafWork, avgPrimaryLeafUnique,
avgPrimaryRayTermination, primaryNodeWorkMax, primaryNodeWorkMin, primaryLeafWorkMax,
primaryLeafWorkMin, avgSecondaryNodeWork, avgSecondaryNodeUnique, avgSecondaryLeafWork,
avgSecondaryLeafUnique, avgSecondaryRayTermination, secondaryNodeWorkMax, secondaryNodeWorkMin,
secondaryLeafWorkMax, secondaryLeafWorkMin) = np.loadtxt(filePath, delimiter=',', unpack=True, skiprows =1)
#find highest step id where secondary data is != 0
secondaryEndId = 0
for(currentId, a, b, c, d, e) in zip(stepId , avgSecondaryNodeWork, avgSecondaryNodeUnique, avgSecondaryLeafWork, avgSecondaryLeafUnique, avgSecondaryRayTermination) :
if(a + b + c + d + e != 0):
secondaryEndId = currentId
secondaryEndId = secondaryEndId + 5
#mask those arrays i want to mask:
avgPrimaryNodeWork = mask(avgPrimaryNodeWork)
avgPrimaryNodeUnique = mask(avgPrimaryNodeUnique)
avgPrimaryLeafWork = mask(avgPrimaryLeafWork)
avgPrimaryLeafUnique = mask(avgPrimaryLeafUnique)
avgPrimaryRayTermination = mask(avgPrimaryRayTermination)
avgSecondaryNodeWork = mask(avgSecondaryNodeWork)
avgSecondaryNodeUnique = mask(avgSecondaryNodeUnique)
avgSecondaryLeafWork = mask(avgSecondaryLeafWork)
avgSecondaryLeafUnique = mask(avgSecondaryLeafUnique)
avgSecondaryRayTermination = mask(avgSecondaryRayTermination)
#First plot is about how much is done in each step
plt.figure(figsize=(7,7))
plt.suptitle("Average Node and Leaf intersections per Step")
plt.subplot(2,1,1)
plt.title("Primary Ray")
plt.axhline(linewidth=1, color='0.5')
plt.axhline(y = workGroupSize * workGroupSize, linewidth=1, color='0.5')
plt.plot(stepId, avgPrimaryNodeWork, label = "Node Intersections")
plt.plot(stepId, avgPrimaryLeafWork, label = "Leaf Intersections")
plt.plot(stepId, avgPrimaryRayTermination, label = "Finished Rays")
xlimSave = plt.xlim()
plt.legend()
plt.subplot(2,1,2)
plt.title("Secondary Ray")
plt.axhline(linewidth=1, color='0.5')
plt.axhline(y = workGroupSize * workGroupSize, linewidth=1, color='0.5')
plt.plot(stepId, avgSecondaryNodeWork, label = "Nodes Intersections")
plt.plot(stepId, avgSecondaryLeafWork, label = "Leafs Intersections")
plt.plot(stepId, avgSecondaryRayTermination, label = "Finished Rays")
plt.xlim(xlimSave)
plt.xlabel("Step Id")
plt.legend()
plt.savefig(outputFolder + outputName + "_NodeLeaf.pdf")
plt.savefig(outputFolder + outputName + "_NodeLeaf.pgf")
endPlot()
# second plot is about how many unique nodes and leafs were loaded
plt.figure(figsize=(15,7))
plt.suptitle("Unique Nodes and Leafs loaded per Step")
# first section contains left side with nodes in primary(top) and secondary(below)
yMax = max(secondaryNodeWorkMax.max(), primaryNodeWorkMax.max())
newyLim = ( 0 - yMax * 0.05, yMax *1.05)
plt.subplot(2,2,3)
plt.title("Unique Nodes loaded per Step (Secondary ray)")
plt.axhline(linewidth=1, color='0.5')
plt.plot(stepId, avgSecondaryNodeUnique, color=(0.9,0.5,0.13, 1), label = "Unique Nodes")
plt.fill_between(stepId, secondaryNodeWorkMin, secondaryNodeWorkMax, label = "min max unique Nodes", color=(0.9,0.5,0.13, 0.5), zorder=-1)
plt.xlim(xlimSave)
plt.ylim(newyLim)
plt.legend()
plt.subplot(2,2,1)
plt.title("Unique Nodes loaded per Step (Primary Ray)")
plt.axhline(linewidth=1, color='0.5')
plt.plot(stepId, avgPrimaryNodeUnique, color=(0.9,0.5,0.13, 1), label = "Unique Nodes")
plt.fill_between(stepId, primaryNodeWorkMin, primaryNodeWorkMax, label = "min max unique Nodes", color=(0.9,0.5,0.13, 0.5), zorder=-1)
plt.xlim(xlimSave)
plt.ylim(newyLim)
plt.legend()
# second section contains right side with leafs in primary(top) and secondary(below)
yMax = max(secondaryLeafWorkMax.max(), primaryLeafWorkMax.max())
newyLim = ( 0 - yMax * 0.05, yMax *1.05)
plt.subplot(2,2,4)
plt.title("Unique Leafs loaded per Step (Secondary ray)")
plt.axhline(linewidth=1, color='0.5')
plt.plot(stepId, avgSecondaryLeafUnique, color=(0.13,0.5,0.9, 1), label = "Unique Leafs")
plt.fill_between(stepId, secondaryLeafWorkMin, secondaryLeafWorkMax, label = "min max unique Leafs", color=(0.13,0.5,0.9, 0.5), zorder= -1)
plt.xlim(xlimSave)
plt.ylim(newyLim)
plt.legend()
plt.subplot(2,2,2)
plt.title("Unique Leafs loaded per Step (Primary Ray)")
plt.axhline(linewidth=1, color='0.5')
plt.plot(stepId, avgPrimaryLeafUnique, color=(0.13,0.5,0.9, 1), label = "Unique Leafs")
plt.fill_between(stepId, primaryLeafWorkMin, primaryLeafWorkMax, label = "min max unique Leafs", color=(0.13,0.5,0.9, 0.5), zorder=-1)
plt.xlim(xlimSave)
plt.ylim(newyLim)
plt.legend()
plt.savefig(outputFolder + outputName + "_Unique.pdf")
plt.savefig(outputFolder + outputName + "_Unique.pgf")
endPlot()
def makeWorkGroupUniqueAnalysis(filePath, outName, workGroupSize):
print("workGroup unique analysis")
workSquare = workGroupSize * workGroupSize
#additional analysis about unique nodes per workgroup, not per step:
(loadedPrimaryNodes, loadedPrimaryLeafs, loadedPrimaryNodesMax, loadedPrimaryLeafsMax, loadedPrimaryNodesMin,
loadedPrimaryLeafsMin, loadedSecondaryNodes, loadedSecondaryLeafs, loadedSecondaryNodesMax, loadedSecondaryLeafsMax,
loadedSecondaryNodesMin, loadedSecondaryLeafsMin, loadedWidePrimaryNodes, loadedWidePrimaryLeafs, loadedWideSecondaryNodes,
loadedWideSecondaryLeafs) = np.loadtxt(filePath, delimiter=',', unpack=True, skiprows=1)
x = np.arange(len(loadedPrimaryNodes))
#sort everything by the single-traversal counts so the curves are monotone
#sorting the wide and single arrays separately would look cleaner, but it would misalign the pairs
p = (loadedPrimaryNodes).argsort()
loadedPrimaryNodes = loadedPrimaryNodes[p]
loadedPrimaryNodesMax = loadedPrimaryNodesMax[p]
loadedPrimaryNodesMin = loadedPrimaryNodesMin[p]
#loadedWidePrimaryNodes = loadedWidePrimaryNodes[p]
npArrayAnalysis(loadedPrimaryNodes, "loadedPrimaryNodes ")
npArrayAnalysis(loadedWidePrimaryNodes, "loadedWidePrimaryNodes ")
p = (loadedPrimaryLeafs).argsort()
loadedPrimaryLeafs = loadedPrimaryLeafs[p]
loadedPrimaryLeafsMax = loadedPrimaryLeafsMax[p]
loadedPrimaryLeafsMin = loadedPrimaryLeafsMin[p]
#loadedWidePrimaryLeafs = loadedWidePrimaryLeafs[p]
npArrayAnalysis(loadedPrimaryLeafs, "loadedPrimaryLeafs ")
npArrayAnalysis(loadedWidePrimaryLeafs, "loadedWidePrimaryLeafs ")
p = (loadedSecondaryNodes).argsort()
loadedSecondaryNodes = loadedSecondaryNodes[p]
loadedSecondaryNodesMin = loadedSecondaryNodesMin[p]
loadedSecondaryNodesMax = loadedSecondaryNodesMax[p]
#loadedWideSecondaryNodes = loadedWideSecondaryNodes[p]
npArrayAnalysis(loadedSecondaryNodes, "loadedSecondaryNodes ")
npArrayAnalysis(loadedWideSecondaryNodes, "loadedWideSecondaryNodes")
p = (loadedSecondaryLeafs).argsort()
loadedSecondaryLeafs = loadedSecondaryLeafs[p]
loadedSecondaryLeafsMax = loadedSecondaryLeafsMax[p]
loadedSecondaryLeafsMin = loadedSecondaryLeafsMin[p]
#loadedWideSecondaryLeafs = loadedWideSecondaryLeafs[p]
npArrayAnalysis(loadedSecondaryLeafs, "loadedSecondaryLeafs ")
npArrayAnalysis(loadedWideSecondaryLeafs, "loadedWideSecondaryLeafs")
#sort the wide arrays by themselves so the curves are at least readable
loadedWidePrimaryNodes.sort()
loadedWidePrimaryLeafs.sort()
loadedWideSecondaryNodes.sort()
loadedWideSecondaryLeafs.sort()
plt.figure(figsize=(15, 9))
plt.suptitle("Loaded nodes and leafs.")
#title -> workgroup size
ax0 = plt.subplot(2, 2, 1)
plt.axhline(linewidth=1, color='0.5')
plt.fill_between(x, loadedPrimaryNodesMin, loadedPrimaryNodesMax, label="min - max unique Nodes", color=(0.9,0.5,0.13, 0.5), zorder=-1)
plt.plot(x, loadedPrimaryNodes / workSquare, color=(0.9, 0.5, 0.13, 1), label="unique Nodes")
plt.plot(x, loadedWidePrimaryNodes / workSquare, label = "unique Nodes in wideRenderer")
plt.legend(loc='upper left')
plt.title("Primary ray Nodes")
plt.ylabel("avg per ray loaded Nodes")
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
ax1 = plt.subplot(2, 2, 2)
plt.axhline(linewidth=1, color='0.5')
plt.fill_between(x, loadedPrimaryLeafsMin, loadedPrimaryLeafsMax, label = "min - max unique Leafs", color=(0.9,0.5,0.13, 0.5), zorder=-1)
plt.plot(x, loadedPrimaryLeafs / workSquare, color=(0.9, 0.5, 0.13, 1), label="Unique Leafs")
plt.plot(x, loadedWidePrimaryLeafs / workSquare, label="unique Leafs in wideRenderer")
plt.legend(loc='upper left')
plt.title("Primary ray Leafs")
plt.ylabel("avg per ray loaded Leafs")
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
ax1 = plt.subplot(2, 2, 3)
plt.axhline(linewidth=1, color='0.5')
plt.fill_between(x, loadedSecondaryNodesMin, loadedSecondaryNodesMax, label="min - max unique Nodes", color=(0.9,0.5,0.13, 0.5), zorder=-1)
plt.plot(x, loadedSecondaryNodes / workSquare, color=(0.9, 0.5, 0.13, 1), label="unique Nodes")
plt.plot(x, loadedWideSecondaryNodes / workSquare, label = "unique Nodes in wideRenderer")
plt.legend(loc='upper left')
plt.title("Secondary ray Nodes")
plt.ylabel("avg per ray loaded Nodes")
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
ax1 = plt.subplot(2, 2, 4)
plt.axhline(linewidth=1, color='0.5')
plt.fill_between(x, loadedSecondaryLeafsMin, loadedSecondaryLeafsMax, label = "min - max unique Leafs", color=(0.9,0.5,0.13, 0.5), zorder=-1)
plt.plot(x, loadedSecondaryLeafs / workSquare, color=(0.9, 0.5, 0.13, 1), label="Unique Leafs")
plt.plot(x, loadedWideSecondaryLeafs / workSquare, label="unique Leafs in wideRenderer")
plt.legend(loc='upper left')
plt.title("Secondary ray Leafs")
plt.ylabel("avg per ray loaded Leafs")
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
plt.savefig(outputFolder + "UniqueLoadedAnalysis" + outName + "s"+ str(workGroupSize) + ".pdf")
plt.savefig(outputFolder + "UniqueLoadedAnalysis" + outName + "s"+ str(workGroupSize) + ".pgf")
endPlot()
def workGroupUniqueLoadedCachelines():
filePath0 = inputFolder + "WorkGroups/WorkGroupSize_16_Version_1/amazonLumberyardInterior_b4_l4_c0_WorkGroupUniqueWork.txt"
filePath1 = inputFolder + "WorkGroups/WorkGroupSize_16_Version_1/amazonLumberyardInterior_b4_l4_c0_WorkGroupData.txt"
workGroupSize = 16
workSquare = workGroupSize * workGroupSize
#additional analysis about unique nodes per workgroup, not per step:
(loadedPrimaryNodes, loadedPrimaryLeafs, loadedPrimaryNodesMax, loadedPrimaryLeafsMax, loadedPrimaryNodesMin,
loadedPrimaryLeafsMin, loadedSecondaryNodes, loadedSecondaryLeafs, loadedSecondaryNodesMax, loadedSecondaryLeafsMax,
loadedSecondaryNodesMin, loadedSecondaryLeafsMin, loadedWidePrimaryNodes, loadedWidePrimaryLeafs, loadedWideSecondaryNodes,
loadedWideSecondaryLeafs) = np.loadtxt(filePath0, delimiter=',', unpack=True, skiprows=1)
x = np.arange(len(loadedPrimaryNodes))
#Those values are hardcoded:
nodeCachelines = 2
leafCachelines = nodeCachelines + 3
primaryLoadedCachelines = (loadedPrimaryNodes * nodeCachelines + loadedPrimaryLeafs * leafCachelines).mean() / workSquare
secondaryLoadedCachelines = (loadedSecondaryNodes * nodeCachelines + loadedSecondaryLeafs * leafCachelines).mean() / workSquare
print("Single ray traveral, cachelines loaded per ray")
print("average primary loaded Cachelines " + str(primaryLoadedCachelines))
print("average secondary loaded Cachelines " + str(secondaryLoadedCachelines))
maxPrimaryLoadedCachelines = (loadedPrimaryNodesMax * nodeCachelines + loadedPrimaryLeafsMax * leafCachelines).max()
maxSecondaryLoadedCachelines = (loadedSecondaryNodesMax * nodeCachelines + loadedSecondaryLeafsMax * leafCachelines).max()
print("Max primary loaded Cachelines " + str(maxPrimaryLoadedCachelines))
print("Max secondary loaded Cachelines " + str(maxSecondaryLoadedCachelines))
print("Wide ray traveral, cachelines loaded workgroup Per step")
widePrimaryLoadedCachelines = loadedWidePrimaryNodes * nodeCachelines + loadedWidePrimaryLeafs * leafCachelines
wideSecondaryLoadedCachelines = loadedWideSecondaryNodes * nodeCachelines + loadedWideSecondaryLeafs * leafCachelines
#npArrayAnalysis(widePrimaryLoadedCachelines, "widePrimaryLoadedCachelines")
#npArrayAnalysis(wideSecondaryLoadedCachelines, "secondaryWideLoadedCachelines")
(stepId, avgPrimaryNodeWork, avgPrimaryNodeUnique, avgPrimaryLeafWork, avgPrimaryLeafUnique,
avgPrimaryRayTermination, primaryNodeWorkMax, primaryNodeWorkMin, primaryLeafWorkMax,
primaryLeafWorkMin, avgSecondaryNodeWork, avgSecondaryNodeUnique, avgSecondaryLeafWork,
avgSecondaryLeafUnique, avgSecondaryRayTermination, secondaryNodeWorkMax,
secondaryNodeWorkMin, secondaryLeafWorkMax, secondaryLeafWorkMin) = np.loadtxt(filePath1, delimiter=',', unpack=True, skiprows=1)
#plot should show how many cachelines are loaded per step.
avgPrimaryNodeLoadedCachelines = avgPrimaryNodeUnique * nodeCachelines
avgPrimaryLeafLoadedCachelines = avgPrimaryLeafUnique * leafCachelines
avgSecondaryNodeLoadedCachelines = avgSecondaryNodeUnique * nodeCachelines
avgSecondaryLeafLoadedCachelines = avgSecondaryLeafUnique * leafCachelines
maxPrimaryNodeLoadedCachelines = primaryNodeWorkMax * nodeCachelines
maxPrimaryLeafLoadedCachelines = primaryLeafWorkMax * leafCachelines
maxSecondaryNodeLoadedCachelines = secondaryNodeWorkMax * nodeCachelines
maxSecondaryLeafLoadedCachelines = secondaryLeafWorkMax * leafCachelines
maxAvgPrim = max(avgPrimaryNodeLoadedCachelines.max() , avgPrimaryLeafLoadedCachelines.max())
maxAvgSec = max(avgSecondaryNodeLoadedCachelines.max() , avgSecondaryLeafLoadedCachelines.max())
maxPrim = max(maxPrimaryNodeLoadedCachelines.max() , maxPrimaryLeafLoadedCachelines.max())
maxSec = max(maxSecondaryNodeLoadedCachelines.max() , maxSecondaryLeafLoadedCachelines.max())
print("Wide: Average Max primary loaded Cachelines " + str(maxAvgPrim))
print("Wide: Average Max secondary loaded Cachelines " + str(maxAvgSec))
print("Wide: Max primary loaded Cachelines " + str(maxPrim))
print("Wide: Max secondary loaded Cachelines " + str(maxSec))
# plot to show how many cachelines are loaded for what.
# add single traversal per ray average as reference line?
plt.plot(stepId, avgPrimaryNodeLoadedCachelines, label = "Avg Primary Node loaded cachelines")
plt.plot(stepId, avgPrimaryLeafLoadedCachelines, label = "Avg Primary Leaf loaded cachelines")
plt.plot(stepId, avgSecondaryNodeLoadedCachelines, label = "Avg Secondary Node loaded cachelines")
plt.plot(stepId, avgSecondaryLeafLoadedCachelines, label = "Avg Secondary Leaf loaded cachelines")
plt.axhline(linewidth=1, color='0.5')
plt.ylabel("loaded cachelines")
plt.xlabel("steps")
plt.axhline(y=primaryLoadedCachelines, linewidth=1, color='0.1')
plt.axhline(y=secondaryLoadedCachelines, linewidth=1, color='0')
plt.legend()
endPlot()
plt.plot(stepId, maxPrimaryNodeLoadedCachelines, label = "Max Primary Node loaded cachelines")
plt.plot(stepId, maxPrimaryLeafLoadedCachelines, label = "Max Primary Leaf loaded cachelines")
plt.plot(stepId, maxSecondaryNodeLoadedCachelines, label = "Max Secondary Node loaded cachelines")
plt.plot(stepId, maxSecondaryLeafLoadedCachelines, label = "Max Secondary Leaf loaded cachelines")
plt.axhline(linewidth=1, color='0.5')
plt.ylabel("loaded cachelines")
plt.xlabel("steps")
plt.axhline(y=maxPrimaryLoadedCachelines, linewidth=1, color='0.1')
plt.axhline(y=maxSecondaryLoadedCachelines, linewidth=1, color='0')
plt.legend()
endPlot()
def perRayPlot(filePath):
plt.figure(figsize=(15,7))
plt.suptitle("N4L4 workGroup 16")
tmpFilePath = filePath + "WideV0_c0.txt"
(totalTimeV0, nodeTimeV0, leafTimeV0) = np.loadtxt(tmpFilePath , delimiter=',', unpack=True, skiprows = 1)
tmpFilePath = filePath + "WideV1_c0.txt"
(totalTimeV1, nodeTimeV1, leafTimeV1) = np.loadtxt(tmpFilePath , delimiter=',', unpack=True, skiprows = 1)
x = np.arange(totalTimeV0.size)
#sort array by total time:
p = totalTimeV0.argsort()
totalTimeV0 = totalTimeV0[p]
nodeTimeV0 = nodeTimeV0[p]
leafTimeV0 = leafTimeV0[p]
p = totalTimeV1.argsort()
totalTimeV1 = totalTimeV1[p]
nodeTimeV1 = nodeTimeV1[p]
leafTimeV1 = leafTimeV1[p]
ax0 = plt.subplot(2,2,1)
plt.title("V0 camera0")
plt.axhline(linewidth=1, color='0.5')
plt.plot(x , totalTimeV0, label = "total Ray Time")
plt.plot(x , nodeTimeV0, label = "node Time")
plt.plot(x, leafTimeV0, label="leaf Time")
plt.ylabel(("Ray time in ms (for each workgroup)"))
plt.legend()
ax0 = plt.subplot(2,2,3, sharex = ax0, sharey = ax0)
plt.title("V1 camera0")
plt.axhline(linewidth=1, color='0.5')
plt.plot(x , totalTimeV1, label = "total Ray Time")
plt.plot(x , nodeTimeV1, label = "node Time")
plt.plot(x , leafTimeV1, label = "leaf Time")
plt.ylabel(("Ray time in ms (for each workgroup)"))
plt.legend()
npArrayAnalysis(totalTimeV0, "V0C0")
npArrayAnalysis(totalTimeV1, "V1C0")
tmpFilePath = filePath + "WideV0_c1.txt"
(totalTimeV0, nodeTimeV0, leafTimeV0) = np.loadtxt(tmpFilePath , delimiter=',', unpack=True, skiprows = 1)
tmpFilePath = filePath + "WideV1_c1.txt"
(totalTimeV1, nodeTimeV1, leafTimeV1) = np.loadtxt(tmpFilePath , delimiter=',', unpack=True, skiprows = 1)
#sort array by total time:
p = totalTimeV0.argsort()
totalTimeV0 = totalTimeV0[p]
nodeTimeV0 = nodeTimeV0[p]
leafTimeV0 = leafTimeV0[p]
p = totalTimeV1.argsort()
totalTimeV1 = totalTimeV1[p]
nodeTimeV1 = nodeTimeV1[p]
leafTimeV1 = leafTimeV1[p]
ax2 = plt.subplot(2, 2, 2, sharex = ax0, sharey = ax0)
plt.title("V0 camera1")
plt.axhline(linewidth=1, color='0.5')
plt.plot(x , totalTimeV0, label = "total Ray Time")
plt.plot(x , nodeTimeV0, label = "node Time")
plt.plot(x , leafTimeV0, label = "leaf Time")
plt.legend()
ax3 = plt.subplot(2, 2, 4, sharex = ax0, sharey = ax0)
plt.title("V1 camera1")
plt.axhline(linewidth=1, color='0.5')
plt.plot(x , totalTimeV1, label = "total Ray Time")
plt.plot(x , nodeTimeV1, label = "node Time")
plt.plot(x , leafTimeV1, label = "leaf Time")
plt.legend()
plt.savefig(outputFolder + 'rayTimingAnalysis.pdf')
plt.savefig(outputFolder + 'rayTimingAnalysis.pgf')
endPlot()
def npArrayAnalysis(a, title):
#some analysis for me: min, max, sd and variance, median, average
text = ': mean: {:.3f} median: {:.3f} min: {:.3f} max: {:.3f} sd: {:.3f} var: {:.3f}'.format(a.mean(), np.median(a), a.min(), a.max(), a.std(), a.var())
print(title + text)
def rayTotalAnalysis():
def rayTotalAnalysisHelperComparison(ax, totalTimeV1, leaf, branch, leafSize, width):
maskV1 = np.ma.masked_where(leaf != leafSize, totalTimeV1)
maskBranch = np.ma.masked_where(leaf != leafSize, branch)
maskV1 = maskV1.compressed()
maskBranch = maskBranch.compressed()
plt.xticks(np.arange(4, 20, step=4))
plt.title("Leafsize " + str(leafSize))
ax.bar(maskBranch, maskV1, width=width, bottom=1, label="Wide renderer")
ax.axhline(linewidth=1, y = 1, color='0.5')
plt.legend()
def rayTotalAnalysisHelperOverview(ax, totalTime, totalTimeV1, leaf, branch, leafSize, width):
mask = np.ma.masked_where(leaf != leafSize, totalTime)
maskV1 = np.ma.masked_where(leaf != leafSize, totalTimeV1)
maskBranch = np.ma.masked_where(leaf != leafSize, branch)
mask = mask.compressed()
maskV1 = maskV1.compressed()
maskBranch = maskBranch.compressed()
plt.xticks(np.arange(4, 20, step=4))
plt.title("Leafsize " + str(leafSize))
ax.bar(maskBranch - width / 2, maskV1, width=width, label="Wide ray")
ax.bar(maskBranch + width / 2, mask, width=width, label="Single ray Traversal")
#ax.axhline(linewidth=1, y = 0, color='0.5')
plt.legend(loc = 'lower left')
#overview of the 3 raytracer version, normal , wideV0 and wideV1 for N,L 4 to 16
(branch, leaf, totalTime) = np.loadtxt(inputFolder + "rayTotalTime.txt" , delimiter=',', unpack=True, skiprows = 1)
(branch, leaf, totalTimeV1) = np.loadtxt(inputFolder + "rayTotalTimeV1.txt" , delimiter=',', unpack=True, skiprows = 1)
width = 1
#first do not normalized plot
fig = plt.figure(figsize=(12, 8))
fig.suptitle("General performance overview")
ax0 = plt.subplot(2,2,1)
rayTotalAnalysisHelperOverview(ax0, totalTime, totalTimeV1, leaf, branch, 4, width)
plt.ylabel("Render time in seconds")
ax1 = plt.subplot(2,2,2, sharex = ax0, sharey = ax0)
rayTotalAnalysisHelperOverview(ax1, totalTime, totalTimeV1, leaf, branch, 8, width)
ax2 = plt.subplot(2,2,3, sharex = ax0, sharey = ax0)
rayTotalAnalysisHelperOverview(ax2, totalTime, totalTimeV1, leaf, branch, 12, width)
plt.ylabel("Render time in seconds")
plt.xlabel("Nodesize")
ax3 = plt.subplot(2,2,4, sharex = ax0, sharey = ax0)
rayTotalAnalysisHelperOverview(ax3, totalTime, totalTimeV1, leaf, branch, 16, width)
plt.xlabel("Nodesize")
plt.savefig(outputFolder + "PerformanceOverview.pdf")
plt.savefig(outputFolder + "PerformanceOverview.pgf")
#Now the same plot normalized by the single-ray-traversal baseline
fig = plt.figure(figsize=(12, 8))
fig.suptitle("Performance comparison of single ray traversal to wide traversal")
totalTimeV1 = (totalTimeV1 / totalTime) - 1
ax0 = plt.subplot(2,2,1)
plt.ylabel("time relative to single ray traversal")
rayTotalAnalysisHelperComparison(ax0, totalTimeV1, leaf, branch, 4, width)
ax1 = plt.subplot(2,2,2, sharex = ax0, sharey = ax0)
rayTotalAnalysisHelperComparison(ax1, totalTimeV1, leaf, branch, 8, width)
ax2 = plt.subplot(2,2,3, sharex = ax0, sharey = ax0)
plt.xlabel("Nodesize")
plt.ylabel("time relative to single ray traversal")
rayTotalAnalysisHelperComparison(ax2, totalTimeV1, leaf, branch, 12, width)
ax3 = plt.subplot(2,2,4, sharex = ax0, sharey = ax0)
plt.xlabel("Nodesize")
rayTotalAnalysisHelperComparison(ax3, totalTimeV1, leaf, branch, 16, width)
plt.savefig(outputFolder + "PerformanceComparison.pdf")
plt.savefig(outputFolder + "PerformanceComparison.pgf")
endPlot()
def rayTotalAnalysisPadding():
def helperOverview(padding, branch, totalTime, totalTimeV0, totalTimeV1, nodeSize):
mask = np.ma.masked_where(branch != nodeSize, totalTime)
maskV0 = np.ma.masked_where(branch != nodeSize, totalTimeV0)
maskV1 = np.ma.masked_where(branch != nodeSize, totalTimeV1)
maskBranch = np.ma.masked_where(branch != nodeSize, branch)
maskPadding = np.ma.masked_where(branch != nodeSize, padding)
mask = mask.compressed()
maskV0 = maskV0.compressed()
maskV1 = maskV1.compressed()
maskBranch = maskBranch.compressed()
maskPadding = maskPadding.compressed()
plt.xticks((0,1,2,5,10,20))
plt.title("NodeSize " + str(nodeSize))
plt.bar(maskPadding - width, maskV0, width = width, label = "Wide ray V0")
plt.bar(maskPadding, maskV1, width = width, label = "Wide ray V1")
plt.bar(maskPadding + width, mask, width = width, label = "Single ray traversal")
#plt.axhline(linewidth=1, y = 1, color='0.5')
plt.legend()
def helperRelativeOverview(padding, branch, totalTimeV0, totalTimeV1, nodeSize):
maskV0 = np.ma.masked_where(branch != nodeSize, totalTimeV0)
maskV1 = np.ma.masked_where(branch != nodeSize, totalTimeV1)
maskBranch = np.ma.masked_where(branch != nodeSize, branch)
maskPadding = np.ma.masked_where(branch != nodeSize, padding)
maskV0 = maskV0.compressed()
maskV1 = maskV1.compressed()
maskBranch = maskBranch.compressed()
maskPadding = maskPadding.compressed()
plt.xticks((0,1,2,5,10,20))
plt.title("NodeSize " + str(nodeSize))
plt.bar(maskPadding - width / 2, maskV0, width = width, bottom = 1, label = "V0")
plt.bar(maskPadding + width / 2, maskV1, width = width, bottom = 1, label = "V1")
plt.axhline(linewidth=1, y = 1, color='0.5')
plt.legend()
(padding, branch, leaf, totalTime) = np.loadtxt(inputFolder + "PaddingResults/PadSummary_rayTotalTime.txt" , delimiter=',', unpack=True, skiprows = 1)
(padding, branch, leaf, totalTimeV0) = np.loadtxt(inputFolder + "PaddingResults/PadSummary_rayTotalTimeV0.txt" , delimiter=',', unpack=True, skiprows = 1)
(padding, branch, leaf, totalTimeV1) = np.loadtxt(inputFolder + "PaddingResults/PadSummary_rayTotalTimeV1.txt" , delimiter=',', unpack=True, skiprows = 1)
width = 0.3
plt.figure(figsize=(12,8))
ax0 = plt.subplot(2,2,1)
plt.suptitle("Performance effect of padding")
plt.ylabel("Render time in seconds")
helperOverview(padding, branch, totalTime, totalTimeV0, totalTimeV1, 4)
ax1 = plt.subplot(2,2,2, sharex = ax0, sharey = ax0)
helperOverview(padding, branch, totalTime, totalTimeV0, totalTimeV1, 8)
ax2 = plt.subplot(2,2,3, sharex = ax0, sharey = ax0)
plt.ylabel("Render time in seconds")
plt.xlabel("Padding")
helperOverview(padding, branch, totalTime, totalTimeV0, totalTimeV1, 12)
ax3 = plt.subplot(2,2,4, sharex = ax0, sharey = ax0)
plt.xlabel("Padding")
helperOverview(padding, branch, totalTime, totalTimeV0, totalTimeV1, 16)
plt.savefig(outputFolder + "PerformanceOverviewPadding.pdf")
plt.savefig(outputFolder + "PerformanceOverviewPadding.pgf")
#normalize by the single-ray-traversal baseline
totalTimeV0 = (totalTimeV0 / totalTime) - 1
totalTimeV1 = (totalTimeV1 / totalTime) - 1
plt.figure(figsize=(12,8))
ax0 = plt.subplot(2,2,1)
plt.suptitle("Performance comparison of single ray traversal to wide traversal with different paddings.")
plt.ylabel("time relative to single ray traversal")
helperRelativeOverview(padding, branch, totalTimeV0, totalTimeV1, 4)
ax1 = plt.subplot(2,2,2, sharex = ax0, sharey = ax0)
helperRelativeOverview(padding, branch, totalTimeV0, totalTimeV1, 8)
ax2 = plt.subplot(2,2,3, sharex = ax0, sharey = ax0)
plt.ylabel("time relative to single ray traversal")
plt.xlabel("Padding")
helperRelativeOverview(padding, branch, totalTimeV0, totalTimeV1, 12)
ax3 = plt.subplot(2,2,4, sharex = ax0, sharey = ax0)
plt.xlabel("Padding")
helperRelativeOverview(padding, branch, totalTimeV0, totalTimeV1, 16)
plt.savefig(outputFolder + "PerformanceRelativeOverviewPadding.pdf")
plt.savefig(outputFolder + "PerformanceRelativeOverviewPadding.pgf")
endPlot()
#makePerfAnalysis(inputFolder + "amazonLumberyardInterior_4To16Table.txt", "Amazon Lumberyard Interior Sse", "AmazonLumberyardInterior_4To16Perf")
#makeIntersectionAnalysis(inputFolder + "amazonLumberyardInterior_1To16Table.txt" , "Amazon Lumberyard Interior", "AmazonLumberyardInterior_1To16")
#makeWorkGroupWiskerPlots((inputFolder + "WorkGroups/WorkGroupSize_" , "_Version_0/amazonLumberyardInterior_b4_l4_c0_WorkGroupData.txt"), "Primary N4L4S", 16, ("N4L4S" ,"WorkGroupAnalysisC0_Old"))
#The per-step work-group analysis (per-step unique loaded nodes) is of limited use, since it averages only over alive rays; unclear what would be better.
#makeWorkGroupAnalysis((inputFolder + "WorkGroups/WorkGroupSize_" , "_Version_0/amazonLumberyardInterior_b4_l4_c0_WorkGroupData.txt"), 16, ("N4L4S" ,"WorkGroupAnalysisC0_Old"))
#makeWorkGroupAnalysis((inputFolder + "WorkGroups/WorkGroupSize_" , "_Version_1/amazonLumberyardInterior_b4_l4_c0_WorkGroupData.txt"), 16, ("N4L4S" ,"WorkGroupAnalysisC0_New"))
#general workgroup unique node analysis (comparison to single ray traversal)
#makeWorkGroupUniqueAnalysis(inputFolder + "WorkGroups/WorkGroupSize_16_Version_0/amazonLumberyardInterior_b4_l4_c0_WorkGroupUniqueWork.txt", "c0", 16)
#makeWorkGroupUniqueAnalysis(inputFolder + "WorkGroups/WorkGroupSize_16_Version_0/amazonLumberyardInterior_b4_l4_c1_WorkGroupUniqueWork.txt", "c1", 16)
#workGroupUniqueLoadedCachelines()
#perRayPlot(inputFolder + "amazonLumberyardInteriorRayPerformance")
rayTotalAnalysis()
#rayTotalAnalysisPadding() |
# coding: utf-8
'''
# Gestures for Pythonista
This is a convenience class for enabling gestures in Pythonista UI applications, including built-in views. Main intent here has been to make them Python friendly, hiding all the Objective-C stuff. All gestures correspond to the standard Apple gestures, except for the custom force press gesture.
Run the file on its own to see a demo of the supported gestures.
Get it from [GitHub](https://github.com/mikaelho/pythonista-gestures).
## Example
For example, do something when user swipes left on a TextView:
def swipe_handler(data):
print('I was swiped, starting from ' + str(data.location))
tv = ui.TextView()
Gestures().add_swipe(tv, swipe_handler, direction = Gestures.LEFT)
Your handler method gets one `data` argument that always contains the attributes described below. Individual gestures may provide more information; see the API documentation for the `add_` methods.
* `recognizer` - (ObjC) recognizer object
* `view` - (Pythonista) view that captured the object
* `location` - Location of the gesture as a `ui.Point` with `x` and `y` attributes
* `state` - State of gesture recognition; one of `Gestures.POSSIBLE/BEGAN/RECOGNIZED/CHANGED/ENDED/CANCELLED/FAILED`
* `number_of_touches` - Number of touches recognized
For continuous gestures, check for `data.state == Gestures.ENDED` in the handler if you are just interested that a pinch or a force press happened.
All of the `add_x` methods return a `recognizer` object that can be used to remove or disable the gesture as needed, see the API. You can also remove all gestures from a view with `remove_all_gestures(view)`.
#docgen-toc
## Fine-tuning gesture recognition
By default only one gesture recognizer will be successful, but if you want to, for example, enable both zooming (pinch) and panning at the same time, allow both recognizers:
g = Gestures()
g.recognize_simultaneously = lambda gr, other_gr: gr == Gestures.PAN and other_gr == Gestures.PINCH
The other methods you can override are `fail` and `fail_other`, corresponding to the other [UIGestureRecognizerDelegate](https://developer.apple.com/reference/uikit/uigesturerecognizerdelegate?language=objc) methods.
All regular recognizers have convenience names that you can use like in the example above: `Gestures.TAP/PINCH/ROTATION/SWIPE/PAN/SCREEN_EDGE_PAN/LONG_PRESS`.
If you need to set these per gesture, instantiate separate `Gestures` objects.
## Notes
* Adding a gesture to a view automatically sets `touch_enabled=True` for that view, to avoid counter-intuitive situations where adding a gesture recognizer to e.g. ui.Label produces no results.
* It can be hard to add gestures to ui.ScrollView, ui.TextView and the like, because they have complex multi-view structures and gestures already in place.
* To facilitate the gesture handler callbacks from Objective-C to Python, the Gestures instance used to create the gesture must be live. You do not need to manage that as objc_util.retain_global is used to keep a global reference around. If you for some reason must track the reference manually, you can turn this behavior off with a `retain_global_reference=False` parameter for the constructor.
* Single Gestures instance can be used to add any number of gestures to any number of views, but you can just as well create a new instance whenever and wherever you need to add a new handler.
* If you need to create millions of dynamic gestures in a long-running app, it can be worthwhile to explicitly `remove` them when no longer needed, to avoid a memory leak.
'''
import ui
from objc_util import *
import uuid
from types import SimpleNamespace
from functools import partial
# https://developer.apple.com/library/prerelease/ios/documentation/UIKit/Reference/UIGestureRecognizer_Class/index.html#//apple_ref/occ/cl/UIGestureRecognizer
class Gestures():
TYPE_REGULAR = 0
TYPE_FORCE = 1
TYPE_STYLUS = 4
TYPE_ANY = 8
TAP = b'UITapGestureRecognizer'
PINCH = b'UIPinchGestureRecognizer'
ROTATION = b'UIRotationGestureRecognizer'
SWIPE = b'UISwipeGestureRecognizer'
PAN = b'UIPanGestureRecognizer'
SCREEN_EDGE_PAN = b'UIScreenEdgePanGestureRecognizer'
LONG_PRESS = b'UILongPressGestureRecognizer'
POSSIBLE = 0
BEGAN = 1
RECOGNIZED = 3 # UIKit defines "recognized" as an alias of "ended"
CHANGED = 2
ENDED = 3
CANCELLED = 4
FAILED = 5
RIGHT = 1
LEFT = 2
UP = 4
DOWN = 8
EDGE_NONE = 0
EDGE_TOP = 1
EDGE_LEFT = 2
EDGE_BOTTOM = 4
EDGE_RIGHT = 8
EDGE_ALL = 15
def __init__(self, touch_type=TYPE_REGULAR, force_threshold=0.4, retain_global_reference = True):
self.buttons = {}
self.views = {}
self.recognizers = {}
self.actions = {}
self.touches = {}
self.touch_type = touch_type
self.force_threshold = force_threshold
if retain_global_reference:
retain_global(self)
# Friendly delegate functions
def recognize_simultaneously_default(gr_name, other_gr_name):
return False
self.recognize_simultaneously = recognize_simultaneously_default
def fail_default(gr_name, other_gr_name):
return False
self.fail = fail_default
def fail_other_default(gr_name, other_gr_name):
return False
self.fail_other = fail_other_default
# ObjC delegate functions
def simplify(func, gr, other_gr):
gr_o = ObjCInstance(gr)
other_gr_o = ObjCInstance(other_gr)
if (gr_o.view() != other_gr_o.view()):
return False
gr_name = gr_o._get_objc_classname()
other_gr_name = other_gr_o._get_objc_classname()
return func(gr_name, other_gr_name)
# Recognize simultaneously
def gestureRecognizer_shouldRecognizeSimultaneouslyWithGestureRecognizer_(_self, _sel, gr, other_gr):
return self.objc_should_recognize_simultaneously(self.recognize_simultaneously, gr, other_gr)
def objc_should_recognize_simultaneously_default(func, gr, other_gr):
return simplify(func, gr, other_gr)
self.objc_should_recognize_simultaneously = objc_should_recognize_simultaneously_default
# Fail other
def gestureRecognizer_shouldRequireFailureOfGestureRecognizer_(_self, _sel, gr, other_gr):
return self.objc_should_require_failure(self.fail_other, gr, other_gr)
def objc_should_require_failure_default(func, gr, other_gr):
return simplify(func, gr, other_gr)
self.objc_should_require_failure = objc_should_require_failure_default
# Fail
def gestureRecognizer_shouldBeRequiredToFailByGestureRecognizer_(_self, _sel, gr, other_gr):
return self.objc_should_fail(self.fail, gr, other_gr)
def objc_should_fail_default(func, gr, other_gr):
return simplify(func, gr, other_gr)
self.objc_should_fail = objc_should_fail_default
# Delegate
try:
PythonistaGestureDelegate = ObjCClass('PythonistaGestureDelegate')
except:
PythonistaGestureDelegate = create_objc_class('PythonistaGestureDelegate',
superclass=NSObject,
methods=[
#gestureRecognizer_shouldReceiveTouch_,
gestureRecognizer_shouldRecognizeSimultaneouslyWithGestureRecognizer_,
gestureRecognizer_shouldRequireFailureOfGestureRecognizer_,
gestureRecognizer_shouldBeRequiredToFailByGestureRecognizer_],
classmethods=[],
protocols=['UIGestureRecognizerDelegate'],
debug=True)
self._delegate = PythonistaGestureDelegate.new()
@on_main_thread
def add_tap(self, view, action, number_of_taps_required = None, number_of_touches_required = None):
''' Call `action` when a tap gesture is recognized for the `view`.
Additional parameters:
* `number_of_taps_required` - Set if more than one tap is required for the gesture to be recognized.
* `number_of_touches_required` - Set if more than one finger is required for the gesture to be recognized.
'''
recog = self._get_recog('UITapGestureRecognizer', view, self._general_action, action)
if number_of_taps_required:
recog.numberOfTapsRequired = number_of_taps_required
if number_of_touches_required:
recog.numberOfTouchesRequired = number_of_touches_required
return recog
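# A minimal usage sketch for add_tap, assuming `my_label` is some Pythonista ui
# view and `double_tap_handler` an existing callable (both names are hypothetical):
#   g = Gestures()
#   g.add_tap(my_label, lambda data: print('Tapped at', data.location))
#   g.add_tap(my_label, double_tap_handler, number_of_taps_required=2)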
@on_main_thread
def add_long_press(self, view, action, number_of_taps_required = None, number_of_touches_required = None, minimum_press_duration = None, allowable_movement = None):
''' Call `action` when a long press gesture is recognized for the `view`. Note that this is a continuous gesture; you might want to check for `data.state == Gestures.CHANGED` or `ENDED` to get the desired results.
Additional parameters:
* `number_of_taps_required` - Set if more than one tap is required for the gesture to be recognized.
* `number_of_touches_required` - Set if more than one finger is required for the gesture to be recognized.
* `minimum_press_duration` - Set to change the default 0.5-second recognition threshold.
* `allowable_movement` - Set to change the default 10 point maximum distance allowed for the gesture to be recognized.
'''
recog = self._get_recog('UILongPressGestureRecognizer', view, self._general_action, action)
if number_of_taps_required:
recog.numberOfTapsRequired = number_of_taps_required
if number_of_touches_required:
recog.numberOfTouchesRequired = number_of_touches_required
if minimum_press_duration:
recog.minimumPressDuration = minimum_press_duration
if allowable_movement:
recog.allowableMovement = allowable_movement
return recog
@on_main_thread
def add_pan(self, view, action, minimum_number_of_touches = None, maximum_number_of_touches = None):
''' Call `action` when a pan gesture is recognized for the `view`. This is a continuous gesture.
Additional parameters:
* `minimum_number_of_touches` - Minimum number of fingers that must touch the view for the gesture to be recognized.
* `maximum_number_of_touches` - Maximum number of fingers that may touch the view while the gesture is tracked.
Handler `action` receives the following gesture-specific attributes in the `data` argument:
* `translation` - Translation from the starting point of the gesture as a `ui.Point` with `x` and `y` attributes.
* `velocity` - Current velocity of the pan gesture as points per second (a `ui.Point` with `x` and `y` attributes).
'''
recog = self._get_recog('UIPanGestureRecognizer', view, self._pan_action, action)
if minimum_number_of_touches:
recog.minimumNumberOfTouches = minimum_number_of_touches
if maximum_number_of_touches:
recog.maximumNumberOfTouches = maximum_number_of_touches
return recog
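# A minimal sketch of a pan handler reading the gesture-specific attributes listed
# above (`my_view` and `on_pan` are hypothetical names):
#   def on_pan(data):
#       print('translation', data.translation, 'velocity', data.velocity,
#             'state', data.state)
#   g.add_pan(my_view, on_pan, maximum_number_of_touches=1)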
@on_main_thread
def add_screen_edge_pan(self, view, action, edges):
''' Call `action` when a pan gesture starting from the edge is recognized for the `view`. This is a continuous gesture.
`edges` must be set to one of `Gestures.EDGE_NONE/EDGE_TOP/EDGE_LEFT/EDGE_BOTTOM/EDGE_RIGHT/EDGE_ALL`. If you want to recognize pans from different edges, you have to set up separate recognizers with separate calls to this method.
Handler `action` receives the same gesture-specific attributes in the `data` argument as pan gestures, see `add_pan`.
'''
recog = self._get_recog('UIScreenEdgePanGestureRecognizer', view, self._pan_action, action)
recog.edges = edges
return recog
@on_main_thread
def add_pinch(self, view, action):
''' Call `action` when a pinch gesture is recognized for the `view`. This is a continuous gesture.
Handler `action` receives the following gesture-specific attributes in the `data` argument:
* `scale` - Scale factor relative to the distance between the fingers when the touch first started.
* `velocity` - Current velocity of the pinch gesture as scale per second.
'''
recog = self._get_recog('UIPinchGestureRecognizer', view, self._pinch_action, action)
return recog
@on_main_thread
def add_rotation(self, view, action):
''' Call `action` when a rotation gesture is recognized for the `view`. This is a continuous gesture.
Handler `action` receives the following gesture-specific attributes in the `data` argument:
* `rotation` - Rotation in radians, relative to the position of the fingers when the touch first started.
* `velocity` - Current velocity of the rotation gesture as radians per second.
'''
recog = self._get_recog('UIRotationGestureRecognizer', view, self._rotation_action, action)
return recog
@on_main_thread
def add_swipe(self, view, action, direction = None, number_of_touches_required = None):
''' Call `action` when a swipe gesture is recognized for the `view`.
Additional parameters:
* `direction` - Direction of the swipe to be recognized. Either one of `Gestures.RIGHT/LEFT/UP/DOWN`, or a list of multiple directions.
* `number_of_touches_required` - Set if more than one finger is required for the gesture to be recognized.
If swipes to multiple directions are to be recognized, the handler does not receive any indication of the direction of the swipe. Add multiple recognizers if you need to differentiate between the directions.
'''
recog = self._get_recog('UISwipeGestureRecognizer', view, self._general_action, action)
if direction:
combined_dir = direction
if isinstance(direction, list):
combined_dir = 0
for one_direction in direction:
combined_dir |= one_direction
recog.direction = combined_dir
if number_of_touches_required:
recog.numberOfTouchesRequired = number_of_touches_required
return recog
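# A minimal sketch showing a single-direction and a combined-direction swipe
# (`my_view` and `on_swipe` are hypothetical names); with the list form the handler
# is not told which direction was actually swiped:
#   g.add_swipe(my_view, on_swipe, direction=Gestures.LEFT)
#   g.add_swipe(my_view, on_swipe, direction=[Gestures.LEFT, Gestures.RIGHT])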
@on_main_thread
def add_force_press(self, view, action, threshold=0.4):
''' Call `action` when a force press gesture is recognized for the `view`. This is a continuous gesture.
Additional parameters:
* `threshold` - How much pressure is required for the gesture to be detected, between 0 and 1. Default is 0.4.
Handler `action` receives the following gesture-specific attributes in the `data` argument:
* `force` - Force of the press, a value between `threshold` and 1.
'''
recog = self._get_recog('UILongPressGestureRecognizer', view, partial(self._force_press_action, threshold), action)
return recog
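# A minimal sketch of a force-press handler using the `force` attribute described
# above (`my_view` and `on_force` are hypothetical names; requires hardware that
# reports touch force):
#   def on_force(data):
#       print('force', round(data.force, 2))
#   g.add_force_press(my_view, on_force, threshold=0.5)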
@on_main_thread
def disable(self, recognizer):
''' Disable a recognizer temporarily. '''
ObjCInstance(recognizer).enabled = False
@on_main_thread
def enable(self, recognizer):
''' Enable a disabled gesture recognizer. There is no error if the recognizer is already enabled. '''
ObjCInstance(recognizer).enabled = True
@on_main_thread
def remove(self, view, recognizer):
''' Remove the recognizer from the view permanently. '''
key = None
for recog_key in self.recognizers:
if self.recognizers[recog_key] == recognizer:
key = recog_key
break
if key:
del self.buttons[key]
del self.views[key]
del self.recognizers[key]
del self.actions[key]
ObjCInstance(view).removeGestureRecognizer_(recognizer)
@on_main_thread
def remove_all_gestures(self, view):
''' Remove all gesture recognizers from a view. '''
gestures = ObjCInstance(view).gestureRecognizers()
for recog in gestures:
self.remove(view, recog)
def _get_recog(self, recog_name, view, internal_action, final_handler):
view.touch_enabled = True
button = ui.Button()
key = str(uuid.uuid4())
button.name = key
button.action = internal_action
self.buttons[key] = button
self.views[key] = view
recognizer = ObjCClass(recog_name).alloc().initWithTarget_action_(button, sel('invokeAction:')).autorelease()
self.recognizers[key] = recognizer
self.actions[key] = final_handler
ObjCInstance(view).addGestureRecognizer_(recognizer)
recognizer.delegate = self._delegate
return recognizer
class Data():
def __init__(self):
self.recognizer = self.view = self.location = self.state = self.number_of_touches = self.scale = self.rotation = self.velocity = None
def _context(self, button):
key = button.name
(view, recog, action) = (self.views[key], self.recognizers[key], self.actions[key])
data = Gestures.Data()
data.recognizer = recog
data.view = view
data.location = self._location(view, recog)
data.state = recog.state()
data.number_of_touches = recog.numberOfTouches()
#data.additional_touch_data = self.touches[recog]
return (data, action)
def _location(self, view, recog):
loc = recog.locationInView_(ObjCInstance(view))
return ui.Point(loc.x, loc.y)
def _general_action(self, sender):
(data, action) = self._context(sender)
action(data)
def _pan_action(self, sender):
(data, action) = self._context(sender)
trans = data.recognizer.translationInView_(ObjCInstance(data.view))
vel = data.recognizer.velocityInView_(ObjCInstance(data.view))
data.translation = ui.Point(trans.x, trans.y)
data.velocity = ui.Point(vel.x, vel.y)
action(data)
def _pinch_action(self, sender):
(data, action) = self._context(sender)
data.scale = data.recognizer.scale()
data.velocity = data.recognizer.velocity()
action(data)
def _rotation_action(self, sender):
(data, action) = self._context(sender)
data.rotation = data.recognizer.rotation()
data.velocity = data.recognizer.velocity()
action(data)
def _force_press_action(self, threshold, sender):
(data, action) = self._context(sender)
touch = data.recognizer.touches()[0]
force_fraction = touch.force()/touch.maximumPossibleForce()
if force_fraction > threshold:
data.force = force_fraction
action(data)
# TESTING AND DEMONSTRATION
if __name__ == "__main__":
import math, random
g = Gestures()
def random_background(view):
colors = ['#0b6623', '#9dc183', '#3f704d', '#8F9779', '#4F7942', '#A9BA9D', '#D0F0C0', '#043927', '#679267', '#2E8B57']
view.background_color = random.choice(colors)
view.text_color = 'black' if sum(view.background_color[:3]) > 1.5 else 'white'
def update_text(l, text):
l.text = '\n'.join([l.text.splitlines()[0]] + [text])
def generic_handler(data):
update_text(data.view, 'State: ' + str(data.state) + ' Touches: ' + str(data.number_of_touches))
random_background(data.view)
def long_press_handler(data):
random_background(data.view)
if data.state == Gestures.CHANGED:
update_text(data.view, 'Ongoing')
elif data.state == Gestures.ENDED:
update_text(data.view, 'Finished')
def pan_handler(data):
update_text(data.view, 'Trans: ' + str(data.translation))
random_background(data.view)
def pinch_handler(data):
random_background(data.view)
update_text(data.view, 'Scale: ' + str(round(data.scale, 6)))
def pan_or_pinch_handler(data):
random_background(data.view)
if hasattr(data, 'translation'):
update_text(data.view, 'Pan')
elif hasattr(data, 'scale'):
update_text(data.view, 'Pinch')
else:
update_text(data.view, 'Something else')
def pan_or_swipe_handler(data):
random_background(data.view)
if hasattr(data, 'translation'):
update_text(data.view, 'Pan')
else:
update_text(data.view, 'Swipe')
def force_handler(data):
base_color = (.82, .94, .75)
color_actual = [c*data.force for c in base_color]
data.view.background_color = tuple(color_actual)
data.view.text_color = 'black' if sum(color_actual) > 1.5 else 'white'
update_text(data.view, 'Force: ' + str(round(data.force, 6)))
def stylus_handler(data):
random_background(data.view)
bg = ui.View()
bg.present()
edge_l = ui.Label(
text='Edge pan (from right)',
background_color='grey',
text_color='white',
alignment=ui.ALIGN_CENTER,
number_of_lines=0,
frame=(
0, 0, bg.width, 75
))
bg.add_subview(edge_l)
g.add_screen_edge_pan(edge_l, pan_handler, edges=Gestures.EDGE_RIGHT)
v = ui.ScrollView(frame=(0, 75, bg.width, bg.height-75))
bg.add_subview(v)
label_count = -1
def create_label(title):
global label_count
label_count += 1
label_w = 175
label_h = 75
gap = 10
label_w_with_gap = label_w + gap
label_h_with_gap = label_h + gap
labels_per_line = math.floor((v.width-2*gap)/(label_w+gap))
left_margin = (v.width - labels_per_line*label_w_with_gap + gap)/2
line = math.floor(label_count/labels_per_line)
column = label_count - line*labels_per_line
l = ui.Label(
text=title,
background_color='grey',
text_color='white',
alignment=ui.ALIGN_CENTER,
number_of_lines=0,
frame=(
left_margin+column * label_w_with_gap,
gap+line * label_h_with_gap,
label_w, label_h
))
v.add_subview(l)
return l
tap_l = create_label('Tap')
g.add_tap(tap_l, generic_handler)
tap_2_l = create_label('2-finger tap')
g.add_tap(tap_2_l, generic_handler, number_of_touches_required=2)
doubletap_l = create_label('Doubletap')
g.add_tap(doubletap_l, generic_handler, number_of_taps_required=2)
long_l = create_label('Long press')
g.add_long_press(long_l, long_press_handler)
pan_l = create_label('Pan')
g.add_pan(pan_l, pan_handler)
swipe_l = create_label('Swipe (right)')
g.add_swipe(swipe_l, generic_handler, direction=Gestures.RIGHT)
pinch_l = create_label('Pinch')
g.add_pinch(pinch_l, pinch_handler)
pan_or_pinch_l = create_label('Pan or pinch')
g.add_pan(pan_or_pinch_l, pan_or_pinch_handler)
g.add_pinch(pan_or_pinch_l, pan_or_pinch_handler)
g.fail_other = lambda gr, other_gr: gr == Gestures.PAN and other_gr == Gestures.SWIPE
pan_or_swipe_l = create_label('Pan or swipe (right)')
g.add_pan(pan_or_swipe_l, pan_or_swipe_handler)
g.add_swipe(pan_or_swipe_l, pan_or_swipe_handler, direction=Gestures.RIGHT)
force_l = create_label('Force press')
g.add_force_press(force_l, force_handler)
class EventDisplay(ui.View):
def __init__(self):
self.tv = ui.TextView(flex='WH', editable=False)
self.add_subview(self.tv)
self.tv.frame = (0, 0, self.width, self.height)
g = Gestures()
g.recognize_simultaneously = lambda gr, other_gr: gr == Gestures.PAN and other_gr == Gestures.PINCH
g.fail_other = lambda gr, other_gr: other_gr == Gestures.PINCH
g.add_tap(self, self.general_handler)
g.add_long_press(self.tv, self.long_press_handler)
pan = g.add_pan(self, self.pan_handler)
#g.add_screen_edge_pan(self.tv, self.pan_handler, edges = Gestures.EDGE_LEFT)
#g.add_swipe(self.tv, self.general_handler, direction = [Gestures.DOWN])
#g.add_pinch(self, self.pinch_handler)
g.add_rotation(self.tv, self.rotation_handler)
def t(self, msg):
self.tv.text = self.tv.text + msg + '\n'
def general_handler(self, data):
self.t('General: ' + str(data.location) + ' - state: ' + str(data.state) + ' - touches: ' + str(data.number_of_touches))
def long_press_handler(self, data):
if data.state == Gestures.ENDED:
self.t('Long press: ' + str(data.location) + ' - state: ' + str(data.state) + ' - touches: ' + str(data.number_of_touches))
def pan_handler(self, data):
self.t('Pan: ' + str(data.translation) + ' - state: ' + str(data.state))
def pinch_handler(self, data):
self.t('Pinch: ' + str(data.scale) + ' state: ' + str(data.state))
def rotation_handler(self, data):
self.t('Rotation: ' + str(data.rotation))
# Program with a Nepali class and a subclass Newari
class Nepali:
def __init__(self):
print("Namaste")
class Newari(Nepali):
def __init__(self):
Nepali.__init__(self)
print("Jujulapa")
n1 = Newari()
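# Creating a Newari instance first runs the parent initializer via
# Nepali.__init__(self), so this prints "Namaste" followed by "Jujulapa".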