text stringlengths 8 6.05M |
|---|
#对搜索内容进行处理,生成字典
def queryProcess(query):
    """Parse a search string into a list of terms and field filters.

    Whitespace-separated tokens without ':' are kept as plain keyword
    strings; tokens of the form 'field:value' become {field: value}
    dicts, with '+' inside the token restored to a space.

    Fixes: split only on the FIRST ':' so values containing colons
    (e.g. 'time:12:30') are not truncated; removed debug print and the
    unused `meta` initialization.
    """
    query_dict = []
    for token in query.split(' '):
        if ':' not in token:
            query_dict.append(token)
        else:
            token = token.replace('+', ' ')
            # maxsplit=1: only the first ':' separates field from value
            field, value = token.split(':', 1)
            query_dict.append({field: value})
    return query_dict
#向es查询
# def search(query_dict):
# should_list = []
# must_list = []
# tmp = []
# for x in query_dict:
# if str(x).count(':') == 0:
# tmp = [
# {'match': {'age': x}},
# {'match': {'gender': x}},
# {'match': {'city': x}},
# {'match': {'employer': x}},
# {'match': {'state': x}},
# ]
# should_list.extend(tmp)
# else:
# must_list.append({'match': x})
#
# query_dsl = {
# 'query': {
# 'bool': {
# 'should': should_list,
# 'must': must_list
# }
# }
# }
#
# es = Elasticsearch("127.0.0.1:9200")
# responce = es.search(index="bank", body=query_dsl)
# data = responce["hits"]["hits"]
# return data |
# -*- coding: utf-8 -*-
"""
@author: Kamila Kitowska, Katarzyna Pękala
"""
#%%
#libraries and initial values
import os
os.chdir("D:/Big Data/projekt zaliczeniowy Python/projekt_zaliczeniowy/src/cost_simulator")
#import importlib
import datetime
import numpy as np
import ec2_spot_pricing_simulator as ecs
import matplotlib.pyplot as plt
from textwrap import wrap
#importlib.reload(ecs)
# simulation start and end dates
# simulation window: one day matching the historical spot-price file (2017-11-27)
start = datetime.datetime.strptime(\
    "2017-11-27 00:30:00","%Y-%m-%d %H:%M:%S")
end = datetime.datetime.strptime(\
    "2017-11-27 23:30:00","%Y-%m-%d %H:%M:%S")
ec2_od = 300 #amount of on-demand servers, already bought (reserved fleet)
ec2_price_od_old = 0.42 #price of existing on-demand servers, per server per hour
users_per_server = 100 # how many users one server can serve per minute
revenue = 0.00025 # revenue per SERVED USER per minute (code multiplies by users served)
demand_avg = 40000 #users per minute, avg, prediction
demand_std_dev = 5000 #users per minute, standard dev, prediction
ec2_price_od = 0.84 #current price of new on-demand servers, per server per hour
# decision variables: how many new on-demand servers, how many spot servers?
# initial values
ec2_od_new = 0
ec2_spot = 0
n_of_sim = 500 #number of Monte-Carlo simulations
availability_level = 0.99 # fraction of users/min that must have access
availability_no_sim = 0.9 # fraction of simulations that must meet avail. level
bid = 0.84 # spot-instance bid -- presumably per server per hour; TODO confirm against ecs simulator
#spot prices source
# 1 - simulation
# 0 - historical
spot_prices_s = 0
#%%
def minimum_from_arrays(a, b):
    """Return a list with the element-wise minima of sequences *a* and *b*.

    Pairs are taken positionally; iteration stops at the shorter input
    (the original indexed `b` by `a`'s length and could raise IndexError).
    """
    return [min(x, y) for x, y in zip(a, b)]
def is_empty(any_structure):
    """Return True when *any_structure* is falsy (empty), else False."""
    return not any_structure
# pick the spot-price input: simulated series, or the historical 2017-11-27 one
spot_file = "ceny_spot_sim.txt" if spot_prices_s == 1 else "ceny_spot_2711.txt"
import numpy
def add_on_demand_instances(spot_avail):
    """Derive fallback on-demand coverage for minutes where spot capacity is lost.

    spot_avail -- 0/1 numpy vector, one entry per minute (1 = spot available).
    Returns (res_vector, cost):
      res_vector -- 0/1 vector marking minutes covered by fallback on-demand
                    servers; coverage starts 2 minutes into an outage
                    (presumably modelling on-demand startup time -- see the
                    "2 min for startup" comment at the call site).
      cost       -- billed on-demand hours: one unit per started hour of
                    each continuous outage run.
    """
    # vector of server availibility
    res_vector = numpy.diff(spot_avail)
    res_vector = numpy.append(res_vector,0)
    # keep only the drops (1 -> 0 transitions show up as -1 after diff)
    res_vector = np.minimum(numpy.zeros_like(spot_avail),res_vector)
    # tmp is 1 on every minute where spot capacity is unavailable
    tmp = numpy.absolute(spot_avail-1)
    # shift the drop markers by 1 and 2 minutes to carve the startup delay
    # out of the outage mask
    res_vector = tmp + numpy.roll(res_vector,1) + numpy.roll(res_vector,2)
    res_vector = np.maximum(numpy.zeros_like(spot_avail),res_vector)
    # the first two minutes can never be covered (rolls wrap around)
    res_vector[0] = 0
    res_vector[1] = 0
    # cost: count one on-demand hour for every started hour of continuous outage
    cost = 0
    j = 0
    for i in range(len(tmp)):
        if tmp[i] == 1 and j%60==0:
            cost = cost + 1
            j = j + 1
        elif tmp[i] == 1:
            j = j + 1
        else:
            j = 0
    result = (res_vector,cost)
    return result
#%%
# scenario "only on demand servers"
def first_scenario(start,end,demand_avg,demand_std_dev,n_of_sim,ec2_od_new):
    """Scenario 1: serve demand with on-demand servers only.

    Runs n_of_sim Monte-Carlo draws of per-minute user demand
    (normal(demand_avg, demand_std_dev), fixed seed) for the interval
    [start, end) and serves them with the ec2_od reserved servers plus
    ec2_od_new extra on-demand servers.

    Relies on module-level globals: ec2_od, users_per_server,
    ec2_price_od_old, ec2_price_od, revenue, availability_level.

    Returns (results, avail_counter): results is a list of
    [total profit, users denied, availability] per simulation;
    avail_counter counts simulations meeting availability_level.
    """
    sim_length_minutes = int((end - start).total_seconds()/60) #how many minutes in simulation
    #simulate user demand
    np.random.seed(1)  # fixed seed: same demand draws on every call (comparable scenarios)
    user_demand = np.random.normal(demand_avg,demand_std_dev,sim_length_minutes*n_of_sim)
    user_demand = np.ceil(user_demand)
    user_demand = np.reshape(user_demand, (n_of_sim, sim_length_minutes))
    servers_capacity = np.full(sim_length_minutes, ec2_od*users_per_server+ec2_od_new*users_per_server) #server "capacity", in users, per minute
    results = []
    #costs
    server_costs_hour = ec2_price_od_old * ec2_od + ec2_od_new * ec2_price_od #price of old and new servers, per hour
    server_costs_min = server_costs_hour/60 #price of all on demand servers, per minute
    avail_counter = 0
    for i in range(0, n_of_sim):
        #ec2_od_new = np.ceil(max(user_demand[i]-servers_capacity)/100) #how many new on demand servers (based on user demand simulation)
        # users turned away = total demand exceeding capacity
        diff_capacity_demand = servers_capacity - user_demand[i]
        users_not_served_total = np.sum(diff_capacity_demand[diff_capacity_demand<0])*(-1)
        #revenue
        users_served = np.array(minimum_from_arrays(servers_capacity,user_demand[i]))
        revenue_min = users_served * revenue # revenue per minute
        #profit
        profit_min = revenue_min - server_costs_min # profit per minute
        profit_total = np.sum(profit_min) #total profit in one simulation
        #print(i, "loop | profit = ",profit_total, "| users denied access = ",users_not_served_total)
        avail = (np.sum(user_demand[i])-users_not_served_total)/np.sum(user_demand[i])
        if avail>=availability_level:
            avail_counter = avail_counter + 1
        res = [profit_total, users_not_served_total,avail]
        results.append(res)
    return (results,avail_counter)
#%%
# simulation of first scenario
#
# sweep over candidate counts of additional on-demand servers and record
# average profit, denials (scaled to thousands) and the availability counter
avg_profit = []
avg_denials = []
avail = []
final_result = ()  # stays empty until some server count satisfies the availability condition
servers_lower_range = 50
servers_higher_range = 200
servers_no_interval = 5
servers_no_range = range(servers_lower_range, servers_higher_range, servers_no_interval)
for j in servers_no_range:
    res = first_scenario(start,end,demand_avg,demand_std_dev,n_of_sim,j)
    avg_res = np.array(res[0]).mean(axis=0)  # mean over simulations: [profit, denials, availability]
    avail.append(res[1])
    avg_profit.append(avg_res[0])
    avg_denials.append(avg_res[1]/1000)
    # print("additional on-demand servers =",j," | avg total profit =",
    #       avg_res[0],"| avg amount of denials-of-access", avg_res[1],
    #       "| availability ", avg_res[2]*100,"% | availability cond. counter",res[1])
    # record the first (i.e. cheapest) server count meeting the condition
    if res[1]/n_of_sim>availability_no_sim and is_empty(final_result):
        final_result = (avg_res,res[1],j)
if is_empty(final_result):
    print("\n\nAvailability condition of",availability_level*100,"% in",availability_no_sim*n_of_sim,"out of",n_of_sim,"simulations wasn't satisfied.")
else:
    print("\nFINAL RESULT: \nAdditional on-demand servers =",final_result[2]," | avg total profit =",
          final_result[0][0],"| avg amount of denials-of-access", final_result[0][1],
          "| availability ", final_result[0][2]*100,"% \nIn ",final_result[1],
          "simulations out of",n_of_sim,"availability condition of",availability_level,"was met.")
#%%
#plot
# plot: avg total profit (x) vs. server count (left y) and denials (right y)
fig, ax1 = plt.subplots()
plt.suptitle('First scenario: only on-demand servers')
max_profit = max(avg_profit)
# index of the smallest availability counter still meeting the condition
avail_index = avail.index(min([i for i in avail if i>availability_no_sim*n_of_sim]))
ax2 = ax1.twinx()
ax1.plot(avg_profit,servers_no_range, 'g-')
ax1.plot(max_profit,servers_no_range[avg_profit.index(max_profit)],'ro') # max_profit point
ax1.annotate((int(max_profit),servers_no_range[avg_profit.index(max_profit)]), \
             xy=(max_profit,servers_no_range[avg_profit.index(max_profit)]),xytext=(max_profit-800,servers_no_range[avg_profit.index(max_profit)]), \
             arrowprops=dict(facecolor='black', shrink=0.05)) # arrow and max value annotation
ax1.plot(avg_profit[avail_index],servers_no_range[avail_index],'go')
ax1.annotate((int(avg_profit[avail_index]),servers_no_range[avail_index]), \
             xy=(avg_profit[avail_index],servers_no_range[avail_index]),xytext=(avg_profit[avail_index]-600,servers_no_range[avail_index]), \
             arrowprops=dict(facecolor='black', shrink=0.05)) # availability-threshold point
ax2.plot(avg_profit,avg_denials, 'b-')
ax2.plot(max_profit,avg_denials[avg_profit.index(max_profit)],'ro') # max_profit point
ax2.annotate((int(max_profit),int(avg_denials[avg_profit.index(max_profit)])), \
             xy=(max_profit,avg_denials[avg_profit.index(max_profit)]),xytext=(max_profit-800, \
             avg_denials[avg_profit.index(max_profit)]),arrowprops=dict(facecolor='black', shrink=0.05)) # arrow and max value annotation
ax1.set_xlabel('Avg total profit')
ax1.set_ylabel('Number of on-demand servers', color='g')  # fixed typo: was 'oo-demand'
ax2.set_ylabel('MM denials', color='b')
#ax1.set_ylim([140,200])
plt.show()
#%%
# "old" on demand servers + spot instances only
def second_scenario(start,end,demand_avg,demand_std_dev,n_of_sim,ec2_spot):
    """Scenario 2: reserved on-demand servers plus ec2_spot spot instances.

    Same Monte-Carlo demand model as first_scenario (fixed seed), but
    capacity also includes ec2_spot spot servers during the minutes the
    ecs simulator reports spot as available; spot cost is subtracted from
    profit separately.

    Relies on module-level globals: ec2_od, ec2_od_new, users_per_server,
    ec2_price_od_old, ec2_price_od, revenue, availability_level, bid,
    spot_file.

    Returns (results, spot availability vector, sim_length_minutes,
    avail_counter).
    """
    sim_length_minutes = int((end - start).total_seconds()/60) #how many minutes in simulation
    #simulate user demand
    np.random.seed(1)  # same seed as the other scenarios -> comparable demand
    user_demand = np.random.normal(demand_avg,demand_std_dev,sim_length_minutes*n_of_sim)
    user_demand = np.ceil(user_demand)
    user_demand = np.reshape(user_demand, (n_of_sim, sim_length_minutes))
    #simulate spot instances
    sim = ecs.Ec2Simulator(spot_file)
    # sim_res: index 0 presumably cost per spot server, index 2 per-minute
    # 0/1 availability vector -- TODO confirm against ec2_spot_pricing_simulator
    sim_res = sim.estimate_cost_d(bid,start,end,single_sim_time_s=3600)
    #server capacity
    servers_capacity = np.full(sim_length_minutes, ec2_od*users_per_server) #server "capacity", in users, per minute
    servers_capacity = servers_capacity + sim_res[2]*ec2_spot*users_per_server
    results = []
    #print(sim_res[0]*ec2_spot)
    #costs
    # NOTE(review): ec2_od_new is the module-level global (0 in this script);
    # spot costs are handled below via sim_res[0]*ec2_spot
    server_costs_hour = ec2_price_od_old * ec2_od + ec2_od_new * ec2_price_od #price of old and new servers, per hour
    server_costs_min = server_costs_hour/60 #price of all on demand servers, per minute
    avail_counter = 0
    for i in range(0, n_of_sim):
        #ec2_od_new = np.ceil(max(user_demand[i]-servers_capacity)/100) #how many new on demand servers (based on user demand simulation)
        diff_capacity_demand = servers_capacity - user_demand[i]
        users_not_served_total = np.sum(diff_capacity_demand[diff_capacity_demand<0])*(-1)
        #revenue
        users_served = np.array(minimum_from_arrays(servers_capacity,user_demand[i]))
        revenue_min = users_served * revenue # revenue per minute
        #profit
        profit_min = revenue_min - server_costs_min # profit per minute
        profit_total = np.sum(profit_min)-sim_res[0]*ec2_spot #total profit in one simulation
        #print(i, "| profit = ",profit_total, "| denied access = ",users_not_served_total)
        avail = (np.sum(user_demand[i])-users_not_served_total)/np.sum(user_demand[i])
        if avail>=availability_level:
            avail_counter = avail_counter + 1
        res = [profit_total, users_not_served_total, avail]
        results.append(res)
    return (results,sim_res[2],sim_length_minutes,avail_counter)
#%%
# simulation of second scenario
# sweep the number of spot servers for scenario 2 and summarize
avg_profit = []
avg_denials = []
avail = []
final_result = ()  # first spot count meeting the availability condition
servers_lower_range = 50
servers_higher_range = 200
servers_no_interval = 5
servers_no_range = range(servers_lower_range, servers_higher_range, servers_no_interval)
for j in servers_no_range:
    res = second_scenario(start,end,demand_avg,demand_std_dev,n_of_sim,j)
    avg_res = np.array(res[0]).mean(axis=0)  # [profit, denials, availability] averaged
    avail.append(res[3])
    avg_profit.append(avg_res[0])
    avg_denials.append(avg_res[1]/1000)
    print("additional spot servers =",j," | avg tot. profit =",
          avg_res[0],"| avg amount of denials", avg_res[1],"| availability ",
          avg_res[2]*100,"% | availability cond. counter",res[3])
    if res[3]/n_of_sim>availability_no_sim and is_empty(final_result):
        final_result = (avg_res,res[3],j)
if is_empty(final_result):
    print("\n\nAvailability condition of",availability_level*100,"% in",availability_no_sim*n_of_sim,"out of",n_of_sim,"simulations wasn't satisfied.")
else:
    print("\nFINAL RESULTS: \nadditional spot servers =",final_result[2]," | avg total profit =",
          final_result[0][0],"| avg amount of denials-of-access", final_result[0][1],
          "| availability ", final_result[0][2]*100,"% \nIn ",final_result[1],
          "simulations out of",n_of_sim,"availability condition of",availability_level,"was met.")
# res holds the LAST sweep step; res[1] is its per-minute spot availability vector
spot_min = np.sum(res[1])
sim_min = res[2]
print("---")
print("Spot servers were working for", spot_min, "minutes (",float(spot_min)/sim_min*100,"% of simulation time)")
print("For", sim_min-spot_min, "minutes only 300 on-demand servers were working (",(sim_min-spot_min)/sim_min*100,"% of simulation time)")
max_profit = max(avg_profit)
avail_index = avail.index(min([i for i in avail if i>availability_no_sim*n_of_sim]))
#%%
#plot
# plot: avg total profit (x) vs. spot server count (left y) and denials (right y)
fig, ax1 = plt.subplots()
plt.suptitle('Second scenario: 300 reserved on-demand servers and spot instances only')
ax2 = ax1.twinx()
ax1.plot(avg_profit,servers_no_range, 'g-')
ax1.plot(max_profit,servers_no_range[avg_profit.index(max_profit)],'ro')
ax1.annotate((int(max_profit),servers_no_range[avg_profit.index(max_profit)]), \
             xy=(max_profit,servers_no_range[avg_profit.index(max_profit)]),xytext=(max_profit-300, \
             servers_no_range[avg_profit.index(max_profit)]),arrowprops=dict(facecolor='black', shrink=0.05))
ax1.plot(avg_profit[avail_index],servers_no_range[avail_index],'go')
ax1.annotate((int(avg_profit[avail_index]),servers_no_range[avail_index]), \
             xy=(avg_profit[avail_index],servers_no_range[avail_index]),xytext=(avg_profit[avail_index]-600,servers_no_range[avail_index]), \
             arrowprops=dict(facecolor='black', shrink=0.05)) # availability-threshold point
ax2.plot(avg_profit,avg_denials, 'b-')
ax2.plot(max_profit,avg_denials[avg_profit.index(max_profit)],'ro')
ax2.annotate((int(max_profit),int(avg_denials[avg_profit.index(max_profit)])), \
             xy=(max_profit,avg_denials[avg_profit.index(max_profit)]), \
             xytext=(max_profit-300,avg_denials[avg_profit.index(max_profit)]),arrowprops=dict(facecolor='black', shrink=0.05))
ax1.set_xlabel('Avg total profit')
ax1.set_ylabel('Number of on-demand servers', color='g')  # fixed typo: was 'oo-demand'
ax2.set_ylabel('MM denials', color='b')
plt.show()
#%%
# "old" on demand servers + spot instances + on-demand servers when spot unavaible
# on-demand needs 2 min for startup
def third_scenario(start,end,demand_avg,demand_std_dev,n_of_sim,ec2_spot):
    """Scenario 3: reserved on-demand + spot instances + fallback on-demand.

    Like second_scenario, but when spot capacity disappears, an equally
    sized fleet of on-demand servers (ec2_spot of them) covers the outage
    after a 2-minute startup delay (see add_on_demand_instances).

    Relies on module-level globals: ec2_od, users_per_server,
    ec2_price_od_old, ec2_price_od, revenue, availability_level, bid,
    spot_file.

    Returns (results, spot availability vector, fallback coverage vector,
    sim_length_minutes, avail_counter).
    """
    sim_length_minutes = int((end - start).total_seconds()/60) #how many minutes in simulation
    #simulate user demand
    np.random.seed(1)  # same seed as the other scenarios -> comparable demand
    user_demand = np.random.normal(demand_avg,demand_std_dev,sim_length_minutes*n_of_sim)
    user_demand = np.ceil(user_demand)
    user_demand = np.reshape(user_demand, (n_of_sim, sim_length_minutes))
    #simulate spot instances
    sim = ecs.Ec2Simulator(spot_file)
    sim_res = sim.estimate_cost_d(bid,start,end,single_sim_time_s=3600)
    #simulate server capacity
    servers_capacity = np.full(sim_length_minutes, ec2_od*users_per_server) #server "capacity", in users, per minute
    servers_capacity = servers_capacity + sim_res[2]*ec2_spot*users_per_server #on-demand plus spot servers
    new_on_demand_instances = add_on_demand_instances(sim_res[2])
    #print(sim_res[2])
    new_on_demand_servers = new_on_demand_instances[0]
    #print(new_on_demand_servers)
    # fallback fleet has the same size as the spot fleet (ec2_spot servers)
    servers_capacity = servers_capacity + new_on_demand_servers*ec2_spot*users_per_server #on-demand plus spot plus new on-demand
    #costs
    od_server_costs = ec2_price_od_old * ec2_od * sim_length_minutes / 60
    od_new_servers_costs = new_on_demand_instances[1] * ec2_spot * ec2_price_od
    spot_costs = sim_res[0] * ec2_spot
    total_cost = od_server_costs + od_new_servers_costs + spot_costs
    results = []
    avail_counter = 0
    for i in range(0, n_of_sim):
        #ec2_od_new = np.ceil(max(user_demand[i]-servers_capacity)/100) #how many new on demand servers (based on user demand simulation)
        diff_capacity_demand = servers_capacity - user_demand[i]
        users_not_served_total = np.sum(diff_capacity_demand[diff_capacity_demand<0])*(-1)
        #revenue
        users_served = np.array(minimum_from_arrays(servers_capacity,user_demand[i]))
        revenue_min = users_served * revenue # revenue per minute
        #profit
        profit_total = np.sum(revenue_min)-total_cost #total profit in one simulation
        #print(i, "| profit = ",profit_total, "| denied access = ",users_not_served_total)
        avail = (np.sum(user_demand[i])-users_not_served_total)/np.sum(user_demand[i])
        if avail>=availability_level:
            avail_counter = avail_counter + 1
        res = [profit_total, users_not_served_total, avail]
        results.append(res)
    return (results,sim_res[2],new_on_demand_servers,sim_length_minutes,avail_counter)
#%%
# sweep the number of spot servers (with on-demand fallback) for scenario 3
avg_profit = []
avg_denials = []
avail = []
final_result = ()  # first server count meeting the availability condition
servers_lower_range = 50
servers_higher_range = 200
servers_no_interval = 5
servers_no_range = range(servers_lower_range, servers_higher_range, servers_no_interval)
for j in servers_no_range:
    res = third_scenario(start,end,demand_avg,demand_std_dev,n_of_sim,j)
    avail.append(res[4])
    avg_res = np.array(res[0]).mean(axis=0)  # [profit, denials, availability] averaged
    avg_profit.append(avg_res[0])
    avg_denials.append(avg_res[1]/1000)
    print("additional spot/on-demand servers =",j," | avg tot. profit =",
          avg_res[0],"| avg amount of denials", avg_res[1],"| availability ",
          avg_res[2]*100,"% | availability cond. counter",res[4])
    if res[4]/n_of_sim>availability_no_sim and is_empty(final_result):
        final_result = (avg_res,res[4],j)
if is_empty(final_result):
    print("\n\nAvailability condition of",availability_level*100,"% in",availability_no_sim*n_of_sim,"out of",n_of_sim,"simulations wasn't satisfied.")
else:
    print("\nFINAL RESULTS: \nAdditional spot servers =",final_result[2]," | avg total profit =",
          final_result[0][0],"| avg amount of denials-of-access", final_result[0][1],
          "| availability ", final_result[0][2]*100,"% \nIn ",final_result[1],
          "simulations out of",n_of_sim,"availability condition of",availability_level,"was met.")
# res holds the LAST sweep step: res[1] spot minutes, res[2] fallback minutes
spot_min = np.sum(res[1])
nod_min = np.sum(res[2])
sim_min = res[3]
print("---")
print("Spot servers were working for", spot_min, "minutes (",float(spot_min)/sim_min*100,"% of simulation time)")
print("Additional on demand servers were working for", nod_min, "minutes (",nod_min/sim_min*100,"% of simulation time)")
print("For", sim_min-nod_min-spot_min, "minutes only 300 on-demand servers were working (",(sim_min-nod_min-spot_min)/sim_min*100,"% of simulation time)")
max_profit = max(avg_profit)
avail_index = avail.index(min([i for i in avail if i>availability_no_sim*n_of_sim]))
#%%
#plot
# plot: avg total profit (x) vs. server count (left y) and denials (right y)
fig, ax1 = plt.subplots()
plt.suptitle("\n".join(wrap('Third scenario: 300 reserved on-demand servers, spot instances only and on-demand servers when spot unavaible', 60)))
max_profit = max(avg_profit)
ax2 = ax1.twinx()
ax1.plot(avg_profit,servers_no_range, 'g-')
ax1.plot(max_profit,servers_no_range[avg_profit.index(max_profit)],'ro')
ax1.annotate((int(max_profit),servers_no_range[avg_profit.index(max_profit)]),xy=(max_profit,servers_no_range[avg_profit.index(max_profit)]),xytext=(max_profit-700,servers_no_range[avg_profit.index(max_profit)]),arrowprops=dict(facecolor='black', shrink=0.05))
ax1.plot(avg_profit[avail_index],servers_no_range[avail_index],'go')
ax1.annotate((int(avg_profit[avail_index]),servers_no_range[avail_index]), \
             xy=(avg_profit[avail_index],servers_no_range[avail_index]),xytext=(avg_profit[avail_index]-650,servers_no_range[avail_index]), \
             arrowprops=dict(facecolor='black', shrink=0.05)) # availability-threshold point
ax2.plot(avg_profit,avg_denials, 'b-')
ax2.plot(max_profit,avg_denials[avg_profit.index(max_profit)],'ro')
ax2.annotate((int(max_profit),int(avg_denials[avg_profit.index(max_profit)])),xy=(max_profit,avg_denials[avg_profit.index(max_profit)]),xytext=(max_profit-700,avg_denials[avg_profit.index(max_profit)]),arrowprops=dict(facecolor='black', shrink=0.05))
ax1.set_xlabel('Avg total profit')
ax1.set_ylabel('Number of on-demand servers', color='g')  # fixed typo: was 'oo-demand'
ax2.set_ylabel('MM denials', color='b')
plt.show()
#%%
|
###############################################################################
# YOU DO NOT NEED TO MODIFY THIS FILE #
###############################################################################
import argparse
import logging
import math
import os
import random
import textwrap
from collections import namedtuple
from multiprocessing.pool import ThreadPool as Pool
from isolation import Isolation, Agent, play
from sample_players import RandomPlayer, GreedyPlayer, MinimaxPlayer
from my_custom_player import CustomPlayer
|
from pprint import pprint
import os
import telepot
from telepot.loop import MessageLoop
class CalendarBot:
    """Telegram bot that dispatches '/command' messages to same-named methods.

    The bot token is read from a file named 'token' in the current working
    directory. Incoming messages of the form '/xyz' are routed to
    self.xyz(chat_id); unknown commands raise NotImplementedError.
    """

    def __init__(self):
        # Use a context manager so the token file handle is closed (the
        # original leaked it), and strip surrounding whitespace -- most
        # editors append a trailing newline, which would corrupt the token.
        with open(os.path.join(os.getcwd(), 'token'), 'r') as token_file:
            self.token = token_file.read().strip()
        self.bot = telepot.Bot(self.token)

    def run(self):
        """Start polling for Telegram messages in a background thread."""
        MessageLoop(self.bot, self.handle).run_as_thread()

    def handle(self, message):
        """Route an incoming message '/xyz' to the method self.xyz(chat_id)."""
        methodName = message['text'].replace('/', '')
        chatId = message['chat']['id']
        try:
            getattr(self, methodName)(chatId)
        except AttributeError:
            # NOTE(review): this also masks AttributeErrors raised INSIDE a
            # command method as NotImplementedError -- pre-existing behavior
            raise NotImplementedError(
                f"Class {self.__class__.__name__} does not implement {methodName}")

    def fromtoevents(self, id):
        self.bot.sendMessage(id, f"fromtoevents{id}")

    def todayevents(self, id):
        self.bot.sendMessage(id, 'todayevents')

    def lastnevents(self, id):
        self.bot.sendMessage(id, 'lastnevents')

    def newevent(self, id):
        self.bot.sendMessage(id, 'newevent')

    def icsofevent(self, id):
        self.bot.sendMessage(id, 'icsofevent')

    def csvevents(self, id):
        self.bot.sendMessage(id, 'csvevents')
|
import os, subprocess, shutil
# Locate the exported qgis2web folder(s).  Folder names embed a timestamp
# ("./qgis2web_YYYY_MM_DD-hh_mm_ss"); when two exports exist, keep the
# newer one and delete the older from the web directory.
WEBFILES_DIR = "/home/user/Desktop/3308/Project/CSCI3308Project-Team-Vision/WebFiles/"

# Run `find` ONCE (the original shelled out 13+ times) and decode so the
# slices below are str, not bytes, under Python 3.
find_output = subprocess.check_output('find -name "qgis2web*"', shell=True).decode()
entries = [entry for entry in find_output.splitlines() if entry.strip()]

def folder_timestamp(entry):
    """Extract (year, month, day, hour, minute, second) from a find entry."""
    # fixed-position slices matching the original offsets within one line
    return (int(entry[11:15]), int(entry[16:18]), int(entry[19:21]),
            int(entry[22:24]), int(entry[25:27]), int(entry[28:30]))

if len(entries) > 1:
    first, second = entries[0][2:30], entries[1][2:30]
    # Python 3 has no cmp(); compare the timestamp tuples directly.  Also
    # delete the obsolete folder in BOTH branches (the original removed it
    # only when the second entry was newer) and handle the tie case, which
    # previously left `foldername` undefined.
    if folder_timestamp(entries[0]) >= folder_timestamp(entries[1]):
        foldername, obsolete = first, second
    else:
        foldername, obsolete = second, first
    shutil.rmtree(WEBFILES_DIR + obsolete)
else:
    foldername = entries[0][2:30]

# Rewrite index.html into next.html: repoint <script src=...> into the kept
# export folder and inject the hashtag form just before </body>.
os.chdir(WEBFILES_DIR + foldername)
f = open("index.html", 'r')
os.chdir(WEBFILES_DIR)
nf = open("next.html", 'w')  # 'rw+' is not a valid mode; we only write here
for line in f:
    if("</body>" in line):
        nf.write(' <form action = "web2.html">\n')
        nf.write(' #<input type = "text" name = "hashtag">\n')
        nf.write(" </form>\n")
        nf.write(" </body>\n")
        nf.write("</html>")
        break
    elif('<script src=".' in line):
        nf.write('<script src="./')
        nf.write(foldername)
        nf.write("/")
        nf.write(line[line.index('=')+4:])
        nf.write("\n")
    elif('<script src="' in line):
        nf.write('<script src="')
        nf.write(foldername)
        nf.write("/")
        nf.write(line[line.index('=')+2:])
    else:
        nf.write(line)
f.close()
nf.close()
|
import sys
from levenshtein import levenshtein as levenshtein
from collections import deque
from pm4py.objects.log import log as event_log
from scipy.optimize import linear_sum_assignment
import random
import numpy as np
class TraceMatcher:
def __init__(self,tv_query_log,log):
print("trace_matcher kreiiert")
self.__timestamp = "time:timestamp"
self.__allTimestamps = list()
self.__allTimeStampDifferences = list()
self.__distanceMatrix = dict()
self.__trace_variants_query = self.__addTraceToAttribute(tv_query_log)
self.__trace_variants_log = self.__addTraceToAttribute(log)
attributeBlacklist = self.__getBlacklistOfAttributes()
self.__distributionOfAttributes,self.__eventStructure = self.__getDistributionOfAttributesAndEventStructure(log, attributeBlacklist)
self.__query_log = tv_query_log
self.__log = log
def __addTraceToAttribute(self, log):
trace_variants = dict()
for trace in log:
variant = ""
for event in trace:
variant = variant + "@" + event["concept:name"]
trace.attributes["variant"] = variant
traceSet = trace_variants.get(variant,set())
traceSet.add(trace)
trace_variants[variant] = traceSet
return trace_variants
def __getBlacklistOfAttributes(self):
blacklist = set()
blacklist.add("concept:name")
blacklist.add(self.__timestamp)
blacklist.add("variant")
blacklist.add("EventID")
blacklist.add("OfferID")
blacklist.add("matricola")
return blacklist
def __handleVariantsWithSameCount(self,variants,traceMatching):
for variant in variants:
for trace in self.__trace_variants_query[variant]:
traceMatching[trace.attributes["concept:name"]] = self.__trace_variants_log[variant].pop()
del self.__trace_variants_log[variant]
del self.__trace_variants_query[variant]
def __handleVariantsUnderrepresentedInQuery(self,variants,traceMatching):
for variant in variants:
if variant in self.__trace_variants_query:
for trace in self.__trace_variants_query.get(variant,list()):
traceMatching[trace.attributes["concept:name"]] = self.__trace_variants_log[variant].pop()
del self.__trace_variants_query[variant]
def __handleVariantsOverrepresentedInQuery(self,variants,traceMatching):
for variant in variants:
for trace in self.__trace_variants_log[variant]:
traceFromQuery = self.__trace_variants_query[variant].pop()
traceMatching[traceFromQuery.attributes["concept:name"]] = trace
del self.__trace_variants_log[variant]
def __getDistanceVariants(self,variant1,variant2):
if variant1 not in self.__distanceMatrix:
self.__distanceMatrix[variant1] = dict()
if variant2 not in self.__distanceMatrix[variant1]:
distance = levenshtein(variant1, variant2)
self.__distanceMatrix[variant1][variant2] = distance
else:
distance = self.__distanceMatrix[variant1][variant2]
return distance
def __findCLosestVariantInLog(self,variant,log):
closestVariant = None
closestDistance = sys.maxsize
for comparisonVariant in log.keys():
distance = self.__getDistanceVariants(variant,comparisonVariant)
if distance < closestDistance:
closestVariant = comparisonVariant
closestDistance = distance
return closestVariant
def __findOptimalMatches(self):
rows = list()
for traceQuery in self.__query_log:
row = list()
for traceLog in self.__log:
row.append(self.__getDistanceVariants(traceQuery.attributes["variant"],traceLog.attributes["variant"]))
rows.append(row)
distanceMatrix = np.array(rows)
row_ind, col_ind = linear_sum_assignment(distanceMatrix)
traceMatching = dict()
for (traceQueryPos, traceLogPos) in zip(row_ind, col_ind):
traceMatching[self.__query_log[traceQueryPos].attributes["concept:name"]] = self.__log[traceLogPos]
return traceMatching
def __matchTraces(self,traceMatching):
for variant in self.__trace_variants_query.keys():
closestVariant = self.__findCLosestVariantInLog(variant,self.__trace_variants_log)
for trace in self.__trace_variants_query[variant]:
traceMatching[trace.attributes["concept:name"]] = self.__trace_variants_log[closestVariant].pop()
if not self.__trace_variants_log[closestVariant]:
del self.__trace_variants_log[closestVariant]
if self.__trace_variants_log:
closestVariant = self.__findCLosestVariantInLog(variant, self.__trace_variants_log)
else:
return
def __getTraceMatching(self):
traceMatching = dict()
variantsWithSameCount = set()
variantsUnderepresentedInQuery = set()
variantsOverepresentedInQuery = set()
for variant in self.__trace_variants_log.keys():
if len(self.__trace_variants_log[variant]) == len(self.__trace_variants_query.get(variant,set())):
variantsWithSameCount.add(variant)
elif len(self.__trace_variants_log[variant]) > len(self.__trace_variants_query.get(variant,set())) and len(self.__trace_variants_query.get(variant,set())) != set():
variantsUnderepresentedInQuery.add(variant)
elif len(self.__trace_variants_log[variant]) < len(self.__trace_variants_query.get(variant,0)):
variantsOverepresentedInQuery.add(variant)
self.__handleVariantsWithSameCount(variantsWithSameCount,traceMatching)
self.__handleVariantsUnderrepresentedInQuery(variantsUnderepresentedInQuery,traceMatching)
self.__handleVariantsOverrepresentedInQuery(variantsOverepresentedInQuery,traceMatching)
self.__matchTraces(traceMatching)
return traceMatching
def __resolveTrace(self,traceInQuery,correspondingTrace,distributionOfAttributes):
eventStacks = self.__transformTraceInEventStack(correspondingTrace)
previousEvent = None
for eventNr in range(0,len(traceInQuery)):
currentEvent = traceInQuery[eventNr]
activity = currentEvent["concept:name"]
latestTimeStamp = self.__getLastTimestampTraceResolving(traceInQuery,eventNr)
if activity in eventStacks:
currentEvent = self.__getEventAndUpdateFromEventStacks(activity,eventStacks)
if currentEvent[self.__timestamp] < latestTimeStamp:
currentEvent[self.__timestamp] = self.__getNewTimeStamp(previousEvent,currentEvent, eventNr,distributionOfAttributes)
else:
currentEvent = self.__createRandomNewEvent(currentEvent,activity,distributionOfAttributes,previousEvent,eventNr)
traceInQuery[eventNr] = currentEvent
previousEvent = currentEvent
self.__debugCheckTimeStamp(traceInQuery, eventNr)
return traceInQuery
def __getEventAndUpdateFromEventStacks(self,activity,eventStacks):
event = eventStacks[activity].popleft()
if not eventStacks[activity]:
del eventStacks[activity]
return event
def __debugTraceTimestamps(self,trace):
for eventNr in range(0):
self.__debugCheckTimeStamp(trace,eventNr)
def __debugCheckTimeStamp(self,trace,eventNr):
if eventNr > 0:
if trace[eventNr -1][self.__timestamp] > trace[eventNr][self.__timestamp]:
print("Fuck")
def __getLastTimestampTraceResolving(self,trace,eventNr):
if eventNr == 0:
latestTimeStamp = trace[eventNr][self.__timestamp]
else:
latestTimeStamp = trace[eventNr - 1][self.__timestamp]
return latestTimeStamp
def __transformTraceInEventStack(self,trace):
eventStacks = dict()
for event in trace:
stack = eventStacks.get(event["concept:name"],deque())
stack.append(event)
eventStacks[event["concept:name"]] = stack
return eventStacks
def __createRandomNewEvent(self,event,activity,distributionOfAttributes,previousEvent,eventNr):
for attribute in self.__eventStructure[activity]:
if attribute in distributionOfAttributes and attribute not in event and attribute != self.__timestamp:
event[attribute] = random.choice(distributionOfAttributes[attribute])
elif attribute == self.__timestamp:
event[self.__timestamp] = self.__getNewTimeStamp(previousEvent,event, eventNr,distributionOfAttributes)
return event
def __getNewTimeStamp(self,previousEvent,currentEvent,eventNr,distributionOfAttributes):
if eventNr == 0:
timestamp = random.choice(self.__allTimestamps)
else:
timestamp = previousEvent[self.__timestamp] + random.choice(distributionOfAttributes[self.__timestamp][previousEvent["concept:name"]].get(currentEvent["concept:name"], self.__allTimeStampDifferences))
return timestamp
def __resolveTraceMatching(self,traceMatching,distributionOfAttributes,fillUp):
log = event_log.EventLog()
for trace in self.__query_log:
traceID = trace.attributes["concept:name"]
if fillUp or traceID in traceMatching:
matchedTrace = self.__resolveTrace(trace,traceMatching.get(traceID,list()),distributionOfAttributes)
self.__debugTraceTimestamps(matchedTrace)
log.append(matchedTrace)
return log
def __handleAttributesOfDict(self, dictOfAttributes, distributionOfAttributes, attributeBlacklist,previousEvent=None):
for attribute in dictOfAttributes.keys():
if attribute not in attributeBlacklist:
distribution = distributionOfAttributes.get(attribute, list())
distribution.append(dictOfAttributes[attribute])
distributionOfAttributes[attribute] = distribution
elif attribute == self.__timestamp and previousEvent is not None:
self.__handleTimeStamp(distributionOfAttributes,previousEvent,dictOfAttributes)
def __handleTimeStamp(self, distributionOfAttributes, previousEvent, currentEvent):
timeStampsDicts = distributionOfAttributes.get(self.__timestamp, dict())
activityDict = timeStampsDicts.get(previousEvent["concept:name"],dict())
timeStampsDicts[previousEvent["concept:name"]] = activityDict
distribution = activityDict.get(currentEvent["concept:name"], list())
timeStampDifference = currentEvent[self.__timestamp] - previousEvent[self.__timestamp]
distribution.append(timeStampDifference)
activityDict[currentEvent["concept:name"]] = distribution
distributionOfAttributes[self.__timestamp] = timeStampsDicts
self.__allTimestamps.append(currentEvent[self.__timestamp])
self.__allTimeStampDifferences.append(timeStampDifference)
def __getDistributionOfAttributesAndEventStructure(self, log, attributeBlacklist):
    """Scan a log and collect per-attribute value distributions plus the set
    of attribute names each activity carries.

    Returns
    -------
    (distributionOfAttributes, eventStructure) :
        distributionOfAttributes maps attribute -> list of observed values
        (timestamps as a nested delta structure, see __handleTimeStamp);
        eventStructure maps activity name -> set of its attribute names
        (excluding "concept:name").
    """
    distributionOfAttributes = dict()
    eventStructure = dict()
    for trace in log:
        self.__handleAttributesOfDict(trace.attributes,distributionOfAttributes,attributeBlacklist)
        previousEvent = None
        currentEvent = None
        for eventNr in range(0,len(trace)):
            if currentEvent is not None:
                previousEvent = currentEvent
            currentEvent = trace[eventNr]
            self.__handleAttributesOfDict(currentEvent,distributionOfAttributes,attributeBlacklist,previousEvent)
            if not currentEvent["concept:name"] in eventStructure:
                # first occurrence of this activity: remember its attribute names
                attributesOfEvent = set(currentEvent.keys())
                attributesOfEvent.remove("concept:name")
                eventStructure[currentEvent["concept:name"]] = attributesOfEvent
    return distributionOfAttributes, eventStructure
def matchQueryToLog(self,fillUp=True,greedy=False):
    """Match the query log against the source log and return the matched log.

    Parameters
    ----------
    fillUp : bool
        Also include unmatched query traces, filling their events from the
        collected attribute distributions.
    greedy : bool
        Use the greedy trace matching instead of the optimal matching.
    """
    # fix: removed leftover debug prints ("tm1".."tm4") that polluted stdout
    if greedy:
        traceMatching = self.__getTraceMatching()
    else:
        traceMatching = self.__findOptimalMatches()
    return self.__resolveTraceMatching(traceMatching,self.__distributionOfAttributes,fillUp)
def getAttributeDistribution(self):
    """Return the collected attribute -> value-distribution mapping."""
    return self.__distributionOfAttributes
def getTimeStampData(self):
    """Return (all observed timestamps, all observed timestamp differences)."""
    return self.__allTimestamps,self.__allTimeStampDifferences
""" python plotting json data"""
""" September 29, 2017, bx wrote it """
import matplotlib.pyplot as plt
import math
import json
import numpy as np
import random
import os
# s is the data dict, accumulating data from all blocks (one entry per block).
#subjects = ['ar','bx','cz','hn','sk','ju','ww']
#for nSubject in range(len(subjects)):
#subject = subjects[nSubject]
subject = 'bx'
script_dir = os.path.dirname(__file__)
results_dir = os.path.join(script_dir, 'outputplot/')  # NOTE(review): unused in this chunk -- confirm
s = {}
print script_dir
# read the data from json file, one file per experimental block.
for i in range(2):
    filename = subject+"_block_"+str(i+1)+'_results.json'
    with open(filename) as json_file:
        s[i] = json.load(json_file)
# concatenate the per-trial condition lists across the two blocks
trialnumber = s[0]['trialnumber']+s[1]['trialnumber']
responses = s[0]['choices']+s[1]['choices']
leftdensity = s[0]['targetvalue']+s[1]['targetvalue']
rightdensity = s[0]['matchvalue']+s[1]['matchvalue']
blur = s[0]['targetblurvalues']+s[1]['targetblurvalues']
height = s[0]['targetheightsvalues']+s[1]['targetheightsvalues']
lighting = s[0]['targetilluminations']+s[1]['targetilluminations']
totalconditions = len(leftdensity)
# accumulators: correct-response counts overall and per condition
correct = 0
lighting_response = {}
lighting_response['backlit'] = 0
lighting_response['sidelit'] = 0
height_response = {}
height_response['0.01'] = 0
height_response['0.02'] = 0
height_response['0.03'] = 0
height_response['0.04'] = 0
height_response['0.05'] = 0
blur_response = {}
response_list = []   # per-trial correctness flags (1 = correct)
# compute overall percent correctness.
for i in range(totalconditions):
if ((int(leftdensity[i]) < int(rightdensity[i])) and (responses[i+1] =="left")):
correct = correct+1
response_list.append(1)
elif (int((leftdensity[i]) > int(rightdensity[i])) and (responses[i+1] == "right")):
correct = correct+1
response_list.append(1)
if lighting[i] == 'backlit':
lighting_response['backlit'] = lighting_response['backlit']+1
else:
lighting_response['sidelit'] = lighting_response['sidelit']+1
# height
if height[i] == '0.01':
height_response['0.01']+=1
elif height[i] == '0.02':
height_response['0.02']+=1
elif height[i] == '0.03':
height_response['0.03']+=1
elif height[i] == '0.04':
height_response['0.04']+=1
else:
height_response['0.05']+=1
else:
print leftdensity[i],rightdensity[i],responses[i+1]
print 'here'
percent_correct = correct/float(140)
print "number of correct responses", percent_correct
# pair each trial's correctness flag with its blur level
blur_list = zip(response_list, blur)
# initialize one bin per blur level that actually occurred
for key, value in blur_list:
    blur_response[value] = 0
# count correct responses per blur level
for key, value in blur_list:
    if key == 1:
        blur_response[value] += 1

plt.figure()
x = np.arange(len(blur_response))
plt.bar(x, blur_response.values(), align='center', width=0.5)
plt.xticks(x, blur_response.keys())
ymax = max(blur_response.values()) + 1
plt.title('Histogram of correct responses over blur levels')
plt.ylim(0, ymax+1)
plt.show()

x = np.arange(len(height_response))
plt.bar(x, height_response.values(), align='center', width=0.5)
# fix: the labels and y-limit below previously reused blur_response
plt.xticks(x, height_response.keys())
ymax = max(height_response.values()) + 1
plt.title('Histogram of correct responses over heights')
plt.ylim(0, ymax+1)
plt.show()

x = np.arange(len(lighting_response))
plt.bar(x, lighting_response.values(), align='center', width=0.5)
plt.xticks(x, lighting_response.keys())
ymax = max(lighting_response.values()) + 1
plt.title('Histogram of correct responses over lighting')
plt.ylim(0, ymax+1)
plt.show()
|
import RPi.GPIO as GPIO
import time
class GPIOInteractor:
    """Watch a push button on BCM pin 18 and dispatch press/hold callbacks.

    The pin is pulled up, so a low level means the button is physically
    down. A release more than `time_to_hold` seconds after the press fires
    `button_held_callback`; a shorter press fires `button_callback`.
    """

    def button_state_changed(self, channel):
        """Edge-detect callback from RPi.GPIO (runs on GPIO's own thread)."""
        # fix: removed debug print of the raw pin state
        if not GPIO.input(18):
            self.button_pressed()
        else:
            self.button_released()

    def button_pressed(self):
        """Record the time the button went down."""
        self.pressed_at = time.time()

    def button_released(self):
        """Fire the short-press or hold callback, if one is registered."""
        if self.pressed_at is None:
            # fix: a release without a recorded press (e.g. a spurious edge
            # at startup) previously crashed with a TypeError
            return
        if time.time() - self.pressed_at > self.time_to_hold:
            # fix: guard against callbacks that were never registered
            if self.button_held_callback is not None:
                self.button_held_callback()
        elif self.button_callback is not None:
            self.button_callback()

    def __init__(self):
        self.button_callback = None       # short-press handler
        self.button_held_callback = None  # long-press handler
        self.reset_callback = None        # NOTE(review): never fired; time_to_reset unused -- confirm intent
        self.pressed_at = None
        self.time_to_hold = 1             # seconds separating press from hold
        self.time_to_reset = 5
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.add_event_detect(18, GPIO.BOTH, callback=self.button_state_changed, bouncetime=200)

    def set_button_callback(self, funct):
        self.button_callback = funct

    def set_button_held_callback(self, funct):
        self.button_held_callback = funct

    def set_reset_callback(self, funct):
        self.reset_callback = funct

    def cleanup(self):
        """Release the GPIO resources claimed in __init__."""
        GPIO.cleanup()
import unittest
from katas.kyu_8.noob_code_1_supersize_me import super_size
class SuperSizeTestCase(unittest.TestCase):
    """Tests for super_size: rearrange a number's digits into the largest
    possible value (digits in descending order); 0 and single digits are
    returned unchanged."""
    def test_equals(self):
        self.assertEqual(super_size(69), 96)
    def test_equals_2(self):
        self.assertEqual(super_size(513), 531)
    def test_equals_3(self):
        self.assertEqual(super_size(2017), 7210)
    def test_equals_4(self):
        self.assertEqual(super_size(414), 441)
    def test_equals_5(self):
        self.assertEqual(super_size(608719), 987610)
    def test_equals_6(self):
        self.assertEqual(super_size(123456789), 987654321)
    def test_equals_7(self):
        # large input exercising repeated zeros
        self.assertEqual(super_size(700000000001), 710000000000)
    def test_equals_8(self):
        # already maximal: value is unchanged
        self.assertEqual(super_size(666666), 666666)
    def test_equals_9(self):
        self.assertEqual(super_size(2), 2)
    def test_equals_10(self):
        self.assertEqual(super_size(0), 0)
|
import sbol3
import labop
#############################################
# Set up the document
doc = sbol3.Document()
LIBRARY_NAME = "plate_handling"
# every primitive in this library lives under the shared labop namespace
sbol3.set_namespace("https://bioprotocols.org/labop/primitives/" + LIBRARY_NAME)
#############################################
# Create the primitives
print("Making primitives for " + LIBRARY_NAME)
# Note: plate handling primitives operate on whole arrays only, not just fragments

# --- covering / sealing ---
p = labop.Primitive("Cover")
p.description = "Cover a set of samples to keep materials from entering or exiting"
p.add_input("location", "http://bioprotocols.org/labop#SampleArray")
p.add_input("type", "http://www.w3.org/2001/XMLSchema#anyURI")
doc.add(p)
p = labop.Primitive("Seal")
p.description = "Seal a collection of samples fixing the seal using a user-selected method, in order to guarantee isolation from the external environment"
p.add_input("location", "http://bioprotocols.org/labop#SampleArray")
p.add_input(
    "specification", "http://bioprotocols.org/labop#ContainerSpec"
)  # e.g., breathable vs. non-breathable
doc.add(p)
p = labop.Primitive("EvaporativeSeal")
p.description = "Seal a collection of samples using a user-selected method in order to prevent evaporation"
p.add_input("location", "http://bioprotocols.org/labop#SampleArray")
p.add_input(
    "specification", "http://bioprotocols.org/labop#ContainerSpec"
)  # e.g., breathable vs. non-breathable
doc.add(p)
# NOTE(review): the two variants below take a "type" anyURI input where
# Seal/EvaporativeSeal take a "specification" ContainerSpec -- confirm this
# asymmetry is intentional.
p = labop.Primitive("AdhesiveSeal")
p.description = "Seal a collection of samples using adhesive to fix the seal, in order to guarantee isolation from the external environment"
p.add_input("location", "http://bioprotocols.org/labop#SampleArray")
p.add_input(
    "type", "http://www.w3.org/2001/XMLSchema#anyURI"
)  # e.g., breathable vs. non-breathable
doc.add(p)
p = labop.Primitive("ThermalSeal")
p.description = "Seal a collection of samples using heat to fix the seal, in order to guarantee isolation from the external environment"
p.add_input("location", "http://bioprotocols.org/labop#SampleArray")
p.add_input(
    "type", "http://www.w3.org/2001/XMLSchema#anyURI"
)  # e.g., breathable vs. non-breathable
p.add_input("temperature", sbol3.OM_MEASURE)
p.add_input(
    "duration", sbol3.OM_MEASURE
)  # length of time to apply the sealing temperature in order to get the seal in place
doc.add(p)
# --- uncovering / unsealing ---
p = labop.Primitive("Uncover")
p.description = "Uncover a collection of samples to allow materials to enter or exit"
p.add_input("location", "http://bioprotocols.org/labop#SampleArray")
doc.add(p)
p = labop.Primitive("Unseal")
p.description = "Unseal a sealed collection of samples to break their isolation from the external environment"
p.add_input("location", "http://bioprotocols.org/labop#SampleArray")
doc.add(p)
# --- incubation / storage ---
p = labop.Primitive("Incubate")
p.description = (
    "Incubate a set of samples under specified conditions for a fixed period of time"
)
p.add_input("location", "http://bioprotocols.org/labop#SampleArray")
p.add_input("duration", sbol3.OM_MEASURE)  # time
p.add_input("temperature", sbol3.OM_MEASURE)  # temperature
p.add_input(
    "shakingFrequency", sbol3.OM_MEASURE, True
)  # Hertz or RPM?; in either case, defaults to zero
doc.add(p)
p = labop.Primitive("Hold")
p.description = "Incubate, store, or hold a set of samples indefinitely at the specified temperature"
p.add_input("location", "http://bioprotocols.org/labop#SampleArray", unbounded=True)
p.add_input("temperature", sbol3.OM_MEASURE)  # temperature
doc.add(p)
p = labop.Primitive("HoldOnIce")
p.description = "Incubate, store, or hold a set of samples indefinitely on ice"
p.add_input("location", "http://bioprotocols.org/labop#SampleArray", unbounded=True)
doc.add(p)
# --- centrifugation ---
p = labop.Primitive("Spin")
p.description = (
    "Centrifuge a set of samples at a given acceleration for a given period of time"
)
p.add_input("location", "http://bioprotocols.org/labop#SampleArray")
p.add_input("duration", sbol3.OM_MEASURE)  # time
p.add_input("acceleration", sbol3.OM_MEASURE)  # acceleration
doc.add(p)
p = labop.Primitive("QuickSpin")
p.description = "Perform a brief centrifugation on a set of samples to pull down stray droplets or condensate into the bottom of the container"
p.add_input("location", "http://bioprotocols.org/labop#SampleArray")
doc.add(p)
print("Library construction complete")
print("Validating library")
# fix: validate once and reuse the report -- validate() walks the whole
# document and was previously run twice (once for errors, once for warnings)
report = doc.validate()
for e in report.errors:
    print(e)
for w in report.warnings:
    print(w)
filename = LIBRARY_NAME + ".ttl"
doc.write(filename, "turtle")
print("Library written as " + filename)
|
import irc
import threading
import time
import imp
class plugin_thread( threading.Thread ):
    """Background thread that manages IRC bot plugins.

    Loads plugin modules by name, fires their cron hooks once per second,
    and dispatches commands/actions to them. Each plugin module is expected
    to expose a `plugin` object with attributes such as `handle`, `method`,
    `run`, `hooks`, `cron_time`, `do_init`, and `help_str`.
    """

    # Class-level defaults kept for backward compatibility; __init__ gives
    # every instance its own fresh lists (fix: the mutable class attributes
    # were previously shared between all instances).
    plist = []
    trusted = []
    mods = []
    users = []
    prefix = "!"

    def __init__( self, server, plugins, trusted ):
        threading.Thread.__init__(self)
        self.server = server
        self.trusted = trusted
        self.plist = []
        self.mods = []
        self.users = []
        self.prefix = "!"
        for plug in plugins:
            if self.load_plugin( plug ):
                print( " loaded plugin " + plug )

    def run( self ):
        """Poll once per second and run any plugin whose cron_time matches."""
        while True:
            time.sleep(1)
            cur_time = time.strftime("%d:%H:%M:%S")
            for thing in self.plist:
                if thing.plugin.cron_time:
                    t_time = thing.plugin.cron_time
                    # match the (possibly partial) pattern against the tail
                    # of the current "day:hour:minute:second" string
                    if t_time == cur_time[-len(t_time):]:
                        thing.plugin.cron( thing.plugin )

    def load_plugin( self, name ):
        """Import module `name` and register it. Returns True on success."""
        try:
            fp, pathname, description = imp.find_module( name )
            plugin = imp.load_module( name, fp, pathname, description )
            self.plist.append( plugin )
            if plugin.plugin.do_init:
                plugin.plugin.init(plugin)
            return True
        except Exception as e:
            print( "Could not load module " + name + ":", e )
            return False

    def unload_plugin( self, plug ):
        """Deregister a plugin module object; True when it was registered."""
        if plug in self.plist:
            self.plist.remove( plug )
            return True
        else:
            return False

    def unload_plugin_handle( self, name ):
        """Unload the first plugin whose handle equals `name`."""
        for plug in self.plist:
            if plug.plugin.handle == name:
                self.unload_plugin( plug )
                return True  # fix: removed the unreachable `break` after return
        return False

    def exec_cmd( self, message ):
        """Dispatch a prefixed command to the plugin with a matching handle."""
        args = message["message"].split()
        command = args[0][len(self.prefix):]
        # fix: iterate a snapshot because unload_plugin mutates self.plist
        for plug in list(self.plist):
            if plug.plugin.handle == command:
                try:
                    if plug.plugin.method == "string":
                        plug.plugin.run( plug.plugin, self, self.server,
                            message["nick"], message["host"], message["channel"], message["message"] )
                    elif plug.plugin.method == "args":
                        plug.plugin.run( plug.plugin, self, self.server, message["nick"], message["host"], message["channel"], args )
                except Exception as ie:
                    print( "Error in " + str(plug) + ", unloading.\nError:", ie )
                    self.unload_plugin( plug )

    def exec_action( self, message ):
        """Run every plugin hook registered for this IRC action."""
        # fix: iterate a snapshot because unload_plugin mutates self.plist
        for plug in list(self.plist):
            try:
                if message["action"] in plug.plugin.hooks:
                    plug.plugin.hooks[ message["action"]]\
                        ( plug.plugin, self, self.server, message["nick"], message["host"], message["channel"], message["message"] )
            except Exception as ie:
                print( "Error in " + str(plug) + ", unloading.\nError:", ie )
                self.unload_plugin( plug )

    def get_trusted( self ):
        return self.trusted

    def get_mods( self ):
        return self.mods

    def get_users( self ):
        return self.users

    def get_handles( self ):
        """Handles of all currently loaded plugins."""
        return [ plug.plugin.handle for plug in self.plist ]

    def get_plugin_help( self, handle ):
        """Help string for `handle`, or False when unknown or undocumented."""
        for plug in self.plist:
            if handle == plug.plugin.handle and plug.plugin.help_str:
                return plug.plugin.help_str
        return False

    def set_prefix( self, new_prefix ):
        self.prefix = new_prefix

    def add_trusted( self, name ):
        self.trusted.append( name )

    def add_mod( self, name ):
        self.mods.append( name )

    def add_user( self, name ):
        self.users.append( name )

    def remove_trusted( self, name ):
        if name in self.trusted:
            self.trusted.remove( name )

    def remove_mod( self, name ):
        if name in self.mods:
            self.mods.remove( name )

    def remove_user( self, name ):
        if name in self.users:
            self.users.remove( name )
|
# Read the two source strings and the shuffled string, then report whether
# the shuffle uses exactly the letters of both (a multiset comparison).
guest = input()
host = input()
jumble = input()
print('YES' if sorted(guest + host) == sorted(jumble) else 'NO')
|
import json
import binascii
import pprint
from collections import Counter
from Crypto.Cipher import AES
from base64 import *
pp = pprint.PrettyPrinter(indent=4)
def read_file():
    """Return the raw bytes of 8.txt.

    fix: use a context manager so the file handle is closed (the original
    left it open until garbage collection).
    """
    with open("8.txt", "rb") as f:
        return f.read()
cipher_text = read_file().splitlines()
for line in cipher_text:
chunk_size = 16
line = binascii.unhexlify(line)
chunks = []
so_far = 0
while True:
chunks.append(line[so_far:so_far+chunk_size])
so_far += chunk_size
if so_far >= len(line):
break
print json.dumps(Counter([binascii.hexlify(chunk) for chunk in chunks]), indent=4)
|
#!/usr/local/bin/python3.8
print ('\nBreak and Continue\n')
# Controlling loop execution with 'break' and 'continue'
##################################
# CONTINUE
##################################
'''
Continue: you go to the next iteration of the loop
and you won't go further with anything after the 'continue' keyword
i.e. it'll stop the current iteration of the loop and go to the next
'''
print ('CONTINUE CONDITION')
count = 0
while count <= 10:
    if count % 2 == 0:
        count += 1
        continue
    print(f'This is an odd number: {count}')  # f-string: shorthand for string formatting
    count += 1
# print('This is an odd number:', count) == print(f'This is an odd number: {count}')
##################################
# BREAK
##################################
'''
Break: stops the execution of the loop entirely
i.e. it will not go to the next iteration.
'''
print ('\nBreak CONDITION\n')
count = 1
while count < 10:
    if count % 2 == 0:
        count += 1
        break
    print(f'This is an odd number: {count}')
    count += 1
##################################
# EXAMPLE
##################################
colours = ['red', 'pink', 'blue', 'purple']
for colour in colours:
    if colour == 'red':
        continue
    elif colour == 'blue':
        break
    else:
        print(f'\nThe colour: {colour}\n')
# only 'pink' should be printed
# red ==> continue = skip the current iteration
# pink ==> print the colour
# blue ==> break = stop the entire loop (purple is never reached)
# -*- coding: utf-8 -*-
import datetime, pytz
import dateutil, dateutil.tz, dateutil.parser
import logging
class Date:
    """Timezone helpers built on datetime, pytz and dateutil.

    An "aware" datetime carries usable timezone information; a "naive" one
    does not. (Docstrings translated from the original Spanish comments.)
    """

    def isAware(self, date):
        """Does *date* have a timezone with a defined UTC offset?"""
        tz = date.tzinfo
        return tz is not None and tz.utcoffset(date) is not None

    def isNaive(self, date):
        """Does *date* lack timezone information?"""
        return not self.isAware(date)

    def localize(self, timezone, naive):
        """Attach the named pytz *timezone* to a naive datetime."""
        return pytz.timezone(timezone).localize(naive)

    def getLocalTimezone(self):
        """Return the server's local timezone."""
        return dateutil.tz.tzlocal()

    def localizeAwareToLocal(self, aware):
        """Convert an aware datetime into the server's local timezone."""
        return aware.astimezone(dateutil.tz.tzlocal())

    def localizeUtc(self, naive):
        """Assume *naive* is in UTC and return it with the UTC zone attached."""
        return naive.replace(tzinfo=pytz.utc)

    def localizeLocal(self, naive):
        """Assume *naive* is in the server's local zone and attach that zone."""
        return naive.replace(tzinfo=dateutil.tz.tzlocal())

    def awareToUtc(self, date):
        """Return *date* converted to UTC."""
        return date.astimezone(pytz.utc)

    def now(self):
        """Current date/time with the server's local timezone attached."""
        return self.localizeLocal(datetime.datetime.now())

    def utcNow(self):
        """Current date/time as an aware UTC datetime."""
        return datetime.datetime.now(pytz.utc)

    def isUTC(self, date):
        """Is *date* aware with an offset of exactly zero?"""
        #logging.debug(date.tzinfo)
        return date.tzinfo is not None and date.tzinfo.utcoffset(date) == datetime.timedelta(0)

    def parse(self, datestr):
        """Parse a date/time string; a naive result gets the server-local zone."""
        parsed = dateutil.parser.parse(datestr)
        if self.isNaive(parsed):
            parsed = self.localizeLocal(parsed)
        return parsed
|
# -*- coding: utf-8 -*-
{
'name': 'Sales Margin in Product / Sales Orders / Invoice',
'version':'1.0',
'category': 'Sales/Sales',
'author':'Aneesh AV',
'description': """
This module adds the 'Margin' on Product / Sales Orders / Invoice.
""",
'depends':['sale_management','account'],
'demo':['data/sale_margin_demo.xml'],
'data':[
'security/ir.model.access.csv',
'views/product_view.xml',
'views/sales_view.xml',
'views/invoice_view.xml',
'report/sale_report_view.xml'
],
}
|
""" HDF5 geometry. """
import os
import numpy as np
import h5pickle as h5py
from .converted import SeismicGeometryConverted
class SeismicGeometryHDF5(SeismicGeometryConverted):
    """ Infer or create an `HDF5` file with multiple projections of the same data inside. """
    #pylint: disable=attribute-defined-outside-init
    def process(self, mode='r', projections='ixh', shape=None, **kwargs):
        """ Detect available projections in the cube and store handlers to them in attributes.

        Parameters
        ----------
        mode : str
            File mode; 'a' becomes 'r+' when the file exists, 'w-' otherwise.
        projections, shape
            NOTE(review): currently unused in read mode -- presumably intended
            for the unimplemented 'w-' (create) branch; confirm.
        kwargs
            Forwarded to `add_attributes`.
        """
        if mode == 'a':
            mode = 'r+' if os.path.exists(self.path) else 'w-'
        self.mode = mode
        if self.mode in ['r', 'r+']:
            self.file = h5py.File(self.path, mode=mode)
        elif self.mode=='w-':
            # TODO Create new HDF5 file with required projections
            # NOTE(review): until implemented, `self.file` stays unset and the
            # projection scan below fails in this mode.
            pass
        # Check available projections
        self.available_axis = [axis for axis, name in self.AXIS_TO_NAME.items()
                               if name in self.file]
        self.available_names = [self.AXIS_TO_NAME[axis] for axis in self.available_axis]
        # Save cube handlers to instance
        self.axis_to_cube = {}
        for axis in self.available_axis:
            name = self.AXIS_TO_NAME[axis]
            cube = self.file[name]
            self.axis_to_cube[axis] = cube
            setattr(self, name, cube)
        # Parse attributes from meta / set defaults
        self.add_attributes(**kwargs)

    def add_projection(self):
        """ TODO. """
        raise NotImplementedError

    def __getitem__(self, key):
        """ Select the fastest axis and use native `HDF5` slicing to retrieve data. """
        key, shape, squeeze = self.process_key(key)
        axis = self.get_optimal_axis(shape)
        cube = self.axis_to_cube[axis]
        order = self.AXIS_TO_ORDER[axis]
        transpose = self.AXIS_TO_TRANSPOSE[axis]
        # reorder the requested slices to this projection's storage order,
        # then transpose the crop back to the canonical axis order
        slc = np.array(key)[order]
        crop = cube[tuple(slc)].transpose(transpose)
        if self.dtype == np.int8:
            # int8 cubes are upcast for downstream float math
            crop = crop.astype(np.float32)
        if squeeze:
            crop = np.squeeze(crop, axis=tuple(squeeze))
        return crop

    def __setitem__(self, key, value):
        """ TODO. """
        raise NotImplementedError
|
#!/usr/bin/env python3
"""
Utilities used internally by proplot.
"""
import inspect
import numpy as np
from . import benchmarks, dependencies, docstring, rcsetup, warnings # noqa: F401
from .dependencies import _version, _version_cartopy, _version_mpl # noqa: F401
from .docstring import _snippet_manager # noqa: F401
try:  # print debugging
    from icecream import ic
except ImportError:  # graceful fallback if IceCream isn't installed
    ic = lambda *args: print(*args)  # noqa: E731
INTERNAL_PARAMS = {  # silently pop these if we don't reach certain internal utilities
    'to_centers',
    'line_plot',
    'contour_plot',
    'default_cmap',
    'default_discrete',
    'skip_autolev',
}
# Alias dictionaries. This package only works with a subset of available artists
# and keywords so we simply create our own system rather than working with
# matplotlib's normalize_kwargs and _alias_maps.
# Each entry maps a canonical property name to its accepted alias spellings;
# consumed by _translate_props.
# WARNING: Add pseudo-props 'edgewidth' and 'fillcolor' for patch edges and faces
# WARNING: Critical that alias does not appear in key dict or else _translate_kwargs
# will overwrite settings with None after popping them!
_alias_dicts = {
    'rgba': {
        'red': ('r',),
        'green': ('g',),
        'blue': ('b',),
        'alpha': ('a',),
    },
    'hsla': {
        'hue': ('h',),
        'saturation': ('s', 'c', 'chroma'),
        'luminance': ('l',),
        'alpha': ('a',),
    },
    'line': {  # copied from lines.py but expanded to include plurals
        'antialiased': ('aa',),
        'alpha': ('a', 'alphas'),
        'color': ('c', 'colors'),
        'linewidth': ('lw', 'linewidths'),
        'linestyle': ('ls', 'linestyles'),
        'drawstyle': ('ds', 'drawstyles'),
        'dashes': ('d',),
        'marker': ('m', 'markers'),
        'markersize': ('s', 'ms', 'markersizes'),
        'markeredgecolor': ('mec', 'markeredgecolors'),
        'markeredgewidth': ('mew', 'markeredgewidths'),
        'markerfacecolor': ('mfc', 'markerfacecolors', 'mc', 'markercolor', 'markercolors'),  # noqa: E501
        'fillstyle': ('fs', 'fillstyles', 'mfs', 'markerfillstyle', 'markerfillstyles'),
        'zorder': ('z', 'zorders'),
    },
    'collection': {  # NOTE: face color is ignored for line collections
        'alphas': ('a', 'alpha'),
        'colors': ('c', 'color'),
        'edgecolors': ('ec', 'edgecolor'),
        'facecolors': ('fc', 'fillcolor', 'fillcolors'),
        'linewidths': ('lw', 'linewidth', 'ew', 'edgewidth', 'edgewidths'),
        'linestyles': ('ls', 'linestyle'),
        'zorder': ('z', 'zorders'),
    },
    'patch': {
        'alpha': ('a', 'alphas', 'facealpha', 'facealphas', 'fillalpha', 'fillalphas'),
        'color': ('c', 'colors'),
        'edgecolor': ('ec', 'edgecolors'),
        'facecolor': ('fc', 'facecolors', 'fillcolor', 'fillcolors'),
        'linewidth': ('lw', 'linewidths', 'ew', 'edgewidth', 'edgewidths'),
        'linestyle': ('ls', 'linestyles'),
        'zorder': ('z', 'zorders'),
        'hatch': ('h', 'hatching'),
    },
    'text': {
        'text': (),
        'color': ('c', 'fontcolor'),  # NOTE: see text.py source code
        'fontfamily': ('family',),
        'fontname': ('name',),
        'fontsize': ('size',),
        'fontstretch': ('stretch',),
        'fontstyle': ('style',),
        'fontvariant': ('variant',),
        'fontweight': ('weight',),
        'fontproperties': ('fp', 'font', 'font_properties'),
        'zorder': ('z', 'zorders'),
    },
}
def _not_none(*args, default=None, **kwargs):
"""
Return the first non-``None`` value. This is used with keyword arg aliases and
for setting default values. Use `kwargs` to issue warnings when multiple passed.
"""
first = default
if args and kwargs:
raise ValueError('_not_none can only be used with args or kwargs.')
elif args:
for arg in args:
if arg is not None:
first = arg
break
elif kwargs:
for name, arg in list(kwargs.items()):
if arg is not None:
first = arg
break
kwargs = {name: arg for name, arg in kwargs.items() if arg is not None}
if len(kwargs) > 1:
warnings._warn_proplot(
f'Got conflicting or duplicate keyword args: {kwargs}. '
'Using the first one.'
)
return first
def _keyword_to_positional(options, *args, allow_extra=False, **kwargs):
    """
    Translate keyword arguments to positional arguments. Permit omitted
    arguments so that plotting functions can infer values.

    Parameters
    ----------
    options : sequence
        One entry per positional slot; each entry is a key name or a tuple
        of synonymous key names.
    *args
        Positional values supplied by the caller; these win over keywords.
    allow_extra : bool, optional
        Whether surplus positional arguments are tolerated.
    **kwargs
        Keyword values; recognized names are popped out.

    Returns
    -------
    args, kwargs
        The filled positional list and the remaining keyword dict.
    """
    nargs, nopts = len(args), len(options)
    if nargs > nopts and not allow_extra:
        raise ValueError(f'Expected up to {nopts} positional arguments. Got {nargs}.')
    args = list(args)
    args.extend(None for _ in range(nopts - nargs))  # fill missing args
    for idx, keys in enumerate(options):
        if isinstance(keys, str):
            keys = (keys,)
        opts = {}
        if args[idx] is not None:  # positional args have first priority
            # the '_positional' suffix keeps this entry from colliding with keys[0]
            opts[keys[0] + '_positional'] = args[idx]
        for key in keys:  # keyword args
            opts[key] = kwargs.pop(key, None)
        args[idx] = _not_none(**opts)  # may reassign None
    return args, kwargs
def _translate_kwargs(input, output, *keys, **aliases):
    """
    The driver function. Move recognized keys (and any of their aliases)
    from *input* to *output* under the canonical key name, resolving each
    alias group with `_not_none` (which warns on duplicates).

    Parameters
    ----------
    input, output : dict
        Source and destination keyword dicts (*input* is mutated by pop).
    *keys
        Canonical keys that have no aliases.
    **aliases
        Mapping of canonical key to an alias name or tuple of alias names.
    """
    aliases.update({key: () for key in keys})
    # fix: the loop variable previously shadowed the `aliases` parameter,
    # which worked but made the code needlessly confusing
    for key, alias_names in aliases.items():
        alias_names = (alias_names,) if isinstance(alias_names, str) else alias_names
        opts = {name: input.pop(name, None) for name in (key, *alias_names)}
        value = _not_none(**opts)
        if value is not None:
            output[key] = value
    return output
def _translate_props(input, output, *categories, prefix=None, ignore=None):  # noqa: E501
    """
    The driver function. Move artist properties (and their registered
    aliases from `_alias_dicts`) out of *input* into *output* under the
    canonical property name, optionally stripping a prefix and dropping
    ignored property groups with a warning.
    """
    # Get properties
    prefix = prefix or ''  # e.g. 'box' for boxlw, boxlinewidth, etc.
    for category in categories:
        if category not in _alias_dicts:
            raise ValueError(f'Invalid alias category {category!r}.')
        for key, aliases in _alias_dicts[category].items():
            if isinstance(aliases, str):
                aliases = (aliases,)
            opts = {prefix + alias: input.pop(prefix + alias, None) for alias in (key, *aliases)}  # noqa: E501
            prop = _not_none(**opts)
            if prop is not None:
                output[key] = prop
    # Ignore properties (e.g., ignore 'marker' properties)
    # A property is dropped when any ignore string occurs in its name.
    ignore = ignore or ()
    if isinstance(ignore, str):
        ignore = (ignore,)
    for string in ignore:
        for key in tuple(output):
            if string in key:
                value = output.pop(key)
                warnings._warn_proplot(f'Ignoring property {key}={value!r}.')
    return output
def _pop_kwargs(src, *keys, **aliases):
    """
    Pop out input properties and return them in a new dictionary.
    Thin wrapper over `_translate_kwargs` with an empty destination.
    """
    return _translate_kwargs(src, {}, *keys, **aliases)
def _process_kwargs(src, *keys, **aliases):
    """
    Translate input properties and add translated names to the original dictionary.
    Thin wrapper over `_translate_kwargs` writing back into *src*.
    """
    return _translate_kwargs(src, src, *keys, **aliases)
def _pop_props(src, *categories, **kwargs):
    """
    Pop out registered properties and return them in a new dictionary.
    Thin wrapper over `_translate_props` with an empty destination.
    """
    return _translate_props(src, {}, *categories, **kwargs)
def _process_props(src, *categories, **kwargs):
    """
    Translate registered properties and add translated names to the original dictionary.
    Thin wrapper over `_translate_props` writing back into *src*.
    """
    return _translate_props(src, src, *categories, **kwargs)
def _pop_params(kwargs, *funcs, ignore_internal=False):
    """
    Pop parameters of the input functions or methods.

    Note: each value is popped from *kwargs* before the INTERNAL_PARAMS
    check, so internal-only keys are removed silently -- this ordering is
    deliberate (see the comment on INTERNAL_PARAMS).
    """
    output = {}
    for func in funcs:
        sig = inspect.signature(func)
        for key in sig.parameters:
            value = kwargs.pop(key, None)
            if ignore_internal and key in INTERNAL_PARAMS:
                continue
            if value is not None:
                output[key] = value
    return output
def _fill_guide_kw(kwargs, **pairs):
"""
Add the keyword arguments to the dictionary if not already present.
"""
aliases = (
('title', 'label'),
('locator', 'ticks'),
('format', 'formatter', 'ticklabels')
)
for key, value in pairs.items():
if value is None:
continue
keys = tuple(a for group in aliases for a in group if key in group) # may be ()
if not any(kwargs.get(key) is not None for key in keys): # note any(()) is True
kwargs[key] = value
def _guide_kw_to_arg(name, kwargs, **pairs):
    """
    Add to the `colorbar_kw` or `legend_kw` dict if there are no conflicts.
    """
    guide_kw = kwargs.setdefault(f'{name}_kw', {})
    _fill_guide_kw(guide_kw, **pairs)
def _guide_kw_from_obj(obj, name, kwargs):
    """
    Add to the dict from settings stored on the object if there are no conflicts.
    Recurses into tuple/list/ndarray containers of artists.
    """
    pairs = getattr(obj, f'_{name}_kw', None)
    pairs = pairs or {}  # needed for some reason
    _fill_guide_kw(kwargs, **pairs)
    if isinstance(obj, (tuple, list, np.ndarray)):
        for iobj in obj:  # possibly iterate over matplotlib tuple/list subclasses
            _guide_kw_from_obj(iobj, name, kwargs)
    return kwargs
def _guide_kw_to_obj(obj, name, kwargs):
"""
Add the guide keyword dict to the objects.
"""
try:
setattr(obj, f'_{name}_kw', kwargs)
except AttributeError:
pass
if isinstance(obj, (tuple, list, np.ndarray)):
for iobj in obj:
_guide_kw_to_obj(iobj, name, kwargs)
class _empty_context(object):
    """
    A dummy context manager. Entering and exiting are no-ops; used where an
    API expects a context manager but no state needs changing.
    """
    def __init__(self):
        pass
    def __enter__(self):
        pass
    def __exit__(self, *args):  # noqa: U100
        pass
class _state_context(object):
"""
Temporarily modify attribute(s) for an arbitrary object.
"""
def __init__(self, obj, **kwargs):
self._obj = obj
self._attrs_new = kwargs
self._attrs_prev = {
key: getattr(obj, key) for key in kwargs if hasattr(obj, key)
}
def __enter__(self):
for key, value in self._attrs_new.items():
setattr(self._obj, key, value)
def __exit__(self, *args): # noqa: U100
for key in self._attrs_new.keys():
if key in self._attrs_prev:
setattr(self._obj, key, self._attrs_prev[key])
else:
delattr(self._obj, key)
|
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.QtGui import QPixmap, QImage
from mainwindow import *
import picker
import cv2
import numpy as np
# https://qiita.com/kenasman/items/b9ca3beb25ecf87bfb06
# https://qiita.com/montblanc18/items/0188ff680acf028d4b63
# https://takacity.blog.fc2.com/blog-entry-338.html
# create "mainwindow.py" from "mainwindow.ui" using m.bat after editting "mainwindow.ui"
class PickerCalib(QMainWindow):
global config, config_org
def __init__(self, parent=None):
    """Build the calibration window: load current values from the
    module-level `config` dict into the widgets and wire every control to
    its handler. Assumes `config`/`config_org` are populated before this
    window is constructed -- TODO confirm against the launcher code.
    """
    # https://uxmilk.jp/41600
    self.ui = Ui_MainWindow()
    super(PickerCalib, self).__init__(parent)
    self.ui.setupUi(self)
    # image/display state, filled once a camera frame is captured
    self.img = []
    self.img_type = 0
    self.image = []
    self.pixmap = []
    self.corner = 0
    self.mouseX = 0
    self.mouseY = 0
    self.fCaptured = False
    self.MouseCursorColor = 0xffffffff  # ARGB
    self.CornerMarkColor = 0xff00ffff   # ARGB
    self.img_size = [0, 0]
    self.window_size = [self.ui.labelView.geometry().width(), self.ui.labelView.geometry().height()]
    self.mag = [1, 1]  # display magnification (image -> view label)
    # print(config)
    # print(config_org)
    # initialize tray list
    for tray in config["Tray"]:
        self.ui.comboBoxTrayNumber.addItem(tray)
    self.trayID = self.ui.comboBoxTrayNumber.itemText(0)
    # HSV range sliders: "Back" = background mask, "Black" = black mask
    # (U = upper bound, L = lower bound)
    self.ui.horizontalSliderHU1.setValue(config["HSV_Range"]["Back"]["Upper"]["H"])
    self.ui.horizontalSliderHL1.setValue(config["HSV_Range"]["Back"]["Lower"]["H"])
    self.ui.horizontalSliderHU2.setValue(config["HSV_Range"]["Black"]["Upper"]["H"])
    self.ui.horizontalSliderHL2.setValue(config["HSV_Range"]["Black"]["Lower"]["H"])
    self.ui.horizontalSliderSU1.setValue(config["HSV_Range"]["Back"]["Upper"]["S"])
    self.ui.horizontalSliderSL1.setValue(config["HSV_Range"]["Back"]["Lower"]["S"])
    self.ui.horizontalSliderSU2.setValue(config["HSV_Range"]["Black"]["Upper"]["S"])
    self.ui.horizontalSliderSL2.setValue(config["HSV_Range"]["Black"]["Lower"]["S"])
    self.ui.horizontalSliderVU1.setValue(config["HSV_Range"]["Back"]["Upper"]["V"])
    self.ui.horizontalSliderVL1.setValue(config["HSV_Range"]["Back"]["Lower"]["V"])
    self.ui.horizontalSliderVU2.setValue(config["HSV_Range"]["Black"]["Upper"]["V"])
    self.ui.horizontalSliderVL2.setValue(config["HSV_Range"]["Black"]["Lower"]["V"])
    self.update_tray_info(self.trayID)
    # wire buttons / combo boxes to their handlers
    self.ui.pushButtonMoveX.clicked.connect(self.onMoveX)
    self.ui.pushButtonMoveY.clicked.connect(self.onMoveY)
    self.ui.pushButtonMoveZ.clicked.connect(self.onMoveZ)
    self.ui.comboBoxTrayNumber.currentIndexChanged.connect(self.onTrayNumberChanged)
    self.ui.pushButtonAddTray.clicked.connect(self.onAddTray)
    self.ui.pushButtonDeleteTray.clicked.connect(self.onDeleteTray)
    self.ui.pushButtonGetTrayCamera1.clicked.connect(self.onGetTrayCamera1)
    self.ui.pushButtonGetTrayCamera2.clicked.connect(self.onGetTrayCamera2)
    self.ui.pushButtonGetTrayCamera3.clicked.connect(self.onGetTrayCamera3)
    self.ui.pushButtonGetTrayCamera4.clicked.connect(self.onGetTrayCamera4)
    self.ui.pushButtonCapture.clicked.connect(self.onCapture)
    self.ui.pushButtonSave.clicked.connect(self.onSave)
    self.ui.pushButtonQuit.clicked.connect(self.onQuit)
    self.ui.pushButtonMoveNextBaseCorner.clicked.connect(self.onMoveNextBaseCorner)
    self.ui.pushButtonMoveUp.clicked.connect(self.onMoveUp)
    self.ui.pushButtonMoveTrayCamera.clicked.connect(self.onMoveTrayCamera)
    # view-mode radio buttons
    self.ui.radioButtonRaw.clicked.connect(self.onSelectRaw)
    self.ui.radioButtonBack.clicked.connect(self.onSelectBack)
    self.ui.radioButtonBlack.clicked.connect(self.onSelectBlack)
    self.ui.radioButtonComponent.clicked.connect(self.onSelectComponent)
    self.ui.pushButtonCheckCmp.clicked.connect(self.onSelectComponentChk)
    self.ui.pushButtonMoveTrayCorner1.clicked.connect(self.onMoveTrayCorner1)
    self.ui.pushButtonMoveTrayCorner2.clicked.connect(self.onMoveTrayCorner2)
    self.ui.pushButtonMoveTrayCorner3.clicked.connect(self.onMoveTrayCorner3)
    self.ui.pushButtonMoveTrayCorner4.clicked.connect(self.onMoveTrayCorner4)
    # HSV slider change handlers
    self.ui.horizontalSliderHU1.valueChanged.connect(self.onHU1_Changed)
    self.ui.horizontalSliderHL1.valueChanged.connect(self.onHL1_Changed)
    self.ui.horizontalSliderHU2.valueChanged.connect(self.onHU2_Changed)
    self.ui.horizontalSliderHL2.valueChanged.connect(self.onHL2_Changed)
    self.ui.horizontalSliderSU1.valueChanged.connect(self.onSU1_Changed)
    self.ui.horizontalSliderSL1.valueChanged.connect(self.onSL1_Changed)
    self.ui.horizontalSliderSU2.valueChanged.connect(self.onSU2_Changed)
    self.ui.horizontalSliderSL2.valueChanged.connect(self.onSL2_Changed)
    self.ui.horizontalSliderVU1.valueChanged.connect(self.onVU1_Changed)
    self.ui.horizontalSliderVL1.valueChanged.connect(self.onVL1_Changed)
    self.ui.horizontalSliderVU2.valueChanged.connect(self.onVU2_Changed)
    self.ui.horizontalSliderVL2.valueChanged.connect(self.onVL2_Changed)
    self.ui.pushButtonMoveDispenerHeadX.clicked.connect(self.onMoveSetDispenserHeadOffsetX)
    self.ui.pushButtonMoveDispenerHeadY.clicked.connect(self.onMoveSetDispenserHeadOffsetY)
    self.ui.pushButtonMoveDispenerHeadZ.clicked.connect(self.onMoveSetDispenserHeadOffsetZ)
    self.ui.pushButtonTrayAutoCalcFrom1.clicked.connect(self.onTrayAutoCalcFrom1)
    # dispenser head offset display (x, y, z)
    self.ui.plainTextEditDispenserOffsetX.setPlainText(str(config["Physical"]["DispenserHeadOffset"][0]))
    self.ui.plainTextEditDispenserOffsetY.setPlainText(str(config["Physical"]["DispenserHeadOffset"][1]))
    self.ui.plainTextEditDispenserOffsetZ.setPlainText(str(config["Physical"]["DispenserHeadOffset"][2]))
    self.show()
def update_tray_info(self, trayID):
# Compnoent Area
self.ui.plainTextEditAreaCompL.setPlainText(str(config["Tray"][trayID]["Area"]["Component"]["Lower"]))
self.ui.plainTextEditAreaCompU.setPlainText(str(config["Tray"][trayID]["Area"]["Component"]["Upper"]))
self.ui.plainTextEditAreaBlackL.setPlainText(str(config["Tray"][trayID]["Area"]["Black"]["Lower"]))
self.ui.plainTextEditAreaBlackU.setPlainText(str(config["Tray"][trayID]["Area"]["Black"]["Upper"]))
# Corner Camera
self.ui.plainTextEditPosCamera1X.setPlainText(str(config["Tray"][trayID]["Corner"]["Camera"]["UpperLeft"][0]))
self.ui.plainTextEditPosCamera1Y.setPlainText(str(config["Tray"][trayID]["Corner"]["Camera"]["UpperLeft"][1]))
self.ui.plainTextEditPosCamera2X.setPlainText(str(config["Tray"][trayID]["Corner"]["Camera"]["UpperRight"][0]))
self.ui.plainTextEditPosCamera2Y.setPlainText(str(config["Tray"][trayID]["Corner"]["Camera"]["UpperRight"][1]))
self.ui.plainTextEditPosCamera3X.setPlainText(str(config["Tray"][trayID]["Corner"]["Camera"]["LowerRight"][0]))
self.ui.plainTextEditPosCamera3Y.setPlainText(str(config["Tray"][trayID]["Corner"]["Camera"]["LowerRight"][1]))
self.ui.plainTextEditPosCamera4X.setPlainText(str(config["Tray"][trayID]["Corner"]["Camera"]["LowerLeft"][0]))
self.ui.plainTextEditPosCamera4Y.setPlainText(str(config["Tray"][trayID]["Corner"]["Camera"]["LowerLeft"][1]))
# Corner Real
self.ui.plainTextEditPosReal1X.setPlainText(str(config["Tray"][trayID]["Corner"]["Real"]["UpperLeft"][0]))
self.ui.plainTextEditPosReal1Y.setPlainText(str(config["Tray"][trayID]["Corner"]["Real"]["UpperLeft"][1]))
self.ui.plainTextEditPosReal2X.setPlainText(str(config["Tray"][trayID]["Corner"]["Real"]["UpperRight"][0]))
self.ui.plainTextEditPosReal2Y.setPlainText(str(config["Tray"][trayID]["Corner"]["Real"]["UpperRight"][1]))
self.ui.plainTextEditPosReal3X.setPlainText(str(config["Tray"][trayID]["Corner"]["Real"]["LowerRight"][0]))
self.ui.plainTextEditPosReal3Y.setPlainText(str(config["Tray"][trayID]["Corner"]["Real"]["LowerRight"][1]))
self.ui.plainTextEditPosReal4X.setPlainText(str(config["Tray"][trayID]["Corner"]["Real"]["LowerLeft"][0]))
self.ui.plainTextEditPosReal4Y.setPlainText(str(config["Tray"][trayID]["Corner"]["Real"]["LowerLeft"][1]))
# Camera
self.ui.plainTextEditPosCameraX.setPlainText(str(config["Tray"][trayID]["Camera"][0]))
self.ui.plainTextEditPosCameraY.setPlainText(str(config["Tray"][trayID]["Camera"][1]))
self.ui.plainTextEditPosCameraZ.setPlainText(str(config["Camera"]["Height"]))
    # HSV slider handlers: each stores its bound into config["HSV_Range"] and
    # refreshes the preview. Suffix 1 = "Back" range, suffix 2 = "Black" range;
    # U/L = Upper/Lower bound; leading letter = H/S/V channel.
    def onHU1_Changed(self, value):
        # "Back" range, upper H bound
        config["HSV_Range"]["Back"]["Upper"]["H"] = value
        self.ShowImage()
    def onHL1_Changed(self, value):
        # "Back" range, lower H bound
        config["HSV_Range"]["Back"]["Lower"]["H"] = value
        self.ShowImage()
    def onHU2_Changed(self, value):
        # "Black" range, upper H bound
        config["HSV_Range"]["Black"]["Upper"]["H"] = value
        self.ShowImage()
def onHL2_Changed(self, value):
config["HSV_Range"]["Black"]["Upper"]["H"] = value
self.ShowImage()
    def onSU1_Changed(self, value):
        # "Back" range, upper S bound
        config["HSV_Range"]["Back"]["Upper"]["S"] = value
        self.ShowImage()
    def onSL1_Changed(self, value):
        # "Back" range, lower S bound
        config["HSV_Range"]["Back"]["Lower"]["S"] = value
        self.ShowImage()
    def onSU2_Changed(self, value):
        # "Black" range, upper S bound
        config["HSV_Range"]["Black"]["Upper"]["S"] = value
        self.ShowImage()
    def onSL2_Changed(self, value):
        # "Black" range, lower S bound
        config["HSV_Range"]["Black"]["Lower"]["S"] = value
        self.ShowImage()
    def onVU1_Changed(self, value):
        # "Back" range, upper V bound
        config["HSV_Range"]["Back"]["Upper"]["V"] = value
        self.ShowImage()
    def onVL1_Changed(self, value):
        # "Back" range, lower V bound
        config["HSV_Range"]["Back"]["Lower"]["V"] = value
        self.ShowImage()
    def onVU2_Changed(self, value):
        # "Black" range, upper V bound
        config["HSV_Range"]["Black"]["Upper"]["V"] = value
        self.ShowImage()
    def onVL2_Changed(self, value):
        # "Black" range, lower V bound
        config["HSV_Range"]["Black"]["Lower"]["V"] = value
        self.ShowImage()
def onTrayNumberChanged(self):
self.trayID = self.ui.comboBoxTrayNumber.currentText()
self.update_tray_info(self.trayID)
def onAddTray(self):
trayID = self.ui.plainTextEditTrayNumber.toPlainText()
self.ui.comboBoxTrayNumber.addItem(trayID)
config["Tray"][trayID] = config["Tray"]["1"]
def onDeleteTray(self):
trayID = self.ui.comboBoxTrayNumber.currentText()
print("deleting "+trayID)
del config["Tray"][trayID]
self.ui.comboBoxTrayNumber.removeItem(self.ui.comboBoxTrayNumber.currentIndex())
    # image type selection
    # img_type drives ShowImage(): 0=raw image, 1="Back" HSV mask,
    # 2="Black" HSV mask, 3=component overlay, 4=overlay + visit each part
    def onSelectRaw(self):
        self.img_type = 0
        self.ShowImage()
    def onSelectBack(self):
        self.img_type = 1
        self.ShowImage()
    def onSelectBlack(self):
        self.img_type = 2
        self.ShowImage()
    def onSelectComponent(self):
        self.img_type = 3
        self.ShowImage()
    def onSelectComponentChk(self):
        self.img_type = 4
        self.ShowImage()
    # draw cross
    def DrawCrossOnBase(self, x, y):
        # Move the head above (x, y) and touch down on the base surface.
        # NOTE(review): the original pen cross-drawing sequence is kept below as
        # a disabled reference; the active code only performs a touch-down.
        '''
        Zpos_draw = 45
        Zpos_move = 80
        L_cross = 5
        picker.move_Z(Zpos_move)
        picker.move_XY(x, y)
        picker.move_Z(Zpos_draw)
        picker.move_XY(x - L_cross, y)
        picker.move_Z(Zpos_move)
        picker.move_XY(x, y)
        picker.move_Z(Zpos_draw)
        picker.move_XY(x + L_cross, y)
        picker.move_Z(Zpos_move)
        picker.move_XY(x, y)
        picker.move_Z(Zpos_draw)
        picker.move_XY(x, y - L_cross)
        picker.move_Z(Zpos_move)
        picker.move_XY(x, y)
        picker.move_Z(Zpos_draw)
        picker.move_XY(x, y + L_cross)
        picker.move_Z(Zpos_move)
        '''
        # Raise to travel height first so the XY move cannot drag the head.
        picker.move_Z(50)
        picker.move_XY(x, y)
        picker.move_Z(0)
        #picker.move_Z(10)
        #picker.move_Z(15) # 3mm for position calibration with base board
def onMoveUp(self):
picker.move_Z(50)
if (self.corner == 0):
picker.move_Y(30)
elif self.corner == 1:
picker.move_Y(30)
elif self.corner == 2:
picker.move_Y(225)
else:
picker.move_Y(225)
# move to base corner
def onMoveNextBaseCorner(self):
self.corner = (self.corner + 1) % 4
print("move to base corner"+str(self.corner))
if (self.corner == 0):
self.DrawCrossOnBase(0, 0)
elif self.corner == 1:
self.DrawCrossOnBase(200, 0) # for position calibration with base board
# self.DrawCrossOnBase(230, 0) # for height calibration without base board
elif self.corner == 2:
self.DrawCrossOnBase(200, 195) # for position calibration with base board
# self.DrawCrossOnBase(230, 195) # for height calibration without base board
else:
self.DrawCrossOnBase(5, 195)
    # move to tray
    def onMoveTrayCamera(self):
        # Turn the light on, persist the camera position edited in the UI for
        # the active tray, then move the camera over that tray.
        picker.light_control(True)
        print("move to tray camera " + self.trayID)
        config["Tray"][self.trayID]["Camera"][0] = float(self.ui.plainTextEditPosCameraX.toPlainText())
        config["Tray"][self.trayID]["Camera"][1] = float(self.ui.plainTextEditPosCameraY.toPlainText())
        config["Camera"]["Height"] = float(self.ui.plainTextEditPosCameraZ.toPlainText())
        picker.move_camera(self.trayID)
def onMoveTrayCorner1(self):
config["Tray"][self.trayID]["Corner"]["Real"]["UpperLeft"][0] = float(self.ui.plainTextEditPosReal1X.toPlainText())
config["Tray"][self.trayID]["Corner"]["Real"]["UpperLeft"][1] = float(self.ui.plainTextEditPosReal1Y.toPlainText())
picker.move_Z(config["Physical"]["Height"]["Motion"])
picker.move_XY(config["Tray"][self.trayID]["Corner"]["Real"]["UpperLeft"][0], config["Tray"][self.trayID]["Corner"]["Real"]["UpperLeft"][1])
def onMoveTrayCorner2(self):
config["Tray"][self.trayID]["Corner"]["Real"]["UpperRight"][0] = float(self.ui.plainTextEditPosReal2X.toPlainText())
config["Tray"][self.trayID]["Corner"]["Real"]["UpperRight"][1] = float(self.ui.plainTextEditPosReal2Y.toPlainText())
picker.move_Z(config["Physical"]["Height"]["Motion"])
picker.move_XY(config["Tray"][self.trayID]["Corner"]["Real"]["UpperRight"][0], config["Tray"][self.trayID]["Corner"]["Real"]["UpperRight"][1])
def onMoveTrayCorner3(self):
config["Tray"][self.trayID]["Corner"]["Real"]["LowerRight"][0] = float(self.ui.plainTextEditPosReal3X.toPlainText())
config["Tray"][self.trayID]["Corner"]["Real"]["LowerRight"][1] = float(self.ui.plainTextEditPosReal3Y.toPlainText())
picker.move_Z(config["Physical"]["Height"]["Motion"])
picker.move_XY(config["Tray"][self.trayID]["Corner"]["Real"]["LowerRight"][0], config["Tray"][self.trayID]["Corner"]["Real"]["LowerRight"][1])
def onMoveTrayCorner4(self):
config["Tray"][self.trayID]["Corner"]["Real"]["LowerLeft"][0] = float(self.ui.plainTextEditPosReal4X.toPlainText())
config["Tray"][self.trayID]["Corner"]["Real"]["LowerLeft"][1] = float(self.ui.plainTextEditPosReal4Y.toPlainText())
picker.move_Z(config["Physical"]["Height"]["Motion"])
picker.move_XY(config["Tray"][self.trayID]["Corner"]["Real"]["LowerLeft"][0], config["Tray"][self.trayID]["Corner"]["Real"]["LowerLeft"][1])
    # manual move
    # Each handler jogs one axis to the absolute value typed into the shared
    # manual-move field.
    def onMoveX(self):
        v = float(self.ui.plainTextEditManualMove.toPlainText())
        picker.move_X(v)
    def onMoveY(self):
        v = float(self.ui.plainTextEditManualMove.toPlainText())
        picker.move_Y(v)
    def onMoveZ(self):
        v = float(self.ui.plainTextEditManualMove.toPlainText())
        picker.move_Z(v)
    # system
    def onQuit(self):
        # Exit the process immediately; the light is deliberately left as-is
        # (the disabled call below would have switched it off).
        #picker.light_control(False)
        sys.exit()
    def onSave(self):
        # Persist the current HSV slider positions and the camera height to the
        # config file. The disabled block below used to also copy every per-tray
        # UI field into every tray; per-tray geometry is now written by the
        # individual handlers instead.
        '''
        for trayID in config["Tray"]:
            config["Tray"][trayID]["Area"]["Component"]["Lower"] = int(self.ui.plainTextEditAreaCompL.toPlainText())
            config["Tray"][trayID]["Area"]["Component"]["Upper"] = int(self.ui.plainTextEditAreaCompU.toPlainText())
            config["Tray"][trayID]["Area"]["Black"]["Lower"] = int(self.ui.plainTextEditAreaBlackL.toPlainText())
            config["Tray"][trayID]["Area"]["Black"]["Upper"] = int(self.ui.plainTextEditAreaBlackU.toPlainText())
            config["Tray"][trayID]["Corner"]["Camera"]["UpperLeft"][0] = float(self.ui.plainTextEditPosCamera1X.toPlainText())
            config["Tray"][trayID]["Corner"]["Camera"]["UpperLeft"][1] = float(self.ui.plainTextEditPosCamera1Y.toPlainText())
            config["Tray"][trayID]["Corner"]["Camera"]["UpperRight"][0] = float(self.ui.plainTextEditPosCamera2X.toPlainText())
            config["Tray"][trayID]["Corner"]["Camera"]["UpperRight"][1] = float(self.ui.plainTextEditPosCamera2Y.toPlainText())
            config["Tray"][trayID]["Corner"]["Camera"]["LowerRight"][0] = float(self.ui.plainTextEditPosCamera3X.toPlainText())
            config["Tray"][trayID]["Corner"]["Camera"]["LowerRight"][1] = float(self.ui.plainTextEditPosCamera3Y.toPlainText())
            config["Tray"][trayID]["Corner"]["Camera"]["LowerLeft"][0] = float(self.ui.plainTextEditPosCamera4X.toPlainText())
            config["Tray"][trayID]["Corner"]["Camera"]["LowerLeft"][1] = float(self.ui.plainTextEditPosCamera4Y.toPlainText())
            config["Tray"][trayID]["Corner"]["Real"]["UpperLeft"][0] = float(self.ui.plainTextEditPosReal1X.toPlainText())
            config["Tray"][trayID]["Corner"]["Real"]["UpperLeft"][1] = float(self.ui.plainTextEditPosReal1Y.toPlainText())
            config["Tray"][trayID]["Corner"]["Real"]["UpperRight"][0] = float(self.ui.plainTextEditPosReal2X.toPlainText())
            config["Tray"][trayID]["Corner"]["Real"]["UpperRight"][1] = float(self.ui.plainTextEditPosReal2Y.toPlainText())
            config["Tray"][trayID]["Corner"]["Real"]["LowerRight"][0] = float(self.ui.plainTextEditPosReal3X.toPlainText())
            config["Tray"][trayID]["Corner"]["Real"]["LowerRight"][1] = float(self.ui.plainTextEditPosReal3Y.toPlainText())
            config["Tray"][trayID]["Corner"]["Real"]["LowerLeft"][0] = float(self.ui.plainTextEditPosReal4X.toPlainText())
            config["Tray"][trayID]["Corner"]["Real"]["LowerLeft"][1] = float(self.ui.plainTextEditPosReal4Y.toPlainText())
            config["Tray"][trayID]["Camera"][0] = float(self.ui.plainTextEditPosCameraX.toPlainText())
            config["Tray"][trayID]["Camera"][1] = float(self.ui.plainTextEditPosCameraY.toPlainText())
        '''
        # Sliders: suffix 1 = "Back" range, suffix 2 = "Black" range.
        config["HSV_Range"]["Back"]["Upper"]["H"] = self.ui.horizontalSliderHU1.value()
        config["HSV_Range"]["Back"]["Lower"]["H"] = self.ui.horizontalSliderHL1.value()
        config["HSV_Range"]["Back"]["Upper"]["S"] = self.ui.horizontalSliderSU1.value()
        config["HSV_Range"]["Back"]["Lower"]["S"] = self.ui.horizontalSliderSL1.value()
        config["HSV_Range"]["Back"]["Upper"]["V"] = self.ui.horizontalSliderVU1.value()
        config["HSV_Range"]["Back"]["Lower"]["V"] = self.ui.horizontalSliderVL1.value()
        config["HSV_Range"]["Black"]["Upper"]["H"] = self.ui.horizontalSliderHU2.value()
        config["HSV_Range"]["Black"]["Lower"]["H"] = self.ui.horizontalSliderHL2.value()
        config["HSV_Range"]["Black"]["Upper"]["S"] = self.ui.horizontalSliderSU2.value()
        config["HSV_Range"]["Black"]["Lower"]["S"] = self.ui.horizontalSliderSL2.value()
        config["HSV_Range"]["Black"]["Upper"]["V"] = self.ui.horizontalSliderVU2.value()
        config["HSV_Range"]["Black"]["Lower"]["V"] = self.ui.horizontalSliderVL2.value()
        config["Camera"]["Height"] = float(self.ui.plainTextEditPosCameraZ.toPlainText())
        # picker.save_config(config_org, "config.bak")
        picker.save_config(config)
    def onCapture(self):
        # Grab a frame from the camera and cache the scale factors that map
        # preview-window coordinates back to native image pixels.
        self.fCaptured = True
        #self.img = picker.capture(False, 0, 0) # load raw.png instead of camera
        self.img = picker.capture(True, 0) # camera capture
        # shape is (rows, cols, ...) so [width, height] = [shape[1], shape[0]]
        self.img_size = [self.img.shape[1], self.img.shape[0]]
        self.mag = [self.img_size[0] / self.window_size[0], self.img_size[1] / self.window_size[1]]
        self.ShowImage()
def draw_cross(self, x, y, color):
for d in range(5):
self.image.setPixel(int(x)+d, int(y), color)
self.image.setPixel(int(x)-d, int(y), color)
self.image.setPixel(int(x), int(y)-d, color)
self.image.setPixel(int(x), int(y)+d, color)
    def ShowImage(self):
        # Render the last captured frame according to img_type:
        #   0 = raw image with the four tray-corner marks
        #   1 = binarized "Back" HSV mask, 2 = binarized "Black" HSV mask
        #   3 = component-detection overlay
        #   4 = same overlay, but also physically move the head over each part
        if self.fCaptured == True:
            if self.img_type == 0:
                # raw
                self.image = QImage(self.img.data, self.img.shape[1], self.img.shape[0], QImage.Format_BGR888)
                self.draw_cross(float(self.ui.plainTextEditPosCamera1X.toPlainText()), float(self.ui.plainTextEditPosCamera1Y.toPlainText()), self.CornerMarkColor)
                self.draw_cross(float(self.ui.plainTextEditPosCamera2X.toPlainText()), float(self.ui.plainTextEditPosCamera2Y.toPlainText()), self.CornerMarkColor)
                self.draw_cross(float(self.ui.plainTextEditPosCamera3X.toPlainText()), float(self.ui.plainTextEditPosCamera3Y.toPlainText()), self.CornerMarkColor)
                self.draw_cross(float(self.ui.plainTextEditPosCamera4X.toPlainText()), float(self.ui.plainTextEditPosCamera4Y.toPlainText()), self.CornerMarkColor)
                self.DrawPixmap()
                #config["Tray"][self.trayID]["MatrixToImage"] = picker.calc_transform_to_image(self.trayID).tolist()
                #config["Tray"][self.trayID]["MatrixToReal"] = picker.calc_transform_to_real(self.trayID).tolist()
            elif self.img_type == 1:
                # back digitized
                self.image = QImage(picker.digitize(self.img, config["HSV_Range"]["Back"]).data, self.img.shape[1], self.img.shape[0], QImage.Format_Grayscale8)
            elif self.img_type == 2:
                # black digitized
                self.image = QImage(picker.digitize(self.img, config["HSV_Range"]["Black"]).data, self.img.shape[1], self.img.shape[0], QImage.Format_Grayscale8)
            elif self.img_type == 3 or self.img_type == 4:
                cmp, img_cmp = picker.create_component_list(self.img, self.trayID, 2) # tray_margin=2mm
                self.image = QImage(img_cmp, img_cmp.shape[1], img_cmp.shape[0], QImage.Format_BGR888)
                print("Componet List:")
                for c in cmp:
                    # c = (x, y, angle, is_front) per the format string below
                    print(" ({0:.2f}, {1:.2f}), ang={2:.2f} / front={3:})".format(c[0],c[1], c[2], c[3]))
                    if self.img_type == 4:
                        # Visit each detected component; front-sided parts get a
                        # lower check height than back-sided ones.
                        picker.move_XY(c[0], c[1])
                        if c[3] == True:
                            picker.move_Z(35) # front-sided component
                        else:
                            picker.move_Z(50) # back-sided component
                        picker.move_Z(80)
            self.DrawPixmap()
def DrawPixmap(self):
self.pixmap = QPixmap.fromImage(self.image)
# https://yamamon1010.hatenablog.jp/entry/qlabel_resize_image
self.pixmap = self.pixmap.scaled(640, 480, Qt.KeepAspectRatio, Qt.FastTransformation)
self.ui.labelView.setPixmap(self.pixmap)
pass
def mousePressEvent(self, event):
if (self.fCaptured == True):
x = int((event.x() - self.ui.labelView.geometry().topLeft().x()) * self.mag[0])
y = int((event.y() - self.ui.labelView.geometry().topLeft().y()) * self.mag[1])
cursor_size = 5
if cursor_size < x < self.img_size[0] - cursor_size and cursor_size < y < self.img_size[1]:
self.ui.labelMousePos.setText("({0:d}, {1:d})".format(x, y))
self.mouseX = x
self.mouseY = y
self.draw_cross(x, y, self.MouseCursorColor)
self.DrawPixmap()
def onGetTrayCamera1(self):
self.ui.plainTextEditPosCamera1X.setPlainText(str(self.mouseX))
self.ui.plainTextEditPosCamera1Y.setPlainText(str(self.mouseY))
config["Tray"][self.trayID]["Corner"]["Camera"]["UpperLeft"][0] = float(self.ui.plainTextEditPosCamera1X.toPlainText())
config["Tray"][self.trayID]["Corner"]["Camera"]["UpperLeft"][1] = float(self.ui.plainTextEditPosCamera1Y.toPlainText())
def onGetTrayCamera2(self):
self.ui.plainTextEditPosCamera2X.setPlainText(str(self.mouseX))
self.ui.plainTextEditPosCamera2Y.setPlainText(str(self.mouseY))
config["Tray"][self.trayID]["Corner"]["Camera"]["UpperRight"][0] = float(self.ui.plainTextEditPosCamera2X.toPlainText())
config["Tray"][self.trayID]["Corner"]["Camera"]["UpperRight"][1] = float(self.ui.plainTextEditPosCamera2Y.toPlainText())
def onGetTrayCamera3(self):
self.ui.plainTextEditPosCamera3X.setPlainText(str(self.mouseX))
self.ui.plainTextEditPosCamera3Y.setPlainText(str(self.mouseY))
config["Tray"][self.trayID]["Corner"]["Camera"]["LowerRight"][0] = float(self.ui.plainTextEditPosCamera3X.toPlainText())
config["Tray"][self.trayID]["Corner"]["Camera"]["LowerRight"][1] = float(self.ui.plainTextEditPosCamera3Y.toPlainText())
def onGetTrayCamera4(self):
self.ui.plainTextEditPosCamera4X.setPlainText(str(self.mouseX))
self.ui.plainTextEditPosCamera4Y.setPlainText(str(self.mouseY))
config["Tray"][self.trayID]["Corner"]["Camera"]["LowerLeft"][0] = float(self.ui.plainTextEditPosCamera4X.toPlainText())
config["Tray"][self.trayID]["Corner"]["Camera"]["LowerLeft"][1] = float(self.ui.plainTextEditPosCamera4Y.toPlainText())
    def onMoveSetDispenserHeadOffsetX(self):
        # X-offset = X at touching upper-left corner of board
        off = float(self.ui.plainTextEditDispenserOffsetX.toPlainText())
        picker.move_X(off)
        config["Physical"]["DispenserHeadOffset"][0] = off
    def onMoveSetDispenserHeadOffsetY(self):
        # Y-offset = Y at touching upper-left corner of board
        # (moves to 200 + off — presumably the offset is measured from the
        # Y=200 park position; confirm against the machine geometry)
        off = float(self.ui.plainTextEditDispenserOffsetY.toPlainText())
        picker.move_Y(200 + off)
        config["Physical"]["DispenserHeadOffset"][1] = off
    def onMoveSetDispenserHeadOffsetZ(self):
        # Z-offset = Z at touching surface of base
        off = float(self.ui.plainTextEditDispenserOffsetZ.toPlainText())
        picker.move_Z(off)
        config["Physical"]["DispenserHeadOffset"][2] = off
def onTrayAutoCalcFrom1(self):
for tr in range(2, 13):
trs = str(tr)
tray_pitch = (36, 29)
config["Tray"][trs]["Corner"]["Camera"] = config["Tray"]["1"]["Corner"]["Camera"]
x = (tr - 1) % 6
y = int((tr - 1) / 6)
config["Tray"][trs]["Corner"]["Real"]["UpperLeft"][0] = config["Tray"]["1"]["Corner"]["Real"]["UpperLeft"][0] + tray_pitch[0] * x
config["Tray"][trs]["Corner"]["Real"]["UpperRight"][0] = config["Tray"]["1"]["Corner"]["Real"]["UpperRight"][0] + tray_pitch[0] * x
config["Tray"][trs]["Corner"]["Real"]["LowerRight"][0] = config["Tray"]["1"]["Corner"]["Real"]["LowerRight"][0] + tray_pitch[0] * x
config["Tray"][trs]["Corner"]["Real"]["LowerLeft"][0] = config["Tray"]["1"]["Corner"]["Real"]["LowerLeft"][0] + tray_pitch[0] * x
config["Tray"][trs]["Corner"]["Real"]["UpperLeft"][1] = config["Tray"]["1"]["Corner"]["Real"]["UpperLeft"][1] + tray_pitch[1] * y
config["Tray"][trs]["Corner"]["Real"]["UpperRight"][1] = config["Tray"]["1"]["Corner"]["Real"]["UpperRight"][1] + tray_pitch[1] * y
config["Tray"][trs]["Corner"]["Real"]["LowerRight"][1] = config["Tray"]["1"]["Corner"]["Real"]["LowerRight"][1] + tray_pitch[1] * y
config["Tray"][trs]["Corner"]["Real"]["LowerLeft"][1] = config["Tray"]["1"]["Corner"]["Real"]["LowerLeft"][1] + tray_pitch[1] * y
if __name__=="__main__":
    # Load machine/tray configuration, then start the Qt calibration UI.
    config = picker.load_config()
    #config_org = config.copy()
    app = QApplication(sys.argv)
    w = PickerCalib()
    w.show()
    sys.exit(app.exec_())
|
from django.urls import path
from myproject.core import views as c
# URL configuration for the "core" app: home page plus Person CRUD and
# phone-management views. Namespaced as "core" for {% url 'core:...' %}.
app_name = 'core'
urlpatterns = [
    path('', c.home, name='home'),
    path('person/', c.PersonList.as_view(), name='person_list'),
    path('person/add/', c.person_create, name='person_add'),
    path('person/phone/add/', c.person_phone_create, name='person_phone_create'),
    path('person/<int:pk>/', c.person_detail, name='person_detail'),
    path('person/<int:pk>/phones/', c.person_phones, name='person_phones'),
    path('person/<int:pk>/edit/', c.person_update, name='person_edit'),
    path('person/<int:pk>/delete/', c.person_delete, name='person_delete'),
]
|
def get_place_cell_activation(current_features, min_signal, max_signal):
    """Return the indices of features lying outside 30% of the signal bounds.

    A feature activates when it exceeds 0.3 * max_signal or falls below
    0.3 * min_signal.
    """
    hi = max_signal * 0.3
    lo = min_signal * 0.3
    return [i for i, f in enumerate(current_features) if f > hi or f < lo]
|
#!/usr/bin/env python
# encoding: utf-8
"""
hots.py
Created by yang.zhou on 2012-10-29.
Copyright (c) 2012 zhouyang.me. All rights reserved.
"""
from core.base.route import route
from core.base.base import BaseHandler
@route("/topics/hots", dict(current_page="topics-hots"))
class HotsTopicsHandler(BaseHandler):
    """Handler for the hot-topics listing page at /topics/hots."""
    def get(self):
        # Not implemented yet — placeholder GET.
        pass
|
from django.urls import path
from . import views
# Listing endpoints for the catalogue app: one route per collection view.
urlpatterns = [
    path('artists/', views.artists, name="artists"),
    path('releases/', views.releases, name="releases"),
    path('editions/', views.editions, name="editions"),
    path('posts/', views.posts, name="posts"),
    path('tracks/', views.tracks, name="tracks")
] |
from scipy.stats import expon
def expon_dcdf(x, d, scale=1):
    """ d^th derivative of the cumulative distribution function at x of the given RV.

    :param x: array_like
        quantiles
    :param d: non-negative integer
        derivative order of the cumulative distribution function
    :param scale: positive number
        scale parameter (default=1)
    :return: array_like
        If d = 0: the cumulative distribution function evaluated at x
        If d = 1: the probability density function evaluated at x
        If d >= 2: the (d-1)-th density derivative evaluated at x
        NaN (with a message) when d is negative or not an integer.
    """
    # Fix: the original test used bitwise `|`, which binds tighter than `<`
    # and parsed as `d < (0 | bool)`; non-integer d >= 1 and some invalid
    # orders slipped through the validation.
    if d < 0 or not isinstance(d, int):
        print("D must be a non-negative integer.")
        return float('nan')
    if d == 0:
        return expon.cdf(x, scale=scale)
    # d-th CDF derivative of Expon(scale): (-1/scale)^(d-1) * pdf(x)
    return ((-1 / scale) ** (d - 1)) * expon.pdf(x, scale=scale)
expon.dcdf = expon_dcdf |
from django.apps import AppConfig
class KullanicilarConfig(AppConfig):
    """Django AppConfig for the 'kullanicilar' (Turkish: users) app."""
    name = 'kullanicilar'
|
from cx_Freeze import setup, Executable
import sys
# cx_Freeze build script: freezes infinicube.py into a standalone executable.
base = None
if sys.platform == "win32":
    # Win32GUI base suppresses the console window behind the game on Windows.
    base = "Win32GUI"
setup(
    name = "InfiniCube",
    version = "0.9",
    description = "InfiniCube, a next-generation game experience brought to you by Bill Tyros. 2012",
    executables = [Executable(script = "infinicube.py", base = base)])
from common.run_method import RunMethod
import allure
@allure.step("线上商店-团购&订单/团购列表")
def group_order_listGroupPurchases_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Online store - group buy & orders: list group-buy activities (GET).

    :param params: URL query parameters
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "线上商店-团购&订单/团购列表"
    url = f"/service-gos/group/order/listGroupPurchases"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("线上商店-团购&订单/拼团订单列表")
def group_order_listMyGroupOrders_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Online store - group buy & orders: list my group-buy orders (GET).

    :param params: URL query parameters
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "线上商店-团购&订单/拼团订单列表"
    url = f"/service-gos/group/order/listMyGroupOrders"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("线上商店-团购&订单/团详详情")
def group_order_getGroupPurchaseDetail_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Online store - group buy & orders: group-buy activity detail (GET).

    :param params: URL query parameters
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "线上商店-团购&订单/团详详情"
    url = f"/service-gos/group/order/getGroupPurchaseDetail"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("线上商店-小程序分享/获取小程序分享详情")
def group_qr_getShareDetail_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Online store - mini-program sharing: get share detail (GET).

    :param params: URL query parameters
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "线上商店-小程序分享/获取小程序分享详情"
    url = f"/service-gos/group/qr/getShareDetail"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("线上商店-小程序分享/获取活动班级二维码")
def group_qr_classQr_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Online store - mini-program sharing: get the activity class QR code (GET).

    :param params: URL query parameters
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "线上商店-小程序分享/获取活动班级二维码"
    url = f"/service-gos/group/qr/classQr"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("线上商店-小程序分享/获取组团二维码")
def group_qr_groupQr_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Online store - mini-program sharing: get the group QR code (GET).

    :param params: URL query parameters
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "线上商店-小程序分享/获取组团二维码"
    url = f"/service-gos/group/qr/groupQr"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/营销中心/团购中心/组团详情")
def group_center_groupDetail_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Ops - marketing center - group-buy center: group detail (GET).

    :param params: URL query parameters
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "极运营/营销中心/团购中心/组团详情"
    url = f"/service-gos/group/center/groupDetail"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/营销中心/团购订单/团购订单列表")
def group_order_orderList_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    Ops - marketing center - group-buy orders: order list (POST).

    :param params: URL query parameters
    :param body: request body
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "极运营/营销中心/团购订单/团购订单列表"
    url = f"/service-gos/group/order/orderList"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/营销中心/团购中心/组团列表")
def group_center_groupList_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    Ops - marketing center - group-buy center: group list (POST).

    :param params: URL query parameters
    :param body: request body
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "极运营/营销中心/团购中心/组团列表"
    url = f"/service-gos/group/center/groupList"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/营销中心/团购中心/活动详情")
def group_center_activityDetail_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Ops - marketing center - group-buy center: activity detail (GET).

    :param params: URL query parameters
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "极运营/营销中心/团购中心/活动详情"
    url = f"/service-gos/group/center/activityDetail"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/营销中心/团购中心/删除活动")
def group_center_deleteActivity_delete(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    Ops - marketing center - group-buy center: delete an activity (DELETE).

    :param params: URL query parameters
    :param body: request body
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "极运营/营销中心/团购中心/删除活动"
    url = f"/service-gos/group/center/deleteActivity"
    res = RunMethod.run_request("DELETE", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/营销中心/团购中心/停用启用活动")
def group_center_updateActivityStatus_patch(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    Ops - marketing center - group-buy center: enable/disable an activity (PATCH).

    :param params: URL query parameters
    :param body: request body
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "极运营/营销中心/团购中心/停用启用活动"
    url = f"/service-gos/group/center/updateActivityStatus"
    res = RunMethod.run_request("PATCH", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/营销中心/团购中心/活动列表")
def group_center_activityList_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    Ops - marketing center - group-buy center: activity list (POST).

    :param params: URL query parameters
    :param body: request body
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "极运营/营销中心/团购中心/活动列表"
    url = f"/service-gos/group/center/activityList"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/营销中心/团购中心/新增编辑活动")
def group_center_saveActivity_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    Ops - marketing center - group-buy center: create or edit an activity (POST).

    :param params: URL query parameters
    :param body: request body
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "极运营/营销中心/团购中心/新增编辑活动"
    url = f"/service-gos/group/center/saveActivity"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/营销中心/团购订单/退款")
def group_operate_refund_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Ops - marketing center - group-buy orders: refund an order (GET).

    :param params: URL query parameters
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "极运营/营销中心/团购订单/退款"
    url = f"/service-gos/group/operate/refund"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/营销中心/团购订单/团购订单总费用")
def group_order_getTotalAmount_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    Ops - marketing center - group-buy orders: total order amount (POST).

    :param params: URL query parameters
    :param body: request body
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "极运营/营销中心/团购订单/团购订单总费用"
    url = f"/service-gos/group/order/getTotalAmount"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/营销中心/团购订单/详情")
def group_operate_refundDetail_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Ops - marketing center - group-buy orders: refund detail (GET).

    :param params: URL query parameters
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "极运营/营销中心/团购订单/详情"
    url = f"/service-gos/group/operate/refundDetail"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("小程序/订单/订单状态查询")
def group_order_getOrderStatus_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Mini-program - orders: query order status (GET).

    :param params: URL query parameters
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "小程序/订单/订单状态查询"
    url = f"/service-gos/group/order/getOrderStatus"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("小程序/订单/预支付")
def group_operate_prePay_get(params=None, header=None, return_json=True, **kwargs):
    '''
    Mini-program - orders: pre-pay (GET).

    :param params: URL query parameters
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "小程序/订单/预支付"
    url = f"/service-gos/group/operate/prePay"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("小程序/在线商城/支付前验证")
def group_micoApp_queryStudentActiveOrder_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    Mini-program - online mall: pre-payment validation (POST).

    :param params: URL query parameters
    :param body: request body
    :param header: request headers
    :param return_json: True (default) -> JSON-decoded response, False -> raw response
    :param kwargs: extra options (e.g. host/environment) forwarded to RunMethod.run_request
    :return: the service response, decoded per return_json
    '''
    name = "小程序/在线商城/支付前验证"
    url = f"/service-gos/group/micoApp/queryStudentActiveOrder"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("小程序/拼团中、拼团成功、拼团失败、分享打开")
def group_micoApp_groupOrderDetail_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "小程序/拼团中、拼团成功、拼团失败、分享打开"
url = f"/service-gos/group/micoApp/groupOrderDetail"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("小程序/首页详情")
def group_micoApp_activeClassDetail_get(params=None, header=None, return_json=True, **kwargs):
    """GET /service-gos/group/micoApp/activeClassDetail — home page activity/class detail.

    :param params: query-string parameters
    :param header: request headers
    :param return_json: parsed JSON when True (default), raw response otherwise
    :param kwargs: forwarded to RunMethod.run_request (e.g. host = environment)
    :return: JSON dict by default, raw response when return_json=False
    """
    name = "小程序/首页详情"
    url = "/service-gos/group/micoApp/activeClassDetail"
    return RunMethod.run_request(
        "GET", url,
        params=params, header=header,
        return_json=return_json, name=name, **kwargs)
@allure.step("小程序/首页")
def group_micoApp_index_post(params=None, body=None, header=None, return_json=True, **kwargs):
    """POST /service-gos/group/micoApp/index — mini-program home page feed.

    :param params: query-string parameters
    :param body: JSON request body
    :param header: request headers
    :param return_json: parsed JSON when True (default), raw response otherwise
    :param kwargs: forwarded to RunMethod.run_request (e.g. host = environment)
    :return: JSON dict by default, raw response when return_json=False
    """
    name = "小程序/首页"
    url = "/service-gos/group/micoApp/index"
    return RunMethod.run_request(
        "POST", url,
        params=params, body=body, header=header,
        return_json=return_json, name=name, **kwargs)
@allure.step("小程序/订单/取消订单")
def group_order_cancelGroupOrder_get(params=None, header=None, return_json=True, **kwargs):
    """GET /service-gos/group/order/cancelGroupOrder — cancel a group-buy order.

    :param params: query-string parameters
    :param header: request headers
    :param return_json: parsed JSON when True (default), raw response otherwise
    :param kwargs: forwarded to RunMethod.run_request (e.g. host = environment)
    :return: JSON dict by default, raw response when return_json=False
    """
    name = "小程序/订单/取消订单"
    url = "/service-gos/group/order/cancelGroupOrder"
    return RunMethod.run_request(
        "GET", url,
        params=params, header=header,
        return_json=return_json, name=name, **kwargs)
@allure.step("小程序/订单/团购下单")
def group_order_order_post(params=None, body=None, header=None, return_json=True, **kwargs):
    """POST /service-gos/group/order/order — place a group-buy order.

    :param params: query-string parameters
    :param body: JSON request body
    :param header: request headers
    :param return_json: parsed JSON when True (default), raw response otherwise
    :param kwargs: forwarded to RunMethod.run_request (e.g. host = environment)
    :return: JSON dict by default, raw response when return_json=False
    """
    name = "小程序/订单/团购下单"
    url = "/service-gos/group/order/order"
    return RunMethod.run_request(
        "POST", url,
        params=params, body=body, header=header,
        return_json=return_json, name=name, **kwargs)
@allure.step("小程序/商品详情/参团信息")
def group_micoApp_getActiveJoinGroup_get(params=None, header=None, return_json=True, **kwargs):
    """GET /service-gos/group/micoApp/getActiveJoinGroup — joinable-group info on the product detail page.

    :param params: query-string parameters
    :param header: request headers
    :param return_json: parsed JSON when True (default), raw response otherwise
    :param kwargs: forwarded to RunMethod.run_request (e.g. host = environment)
    :return: JSON dict by default, raw response when return_json=False
    """
    name = "小程序/商品详情/参团信息"
    url = "/service-gos/group/micoApp/getActiveJoinGroup"
    return RunMethod.run_request(
        "GET", url,
        params=params, header=header,
        return_json=return_json, name=name, **kwargs)
@allure.step("极运营/营销中心/小程序码生成")
def group_qr_getQr_post(params=None, body=None, header=None, return_json=True, **kwargs):
    """POST /service-gos/group/qr/getQr — generate a mini-program QR code.

    :param params: query-string parameters
    :param body: JSON request body
    :param header: request headers
    :param return_json: parsed JSON when True (default), raw response otherwise
    :param kwargs: forwarded to RunMethod.run_request (e.g. host = environment)
    :return: JSON dict by default, raw response when return_json=False
    """
    name = "极运营/营销中心/小程序码生成"
    url = "/service-gos/group/qr/getQr"
    return RunMethod.run_request(
        "POST", url,
        params=params, body=body, header=header,
        return_json=return_json, name=name, **kwargs)
@allure.step("极运营/营销中心/团购中心/分享活动二维码")
def group_qr_share_miniProgram_get(params=None, header=None, return_json=True, **kwargs):
    """GET /service-gos/group/qr/share/miniProgram — QR code for sharing an activity.

    :param params: query-string parameters
    :param header: request headers
    :param return_json: parsed JSON when True (default), raw response otherwise
    :param kwargs: forwarded to RunMethod.run_request (e.g. host = environment)
    :return: JSON dict by default, raw response when return_json=False
    """
    name = "极运营/营销中心/团购中心/分享活动二维码"
    url = "/service-gos/group/qr/share/miniProgram"
    return RunMethod.run_request(
        "GET", url,
        params=params, header=header,
        return_json=return_json, name=name, **kwargs)
@allure.step("极运营/营销中心/团购订单/导出订单")
def group_order_orderList_export_post(params=None, body=None, header=None, return_json=True, **kwargs):
    """POST /service-gos/group/order/orderList/export — export group-buy orders.

    :param params: query-string parameters
    :param body: JSON request body
    :param header: request headers
    :param return_json: parsed JSON when True (default), raw response otherwise
    :param kwargs: forwarded to RunMethod.run_request (e.g. host = environment)
    :return: JSON dict by default, raw response when return_json=False
    """
    name = "极运营/营销中心/团购订单/导出订单"
    url = "/service-gos/group/order/orderList/export"
    return RunMethod.run_request(
        "POST", url,
        params=params, body=body, header=header,
        return_json=return_json, name=name, **kwargs)
@allure.step("极运营/营销中心/团购订单/一键成团")
def group_order_forceMergeGroup_activityId_post(activityId, params=None, body=None, header=None, return_json=True, **kwargs):
    """POST /service-gos/group/order/forceMergeGroup/{activityId} — force-complete a group for the given activity.

    :param activityId: activity id interpolated into the URL path
    :param params: query-string parameters
    :param body: JSON request body
    :param header: request headers
    :param return_json: parsed JSON when True (default), raw response otherwise
    :param kwargs: forwarded to RunMethod.run_request (e.g. host = environment)
    :return: JSON dict by default, raw response when return_json=False
    """
    name = "极运营/营销中心/团购订单/一键成团"
    url = f"/service-gos/group/order/forceMergeGroup/{activityId}"
    return RunMethod.run_request(
        "POST", url,
        params=params, body=body, header=header,
        return_json=return_json, name=name, **kwargs)
@allure.step("小程序/商品/活动校区列表")
def group_micoApp_activity_schoolAreas_get(params=None, header=None, return_json=True, **kwargs):
    """GET /service-gos/group/micoApp/activity/schoolAreas — list campuses for an activity.

    :param params: query-string parameters
    :param header: request headers
    :param return_json: parsed JSON when True (default), raw response otherwise
    :param kwargs: forwarded to RunMethod.run_request (e.g. host = environment)
    :return: JSON dict by default, raw response when return_json=False
    """
    name = "小程序/商品/活动校区列表"
    url = "/service-gos/group/micoApp/activity/schoolAreas"
    return RunMethod.run_request(
        "GET", url,
        params=params, header=header,
        return_json=return_json, name=name, **kwargs)
|
"""
python >=3.7
pip install joycon-python
pip install hidapi
pip install pyautogui
"""
import numpy as np
import time
from pyjoycon import device
from pyjoycon.joycon import JoyCon
import pyautogui

# Unit direction onto which accelerometer deltas are projected for scoring.
component = np.array([0.82522979, 0.29479633, 0.48175815])
thres_zero = 500     # projected deltas below this count as no movement
thres_upper = 2000   # projected deltas are clipped at this ceiling
coef = 0.0002        # converts a clipped delta into score units
unit_meter = 10      # cumulative score needed per "up" key press
target = unit_meter  # next cumulative-score threshold to trigger a press

print("Finding JoyCon-L...")
joycon = JoyCon(*device.get_ids("L"))
print(" Done.")

print("Wait for 5 seconds...")
time.sleep(5)

cumscore = 0
acc_old = np.array([joycon.get_accel_x(), joycon.get_accel_y(), joycon.get_accel_z()])
print(" Done.")
print("Started!")

while True:
    time.sleep(0.02)
    acc = np.array([joycon.get_accel_x(), joycon.get_accel_y(), joycon.get_accel_z()])
    delta = acc - acc_old
    acc_old = acc
    # Magnitude of the acceleration change along the scoring axis.
    score = np.abs(np.dot(delta, component))
    if score < thres_zero:
        score = 0
    score = min(score, thres_upper)
    score *= coef
    cumscore += score
    if target < cumscore:
        pyautogui.press("up")
        target += unit_meter
    print("\r ", end="")
    print("\r score: {:d}\ttotal score: {:d}".format(int(score), int(cumscore)), end="")
|
import asyncio
import utils
import os
import cv2
import base64
import videocaptureasync
from time import time
# Location of the camera
#WEBCAM_LOCATION = "http://192.168.43.44:8080/video" # IP CAMERA
WEBCAM_LOCATION = 0 # USB CAMERA
# Module state: websocket to the main module, running flag, async capture handle.
ws = None
started = False
videoCapture = None
# Time before retrying to connect to the camera upon failure (seconds)
CAMERA_CONNECTION_RETRY_TIME = 3.0
# Maximum frames captured per second
MAX_FPS = 30
# Port used by this module to listen for messages
LISTENING_PORT = 5001
# Called when the camera receives a message from the main module
async def onReception(websocket, head, body):
    """Handle a control message; the "start" head turns the camera on."""
    global ws
    # Remember the first websocket that talks to us as the reply channel.
    # Fix: identity comparison with None (`is None`), not `== None`.
    if ws is None:
        ws = websocket
    print("[MAIN > CAMERA]", head)
    if head == "start":
        await turnCameraOn(onCameraStartupSuccess, onCameraStartupFailure)
# Called when the camera has been successfully started
async def onCameraStartupSuccess():
    """Acknowledge startup, then stream base64-encoded frames over the websocket.

    Frames are throttled to at most MAX_FPS per second.  If a frame read
    fails while `started` is still True, a reconnection attempt is made and
    this streaming loop returns (the new success callback takes over).
    """
    # Fix: removed the needless `global MAX_FPS` — the constant is only read,
    # and reading a module-level name requires no global declaration.
    minDelay = 1 / MAX_FPS  # minimum seconds between two consecutive frames
    await utils.wsSend(ws, "start-success")
    while True:
        t = time()
        img = capturePicture()
        if img is None:
            # Frame grab failed: reconnect if we had been running.
            if started:
                await turnCameraOn(onCameraStartupSuccess, onCameraStartupFailure)
            return
        else:
            encodedImg = utils.imageToBase64(img)
            if encodedImg is not None:
                computingDelay = time()-t
                if computingDelay < minDelay:
                    await asyncio.sleep(minDelay - computingDelay)
                try:
                    await utils.wsSend(ws, "image-base64", {"time": t, "data": encodedImg})
                except Exception as e:
                    print("Error sending image:", e)
                    await asyncio.sleep(3)
# Called when the camera couldn't be started or couldn't be reached
async def onCameraStartupFailure():
    """Report the failure to the main module, back off, then retry."""
    await utils.wsSend(ws, "start-failure")
    # Retry later
    await asyncio.sleep(CAMERA_CONNECTION_RETRY_TIME)
    await turnCameraOn(onCameraStartupSuccess, onCameraStartupFailure)
# Turns the camera on
async def turnCameraOn(onSuccess=None, onFailure=None):
    """Open the asynchronous video source; await onSuccess or onFailure.

    :param onSuccess: async callback awaited once capture has started
    :param onFailure: async callback awaited when the source cannot be opened
    """
    global started, videoCapture
    try:
        videoCapture = videocaptureasync.VideoCaptureAsync(WEBCAM_LOCATION)
        videoCapture.start()
        started = True
        if onSuccess is not None:
            await onSuccess()
    except cv2.error as e:
        print("CV2 Error upon connection with video source", e)
        # Bug fix: the failure callback previously ran only for generic
        # exceptions — OpenCV errors silently skipped it, so no retry and no
        # "start-failure" message were ever issued on a cv2 failure.
        if onFailure is not None:
            await onFailure()
    except Exception:
        print("Error while connecting to the video source.")
        if onFailure is not None:
            await onFailure()
# Turns the camera off
def turnCameraOff():
    """Mark capture as stopped and release the asynchronous video source."""
    global started
    started = False
    videoCapture.stop()
# Reads a picture from the camera
def capturePicture():
    """Return the latest frame, or None when stopped or the read failed."""
    if not started:
        return None
    success, frame = False, None
    try:
        success, frame = videoCapture.read()
    except Exception as e:
        print("Error while reading frame from VideoCapture:", e)
    return frame if success else None
# Start the websocket control server and run the event loop forever.
utils.startWsServer(LISTENING_PORT, onReception)
asyncio.get_event_loop().run_forever()
|
#!/usr/bin/env python
import numpy as np
import math
import matplotlib.pyplot as plt
from pylab import *
import h5py
from matplotlib.colors import LogNorm
#from superMapFile import superMap
## Set the Zero
## (floor value so arrays plotted on a log colour scale never hold exact 0)
zero = 1.0e-20
## Set the maximum size of helium/hydrogen
maxSize = 201
## Open the file
f = h5py.File('/home/sophie/Workspace/xolotl-plsm-build/script/xolotlStop.h5', 'r')
## Get the last time step saved in the file
concGroup0 = f['concentrationsGroup']
timestep = concGroup0.attrs['lastTimeStep']
lastLoop = concGroup0.attrs['lastLoop']
## Open the concentration group
groupName ='concentrationsGroup/concentration_' + str(lastLoop) + '_' + str(timestep)
concGroup = f[groupName]
## Read the concentration and index datasets
concDset = concGroup['concs']
indexDset = concGroup['concs_startingIndices']
## Read the time at the chosen time step
time = concGroup.attrs['absoluteTime']
## Read the grid to know which grid point is which depth
gridDset = concGroup['grid']
gridSize = len(gridDset)
## Read how many normal and super clusters there are
networkGroup = f['networkGroup']
totalSize = networkGroup.attrs['totalSize']
## Create the mesh and data array
## x/y form a (cluster size) x (depth) mesh for pcolor; heArray/tArray/vArray
## accumulate He/T/V concentrations per (size, depth) cell.
x = np.empty([maxSize+1, gridSize])
y = np.empty([maxSize+1, gridSize])
heArray = np.empty([maxSize+1, gridSize])
tArray = np.empty([maxSize+1, gridSize])
vArray = np.empty([maxSize+1, gridSize])
for i in range(0, maxSize+1):
    for j in range(0, gridSize):
        x[i][j] = gridDset[j] - gridDset[1]
        y[i][j] = i
        heArray[i][j] = zero
        tArray[i][j] = zero
        vArray[i][j] = zero
## Save the composition information for all clusters
## superMap[i] holds three weighted histograms for cluster i:
## [He sizes, T sizes, V sizes], each entry a [size, weight] pair.
## NOTE(review): bounds appears laid out as [heMin, heMax, dMin, dMax, tMin,
## tMax, vMin, vMax, iMin, ...] — confirm against Xolotl's HDF5 schema.
superMap = []
for i in range(0, totalSize):
    ## Get the cluster bounds
    groupName = str(i)
    clusterGroup = networkGroup[groupName]
    bounds = clusterGroup.attrs['bounds']
    if (bounds[8] > 0): continue # I case
    temp = [[],[],[]]
    ## Loop on them
    for he in range(bounds[0], bounds[1]+1):
        for d in range(bounds[2], bounds[3]+1):
            for t in range(bounds[4], bounds[5]+1):
                for v in range(bounds[6], bounds[7]+1):
                    ## Look for he size
                    foundSize = False
                    for j in range(0, len(temp[0])):
                        if (he == temp[0][j][0]):
                            ## Add to the weight
                            temp[0][j][1] = temp[0][j][1] + 1
                            foundSize = True
                            break
                    ## Create a new field
                    if (not foundSize):
                        temp[0].append([he, 1])
                    ## Look for t size
                    foundSize = False
                    for j in range(0, len(temp[1])):
                        if (t == temp[1][j][0]):
                            ## Add to the weight
                            temp[1][j][1] = temp[1][j][1] + 1
                            foundSize = True
                            break
                    ## Create a new field
                    if (not foundSize):
                        temp[1].append([t, 1])
                    ## Look for v size
                    foundSize = False
                    for j in range(0, len(temp[2])):
                        if (v == temp[2][j][0]):
                            ## Add to the weight
                            temp[2][j][1] = temp[2][j][1] + 1
                            foundSize = True
                            break
                    ## Create a new field
                    if (not foundSize):
                        temp[2].append([v, 1])
    superMap.append(temp)
#print('superMap =', superMap)
## Loop on the grid
for j in range(len(indexDset)-1):
    ## Loop on the concentrations
    for i in range(indexDset[j], indexDset[j+1]):
        ## Skip the moments for now
        if (int(concDset[i][0]) > totalSize - 1): continue
        ## Get the concentration
        conc = concDset[i][1]
        ## Loop on each component
        lists = superMap[concDset[i][0]]
        ## He: add conc weighted by how often each He size occurs in the cluster
        for k in range(0, len(lists[0])):
            heSize = lists[0][k][0]
            heArray[heSize][j] = heArray[heSize][j] + (conc * lists[0][k][1])
        ## T
        for k in range(0, len(lists[1])):
            tSize = lists[1][k][0]
            tArray[tSize][j] = tArray[tSize][j] + (conc * lists[1][k][1])
        ## V
        for k in range(0, len(lists[2])):
            vSize = lists[2][k][0]
            vArray[vSize][j] = vArray[vSize][j] + (conc * lists[2][k][1])
## Create plots
## Panel 1: helium concentration vs depth and cluster size (log colour scale).
fig = plt.figure()
title = 'Helium Concentrations at t = ' + str(time) + ' s'
fig.suptitle(title,fontsize=22)
hePlot = plt.subplot(111)
## Shared colour-scale maximum across the three panels so they are comparable.
zMaxPlus = max(heArray.max(), max(tArray.max(), vArray.max()))
## Plot the data
cb1 = hePlot.pcolor(x, y, heArray, norm=LogNorm(vmin=1.0e-10, vmax=zMaxPlus), cmap="Oranges", alpha=1.0)
#cb1 = hePlot.pcolor(x, y, heArray, vmin=-zMaxPlus, vmax=zMaxPlus, cmap="bwr", alpha=1.0)
hePlot.set_xlabel("Depth (nm)",fontsize=22)
hePlot.set_ylabel("Helium Cluster Size",fontsize=22)
hePlot.set_xlim([0.1, gridDset[len(gridDset)-1] - gridDset[1]])
hePlot.set_ylim([0, maxSize])
hePlot.set_xscale('log')
hePlot.tick_params(axis='both', which='major', labelsize=20)
## Make an axis for the colorbar on the right side
cax = fig.add_axes([0.9, 0.1, 0.03, 0.8])
colorbar1 = plt.colorbar(cb1, cax=cax)
colorbar1.set_label("# / nm3",fontsize=22)
colorbar1.ax.tick_params(axis='y',labelsize=20)
#fig.savefig('He_cong.png')
## Create plots
## Panel 2: tritium concentration, same axes and colour scale as panel 1.
fig2 = plt.figure()
title = 'Tritium Concentrations at t = ' + str(time) + ' s'
fig2.suptitle(title,fontsize=22)
tPlot = plt.subplot(111)
## Plot the data
cb2 = tPlot.pcolor(x, y, tArray, norm=LogNorm(vmin=1.0e-10, vmax=zMaxPlus), cmap="Oranges", alpha=1.0)
#cb2 = tPlot.pcolor(x, y, tArray, vmin=-zMaxPlus, vmax=zMaxPlus, cmap="bwr", alpha=1.0)
tPlot.set_xlabel("Depth (nm)",fontsize=22)
tPlot.set_ylabel("Tritium Cluster Size",fontsize=22)
tPlot.set_xlim([0.1, gridDset[len(gridDset)-1] - gridDset[1]])
tPlot.set_ylim([0, maxSize])
tPlot.set_xscale('log')
tPlot.tick_params(axis='both', which='major', labelsize=20)
## Make an axis for the colorbar on the right side
cax = fig2.add_axes([0.9, 0.1, 0.03, 0.8])
colorbar2 = plt.colorbar(cb2, cax=cax)
colorbar2.set_label("# / nm3",fontsize=22)
colorbar2.ax.tick_params(axis='y',labelsize=20)
#fig2.savefig('T_cong.png')
## Create plots
## Panel 3: vacancy concentration, same axes and colour scale as the others.
fig3 = plt.figure()
title = 'Vacancy Concentrations at t = ' + str(time) + ' s'
fig3.suptitle(title,fontsize=22)
vPlot = plt.subplot(111)
## Plot the data
cb3 = vPlot.pcolor(x, y, vArray, norm=LogNorm(vmin=1.0e-10, vmax=zMaxPlus), cmap="Oranges", alpha=1.0)
#cb3 = vPlot.pcolor(x, y, vArray, vmin=-zMaxPlus, vmax=zMaxPlus, cmap="bwr", alpha=1.0)
vPlot.set_xlabel("Depth (nm)",fontsize=22)
## Bug fix: this panel plots vArray (vacancies); the y-label was copy-pasted
## from the tritium panel and wrongly said "Tritium Cluster Size".
vPlot.set_ylabel("Vacancy Cluster Size",fontsize=22)
vPlot.set_xlim([0.1, gridDset[len(gridDset)-1] - gridDset[1]])
vPlot.set_ylim([0, maxSize])
vPlot.set_xscale('log')
vPlot.tick_params(axis='both', which='major', labelsize=20)
## Make an axis for the colorbar on the right side
cax = fig3.add_axes([0.9, 0.1, 0.03, 0.8])
colorbar3 = plt.colorbar(cb3, cax=cax)
colorbar3.set_label("# / nm3",fontsize=22)
colorbar3.ax.tick_params(axis='y',labelsize=20)
#fig3.savefig('V_cong.png')
## Show the plots
plt.show()
|
import math
from sympy import Point, Line, Circle, intersection, Ray, pi
from sympy import plot_implicit, cos, sin, symbols, Eq, And
from sympy import symbols
from sympy.plotting import plot
import matplotlib.pyplot as plt
import numpy as np
#import ball_detection as detection
pathBallW = [] # contains the rays which the cue ball will follow
pathBallN = [] # contains the lines which the normal ball will follow
RADIUS = 5  # default ball radius, in scene units
def path_of_white_ball_after_collision(m, c, r):
    """Placeholder: path of the cue ball after a collision (not implemented)."""
    return None
def plot_graph(point_inter, circle_centre, point_stick):
    """Draw the scene: target ball (red), stick point (black), mirrored white
    ball (grey) and the outer cue-path rays currently stored in pathBallW."""
    # Mirror the target-ball centre through the collision point to place the
    # white ball at the moment of impact.
    white_ball_centre = Point(float(2 * point_inter.x - circle_centre.x), float(2 * point_inter.y - circle_centre.y))
    x = np.linspace(-30, 25, 10)
    circle1 = plt.Circle((circle_centre.x,circle_centre.y), RADIUS, color='r')
    circle2 = plt.Circle((point_stick.x, point_stick.y), RADIUS, color='black')
    circle3 = plt.Circle((white_ball_centre.x, white_ball_centre.y), RADIUS, color='grey')
    fig, ax = plt.subplots()
    ax.add_artist(circle1)
    ax.add_artist(circle2)
    ax.add_artist(circle3)
    x = np.linspace(-30, 25, 10)
    # Draw each outer ray as y = m*x + c from its slope and a point on it.
    y = pathBallW[0].slope*x + (pathBallW[0].p1.y - pathBallW[0].slope*pathBallW[0].p1.x)
    plt.plot(x, y, 'b')
    y = pathBallW[2].slope*x + (pathBallW[2].p1.y - pathBallW[2].slope*pathBallW[2].p1.x)
    plt.plot(x, y, 'b')
    # NOTE(review): this rebuilds pathBallW after the rays were already
    # plotted — presumably intentional to refresh state; confirm.
    path_of_white_ball(circle_centre, point_inter, RADIUS)
    plt.axis("equal")
    plt.title('Graph')
    plt.xlabel('x', color='#1C2833')
    plt.ylabel('y', color='#1C2833')
    plt.legend(loc='upper left')
    plt.grid()
    plt.show()
def path_of_white_ball(p1, p2, r):
    """Fill pathBallW with the cue ball's travel rays.

    The centre ray is Ray(p1, p2) rotated by pi (i.e. pointing away from the
    stick point p2 through the ball centre p1); the two outer entries are
    parallel rays through the tangent points at radius r on either side.
    """
    del pathBallW[:]
    centre_ray = Ray(p1, p2).rotate(pi)
    tangent_points = intersection(Circle(p1, r), centre_ray.perpendicular_line(p1))
    pathBallW.append(centre_ray.parallel_line(tangent_points[0]))
    pathBallW.append(centre_ray)
    pathBallW.append(centre_ray.parallel_line(tangent_points[1]))
def plot_white_ball_path():
    """Plot the two outer rays stored in pathBallW on a fresh figure."""
    x = np.linspace(-30, 25, 10)
    # Each ray drawn as y = m*x + c computed from its slope and first point.
    y = pathBallW[0].slope*x + (pathBallW[0].p1.y - pathBallW[0].slope*pathBallW[0].p1.x)
    plt.plot(x, y, 'b')
    y = pathBallW[2].slope*x + (pathBallW[2].p1.y - pathBallW[2].slope*pathBallW[2].p1.x)
    plt.plot(x, y, 'b')
    plt.axis("equal")
    plt.title('Graph')
    plt.xlabel('x', color='#1C2833')
    plt.ylabel('y', color='#1C2833')
    plt.legend(loc='upper left')
    plt.grid()
    plt.show()
def find_collision_point(cue_point, ball_point, radius):
    """Search along the cue ball's travel line for the contact point.

    Bisects the segment between the cue position and the target ball's
    projection onto the travel line, until the mirrored ball centre
    (2*contact - ball_centre) lies on the travel ray.  Returns a sympy Point
    on the target circle (or cue_point if no intersection is found).
    """
    point_collision = cue_point
    slope_line = pathBallW[1].slope
    white_ball_ray = pathBallW[1]
    white_ball_line = Line(cue_point, slope=slope_line)
    coeff = white_ball_line.coefficients
    ball_circle = Circle(ball_point, radius)
    # Bisection endpoints along the travel direction.
    point1 = cue_point
    point2 = intersection(white_ball_line.perpendicular_line(cue_point), Line(ball_point, slope=slope_line))[0]
    while 1:
        mid_point = Point((point1.x+point2.x)/2, (point1.y+point2.y)/2)
        mid_line = Line(mid_point, slope=slope_line)
        intersect_point = intersection(mid_line, ball_circle)
        # Candidate contact = the circle intersection closest to the cue ball.
        if len(intersect_point) == 2 and cue_point.distance(intersect_point[0]) >= cue_point.distance(intersect_point[1]):
            point_collision = intersect_point[1]
        elif len(intersect_point) == 2 or len(intersect_point) == 1:
            point_collision = intersect_point[0]
        else:
            break
        # Mirror of the ball centre through the candidate contact point: this
        # is where the white ball centre would sit at impact.
        point_extended = Point(float(2*point_collision.x - ball_point.x), float(2 * point_collision.y - ball_point.y))
        if white_ball_ray.contains(point_extended):
            break
        else:
            # Keep the half-interval whose midpoint lies on the same side of
            # the travel line as the mirrored centre.
            val_point_extended = coeff[0]*point_extended.x + coeff[1]*point_extended.y + coeff[2]
            val_mid_point = coeff[0]*mid_point.x + coeff[1]*mid_point.y + coeff[2]
            if (val_mid_point < 0 and val_point_extended < 0) or (val_mid_point > 0 and val_point_extended > 0):
                point1 = mid_point
            else:
                point2 = mid_point
        if point1 == point2:
            break
    print(point_collision)
    return point_collision
def ball_collide_first(cue_point, ball_coord):
    """Return the centre of the nearest ball crossed by the cue-ball path.

    ball_coord holds [x, y, radius] triples; a ball counts as hit when either
    outer ray in pathBallW intersects its circle.  Returns cue_point itself
    when no ball is hit.
    """
    nearest = cue_point
    best_distance = 1e9
    for coord in ball_coord:
        circle = Circle(Point(coord[0], coord[1]), coord[2])
        hit = (len(intersection(pathBallW[0], circle)) >= 1
               or len(intersection(pathBallW[2], circle)) >= 1)
        if hit:
            d = cue_point.distance(circle.center)
            if d < best_distance:
                best_distance = d
                nearest = Point(coord[0], coord[1])
    return nearest
def main():
    """Detect table objects in an image and report the first collision."""
    image_address = '3.png'
    # NOTE(review): the `import ball_detection as detection` line is commented
    # out at the top of this file — restore it before calling main(), otherwise
    # `detection` is an undefined name here.
    ball_coord, cue_coord, stick_coord = detection.detect_coordinates(image_address)
    print(ball_coord, cue_coord, stick_coord)
    if len(cue_coord) == 0 or len(stick_coord) == 0:
        print("No point detected")
        return
    cue_point = Point(cue_coord[0], cue_coord[1])
    stick_point = Point(stick_coord[0], stick_coord[1])
    # Bug fix: previously passed the raw stick_coord list; path_of_white_ball
    # builds sympy geometry from its second argument, so pass the Point.
    path_of_white_ball(cue_point, stick_point, cue_coord[2])
    #path_of_white_ball(p1, p2, RADIUS)
    first_ball = ball_collide_first(cue_point, ball_coord)
    if first_ball == cue_point:
        print("No collision")
        return
def test():
    """Self-contained demo: one target ball placed directly in the cue path."""
    p1 = Point(25, 0) # White ball centre
    p2 = Point(30, 0) # Point from cue stick
    ball_coord = []
    ball_coord.append([0,-8,5]) # Ball which lies in the path
    path_of_white_ball(p1, p2, RADIUS)
    first_ball = ball_collide_first(p1, ball_coord)
    if first_ball == p1:
        print("No collision")
    else:
        point_collision = find_collision_point(p1,first_ball,RADIUS)
        print(point_collision)
        #plot_white_ball_path()
        # Bug fix: plot_graph used to run unconditionally after the if/else,
        # so the "No collision" branch crashed with an unbound point_collision.
        plot_graph(point_collision, first_ball, p1)
if __name__ == '__main__':
    # Run the self-contained demo; switch to main() for image-based input.
    test()
    #main()
|
import cv2

# Bug fixes: the original called cv.imread (undefined name — the module is
# imported as cv2) and cv2.waitkey (wrong capitalisation; the API is waitKey).
img = cv2.imread('1.jpg')
cv2.imshow('img', img)
cv2.waitKey(0)
print('第一次修改文件')
|
#------------------------------------------------------------------------------#
# #
# A validation example #
# Will require the pymot and munkres libraries #
# Note: pymot module should be modified as per KG for bug fix and point eval #
# In iPython: copy -> %paste the following in the interpreter #
# #
#------------------------------------------------------------------------------#
from Bag2PymotJson import Bag2PymotJson
from pymot.pymot import MOTEvaluation
from DataBag import DataBag
# Ground-truth run and tracker-hypothesis run, each stored as a DataBag file.
gt = DataBag("../data/bags/deepVelocity/tmp8.db")
hyp = DataBag("../data/bags/deepVelocity/tmp8_tracking.db")
gt_converter = Bag2PymotJson(gt, ground_truth=True)
hyp_converter = Bag2PymotJson(hyp, ground_truth=False)
# Element 0 of convert()'s result is the pymot-format JSON for the run.
json_tracking_data1 = gt_converter.convert()[0]
json_tracking_data2 = hyp_converter.convert()[0]
# NOTE(review): the third argument (5) is presumably the matching threshold —
# confirm against the (KG-modified) pymot MOTEvaluation signature.
evaluator = MOTEvaluation(json_tracking_data1, json_tracking_data2, 5)
evaluator.evaluate()
evaluator.printResults()
# -*- coding: utf-8 -*-
import mimetypes
import web
class Public:
    """web.py handler serving files from the working directory by URL path."""

    def GET(self):
        """Return the requested file's bytes with a guessed Content-Type;
        raise 404 on I/O failure."""
        # SECURITY(review): the path comes straight from the URL — a "../"
        # component escapes the served directory; validate/normalise it.
        try:
            path = web.ctx.path
            file_name = path.split("/")[-1]
            web.header("Content-type", mime_type(file_name))
            # Bug fix: close the file handle deterministically instead of
            # leaking it (the original open(...).read() never closed it).
            with open(path[1:], "rb") as f:
                return f.read()
        except IOError:
            raise web.notfound()
def mime_type(filename):
    """Guess a Content-Type for *filename*, defaulting to a binary type."""
    guessed, _encoding = mimetypes.guess_type(filename)
    return guessed if guessed else "application/octet-stream"
|
# Generated by Django 3.2 on 2021-04-28 14:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: creates the FriendShip model linking two users
    (sender -> dester) with an integer status (default 0)."""
    dependencies = [
        ('authentication', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='FriendShip',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.IntegerField(default=0)),
                ('dester', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dester', to=settings.AUTH_USER_MODEL)),
                ('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sender', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
#!/usr/bin/env python2
import json
import sys
from argparse import ArgumentParser
parser = ArgumentParser('Diff root-me.org results.')
parser.add_argument('--previous', help='JSON file where to read base results', required=True)
# Bug fix: --current defaulted to None, so open(options.current) crashed when
# the flag was omitted; the old `assert('previous' in options ...)` only
# checked attribute *presence*, which is always true on an argparse Namespace.
parser.add_argument('--current', help='JSON file where to read current results', required=True)
options = parser.parse_args()

# Load both score snapshots (JSON objects keyed by user name).
with open(options.previous, 'rb') as f:
    previous = json.load(f)
with open(options.current, 'rb') as f:
    current = json.load(f)

previous_users = set(previous)
current_users = set(current)
new_users = current_users - previous_users
already_users = current_users & previous_users

# For each returning user: score delta and the set of newly solved problems.
progresses = {}
for u in already_users:
    progresses[u] = {
        'delta': current[u]['score'] - previous[u]['score']
    }
    current_solved = {p[0] for p in current[u]['problems'] if p[1]}
    previous_solved = {p[0] for p in previous[u]['problems'] if p[1]}
    progresses[u]['solved_problems'] = current_solved - previous_solved

# Report users by descending score delta, then greet newcomers.
sorted_progresses = sorted([(k, progresses[k]['delta']) for k in progresses], key=lambda x: x[1],
                           reverse=True)
for (k, delta) in sorted_progresses:
    print('%s: %d' % (k, delta))
    for p in progresses[k]['solved_problems']:
        print('  %s' % p)
for u in new_users:
    print('welcome new challenger %s' % u)
|
#! /usr/bin/env python
# ======================= Gen Imports ========================
from datetime import date, datetime
import json
import traceback
import sys
# ==== Add our path to the python path so we can import our modules ====
with open('./data_operations/config.json') as f:
config = json.load(f)
sys.path.insert(1, config['project_root'])
# ====================== Custom Imports ======================
from data_operations.utils.scrapers import Bloomberg, TDAmeritrade
from data_operations.database.helpers import DB
from shared.models import Sectors
from data_operations.utils.util import send_mail
from data_operations.utils.helpers import FireFox
# Scrape sector performance + index quotes, persist them, and e-mail status.
browser = None  # bug fix: pre-bind so the except path can safely clean up
try:
    browser = FireFox()
    browser.config = config
    mySQL = DB(config['sectors']['database_name'], False)
    model = Sectors()
    today = datetime.now().date()
    bloomberg = Bloomberg(browser)
    model.data['date'] = today
    model.data['s_p'] = bloomberg.sectors['all sectors']
    model.data['real_estate'] = bloomberg.sectors['real estate']
    model.data['consumer_staples'] = bloomberg.sectors['consumer staples']
    model.data['health_care'] = bloomberg.sectors['health care']
    model.data['utilities'] = bloomberg.sectors['utilities']
    model.data['materials'] = bloomberg.sectors['materials']
    model.data['industrials'] = bloomberg.sectors['industrials']
    model.data['financials'] = bloomberg.sectors['financials']
    model.data['energy'] = bloomberg.sectors['energy']
    model.data['communication_services'] = bloomberg.sectors['communication services']
    model.data['consumer_discretionary'] = bloomberg.sectors['consumer discretionary']
    model.data['information_technology'] = bloomberg.sectors['information technology']
    td_dji = TDAmeritrade('$DJI', True)
    model.data['dji'] = td_dji.get_percent_change()
    td_vix = TDAmeritrade('$VIX.X', True)
    model.data['vix'] = td_vix.get_percent_change()
    model.data['vix_close'] = td_vix.get_price()
    td_nasdaq = TDAmeritrade('$COMPX', True)
    model.data['nasdaq'] = td_nasdaq.get_percent_change()
    td_rus_1000 = TDAmeritrade('$RUI.X', True)
    model.data['russell_1000'] = td_rus_1000.get_percent_change()
    td_rus_2000 = TDAmeritrade('$RUT.X', True)
    model.data['russell_2000'] = td_rus_2000.get_percent_change()
    mySQL.save(model)
    send_mail('Sectors Script Succesfully Executed',
              config['sectors']['database_name'])
    del browser
except Exception:
    ex = traceback.format_exc()
    send_mail('------- Sectors Script Failed!! ------ \n\n' +
              ex, config['sectors']['database_name'])
    # Bug fix: if FireFox() itself raised, `browser` was unbound and the old
    # unconditional `del browser` raised a second NameError inside the handler.
    if browser is not None:
        del browser
|
# -*- coding: utf-8 -*-
"""
@author: xiaoke
@file: checkSubarraySum.py
@time:2020-04-29 17:13
@file_desc:
"""
class Solution(object):
def checkSubarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
l = len(nums)
if l<=1:
return False
dp = [0 for i in range(l)]
dp[0] = nums[0]
for i in range(1, l):
dp[i] = dp[i-1] + nums[i]
for i in range(l):
for j in range(i+1, l):
su = dp[j] - dp[i] + nums[i]
if su ==0 or su%k == 0:
return True
return False
if __name__ == '__main__':
    # Bug fix: instantiate Solution and call the method normally — the
    # original assigned the *class* and passed it in place of `self`.
    solu = Solution()
    nums = [1, 2, 12]
    k = 6
    print(solu.checkSubarraySum(nums, k))
    # print(1%2)
# submitted score: 23,727,620
class Library:
    """A library from the Hash Code 2020 book-scanning problem.

    my_points is a greedy score: the total value of books shippable within
    the remaining days divided by the days the library occupies (signup plus
    shipping time at n_ship books per day).
    """

    def __init__(self, id, n_books, signup, n_ship, all_books, books_dicts, remaining_days):
        self.id = id
        self.n_books = n_books          # number of books held
        self.signup = signup            # days needed to sign the library up
        self.n_ship = n_ship            # books shippable per day
        self.all_books = all_books      # ids of the books held
        # Bug fix: keep the score table on the instance so update_points no
        # longer silently depends on a module-level `books_dicts` global.
        self.books_dicts = books_dicts  # book id -> score
        #self.my_points=self.signup+ self.n_books/self.n_ship
        self.my_points = self._score(remaining_days)

    def _score(self, remaining_days):
        """Points-per-day estimate within remaining_days; 0 when nothing fits."""
        if self.n_books == 0:
            # Robustness fix: the original divided by n_books below, crashing
            # on an empty library whose signup exceeded the remaining days.
            return 0
        totpoints = sum(self.books_dicts[b] for b in self.all_books)
        # Days to ship everything: ceil(n_books / n_ship) plus signup.
        # Bug fix: the original used Python-3 true division (a Python-2
        # leftover), producing fractional day counts like 3.5.
        giorni = -(-self.n_books // self.n_ship) + self.signup
        if giorni > remaining_days:
            diff = giorni - remaining_days
            giorni = remaining_days
            # Subtract the average value of the books that no longer fit.
            totpoints = totpoints - ((totpoints / self.n_books) * diff * self.n_ship)
        if giorni == 0:
            return 0
        return totpoints / giorni

    def __repr__(self):
        return "ID: {}, N_Books: {}, signup: {}, shiprate: {}".format(self.id, self.n_books, self.signup, self.n_ship)

    def update_points(self, booktoremove, remaining_days):
        """Drop already-shipped books, then recompute my_points."""
        # Bug fix: the original removed items from all_books *while iterating
        # it*, which skips the element following each removal; rebuild instead.
        remove = set(booktoremove)
        self.all_books = [b for b in self.all_books if b not in remove]
        self.n_books = len(self.all_books)
        self.my_points = self._score(remaining_days) if self.n_books > 0 else 0
# this functions check if books in the lib have already been sent
def check_books(already_sent, new_books):
    """Return the books from new_books not yet contained in already_sent,
    preserving their order."""
    return [book for book in new_books if book not in already_sent]
# this functions returns the libraries and books considered
def used_days(total_days, libs):
    """Greedy schedule: repeatedly take the best-scoring library that fits,
    then rescore and re-sort the rest against the days left.

    libs must arrive sorted by my_points descending; it is consumed (emptied)
    as libraries are scheduled.  Returns (library_id, books) tuples.
    """
    remaining = total_days
    libs_considered = []
    dontstop = True
    # remaining = how many days are left, libs = libs not yet considered, ordered by some parameter
    while libs != [] and remaining > 0 and dontstop:
        books_sent = []
        l = libs[0]
        reorder = False
        # all the next will be zero
        if l.my_points == 0:
            dontstop = False
        if l.signup <= remaining:
            if l.all_books != []:
                remaining -= l.signup
                # reorder if there are days left
                # !! This is very slow
                if remaining > 0:
                    reorder = True
                libs_considered.append((l.id, l.all_books))
                books_sent=l.all_books
        libs.remove(l)
        if reorder:
            # Drop the just-sent books from every remaining library and
            # restore the my_points ordering before the next pick.
            for i in libs:
                i.update_points(books_sent, remaining)
            libs.sort(key=lambda x: x.my_points, reverse=True)
    return libs_considered
all_files= ['b_read_on', 'c_incunabula', 'd_tough_choices', 'e_so_many_books', 'f_libraries_of_the_world']
for a in all_files:
    filename = '../input/' + a + '.txt'
    print("Now running %s." % (a))
    with open(filename) as f:
        input_f = f.read().splitlines()
    # Parse every line into a list of ints.
    input_n = []
    for l in input_f:
        l = [int(i) for i in l.split()]
        input_n.append(l)
    # NOTE(review): per the header line (and the print below), tot_libraries
    # actually holds the number of *books* and tot_libs the number of
    # libraries — the variable names are swapped.
    tot_libraries = input_n[0][0]
    tot_libs = input_n[0][1]
    tot_days = input_n[0][2]
    books_scores = input_n[1]
    # Some info on the file currently analyzed
    print("Number of books %d, number of libs %d and number of days %d \n" % (tot_libraries,tot_libs,tot_days))
    input_n = input_n[2:]
    # books_dicts: book id -> score.
    books_dicts = {}
    for i, b in enumerate(books_scores):
        books_dicts[i] = b
    libs = []
    count = 0
    # create objects Library with the info
    # (each library is described by two lines: header, then its book ids)
    for i in range(0, len(input_n) - 1, 2):
        lib = Library(count, input_n[i][0], input_n[i][1], input_n[i][2], input_n[i+1], books_dicts, tot_days)
        libs.append(lib)
        count += 1
    # sort libraries by sign up time or n ship?
    libs.sort(key=lambda x: x.my_points, reverse=True)
    # call function to have results
    libs_output = used_days(tot_days, libs)
    # write results in file
    output_file = "../outputs/" + filename[9:-4] + '_output'
    o = open(output_file, 'w+')
    o.write(str(len(libs_output)) + "\n")
    for l in libs_output:
        o.write(str(l[0]) + " " + str(len(l[1])) + "\n")
        for single_book in l[1]:
            o.write(str(single_book) + " ")
        if l[1] == []:
            print("EMPTY")
        o.write("\n")
    o.close()
#!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# mock, just outputs empty .h/.cpp files
import os
import sys
# Emit an empty header/source pair for the single path argument.
if len(sys.argv) == 2:
    basename, ext = os.path.splitext(sys.argv[1])
    for suffix, template in (('.h', '// %s.h\n'), ('.cpp', '// %s.cpp\n')):
        with open(basename + suffix, 'w') as out:
            out.write(template % basename)
|
# Print a left-aligned triangle of asterisks, one to five wide.
for width in range(1, 6):
    print("*" * width)
# 檔名: exercise0303.py
# 作者: Kaiching Chang
# 時間: July, 2014
|
# -*- coding: utf-8 -*-
from app.models import TournamentComment, Season, Tournament
from app.tests import dbfixture, TournamentCommentData, TournamentData
from app.tests.models import ModelTestCase
from web import config
import datetime
class TestTournamentComment(ModelTestCase):
    """Exercises TournamentComment queries against fixture data."""
    def setUp(self):
        # Load tournament + comment fixtures into the test database.
        super(TestTournamentComment, self).setUp()
        self.data = dbfixture.data(TournamentData, TournamentCommentData)
        self.data.setup()
    def tearDown(self):
        self.data.teardown()
    def test_all(self):
        # The fixtures define four comments in total.
        all_comments = TournamentComment.all()
        self.assertEqual(len(all_comments), 4)
    def test_get(self):
        # Filter comments by their tournament's date, and by season id.
        comments_11 = config.orm.query(TournamentComment).join(TournamentComment.tournament).filter(Tournament.tournament_dt == datetime.date(2009, 9, 1)).all() #@UndefinedVariable
        comments_12 = config.orm.query(TournamentComment).join(TournamentComment.tournament).filter(Tournament.tournament_dt == datetime.date(2010, 1, 1)).all() #@UndefinedVariable
        comments_21 = config.orm.query(TournamentComment).join(TournamentComment.tournament).join(Tournament.season).filter(Season.id == 2).all() #@UndefinedVariable
        self.assertEqual(len(comments_11), 0)
        self.assertEqual(len(comments_12), 3)
        self.assertEqual(len(comments_21), 1)
|
from Tkinter import *
import tkMessageBox
def beenClicked():
    # Radio-button callback: show the currently selected relationship value.
    radioValue = relStatus.get()
    tkMessageBox.showinfo("you clicked ",radioValue)
    return

def changeLabel():
    # Button callback: copy the entry text into the label, then reset the
    # entry to a fixed placeholder.
    name="thanks for click " + yourName.get()
    labelText.set(name)
    yourName.delete(0, END)
    yourName.insert(0, "MY name is SHUbh")
    return

# --- Build the window and widgets (Python 2 Tkinter demo) ---
app = Tk()
app.title("GUI EXMAPLE")
app.geometry('450x300+200+200')
# Label text is driven by a StringVar so the callbacks can update it.
labelText = StringVar()
labelText.set("click button")
label1 = Label(app,textvariable=labelText,height=4)
label1.pack()
checkBoxVal = IntVar()
checkBox1 =Checkbutton(app,variable=checkBoxVal,text="Happy?")
checkBox1.pack()
custName = StringVar(None)
yourName= Entry(app,textvariable = custName)
yourName.pack()
relStatus = StringVar()
relStatus.set(None)
# NOTE(review): pack() returns None, so both assignments below leave
# radio1 == None; harmless here, but the widgets cannot be referenced later.
radio1 = Radiobutton(app,text="single", value="Single",variable=relStatus,command=beenClicked).pack()
radio1 = Radiobutton(app,text="married", value="Married",variable=relStatus,command=beenClicked).pack()
button1 = Button(app ,text="click here",width=20,command=changeLabel)
button1.pack(side='bottom',padx=15,pady=15)
app.mainloop()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-02 20:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: relaxes two side_dish fields so that
    # they may be left blank (the image upload and the meal M2M link).

    dependencies = [
        ('home', '0004_auto_20170302_1204'),
    ]

    operations = [
        migrations.AlterField(
            model_name='side_dish',
            name='image',
            field=models.FileField(blank=True, upload_to=''),
        ),
        migrations.AlterField(
            model_name='side_dish',
            name='meal',
            field=models.ManyToManyField(blank=True, related_name='side_dishes', to='home.Meal'),
        ),
    ]
|
def testData():
    """Validate the solver against the sample input.

    Reads test.txt (sample puzzle lines) and answer.txt (expected result,
    assumed to be an integer) and returns True when runCode reproduces it.
    """
    print("Runs test data")
    with open('test.txt', 'r') as test_file:
        sample_lines = test_file.readlines()
    with open('answer.txt', 'r') as answer_file:
        expected = answer_file.readline()
    actual = runCode(sample_lines)
    print("Correct answer: " + expected + "My answer: " + str(actual))
    return actual == int(expected)  # not always int in general puzzles
def runCode(data):
    """Count the passwords that satisfy the positional policy.

    Each line looks like "<p1>-<p2> <letter>: <password>".  A password is
    valid when the letter appears at exactly one of the two 1-based
    positions -- an exclusive-or condition.
    """
    print("Runs code")
    valid_count = 0
    for entry in data:
        policy, raw_password = entry.split(':')
        candidate = raw_password.strip()
        positions, letter = policy.split()
        first_text, second_text = positions.split('-')
        first = int(first_text)
        second = int(second_text)
        # != on two booleans is XOR: exactly one position holds the letter.
        if (candidate[first - 1] == letter) != (candidate[second - 1] == letter):
            valid_count += 1
    return valid_count
# Script entry: validate the solver against the sample data first, and only
# run it on the real puzzle input (input.txt) when the sample passes.
testResult = testData()
if testResult == True:
    print("Test data parsed. Tries to run puzzle.")
    opuzzle = open('input.txt', 'r')
    puzzle = opuzzle.readlines()
    finalResult = runCode(puzzle)
    print(finalResult)
else:
    print("Test data failed. Code is not correct. Try again.")
|
from NotiWeather import settings
from urllib import request
import re
import json
API_KEY = settings.WUNDERGROUND_KEY
API_URL = "http://api.wunderground.com/api/{}/conditions/almanac/q/{}/{}.json"
def get_safe_city(city):
    """
    Helper function that removes periods '.' and replaces each whitespace
    character with an underscore.  The generated url-safe city code is used
    in Wunderground API calls.
    """
    # Raw strings so '\.' and '\s' are genuine regex escapes rather than
    # invalid Python string escapes (a DeprecationWarning today, a
    # SyntaxError in future versions).
    no_periods = re.sub(r'\.', '', city)
    rv_code = re.sub(r'\s', '_', no_periods)
    return rv_code
def gen_api_url(user):
    """
    Helper function that builds the Wunderground REST API URL for the given
    user's location (state short code plus url-safe city name).
    """
    location = user.location
    safe_city = get_safe_city(location.city)
    return API_URL.format(API_KEY, location.state_short, safe_city)
def get_weather_json(user):
    """
    Main function that fetches and decodes the Wunderground
    conditions/almanac response (JSON) for the given user.
    """
    res = request.urlopen(gen_api_url(user))
    body = res.read().decode('utf-8')
    return json.loads(body)
|
import unittest
import sys
import os
sys.path.append(os.getcwd().replace('\\','/') + '/../')
from googleStorage import GoogleStorage
#These tests assume an empty bucket
class googleStorageTest(unittest.TestCase):
    """Integration tests for GoogleStorage.

    These tests assume an initially empty bucket and delete every object
    they create.
    """

    @classmethod
    def setUp(self):
        # NOTE(review): decorated @classmethod yet written like a per-test
        # setUp; unittest still invokes it before each test, so left as-is.
        self.googleStorage = GoogleStorage('nothing')

    def test_shouldListAllFiles(self):
        # Bucket starts empty, grows to three objects, then is emptied again.
        results = list(self.googleStorage.listFiles(''))
        self.assertEqual(len(results), 0)
        for i in range(3):
            self.googleStorage.addFile(os.getcwd() + '/testfiles/test1.txt', 'listTest' + str(i) + '.txt')
        results = list(self.googleStorage.listFiles(''))
        self.assertEqual(len(results), 3)
        for i in range(3):
            self.googleStorage.deleteFile('listTest' + str(i) + '.txt')

    def test_shouldAddFile(self):
        self.googleStorage.addFile(os.getcwd() + '/testfiles/test1.txt', 'addTest.txt')
        self.assertTrue(self.googleStorage.isFile('addTest.txt'))
        self.googleStorage.deleteFile('addTest.txt')

    def test_shouldDeleteFile(self):
        self.googleStorage.addFile(os.getcwd() + '/testfiles/test1.txt', 'deleteTest.txt')
        self.assertTrue(self.googleStorage.isFile('deleteTest.txt'))
        self.googleStorage.deleteFile('deleteTest.txt')
        self.assertFalse(self.googleStorage.isFile('deleteTest.txt'))

    def test_shouldGetFile(self):
        self.googleStorage.addFile(os.getcwd() + '/testfiles/test1.txt', 'downloadTest.txt')
        self.googleStorage.getFile('downloadTest.txt', os.getcwd() + '/downloadedFile.txt')
        self.assertTrue(os.path.isfile(os.getcwd() + '/downloadedFile.txt'))
        os.remove(os.getcwd() + '/downloadedFile.txt')
        self.googleStorage.deleteFile('downloadTest.txt')

    def test_shouldGetFileUrl(self):
        self.googleStorage.addFile(os.getcwd() + '/testfiles/test1.txt', 'fileUrlTest.txt')
        result = self.googleStorage.getFileUrl('fileUrlTest.txt')
        self.assertEqual(result, 'https://www.googleapis.com/storage/'
                         + self.googleStorage.bucket.id + '/fileUrlTest.txt')
        self.googleStorage.deleteFile('fileUrlTest.txt')

    def test_shouldGetNonExistentFileUrl(self):
        self.assertRaises(OSError, self.googleStorage.getFileUrl, 'nonExistentFile.txt')

    def test_isFileTrue(self):
        self.googleStorage.addFile(os.getcwd() + '/testfiles/test1.txt', 'addTest.txt')
        self.assertTrue(self.googleStorage.isFile('addTest.txt'))
        self.googleStorage.deleteFile('addTest.txt')

    def test_isFileFalse(self):
        self.assertFalse(self.googleStorage.isFile('nonExistentFile.txt'))

    def test_shouldUpdatefile(self):
        # BUG FIX: the scratch path previously lacked a '/' separator
        # (os.getcwd()+'fileUpdating.txt'), so the file landed outside the
        # working directory (e.g. '/home/userfileUpdating.txt').
        local_path = os.getcwd() + '/fileUpdating.txt'
        self.googleStorage.addFile(os.getcwd() + '/testfiles/test1.txt', 'fileToUpdate.txt')
        self.googleStorage.getFile('fileToUpdate.txt', local_path)
        fd = os.open(local_path, os.O_RDWR)
        # BUG FIX: os.read returns bytes on Python 3 -- compare against bytes
        # literals (which are also valid on Python 2, where bytes is str).
        self.assertEqual(os.read(fd, 13), b'Test file One')
        os.close(fd)
        self.googleStorage.addFile(os.getcwd() + '/testfiles/test3.txt', 'fileToUpdate.txt')
        self.googleStorage.getFile('fileToUpdate.txt', local_path)
        fd = os.open(local_path, os.O_RDWR)
        self.assertEqual(os.read(fd, 15), b'Third test file')
        os.close(fd)
        os.remove(local_path)
        self.googleStorage.deleteFile('fileToUpdate.txt')
# Allow running this test module directly: python <this file>.py
if __name__ == '__main__':
    unittest.main()
# -*- coding: utf-8 -*-
# @Time : 2019-12-22
# @Author : mizxc
# @Email : xiangxianjiao@163.com
import os
import time
def allowedImage(fileName):
    """
    Return True when the file name carries an allowed image extension.

    Generalized to a case-insensitive check: the original mixed-case list
    covered 'JPG'/'PNG' etc. but missed variants such as 'SVG' and 'ICO'.
    """
    ALLOWED = {'jpg', 'jpeg', 'png', 'gif', 'ico', 'svg'}
    return '.' in fileName and fileName.rsplit('.', 1)[1].lower() in ALLOWED
def creatFileName(id, fileName):
    """
    Build a unique stored file name: <stem><user id><timestamp>.<extension>.
    """
    stem, ext = fileName.rsplit('.', 1)
    return '%s%s%s.%s' % (stem, id, time.time(), ext)
def allowedFileSize(size, m):
    """
    Return True when `size` (bytes) is at most `m` megabytes.

    Bug fix: 1 MiB = 1048576 bytes; the original multiplied by 1048578,
    letting files up to 2*m bytes over the limit through.
    """
    return size <= 1048576 * m
def removeFile(path):
    """Best-effort delete: ignore the error when the file cannot be removed."""
    try:
        os.remove(path)
    except OSError:
        # Narrowed from a bare `except:`: only swallow filesystem errors
        # (missing file, permissions), never unrelated exceptions.
        pass
def writeLog(path, info):
    """
    Append `info` as one line to the log file.

    The file is (re)created from scratch when it does not exist yet or has
    grown past roughly 10 MB (10,000,000 bytes), overwriting the old log.
    """
    if os.path.exists(path) and (os.path.getsize(path) / 1000000) <= 10.0:
        mode = 'a'  # normal case: keep appending
    else:
        mode = 'w'  # missing or oversized: start a fresh file
    with open(path, mode) as wf:
        wf.write('%s\n' % info)
|
# Transcript PDF links for "The Tim Ferriss Show" podcast episodes (1-150),
# hosted on fhww.files.wordpress.com; each file name encodes the episode
# number and guest. The list is ordered by episode but has gaps (some
# episodes have no transcript entry).
aPDFLinks = ["https://fhww.files.wordpress.com/2018/07/01-kevin-rose.pdf",
             "https://fhww.files.wordpress.com/2018/07/02-josh-waitzkin.pdf",
             "https://fhww.files.wordpress.com/2018/07/03-kelly-starrett-and-justin-megar.pdf",
             "https://fhww.files.wordpress.com/2018/07/04-ryan-holiday.pdf",
             "https://fhww.files.wordpress.com/2018/07/05-jason-silva.pdf",
             "https://fhww.files.wordpress.com/2018/07/06-tim-ferriss-6-formulas-for-more-output-and-less-overwhelm.pdf",
             "https://fhww.files.wordpress.com/2018/09/07-stephen-dubner.pdf",
             "https://fhww.files.wordpress.com/2018/07/08-chase-jarvis.pdf",
             "https://fhww.files.wordpress.com/2018/07/09-tim-ferriss-the-9-habits-to-stop-now.pdf",
             "https://fhww.files.wordpress.com/2018/07/10-brian-koppelman.pdf",
             "https://fhww.files.wordpress.com/2018/07/11-tim-ferriss-drugs-and-the-meaning-of-life.pdf",
             "https://fhww.files.wordpress.com/2018/09/12-rhonda-patrick.pdf",
             "https://fhww.files.wordpress.com/2018/07/13-tim-ferriss-productivity-tricks-for-the-neurotic.pdf",
             "https://fhww.files.wordpress.com/2018/07/14-sam-harris.pdf",
             "https://fhww.files.wordpress.com/2018/07/15-neil-strauss.pdf",
             "https://fhww.files.wordpress.com/2018/07/16-joe-de-sena.pdf",
             "https://fhww.files.wordpress.com/2018/09/17-tim-ferriss-the-power-of-negative-visualization.pdf",
             "https://fhww.files.wordpress.com/2018/07/18-james-altucher.pdf",
             "https://fhww.files.wordpress.com/2018/07/19-tim-ferriss-the-top-5-reasons-to-be-a-jack-of-all-trades.pdf",
             "https://fhww.files.wordpress.com/2018/07/20-dan-carlin.pdf",
             "https://fhww.files.wordpress.com/2018/07/21-mike-shinoda.pdf",
             "https://fhww.files.wordpress.com/2018/07/22-ed-catmull.pdf",
             "https://fhww.files.wordpress.com/2018/07/23-tim-ferriss-do-22homeopathic22-remedies-or-medicine-work.pdf",
             "https://fhww.files.wordpress.com/2018/07/24-random-show.pdf",
             "https://fhww.files.wordpress.com/2018/07/25-26-27-kevin-kelly.pdf",
             "https://fhww.files.wordpress.com/2018/07/28-peter-thiel.pdf",
             "https://fhww.files.wordpress.com/2018/07/29-brendan-moynihan.pdf",
             "https://fhww.files.wordpress.com/2018/07/30-31-32-tracy-dinunzio.pdf",
             "https://fhww.files.wordpress.com/2018/09/33-ramit-sethi.pdf",
             "https://fhww.files.wordpress.com/2018/09/34-ramit-sethi.pdf",
             "https://fhww.files.wordpress.com/2018/07/35-tony-robbins-and-peter-diamandis.pdf",
             "https://fhww.files.wordpress.com/2018/07/36-alexis-ohanian.pdf",
             "https://fhww.files.wordpress.com/2018/07/37-tony-robbins-part-1.pdf",
             "https://fhww.files.wordpress.com/2018/07/38-tony-robbins-part-2.pdf",
             "https://fhww.files.wordpress.com/2018/07/39-maria-popova.pdf",
             "https://fhww.files.wordpress.com/2018/07/40-andrew-zimmern.pdf",
             "https://fhww.files.wordpress.com/2018/07/41-rolf-potts-part-1.pdf",
             "https://fhww.files.wordpress.com/2018/07/42-rolf-potts-part-2.pdf",
             "https://fhww.files.wordpress.com/2018/07/43-margaret-cho.pdf",
             "https://fhww.files.wordpress.com/2018/07/44-tim-ferriss-how-to-avoid-decision-fatigue.pdf",
             "https://fhww.files.wordpress.com/2018/07/45-nick-ganju.pdf",
             "https://fhww.files.wordpress.com/2018/07/46-random-show.pdf",
             "https://fhww.files.wordpress.com/2018/07/47-bryan-callen.pdf",
             "https://fhww.files.wordpress.com/2018/07/48-marc-goodman.pdf",
             "https://fhww.files.wordpress.com/2018/07/49-tim-ferriss-tim-answers-your-10-most-popular-questions.pdf",
             "https://fhww.files.wordpress.com/2018/07/50-peter-attia.pdf",
             "https://fhww.files.wordpress.com/2018/07/51-tim-ferriss-tim-answers-10-more-popular-questions-from-listeners.pdf",
             "https://fhww.files.wordpress.com/2018/07/52-53-ed-cooke.pdf",
             "https://fhww.files.wordpress.com/2018/07/54-jonathan-eisen-jessica-richman.pdf",
             "https://fhww.files.wordpress.com/2018/07/55-pavel-tsatsouline.pdf",
             "https://fhww.files.wordpress.com/2018/07/56-peter-diamandis.pdf",
             "https://fhww.files.wordpress.com/2018/07/57-pavel-tsatsouline.pdf",
             "https://fhww.files.wordpress.com/2018/07/58-alex-blumberg-part-1.pdf",
             "https://fhww.files.wordpress.com/2018/07/59-alex-blumberg-part-2.pdf",
             "https://fhww.files.wordpress.com/2018/07/60-arnold-schwarzenegger.pdf",
             "https://fhww.files.wordpress.com/2018/07/61-matt-mullenweg.pdf",
             "https://fhww.files.wordpress.com/2018/07/62-justin-boreta.pdf",
             "https://fhww.files.wordpress.com/2018/07/63-mark-hart-and-raoul-pal.pdf",
             "https://fhww.files.wordpress.com/2018/07/64-kelly-starrett.pdf",
             "https://fhww.files.wordpress.com/2018/07/65-peter-attia.pdf",
             "https://fhww.files.wordpress.com/2018/07/66-james-fadiman.pdf",
             "https://fhww.files.wordpress.com/2018/07/67-amanda-palmer.pdf",
             "https://fhww.files.wordpress.com/2018/07/69-glenn-beck.pdf",
             "https://fhww.files.wordpress.com/2018/09/71-jon-favreau.pdf",
             "https://fhww.files.wordpress.com/2018/09/72-paul-levesque-tfs.pdf",
             "https://fhww.files.wordpress.com/2018/07/74-samy-kamkar-part-1.pdf",
             "https://fhww.files.wordpress.com/2018/07/74-samy-kamkar-part-2.pdf",
             "https://fhww.files.wordpress.com/2018/07/75-noah-kagan.pdf",
             "https://fhww.files.wordpress.com/2018/07/76-rick-rubin.pdf",
             "https://fhww.files.wordpress.com/2018/07/77-danielle-and-astro-teller.pdf",
             "https://fhww.files.wordpress.com/2018/07/78-tim-ferriss-how-to-build-a-large-audience-from-scratch.pdf",
             "https://fhww.files.wordpress.com/2018/07/79-chris-sacca.pdf",
             "https://fhww.files.wordpress.com/2018/07/81-bryan-johnson.pdf",
             "https://fhww.files.wordpress.com/2018/07/82-sam-kass.pdf",
             "https://fhww.files.wordpress.com/2018/07/83-adam-gazzaley.pdf",
             "https://fhww.files.wordpress.com/2018/09/84-whitney-cummings.pdf",
             "https://fhww.files.wordpress.com/2018/09/85-kelly-starrett.pdf",
             "https://fhww.files.wordpress.com/2018/07/86-stanley-mcchrystal-chris-fussell.pdf",
             "https://fhww.files.wordpress.com/2018/07/87-sam-harris.pdf",
             "https://fhww.files.wordpress.com/2018/07/88-stanley-mcchrystal.pdf",
             "https://fhww.files.wordpress.com/2018/07/89-laird-hamilton-gabrielle-reece-brian-mackenzie.pdf",
             "https://fhww.files.wordpress.com/2018/07/90-peter-diamandis.pdf",
             "https://fhww.files.wordpress.com/2018/07/91-charles-poliquin.pdf",
             "https://fhww.files.wordpress.com/2018/07/92-maria-popova.pdf",
             "https://fhww.files.wordpress.com/2018/07/93-jane-mcgonigal.pdf",
             "https://fhww.files.wordpress.com/2018/07/94-tara-brach.pdf",
             "https://fhww.files.wordpress.com/2018/07/95-phil-libin.pdf",
             "https://fhww.files.wordpress.com/2018/07/96-kevin-kelly.pdf",
             "https://fhww.files.wordpress.com/2018/07/97-naval-ravikant.pdf",
             "https://fhww.files.wordpress.com/2018/07/98-robert-rodriguez.pdf",
             "https://fhww.files.wordpress.com/2018/07/99-tim-ferriss-how-to-build-a-world-class-network-in-record-time.pdf",
             "https://fhww.files.wordpress.com/2018/07/100-brene-brown.pdf",
             "https://fhww.files.wordpress.com/2018/08/101-reid-hoffman-and-michael-mccullough.pdf",
             "https://fhww.files.wordpress.com/2018/08/102-wim-hof.pdf",
             "https://fhww.files.wordpress.com/2018/08/103-tim-ferriss-episode-100-drunk-dialing-fans.pdf",
             "https://fhww.files.wordpress.com/2018/08/104-martin-polanco-and-dan-engle.pdf",
             "https://fhww.files.wordpress.com/2018/08/105-5-morning-rituals-that-help-me-win-the-day.pdf",
             "https://fhww.files.wordpress.com/2018/08/106-scott-adams.pdf",
             "https://fhww.files.wordpress.com/2018/08/107-jocko-willink.pdf",
             "https://fhww.files.wordpress.com/2018/09/108-seth-rogen-evan-goldberg.pdf",
             "https://fhww.files.wordpress.com/2018/08/109-tim-ferriss-5-things-i-did-to-become-a-better-investor.pdf",
             "https://fhww.files.wordpress.com/2018/08/110-richard-betts.pdf",
             "https://fhww.files.wordpress.com/2018/08/111-tim-ferriss-conversation-at-expa-should-you-start-a-startup.pdf",
             "https://fhww.files.wordpress.com/2018/08/112-sophia-amoruso.pdf",
             "https://fhww.files.wordpress.com/2018/08/113-tim-ferriss-5-tools-i-use-for-faster-and-better-sleep.pdf",
             "https://fhww.files.wordpress.com/2018/08/114-jimmy-chin.pdf",
             "https://fhww.files.wordpress.com/2018/08/115-lisa-randall.pdf",
             "https://fhww.files.wordpress.com/2018/08/116-casey-neistat.pdf",
             "https://fhww.files.wordpress.com/2018/08/117-dominic-dagostino.pdf",
             "https://fhww.files.wordpress.com/2018/08/118-alain-de-botton.pdf",
             "https://fhww.files.wordpress.com/2018/08/119-kevin-costner.pdf",
             "https://fhww.files.wordpress.com/2018/08/120-william-macaskill.pdf",
             "https://fhww.files.wordpress.com/2018/08/121-bj-novak.pdf",
             "https://fhww.files.wordpress.com/2018/08/122-tim-ferriss-the-magic-of-mindfulness.pdf",
             "https://fhww.files.wordpress.com/2018/08/123-rainn-wilson.pdf",
             "https://fhww.files.wordpress.com/2018/08/124-jamie-foxx.pdf",
             "https://fhww.files.wordpress.com/2018/08/125-derek-sivers.pdf",
             "https://fhww.files.wordpress.com/2018/08/126-tim-ferriss-25-great-things-ive-learned-from-podcast-guests.pdf",
             "https://fhww.files.wordpress.com/2018/08/127-amelia-boone.pdf",
             "https://fhww.files.wordpress.com/2018/08/129-random-show.pdf",
             "https://fhww.files.wordpress.com/2018/08/131-eric-weinstein.pdf",
             "https://fhww.files.wordpress.com/2018/08/132-chris-sacca.pdf",
             "https://fhww.files.wordpress.com/2018/08/134-tim-ferriss-the-tao-of-seneca.pdf",
             "https://fhww.files.wordpress.com/2018/08/135-luis-von-ahn.pdf",
             "https://fhww.files.wordpress.com/2018/08/137-tim-ferriss-how-to-practice-poverty-and-reduce-fear.pdf",
             "https://fhww.files.wordpress.com/2018/09/138-seth-godin.pdf",
             "https://fhww.files.wordpress.com/2018/08/140-shaun-white.pdf",
             "https://fhww.files.wordpress.com/2018/08/141-kaskade-and-sekou-andrews.pdf",
             "https://fhww.files.wordpress.com/2018/08/142-tim-ferriss-how-to-achieve-self-ownership.pdf",
             "https://fhww.files.wordpress.com/2018/08/143-patrick-arnold.pdf",
             "https://fhww.files.wordpress.com/2018/08/144-10x-results-with-joel-stein.pdf",
             "https://fhww.files.wordpress.com/2018/08/145-cal-fussman.pdf",
             "https://fhww.files.wordpress.com/2018/08/146-random-show.pdf",
             "https://fhww.files.wordpress.com/2018/08/147-tim-ferriss-how-to-avoid-the-busy-trap.pdf",
             "https://fhww.files.wordpress.com/2018/08/148-josh-waitzkin.pdf",
             "https://fhww.files.wordpress.com/2018/08/149-tim-ferriss-how-to-live-in-the-moment.pdf",
             "https://fhww.files.wordpress.com/2018/08/150-morgan-spurlock.pdf"]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015-2017 Shenzhen Auto-link world Information Technology Co., Ltd.
All Rights Reserved
Name: CanProtoDFSK.py
Purpose:
Created By: Clive Lau <liuxusheng@auto-link.com.cn>
Created Date: 2017-12-28
Changelog:
Date Desc
2017-12-28 Created by Clive Lau
"""
# Builtin libraries
# Third-party libraries
# Customized libraries
from CanMsgBasic import *
from Resource.DFSKVehicleStatus import EngineStatus
from Resource.DFSKVehicleStatus import DoorStatus
from Resource.DFSKVehicleStatus import LockStatus
from Resource.DFSKVehicleStatus import HandbrakeStatus
from Resource.DFSKVehicleStatus import DefrostStatus
from Resource.DFSKVehicleStatus import WiperStatus
from Resource.DFSKVehicleStatus import AcStatus
from Resource.DFSKVehicleStatus import GearStatus
from Resource.DFSKVehicleStatus import PepsStatus
class Tbox011(CanMsgBasic):
    """TBOX_011 (CAN ID 0x011): T-Box remote-control request frame, 10 ms cycle, 8 bytes."""

    def __init__(self):
        super(Tbox011, self).__init__('TBOX_011',
                                      EnumMsgType.Normal,
                                      0x011,
                                      EnumMsgTransmitType.Cycle,
                                      EnumMsgSignalType.Cycle,
                                      10,
                                      8,
                                      ['0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00'])
        # Request BCM open or close door
        self.__control_lock = 0
        # Notice BCM silent or sound shift
        self.__look_for_car = 0
        # Request PEPS remote control of the engine
        self.__control_engine = 0
        # Request PEPS power off or on
        self.__control_power = 0
        # A/C control flag
        self.__control_ac_flag = 0
        # Target temperature setting
        self.__control_temperature = 0
        # Front-defrost button
        self.__control_front_defrost = 0
        # Rear-defrost / mirror-heating button
        self.__control_rear_defrost = 0
        # A/C off button
        self.__control_ac = 0

    @property
    def control_lock(self):
        """ Request BCM open or close door """
        return self.__control_lock

    @property
    def look_for_car(self):
        """ Notice BCM silent or sound shift """
        return self.__look_for_car

    @property
    def control_engine(self):
        """ Request PEPS remote control of the engine """
        return self.__control_engine

    @property
    def control_power(self):
        """ Request PEPS power off or on """
        return self.__control_power

    @property
    def control_ac_flag(self):
        """ A/C control flag """
        return self.__control_ac_flag

    @property
    def control_temperature(self):
        """ A/C temperature setting """
        return self.__control_temperature

    @property
    def control_front_defrost(self):
        """ Front-defrost control """
        return self.__control_front_defrost

    @property
    def control_rear_defrost(self):
        """ Rear-defrost / mirror-heating control """
        return self.__control_rear_defrost

    @property
    def control_ac_key(self):
        """ A/C control """
        return self.__control_ac

    def encode(self):
        """Pack the signal attributes into _msg_data (as hex strings) and return it."""
        # control_lock + look_for_car + control_engine + control_power
        self._msg_data[0] = hex((self.__control_lock << 0) |
                                (self.__look_for_car << 2) |
                                (self.__control_engine << 4) |
                                (self.__control_power << 6))
        # A/C control flag + temperature setting
        self._msg_data[1] = hex((self.__control_ac_flag << (8 % 8)) |
                                (self.__control_temperature << (9 % 8)))
        # front defrost + rear defrost / mirror heating + A/C
        # NOTE(review): control_ac is packed at bit 19 here, but decode()
        # reads it from bit 18 -- one of the two looks wrong; confirm
        # against the CAN matrix/DBC.
        self._msg_data[2] = hex((self.__control_front_defrost << (16 % 8)) |
                                (self.__control_rear_defrost << (17 % 8)) |
                                (self.__control_ac << (19 % 8)))
        return self._msg_data

    def decode(self, *args):
        """Extract the signal attributes from _msg_data.

        NOTE(review): this assumes _msg_data holds integers (encode() stores
        hex strings, so presumably a receive path fills integers -- verify),
        and the masked values are kept left-shifted rather than normalized
        down to bit 0 -- confirm that callers expect the raw masked value.
        """
        super(Tbox011, self).decode()
        # Request BCM open or close door
        self.__control_lock = self._msg_data[0] & (0x3 << 0)
        # Notice BCM silent or sound shift
        self.__look_for_car = self._msg_data[0] & (0x3 << 2)
        # Request PEPS remote control of the engine
        self.__control_engine = self._msg_data[0] & (0x3 << 4)
        # Request PEPS power off or on
        self.__control_power = self._msg_data[0] & (0x3 << 6)
        # A/C control flag
        self.__control_ac_flag = self._msg_data[1] & (0x1 << (8 % 8))
        # Temperature setting
        self.__control_temperature = self._msg_data[1] & (0x7 << (9 % 8))
        # Front-defrost button
        self.__control_front_defrost = self._msg_data[2] & (0x1 << (16 % 8))
        # Rear-defrost / mirror-heating button
        self.__control_rear_defrost = self._msg_data[2] & (0x1 << (17 % 8))
        # A/C off button
        self.__control_ac = self._msg_data[2] & (0x1 << (18 % 8))

    def dump(self):
        super(Tbox011, self).dump()
class Sas300(CanMsgBasic):
    """SAS_300 (CAN ID 0x300): steering-angle sensor frame, 100 ms cycle, 8 bytes."""

    def __init__(self):
        super(Sas300, self).__init__('SAS_300',
                                     EnumMsgType.Normal,
                                     0x300,
                                     EnumMsgTransmitType.Cycle,
                                     EnumMsgSignalType.Cycle,
                                     100,
                                     8,
                                     ['0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00'])
        # Message Counter
        self.__msg_counter = 0
        # Steering-wheel angle
        self.__steering_angle = 0

    @property
    def steering_angle(self):
        """ Steering-wheel angle """
        return float(self.__steering_angle)

    @steering_angle.setter
    def steering_angle(self, value):
        """ Steering-wheel angle; 0xFFFF marks an out-of-range value """
        try:
            if not isinstance(value, float):
                raise AttributeError
            # NOTE(review): an in-range value is stored as the raw float,
            # but encode() applies `>> 8`, which needs an int -- a scaling
            # or int() conversion may be missing here; confirm.
            self.__steering_angle = 0xFFFF if value < -780.0 or value > 779.9 else value
        except AttributeError:
            print("AttributeError on steering_angle")

    def encode(self):
        """Pack the counter (byte 0) and angle (big-endian, bytes 3-4) into _msg_data."""
        # Message Counter
        self._msg_data[0] = hex(self.__msg_counter)
        # Steering-wheel angle
        self._msg_data[3] = hex(self.__steering_angle >> 8)
        self._msg_data[4] = hex(self.__steering_angle % 256)
        return self._msg_data

    def dump(self):
        super(Sas300, self).dump()
class Ems302(CanMsgBasic):
    """EMS_302 (CAN ID 0x302): engine management system frame, 100 ms cycle, 8 bytes."""

    @unique
    class ValidInvalidStatus(Enum):
        Valid = 0
        Invalid = 1

    def __init__(self):
        super(Ems302, self).__init__('EMS_302',
                                     EnumMsgType.Normal,
                                     0x302,
                                     EnumMsgTransmitType.Cycle,
                                     EnumMsgSignalType.Cycle,
                                     100,
                                     8,
                                     ['0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00'])
        # Engine-speed fault flag
        self.__engine_speed_error = 0
        # Throttle-position fault flag
        self.__throttle_position_error = 0
        # Accelerator-pedal fault flag
        self.__acc_pedal_error = 0
        # Engine speed (raw counts, 0.25 rpm per bit)
        self.__engine_speed = 0
        # Engine throttle position
        self.__engine_throttle_position = 0
        # Accelerator-pedal position (raw counts, 0.4 % per bit)
        self.__acc_pedal = 0

    @property
    def engine_speed(self):
        """ Engine speed in rpm (raw * 0.25) """
        return float(self.__engine_speed * 0.25)

    @engine_speed.setter
    def engine_speed(self, value):
        """ Engine speed in rpm; out-of-range marks the signal invalid (0xFFFF) """
        try:
            if not isinstance(value, float):
                raise AttributeError
            if value < 0.0 or value > 16383.5:
                self.__engine_speed_error = Ems302.ValidInvalidStatus.Invalid.value
                self.__engine_speed = 0xFFFF
            else:
                self.__engine_speed_error = Ems302.ValidInvalidStatus.Valid.value
                self.__engine_speed = int(value / 0.25)
        except AttributeError:
            print("AttributeError on engine_speed")

    @property
    def acc_pedal(self):
        """ Accelerator-pedal position in % (raw * 0.4) """
        return float(self.__acc_pedal * 0.4)

    @acc_pedal.setter
    def acc_pedal(self, value):
        """ Accelerator-pedal position in %; out-of-range marks it invalid (0xFF) """
        try:
            if not isinstance(value, float):
                raise AttributeError
            if value < 0.0 or value > 100.0:
                self.__acc_pedal_error = Ems302.ValidInvalidStatus.Invalid.value
                self.__acc_pedal = 0xFF
            else:
                self.__acc_pedal_error = Ems302.ValidInvalidStatus.Valid.value
                self.__acc_pedal = int(value / 0.4)
        except AttributeError:
            print("AttributeError on acc_pedal")

    def encode(self):
        """Pack fault flags and raw signal values into _msg_data and return it."""
        # engine-speed fault + throttle-position fault + acc-pedal fault
        self._msg_data[0] = hex((self.__engine_speed_error << 2) |
                                (self.__throttle_position_error << 3) |
                                (self.__acc_pedal_error << 4))
        # Engine speed (big-endian, bytes 1-2)
        self._msg_data[1] = hex(self.__engine_speed >> 8)
        self._msg_data[2] = hex(self.__engine_speed % 256)
        # Engine throttle position
        self._msg_data[3] = hex(self.__engine_throttle_position)
        # Accelerator-pedal position
        self._msg_data[4] = hex(self.__acc_pedal)
        return self._msg_data

    def dump(self):
        super(Ems302, self).dump()
        # print("-> EMS_EngineSpeedErr:\t\t" + Ems302.ValidInvalidStatus(self.engine_speed_error).name)
        # print("-> EMS_ThrottlePosErr:\t\t" + Ems302.ValidInvalidStatus(self.throttle_position_error).name)
        # print("-> EMS_AccPedalErr:\t\t\t" + Ems302.ValidInvalidStatus(self.acc_pedal_error).name)
        # print("-> EMS_EngineSpeed:\t\t\t" + (str(self.engine_speed) if self.__engine_speed != int('FFFF', 16) else 'Invalid'))
        # print("-> EMS_EngineThrottlePos:\t" + (str(self.engine_throttle_position) if self.__engine_throttle_position != int('FF', 16) else 'Invalid'))
        # print("-> EMS_AccPedal:\t\t\t" + (str(self.acc_pedal) if self.__acc_pedal != int('FF', 16) else 'Invalid'))
class Ems303(CanMsgBasic):
    """EMS_303 (CAN ID 0x303): engine running-state frame, 100 ms cycle, 8 bytes."""

    @unique
    class EngineStartFlag(Enum):
        NotFinished = 0
        Finished = 1

    def __init__(self):
        super(Ems303, self).__init__('EMS_303',
                                     EnumMsgType.Normal,
                                     0x303,
                                     EnumMsgTransmitType.Cycle,
                                     EnumMsgSignalType.Cycle,
                                     100,
                                     8,
                                     ['0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00'])
        # Engine running status
        self.__engine_status = 0
        # Engine start-success flag
        self.__engine_start_flag = 0

    @property
    def engine_status(self):
        """ Engine running status """
        return self.__engine_status

    @engine_status.setter
    def engine_status(self, status):
        """ Engine running status; Running also latches the start-success flag """
        try:
            if status not in EngineStatus.CanStatus:
                raise AttributeError
            self.__engine_status = status.value
            if status == EngineStatus.CanStatus.Running:
                self.__engine_start_flag = Ems303.EngineStartFlag.Finished.value
            else:
                self.__engine_start_flag = Ems303.EngineStartFlag.NotFinished.value
        except AttributeError:
            print("AttributeError on engine_status")

    def encode(self):
        """Pack engine status (bits 0-2) and start flag (bit 3) into byte 0."""
        # engine running status + engine start-success flag
        self._msg_data[0] = hex((self.__engine_status << 0) |
                                (self.__engine_start_flag << 3))
        return self._msg_data

    def dump(self):
        super(Ems303, self).dump()
class Tcu328(CanMsgBasic):
    """TCU_328 (CAN ID 0x328): transmission control unit frame, 100 ms cycle, 8 bytes."""

    @unique
    class ValidInvalidStatus(Enum):
        Valid = 0
        Invalid = 1

    def __init__(self):
        super(Tcu328, self).__init__('TCU_328',
                                     EnumMsgType.Normal,
                                     0x328,
                                     EnumMsgTransmitType.Cycle,
                                     EnumMsgSignalType.Cycle,
                                     100,
                                     8,
                                     ['0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00'])
        # Gear Position
        self.__gear_position_status = 0
        # Validity of Gear Position
        self.__gear_position_vd = 0
        # TCU warning for meter display
        self.__ind_fault_status = 0

    @property
    def gear_position_status(self):
        """ Gearbox position """
        return self.__gear_position_status

    @gear_position_status.setter
    def gear_position_status(self, status):
        """ Gearbox position; an unknown status marks the signal invalid """
        try:
            if status not in GearStatus.CanStatus:
                self.__gear_position_vd = Tcu328.ValidInvalidStatus.Invalid.value
                raise AttributeError
            self.__gear_position_status = status.value
            self.__gear_position_vd = Tcu328.ValidInvalidStatus.Valid.value
        except AttributeError:
            print("AttributeError on gear_position_status")

    def encode(self):
        """Pack gear position (+validity) and the meter warning into _msg_data."""
        # Gear Position + Gear Position VD
        self._msg_data[0] = hex((self.__gear_position_status << 0) |
                                (self.__gear_position_vd << 4))
        # IND Fault Status
        self._msg_data[2] = hex(self.__ind_fault_status << (17 % 8))
        return self._msg_data

    def dump(self):
        super(Tcu328, self).dump()
class Abs330(CanMsgBasic):
    """ABS_330 (CAN ID 0x330): anti-lock braking system frame, 100 ms cycle, 8 bytes."""

    @unique
    class SuccessFailureStatus(Enum):
        Success = 0
        Failure = 1

    @unique
    class ValidInvalidStatus(Enum):
        Valid = 0
        Invalid = 1

    def __init__(self):
        super(Abs330, self).__init__('ABS_330',
                                     EnumMsgType.Normal,
                                     0x330,
                                     EnumMsgTransmitType.Cycle,
                                     EnumMsgSignalType.Cycle,
                                     100,
                                     8,
                                     ['0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00'])
        # ABS system has detected a failure which does not allow a reliable
        # ABS regulation and is therefore switched off
        self.__abs_failure = 0
        # ABS system has detected a heavy fault, which does not even allow a
        # reliable electronic brake distribution and is therefore completely shut down
        self.__ebd_failure = 0
        # vehicle reference speed (raw counts, 0.05625 per bit)
        self.__vehicle_speed = 0
        # vehicle reference speed valid
        self.__vehicle_speed_valid = 0
        # every message increments the counter
        self.__message_counter = 0
        # vehicle reference speed checksum
        self.__checksum = 0

    @property
    def vehicle_speed(self):
        """ Vehicle reference speed (raw * 0.05625) """
        return float(self.__vehicle_speed * 0.05625)

    @vehicle_speed.setter
    def vehicle_speed(self, value):
        """ Vehicle reference speed; out-of-range values are stored as 0/invalid """
        try:
            if not isinstance(value, float):
                raise AttributeError
            if value < 0.0 or value > 270.0:
                self.__vehicle_speed = 0
                self.__vehicle_speed_valid = Abs330.ValidInvalidStatus.Invalid.value
            else:
                self.__vehicle_speed = int(value / 0.05625)
                self.__vehicle_speed_valid = Abs330.ValidInvalidStatus.Valid.value
        except AttributeError:
            print("AttributeError on vehicle_speed")

    def encode(self):
        """Pack failure flags, speed, validity, counter and XOR checksum into _msg_data."""
        # Byte 0 mixes the speed's high bits with the ABS/EBD failure flags.
        self._msg_data[0] = hex((self.__vehicle_speed >> 8) |
                                (self.__ebd_failure << 5) |
                                (self.__abs_failure << 6))
        # vehicle reference speed (low byte)
        self._msg_data[1] = hex(self.__vehicle_speed % 256)
        # vehicle reference speed valid
        self._msg_data[2] = hex(self.__vehicle_speed_valid << (16 % 8))
        # message counter
        self._msg_data[6] = hex(self.__message_counter << (52 % 8))
        # checksum: XOR over bytes 0-6
        checksum = 0
        for idx in range(0, 7):
            checksum ^= int(self._msg_data[idx], 16)
        self._msg_data[7] = hex(checksum)
        return self._msg_data

    def dump(self):
        super(Abs330, self).dump()
class Peps341(CanMsgBasic):
    """PEPS_341 (CAN ID 0x341): keyless entry / start system frame, 100 ms cycle, 8 bytes."""

    def __init__(self):
        super(Peps341, self).__init__('PEPS_341',
                                      EnumMsgType.Normal,
                                      0x341,
                                      EnumMsgTransmitType.Cycle,
                                      EnumMsgSignalType.Cycle,
                                      100,
                                      8,
                                      ['0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00'])
        # Power-distribution status
        self.__power_mode = 0
        # Smart-key low-battery warning
        self.__fob_low_bat_warning = 0
        # Remote mode
        self.__remote_mode = 0
        # ESCL ECU fault-type indication
        self.__escl_ecu_fail_warning = 0
        # ECU fault warning
        self.__ecu_fail_warning = 0
        # Engine start request
        self.__engine_start_request = 0
        # Immobilizer authentication result
        self.__release_sig = 0

    @property
    def power_mode(self):
        """ PEPS power-distribution status """
        return self.__power_mode

    @power_mode.setter
    def power_mode(self, status):
        """ PEPS power-distribution status """
        try:
            if status not in PepsStatus.CanStatus:
                raise AttributeError
            self.__power_mode = status.value
        except AttributeError:
            print("AttributeError on power_mode")

    def encode(self):
        """Pack the PEPS signals into _msg_data (as hex strings) and return it."""
        # power-distribution status + key low-battery warning + remote mode
        self._msg_data[0] = hex((self.__power_mode << 0) |
                                (self.__fob_low_bat_warning << 5) |
                                (self.__remote_mode << 6))
        # ESCL ECU fault-type indication
        self._msg_data[1] = hex(self.__escl_ecu_fail_warning << (8 % 8))
        # ECU fault warning
        self._msg_data[2] = hex(self.__ecu_fail_warning << (22 % 8))
        # Engine start request
        self._msg_data[3] = hex(self.__engine_start_request << (24 % 8))
        # Immobilizer authentication result
        self._msg_data[4] = hex(self.__release_sig << (35 % 8))
        return self._msg_data

    def dump(self):
        super(Peps341, self).dump()
class Bcm350(CanMsgBasic):
    """Body control module status message (CAN id 0x350, 100 ms cycle)."""

    @unique
    class LampStatus(Enum):
        """Lamp working states."""
        Off = 0
        On = 1
        NotUsed = 2
        Error = 3

    @unique
    class FindVehicleStatus(Enum):
        """'Find my vehicle' request execution states."""
        Invalid = 0
        NotAllowed = 1
        Executing = 2
        Finished = 3

    def __init__(self):
        super(Bcm350, self).__init__('BCM_350',
                                     EnumMsgType.Normal,
                                     0x350,
                                     EnumMsgTransmitType.Cycle,
                                     EnumMsgSignalType.Cycle,
                                     100,
                                     8,
                                     ['0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00'])
        # Low-beam working status
        self.__low_beam_status = 0
        # High-beam working status
        self.__high_beam_status = 0
        # Front fog lamp working status
        self.__front_fog_lamp_status = 0
        # Rear fog lamp working status
        self.__rear_fog_lamp_status = 0
        # Left turn indicator signal
        self.__turn_indicator_left = 0
        # Right turn indicator signal
        self.__turn_indicator_right = 0
        # Driver (front-left) door status
        self.__driver_door_status = 0
        # Passenger (front-right) door status
        self.__passenger_door_status = 0
        # Rear-left door status
        self.__left_rear_door_status = 0
        # Rear-right door status
        self.__right_rear_door_status = 0
        # Tailgate status
        self.__tailgate_status = 0
        # Driver door lock status (7 presumably means unknown/initial -- TODO confirm against LockStatus)
        self.__driver_door_lock_status = 7
        # Handbrake signal
        self.__handbrake_signal = 0
        # 'Find my vehicle' request execution status
        self.__find_car_valid = 0

    @property
    def driver_door_status(self):
        """Driver (front-left) door status."""
        return self.__driver_door_status

    @driver_door_status.setter
    def driver_door_status(self, status):
        """Driver (front-left) door status; expects a DoorStatus.CanStatus member."""
        try:
            if status not in DoorStatus.CanStatus:
                raise AttributeError
            self.__driver_door_status = status.value
        except AttributeError:
            print("AttributeError on driver_door_status")

    @property
    def passenger_door_status(self):
        """Passenger (front-right) door status."""
        return self.__passenger_door_status

    @passenger_door_status.setter
    def passenger_door_status(self, status):
        """Passenger (front-right) door status; expects a DoorStatus.CanStatus member."""
        try:
            if status not in DoorStatus.CanStatus:
                raise AttributeError
            self.__passenger_door_status = status.value
        except AttributeError:
            print("AttributeError on passenger_door_status")

    @property
    def left_rear_door_status(self):
        """Rear-left door status."""
        return self.__left_rear_door_status

    @left_rear_door_status.setter
    def left_rear_door_status(self, status):
        """Rear-left door status; expects a DoorStatus.CanStatus member."""
        try:
            if status not in DoorStatus.CanStatus:
                raise AttributeError
            self.__left_rear_door_status = status.value
        except AttributeError:
            print("AttributeError on left_rear_door_status")

    @property
    def right_rear_door_status(self):
        """Rear-right door status."""
        return self.__right_rear_door_status

    @right_rear_door_status.setter
    def right_rear_door_status(self, status):
        """Rear-right door status; expects a DoorStatus.CanStatus member."""
        try:
            if status not in DoorStatus.CanStatus:
                raise AttributeError
            self.__right_rear_door_status = status.value
        except AttributeError:
            print("AttributeError on right_rear_door_status")

    @property
    def tailgate_status(self):
        """Tailgate status."""
        return self.__tailgate_status

    @tailgate_status.setter
    def tailgate_status(self, status):
        """Tailgate status; expects a DoorStatus.CanStatus member."""
        try:
            if status not in DoorStatus.CanStatus:
                raise AttributeError
            self.__tailgate_status = status.value
        except AttributeError:
            print("AttributeError on tailgate_status")

    @property
    def driver_door_lock_status(self):
        """Driver door lock status."""
        return self.__driver_door_lock_status

    @driver_door_lock_status.setter
    def driver_door_lock_status(self, status):
        """Driver door lock status; expects a LockStatus.CanStatus member."""
        try:
            if status not in LockStatus.CanStatus:
                raise AttributeError
            self.__driver_door_lock_status = status.value
        except AttributeError:
            print("AttributeError on driver_door_lock_status")

    @property
    def handbrake_signal(self):
        """Handbrake signal."""
        return self.__handbrake_signal

    @handbrake_signal.setter
    def handbrake_signal(self, status):
        """Handbrake signal; expects a HandbrakeStatus.CanStatus member."""
        try:
            if status not in HandbrakeStatus.CanStatus:
                raise AttributeError
            self.__handbrake_signal = status.value
        except AttributeError:
            print("AttributeError on handbrake_signal")

    def encode(self):
        """Pack the BCM signals into the 8-byte payload and return it."""
        # Low beam + high beam + front fog + rear fog working status
        self._msg_data[0] = hex((self.__low_beam_status << 0) |
                                (self.__high_beam_status << 2) |
                                (self.__front_fog_lamp_status << 4) |
                                (self.__rear_fog_lamp_status << 6))
        # Left/right turn indicators + four door statuses
        self._msg_data[1] = hex((self.__turn_indicator_left << (8 % 8)) |
                                (self.__turn_indicator_right << (10 % 8)) |
                                (self.__driver_door_status << (12 % 8)) |
                                (self.__passenger_door_status << (13 % 8)) |
                                (self.__left_rear_door_status << (14 % 8)) |
                                (self.__right_rear_door_status << (15 % 8)))
        # Tailgate + driver door lock + handbrake + find-car execution status
        self._msg_data[2] = hex((self.__tailgate_status << (16 % 8)) |
                                (self.__driver_door_lock_status << (17 % 8)) |
                                (self.__handbrake_signal << (20 % 8)) |
                                (self.__find_car_valid << (22 % 8)))
        return self._msg_data

    def dump(self):
        """Print this message's generic info via the base class."""
        super(Bcm350, self).dump()
        # print("-> BCM_LowBeamStatus:\t\t" + EnumLampStatus(self.low_beam_status).name)
        # print("-> BCM_HighBeamStatus:\t\t" + EnumLampStatus(self.high_beam_status).name)
        # print("-> BCM_FrontFogLampStatus:\t" + EnumLampStatus(self.front_fog_lamp_status).name)
        # print("-> BCM_RearFogLampStatus:\t" + EnumLampStatus(self.rear_fog_lamp_status).name)
        # print("-> BCM_TurnIndicatorLeft:\t" + EnumLampStatus(self.turn_indicator_left).name)
        # print("-> BCM_TurnIndicatorRight:\t" + EnumLampStatus(self.turn_indicator_right).name)
        # print("-> BCM_DriverDoorStatus:\t" + EnumDoorStatus(self.driver_door_status).name)
        # print("-> BCM_PassengerDoorStatus:\t" + EnumDoorStatus(self.passenger_door_status).name)
        # print("-> BCM_LeftRearDoorStatus:\t" + EnumDoorStatus(self.left_rear_door_status).name)
        # print("-> BCM_RightRearDoorStatus:\t" + EnumDoorStatus(self.right_rear_door_status).name)
        # print("-> BCM_TailgateStatus:\t\t" + EnumDoorStatus(self.tailgate_status).name)
        # print("-> BCM_DriverDoorLockStatus:" + EnumLockStatus(self.driver_door_lock_status).name)
        # print("-> BCM_HandbrakeSignal:\t\t" + EnumHandbrakeStatus(self.handbrake_signal).name)
        # print("-> BCM_FindCarValid:\t\t" + EnumFindCarStatus(self.find_car_valid).name)
class Ems360(CanMsgBasic):
    """Engine management system message (CAN id 0x360, 100 ms cycle)."""

    def __init__(self):
        super(Ems360, self).__init__('EMS_360',
                                     EnumMsgType.Normal,
                                     0x360,
                                     EnumMsgTransmitType.Cycle,
                                     EnumMsgSignalType.Cycle,
                                     100,
                                     8,
                                     ['0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00'])
        # MIL (malfunction indicator lamp) state
        self.__mil = 0
        # Immobilizer authentication result
        self.__release_sig = 0
        # Instantaneous fuel consumption, raw counts (0.217 units per bit)
        self.__fuel_consumption = 0

    @property
    def fuel_consumption(self):
        """Instantaneous fuel consumption in physical units (raw * 0.217)."""
        return float(self.__fuel_consumption * 0.217)

    @fuel_consumption.setter
    def fuel_consumption(self, value):
        """Instantaneous fuel consumption.

        Requires a float; out-of-range values map to the 0xFFFF 'invalid'
        code, in-range values are quantized at 0.217 per bit.
        """
        try:
            if not isinstance(value, float):
                raise AttributeError
            self.__fuel_consumption = 0xFFFF if value < 0.0 or value > 14220.878 else int(value / 0.217)
        except AttributeError:
            # BUGFIX: this message previously said "acc_pedal" — a copy-paste
            # error from another setter; it now names the attribute it guards.
            print("AttributeError on fuel_consumption")

    def encode(self):
        """Pack the EMS signals into the 8-byte payload and return it."""
        # MIL indicator
        self._msg_data[3] = hex(self.__mil << (24 % 8))
        # Immobilizer authentication result
        self._msg_data[5] = hex(self.__release_sig << (43 % 8))
        # Instantaneous fuel consumption, high byte then low byte
        self._msg_data[6] = hex(self.__fuel_consumption >> 8)
        self._msg_data[7] = hex(self.__fuel_consumption % 256)
        return self._msg_data

    def dump(self):
        """Print this message's generic info via the base class."""
        super(Ems360, self).dump()
class Bcm365(CanMsgBasic):
    """Body control module auxiliary status message (CAN id 0x365, 100 ms cycle)."""

    @unique
    class ValidInvalidStatus(Enum):
        """Generic valid/invalid flag values."""
        Invalid = 0
        Valid = 1

    def __init__(self):
        super(Bcm365, self).__init__('BCM_365',
                                     EnumMsgType.Normal,
                                     0x365,
                                     EnumMsgTransmitType.Cycle,
                                     EnumMsgSignalType.Cycle,
                                     100,
                                     8,
                                     ['0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00'])
        # Response to the ESCL power request
        self.__escl_power_resp = 0
        # ESCL unlock signal feedback
        self.__escl_unlock_feedback = 0
        # Power relay output status
        self.__power_relay_output_status = 0
        # Ignition signal status
        self.__ignition_status = 0
        # Wiper status
        self.__wiper_status = 0
        # Windshield washer spray signal
        self.__sprinkler_status = 0
        # Valid flag for the washer spray signal
        self.__sprinkler_status_valid = 0
        # Rear defrost status
        self.__rear_defrost_status = 0
        # Valid flag for the rear defrost status
        self.__rear_defrost_status_valid = 0
        # Exterior mirror electric fold status
        self.__exterior_mirror_elec_flod_status = 0
        # Vehicle anti-theft status
        self.__vehicle_antt_status = 0

    @property
    def rear_defrost_status(self):
        """Rear defrost status."""
        return self.__rear_defrost_status

    @rear_defrost_status.setter
    def rear_defrost_status(self, status):
        """Rear defrost status; expects a DefrostStatus.CanStatus member.

        Also maintains the paired valid flag: Invalid on rejection, Valid on
        successful assignment.
        """
        try:
            if status not in DefrostStatus.CanStatus:
                self.__rear_defrost_status_valid = Bcm365.ValidInvalidStatus.Invalid.value
                raise AttributeError
            self.__rear_defrost_status = status.value
            self.__rear_defrost_status_valid = Bcm365.ValidInvalidStatus.Valid.value
        except AttributeError:
            print("AttributeError on rear_defrost_status")

    @property
    def wiper_status(self):
        """Wiper status."""
        return self.__wiper_status

    @wiper_status.setter
    def wiper_status(self, status):
        """Wiper status; expects a WiperStatus.CanStatus member."""
        try:
            if status not in WiperStatus.CanStatus:
                raise AttributeError
            self.__wiper_status = status.value
        except AttributeError:
            print("AttributeError on wiper_status")

    def encode(self):
        """Pack the BCM auxiliary signals into the 8-byte payload and return it."""
        # ESCL power-request response + ESCL unlock feedback + power relay output
        self._msg_data[0] = hex((self.__escl_power_resp << 0) |
                                (self.__escl_unlock_feedback << 2) |
                                (self.__power_relay_output_status << 4))
        # Ignition status + wiper status + washer spray signal + its valid flag
        self._msg_data[1] = hex((self.__ignition_status << (8 % 8)) |
                                (self.__wiper_status << (11 % 8)) |
                                (self.__sprinkler_status << (14 % 8)) |
                                (self.__sprinkler_status_valid << (15 % 8)))
        # Rear defrost status + valid flag + mirror fold + anti-theft status
        self._msg_data[2] = hex((self.__rear_defrost_status << (16 % 8)) |
                                (self.__rear_defrost_status_valid << (17 % 8)) |
                                (self.__exterior_mirror_elec_flod_status << (18 % 8)) |
                                (self.__vehicle_antt_status << (20 % 8)))
        return self._msg_data

    def dump(self):
        """Print this message's generic info via the base class."""
        super(Bcm365, self).dump()
class Ac378(CanMsgBasic):
    """Air-conditioning controller status message (CAN id 0x378, 100 ms cycle)."""

    def __init__(self):
        super(Ac378, self).__init__('AC_378',
                                    EnumMsgType.Normal,
                                    0x378,
                                    EnumMsgTransmitType.Cycle,
                                    EnumMsgSignalType.Cycle,
                                    100,
                                    8,
                                    ['0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00'])
        # Current ambient temperature (degrees Celsius)
        self.__outside_ambient_temperature = 0
        # Valid flag for the ambient temperature
        self.__outside_ambient_temperature_valid = 0
        # A/C system medium-pressure signal
        self.__pressure_status = 0
        # Valid flag for the medium-pressure signal
        self.__pressure_status_valid = 0
        # Compressor on/off request
        self.__ac_request = 0
        # Valid flag for the compressor request
        self.__ac_request_valid = 0
        # Blower on/off status
        self.__blower_on_off_status = 0
        # Valid flag for the blower status
        self.__blower_on_off_status_valid = 0
        # Rear-defrost switch request
        self.__rear_defrost_request = 0
        # Valid flag for the rear-defrost request
        self.__rear_defrost_request_valid = 0
        # Set when a key/knob operation changes the A/C controller state and the
        # head unit (DVD) must refresh its display; held for ~100 ms, sent once.
        self.__display_active = 0
        # AC Max state
        self.__ac_max_mode = 0
        # Set (target) temperature, raw counts of 0.5 degC per bit
        self.__set_temperature = 0
        # Current blower speed level
        self.__blower_speed_level = 0
        # Air distribution (vent) mode
        self.__air_distribute_mode = 0
        # Front defrost state
        self.__defrost_mode = 0
        # Air inlet mode (recirculation / fresh air)
        self.__air_let_mode = 0
        # Auto mode state
        self.__auto_mode = 0
        # On/Off state
        self.__on_off_state = 0
        # Rear mode state
        self.__rear_mode = 0
        # A/C working indicator lamp
        self.__ac_indicator = 0

    @property
    def set_temperature(self):
        """Set temperature in degrees Celsius (raw * 0.5)."""
        return float(self.__set_temperature * 0.5)

    @set_temperature.setter
    def set_temperature(self, value):
        """Set temperature; requires a float, stored in 0.5 degC steps."""
        try:
            if not isinstance(value, float):
                raise AttributeError
            # self.__set_temperature = int('7F', 16) if value < 17.0 or value > 32.0 else int(value / 0.5)
            self.__set_temperature = int(value / 0.5)
        except AttributeError:
            print("AttributeError on set_temperature")

    @property
    def defrost_mode(self):
        """Front defrost state."""
        return self.__defrost_mode

    @defrost_mode.setter
    def defrost_mode(self, status):
        """Front defrost state; expects a DefrostStatus.CanStatus member."""
        try:
            if status not in DefrostStatus.CanStatus:
                raise AttributeError
            self.__defrost_mode = status.value
        except AttributeError:
            print("AttributeError on defrost_mode")

    @property
    def on_off_state(self):
        """A/C On/Off state."""
        return self.__on_off_state

    @on_off_state.setter
    def on_off_state(self, status):
        """A/C On/Off state; expects an AcStatus.CanStatus member."""
        try:
            if status not in AcStatus.CanStatus:
                raise AttributeError
            self.__on_off_state = status.value
        except AttributeError:
            print("AttributeError on on_off_state")

    def encode(self):
        """Pack the A/C signals into the 8-byte payload and return it."""
        # Current ambient temperature (degrees Celsius)
        self._msg_data[0] = hex(self.__outside_ambient_temperature)
        # Ambient-temp valid + pressure signal + its valid flag + compressor
        # request + its valid flag + blower status + its valid flag
        self._msg_data[1] = hex((self.__outside_ambient_temperature_valid << (8 % 8)) |
                                (self.__pressure_status << (9 % 8)) |
                                (self.__pressure_status_valid << (11 % 8)) |
                                (self.__ac_request << (12 % 8)) |
                                (self.__ac_request_valid << (13 % 8)) |
                                (self.__blower_on_off_status << (14 % 8)) |
                                (self.__blower_on_off_status_valid << (15 % 8)))
        # Rear-defrost request + its valid flag + display-refresh flag + AC Max
        self._msg_data[2] = hex((self.__rear_defrost_request << (20 % 8)) |
                                (self.__rear_defrost_request_valid << (21 % 8)) |
                                (self.__display_active << (22 % 8)) |
                                (self.__ac_max_mode << (23 % 8)))
        # Set temperature
        self._msg_data[3] = hex(self.__set_temperature << (24 % 8))
        # Blower speed level + air distribution mode + front defrost state
        self._msg_data[4] = hex((self.__blower_speed_level << (32 % 8)) |
                                (self.__air_distribute_mode << (36 % 8)) |
                                (self.__defrost_mode << (39 % 8)))
        # Air inlet mode + Auto + On/Off + Rear + A/C indicator lamp
        self._msg_data[5] = hex((self.__air_let_mode << (40 % 8)) |
                                (self.__auto_mode << (41 % 8)) |
                                (self.__on_off_state << (42 % 8)) |
                                (self.__rear_mode << (43 % 8)) |
                                (self.__ac_indicator << (44 % 8)))
        return self._msg_data

    def dump(self):
        """Print this message's generic info via the base class."""
        super(Ac378, self).dump()
class Ic380(CanMsgBasic):
    """Instrument cluster message (CAN id 0x380, 100 ms cycle)."""

    def __init__(self):
        super(Ic380, self).__init__('IC_380',
                                    EnumMsgType.Normal,
                                    0x380,
                                    EnumMsgTransmitType.Cycle,
                                    EnumMsgSignalType.Cycle,
                                    100,
                                    8,
                                    ['0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00'])
        # Driver seat-belt status
        self.__driver_seat_belt_status = 0
        # Valid flag for the seat-belt status
        self.__driver_seat_belt_status_valid = 0
        # Total mileage (odometer), raw counts of 10 distance units per bit
        self.__total_mileage = 0

    @property
    def total_mileage(self):
        """Total mileage reconstructed from raw counts (raw * 10; 10-unit resolution)."""
        return int(self.__total_mileage * 10)

    @total_mileage.setter
    def total_mileage(self, value):
        """Total mileage.

        Requires an int; out-of-range values map to the 0xFFFF 'invalid'
        code, otherwise the value is stored divided by 10.
        """
        try:
            if not isinstance(value, int):
                raise AttributeError
            self.__total_mileage = 0xFFFF if value < 0.0 or value > 655350 else int(value / 10)
        except AttributeError:
            print("AttributeError on total_mileage")

    def encode(self):
        """Pack the cluster signals into the 8-byte payload and return it."""
        # Driver seat-belt status + its valid flag
        self._msg_data[0] = hex((self.__driver_seat_belt_status << 0) |
                                (self.__driver_seat_belt_status_valid << 1))
        # Total mileage, high byte then low byte
        self._msg_data[1] = hex(self.__total_mileage >> 8)
        self._msg_data[2] = hex(self.__total_mileage % 256)
        return self._msg_data

    def dump(self):
        """Print this message's generic info via the base class."""
        super(Ic380, self).dump()
class Bcm401(CanMsgBasic):
    """BCM network-management (NM) message."""

    @unique
    class NmStatus(Enum):
        """Generic NM bit states."""
        Inactive = 0
        Active = 1

    def __init__(self):
        super(Bcm401, self).__init__('BCM_401',
                                     EnumMsgType.NM,
                                     0x401 - 0x400,  # NM id expressed as offset from the 0x400 base
                                     EnumMsgTransmitType.Event,
                                     EnumMsgSignalType.Cycle,
                                     200,
                                     8,
                                     ['0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00'])
        # NM node fields. (The original comment here said "low beam status" --
        # a stale copy-paste from Bcm350; these are network-management bits.)
        self.__destination_address = 0x1
        self.__alive = 0x1
        self.__ring = 0
        self.__limp_home = 0
        self.__sleep_indication = 0
        self.__sleep_acknowledge = 0

    def encode(self):
        """Pack the NM fields into the payload and return it."""
        # Destination address
        self._msg_data[0] = hex(self.__destination_address << 0)
        # Alive + Ring + LimpHome + SleepIndication + SleepAcknowledge
        self._msg_data[1] = hex((self.__alive << (8 % 8)) |
                                (self.__ring << (9 % 8)) |
                                (self.__limp_home << (10 % 8)) |
                                (self.__sleep_indication << (12 % 8)) |
                                (self.__sleep_acknowledge << (13 % 8)))
        return self._msg_data

    def dump(self):
        """Print this message's generic info via the base class."""
        super(Bcm401, self).dump()
        # print("-> BCM_NM_DestinationAddress:" + hex(self.destination_address))
        # print("-> BCM_NM_Alive:\t\t\t " + EnumNmStatus(self.alive).name)
        # print("-> BCM_NM_Ring:\t\t\t\t " + EnumNmStatus(self.ring).name)
        # print("-> BCM_NM_LimpHome:\t\t\t " + EnumNmStatus(self.limp_home).name)
        # print("-> BCM_NM_SleepIndication:\t " + EnumNmStatus(self.sleep_indication).name)
        # print("-> BCM_NM_SleepAcknowledge:\t " + EnumNmStatus(self.sleep_acknowledge).name)
if __name__ == '__main__':
    # This module is a message-definition library; nothing runs standalone.
    pass
|
import csv
import glob
from bs4 import BeautifulSoup
'''
Create CSV of crimes reported on each daily activity log page downloaded with sacpd-scraper.py
'''
# Open the output CSV ('wb' mode: this script targets Python 2) and write the
# header row once at import time; the handle stays open for the whole run.
my_writer = csv.writer(open('crimes.csv', 'wb'), quoting=csv.QUOTE_MINIMAL)
my_writer.writerow(['Number', 'Crime', 'Address', 'Time', 'url'])
def readFile(nameOfFile):
    """Return the full text of the named file.

    Uses a context manager so the handle is closed promptly; the original
    opened the file and never closed it (leak until garbage collection).
    """
    with open(nameOfFile, 'r') as text_file:
        return text_file.read()
def scrapeSoup(soup, crime_count, url):
    """Extract crime rows from one daily-activity-log page and CSV them.

    Returns the number of crime lines found on this page.
    NOTE(review): the crime_count parameter is immediately overwritten with 0
    below, so the passed-in value has no effect -- confirm before relying on it.
    """
    ids = ['#northContent', '#centralContent', '#southContent']
    crime_count=0
    for div_id in ids:
        crime_grafs = soup.select(div_id)
        for graf in crime_grafs:
            crime_line = graf.select('p')
            for line in crime_line:
                crime_text = line.get_text()
                # Case numbers look like "12-NNNNN"; use that to spot crime rows.
                if "12-" in crime_text:
                    print crime_text
                    crime_list = crime_text.split(',')
                    if len(crime_list) == 5:
                        number = crime_list[0]
                        crime = crime_list[1]
                        address = crime_list[2]
                        time = crime_list[3]
                        try:
                            my_writer.writerow([number, crime, address, time, url])
                        except:
                            # NOTE(review): bare except, and the fallback row has
                            # only 4 columns vs the 5-column header -- confirm
                            # this degradation is intended.
                            my_writer.writerow(['', '', '', ''])
                    crime_count += 1
    return crime_count
''' crime_text = crime_line.get_text()
crime_list = crime_text.split(',')
number = crime_list[0]
crime = crime_list[1]
address = crime_list[2]
time = crime_list[3]
try:
my_writer.writerow([number, crime, address, time, url])
except:
my_writer.writerow(['', '', '', ''])
return crime_count'''
count=0
# Parse every downloaded page; scrapeSoup returns the per-page crime count.
for url in glob.glob("pages/view.*"):
    soup = BeautifulSoup(readFile(url))
    count += scrapeSoup(soup, count, url)
    #scrapeSoup(soup, count, url)
print u'CRIME COUNT: %s' % (count)
|
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
import numpy as np
import sys
n = 2  # polytropic index; rebound by the parameter sweep further below

def lane_emden(x, Y):
    """Right-hand side of the Lane-Emden equation as a first-order system.

    Y = (y, z) with y = theta and z = dtheta/dxi; returns (dy/dx, dz/dx).
    At the coordinate singularity x == 0 the (2/x)*z term is replaced by its
    series limit, giving z'(0) = -y**n / 3.  (The original hard-coded
    -y**n + 2/3, which equals -1/3 only for the standard initial value y=1;
    the general form is correct for any starting value.)
    """
    global n
    y, z = Y
    dydx = z
    if x == 0:
        dzdx = -y**n / 3
    else:
        dzdx = -y**n - (2/x)*z
    return dydx, dzdx
# Integration domain for xi and the evaluation grid.
end_xi = 15
points = np.linspace(0, end_xi, 1000)
plt.style.use('seaborn')
linestyles = ['-', '--']  # NOTE(review): defined but never used below
plt.figure(figsize=(10,16))
# Sweep the polytropic index over [2, 4]; lane_emden reads the global n.
for n in np.linspace(2, 4, 7):
    r = solve_ivp(lane_emden, (0, end_xi), (1, 0), t_eval = points)
    # Keep only the plotted branch: theta >= 0 and dtheta/dxi <= 1.
    mask = (r.y[0] >= 0) & (r.y[1] <= 1)
    x = r.t[mask]
    y = r.y[0][mask]
    z = r.y[1][mask]
    plt.subplot(211)
    plt.plot(x, y, label=f'n={n:.1f}')
    plt.subplot(212)
    plt.plot(x, z, label=f'n={n:.1f}')
# Axis labels and output file.
plt.subplot(211)
plt.xlabel(r'$x=\xi$')
plt.ylabel(r'$y=\theta_n$')
plt.subplot(212)
plt.xlabel(r'$x=\xi$')
plt.ylabel(r'$z=\frac{d\theta_n}{d\xi}$')
plt.legend(loc = 4)
plt.suptitle(r'y and z vs x for a range of n $\in$ [2,4]')
plt.savefig('prob7.4.pdf', format='pdf')
import requests
import bs4
from collections import namedtuple
# Sortable (name, count) record used for the tag and word frequency reports.
obj_to_sort= namedtuple('objj' , ['name', 'num'])
def return_num(objj):
    """Sort key: the occurrence count stored on a (name, num) record."""
    count = objj.num
    return count
# Read the target URL from url.txt and open the four report files.
f_url=open("url.txt","r")
f=open("site_info.txt","w", encoding='utf-8')
f_urls=open("site_urls.txt","w",encoding='utf-8')
f_tegs=open("site_tags_info.txt","w",encoding='utf-8')
f_words=open("site_words_info.txt","w",encoding='utf-8')
r=requests.get(f_url.read().strip())
f_url.close()
# Whitespace-separated list of HTML tag names to count on the page.
tags_list='''
!--...-- !DOCTYPE a abbr acronym address applet area article aside audio b base basefont bdi bdo big
blockquote body br button canvas caption center Defines cite code col colgroup data datalist dddetails
dfn dialog dir div dl dt em embed fieldset figcaption figure font footer form frame frameset
h1 h2 h3 h4 h5 h6 head header hr html i iframe img input ins kbd label legend li link main map
mark meta meter nav noframes noscript object ol Defines an ordered list optgroup option output p param
picture pre progress q rp rt ruby s samp script section select small source span strike Defines
strong style subsummary sup svg table tbody tdtemplate textarea tfoot th thead time title tr track
tt u ul var video wbr'''
print(r.ok," ",r.status_code)
soup=bs4.BeautifulSoup(r.text,"html.parser")
# Summary file: link count, image count, and the page's visible text.
f.write("total url number :"+str(len(soup.find_all('a',href=True)))+"\n")
f.write("total img number :"+str(len(soup.find_all('img')))+"\ntext : \n")
f.write(soup.get_text())
# One href per line.
for i in soup.find_all('a',href=True):
    f_urls.write(i["href"]+"\n")
# Tag frequency, most common first.
tags__=[]
for tag_ in tags_list.split():
    if(len(soup.find_all(tag_))!=0):
        tags__.append(obj_to_sort(tag_,len(soup.find_all(tag_))))
tags__.sort(key=return_num)
tags__.reverse()
for tag_ in tags__ :
    f_tegs.write(tag_.name+": "+str(tag_.num)+"\n")
# Word frequency over the page text, most common first.
words_=set(soup.get_text().split())
words=soup.get_text().split()
words__=[]
for i in words_:
    words__.append(obj_to_sort(i,words.count(i)))
words__.sort(key=return_num)
words__.reverse()
for word_ in words__ :
    f_words.write(word_.name+": "+str(word_.num)+"\n")
f.close()
f_urls.close()
f_tegs.close()
f_words.close()
from zitarice.solids.hello_cereals import hello_cereal
from dagster import pipeline
from dagster import execute_pipeline
@pipeline
def hello_cereal_pipeline():
    """Single-solid Dagster pipeline wrapping hello_cereal."""
    hello_cereal()

if __name__ == "__main__":
    # Execute the pipeline once when run as a script.
    result = execute_pipeline(hello_cereal_pipeline)
|
from rest_framework import generics
import core.models
from .serializers import CountrySerializer
class CountriesAPIView(generics.ListAPIView):
    """Read-only list endpoint returning every Country record."""
    queryset = core.models.Country.objects.all()
    serializer_class = CountrySerializer
|
# Definition for an interval.
# class Interval:
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
# def __repr__(self):
# #return "({}, {})".format(self.start, self.end)
# return "(%d, %d) " %(self.start, self.end)
class Solution:
    # @param intervals, a list of Intervals
    # @param new_interval, a Interval
    # @return a list of Interval
    def insert(self, intervals, new_interval):
        """Insert new_interval into intervals, merging any overlaps.

        Mutates overlapping Interval objects in place (as callers of the
        original already observe) and returns the merged list sorted by start.
        A reversed new_interval (start > end) is normalized first.
        """
        if new_interval.start > new_interval.end:
            new_interval.start, new_interval.end = new_interval.end, new_interval.start
        interval_inserted = False
        merged = []
        for interval in intervals:
            if not interval_inserted:
                # Overlap test in both directions: an endpoint of one interval
                # falls inside the other.
                for i1, i2 in [[interval, new_interval], [new_interval, interval]]:
                    if (i1.start >= i2.start and i1.start <= i2.end) or (i1.end >= i2.start and i1.end <= i2.end):
                        interval.start, interval.end = min(interval.start, new_interval.start), max(interval.end, new_interval.end)
                        interval_inserted = True
                        break
                merged.append(interval)
            else:
                # BUGFIX: the original wrote `merged and (A) or (B)`; because
                # `and` binds tighter than `or`, (B) -- which indexes
                # merged[-1] -- was evaluated even when merged was empty.
                # Parenthesized so `merged` guards both overlap tests.
                if merged and ((interval.start >= merged[-1].start and interval.start <= merged[-1].end) or (merged[-1].end >= interval.start and merged[-1].end <= interval.end)):
                    merged[-1].start, merged[-1].end = min(interval.start, merged[-1].start), max(interval.end, merged[-1].end)
                else:
                    merged.append(interval)
        if not interval_inserted:
            merged.append(new_interval)
        return sorted(merged, key=lambda x: x.start)
|
#!/usr/bin/python3
from .base_model import BaseModel
"""
Here we will be creating a class User that inherits from BaseModel
"""
class User(BaseModel):
    """User model: extends BaseModel with identity/contact attributes."""

    # Public class-level defaults; instances may override them via kwargs.
    email = ""
    password = ""
    first_name = ""
    last_name = ""

    def __init__(self, *args, **kwargs):
        """Delegate initialization to BaseModel using keyword arguments only.

        NOTE(review): *args is accepted but silently discarded here; confirm
        BaseModel is meant to receive only **kwargs before changing this.
        """
        super().__init__(**kwargs)
|
def func(**data):
    """Print the keyword arguments, gathered into a single dict."""
    collected = data
    print(collected)
# Demonstrate dict unpacking into keyword arguments.
data = {"a":1, "b":2}
func(**data)
# Prompt for a name and report its length in characters.
print("Your Name has" , len(input("Enter your name\n")) , "Characters")
from django.apps import AppConfig
class EncuestasConfig(AppConfig):
    """Django application configuration for the 'encuestas' app."""
    name = 'encuestas'

    def ready(self):
        # Import the signal handlers so they register at Django startup.
        import encuestas.signals
# -*- coding: utf8 -*-
import os.path
# Path to the bundled data directory next to this module.  os.path.join
# replaces the hard-coded '/' concatenation so the path is portable.
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
from django.conf.urls import patterns, include, url
from django.contrib import admin
from qa.views import test, new_questions, popular_questions, one_question, ask, answer, signup, login
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'ask.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    # Landing page shows the newest questions.
    url(r'^$', new_questions, name='home'),
    url(r'login/$', login, name='login'),
    url(r'signup/$', signup, name='signup'),
    url(r'ask/$', ask, name='ask'),
    # NOTE(review): route name 'anwer' looks like a typo for 'answer'; left
    # unchanged because templates/reverse() may already rely on it -- confirm
    # before renaming.
    url(r'answer/$', answer, name='anwer'),
    url(r'popular/$', popular_questions, name='popular'),
    url(r'new/$', new_questions, name='new'),
    url(r'question/(?P<id>\d+)/$', one_question, name='question'),
    url(r'^admin/', include(admin.site.urls)),
)
|
import cv2
import numpy as np
import types
from google.colab.patches import cv2_imshow
# converting types to binary
def msg_to_bin(msg):
    """Convert a message to its binary (bit-string) representation.

    - str              -> one concatenated string of 8-bit character codes
    - bytes / ndarray  -> list of 8-bit strings, one per element
    - int / np.integer -> single 8-bit string
    Raises TypeError for any other input type.
    """
    # isinstance() replaces the original exact type() comparisons, so str
    # subclasses and numpy integer scalars of any width (not just uint8)
    # are also accepted -- a backward-compatible generalization.
    if isinstance(msg, str):
        return ''.join([format(ord(i), "08b") for i in msg])
    elif isinstance(msg, (bytes, np.ndarray)):
        return [format(i, "08b") for i in msg]
    elif isinstance(msg, (int, np.integer)):
        return format(msg, "08b")
    else:
        raise TypeError("Input type not supported")
# defining function to hide the secret message into the image
def hide_data(img, secret_msg):
    """Embed secret_msg in the least-significant bits of img's channel values.

    img is modified in place and also returned.  Capacity is one bit per
    color channel, i.e. height * width * 3 / 8 bytes.
    """
    # calculating the maximum bytes for encoding
    nBytes = img.shape[0] * img.shape[1] * 3 // 8
    print("Maximum Bytes for encoding:", nBytes)
    # checking whether the number of bytes for encoding is less
    # than the maximum bytes in the image
    # NOTE(review): this check runs before the 5-byte '#####' delimiter is
    # appended, so messages within 5 bytes of capacity are silently truncated.
    if len(secret_msg) > nBytes:
        raise ValueError("Error encountered insufficient bytes, need bigger image or less data!!")
    secret_msg += '#####'  # we can utilize any string as the delimiter
    dataIndex = 0
    # converting the input data to binary format using the msg_to_bin() function
    bin_secret_msg = msg_to_bin(secret_msg)
    # finding the length of data that requires to be hidden
    dataLen = len(bin_secret_msg)
    for values in img:
        for pixels in values:
            # Channel values as 8-bit strings.  Named r, g, b, though cv2
            # loads BGR; the naming does not affect correctness.
            r, g, b = msg_to_bin(pixels)
            # modifying the LSB only if there is data remaining to store
            if dataIndex < dataLen:
                # hiding the data into LSB of the first channel
                pixels[0] = int(r[:-1] + bin_secret_msg[dataIndex], 2)
                dataIndex += 1
            if dataIndex < dataLen:
                # hiding the data into LSB of the second channel
                pixels[1] = int(g[:-1] + bin_secret_msg[dataIndex], 2)
                dataIndex += 1
            if dataIndex < dataLen:
                # hiding the data into LSB of the third channel
                pixels[2] = int(b[:-1] + bin_secret_msg[dataIndex], 2)
                dataIndex += 1
            # if data is encoded, break out the loop
            # (breaks only the pixel loop; the row loop keeps iterating)
            if dataIndex >= dataLen:
                break
    return img
def show_data(img):
    """Recover a message hidden in the channel LSBs of img.

    Gathers one bit per color channel across the whole image, rebuilds
    characters 8 bits at a time, and stops at the '#####' delimiter, which
    is stripped from the returned text.
    """
    # First pass: collect the least-significant bit of every channel.
    lsb_bits = []
    for row in img:
        for pixel in row:
            first, second, third = msg_to_bin(pixel)
            lsb_bits.append(first[-1])
            lsb_bits.append(second[-1])
            lsb_bits.append(third[-1])
    bit_stream = "".join(lsb_bits)
    # Second pass: decode characters until the delimiter appears.
    message = ""
    for pos in range(0, len(bit_stream), 8):
        message += chr(int(bit_stream[pos: pos + 8], 2))
        if message[-5:] == "#####":
            break
    # Strip the delimiter before returning the hidden text.
    return message[:-5]
# defining function to encode data into Image
def encodeText():
    """Interactive driver: read an image, hide user-supplied text, save it."""
    img_name = input("Enter image name (with extension): ")
    # reading the input image using OpenCV-Python
    img = cv2.imread(img_name)
    # printing the details of the image
    print("The shape of the image is: ", img.shape)  # checking the image shape to calculate the number of bytes in it
    print("The original image is as shown below: ")
    # resizing the image for display only; encoding uses the original image
    resizedImg = cv2.resize(img, (500, 500))
    # displaying the image
    cv2_imshow(resizedImg)
    data = input("Enter data to be encoded: ")
    if (len(data) == 0):
        raise ValueError('Data is Empty')
    file_name = input("Enter the name of the new encoded image (with extension): ")
    # calling the hide_data() function to hide the secret message into the selected image
    encodedImage = hide_data(img, data)
    cv2.imwrite(file_name, encodedImage)
# defining the function to decode the data in the image
def decodeText():
    """Interactive driver: load a steganographic image and return its hidden text."""
    # reading the image containing the hidden message
    img_name = input("Enter the name of the Steganographic image that has to be decoded (with extension): ")
    img = cv2.imread(img_name)  # reading the image using the imread() function
    print("The Steganographic image is as follow: ")
    resizedImg = cv2.resize(img, (500, 500))  # display-only resize
    cv2_imshow(resizedImg)  # displaying the Steganographic image
    text = show_data(img)
    return text
# image steganography
def steganography():
    """Interactive menu: choose between encoding and decoding a message."""
    n = int(input("Image Steganography \n1. Encode the data \n2. Decode the data \n Select the option: "))
    if (n == 1):
        print("\nEncoding...")
        encodeText()
    elif (n == 2):
        print("\nDecoding...")
        print("Decoded message is " + decodeText())
    else:
        raise Exception("Inserted value is incorrect!")

# Run the interactive tool immediately when this script is executed.
steganography()
|
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from direct.directnotify import DirectNotifyGlobal
class DistributedLockDoorAI(DistributedObjectAI):
    """AI-side distributed object for a lockable door (currently a stub)."""
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedLockDoorAI')

    def __init__(self, air):
        # air: the AI repository this distributed object belongs to.
        DistributedObjectAI.__init__(self, air)
from flask_frozen import Freezer
from flask import Flask, render_template, request
import json
app = Flask(__name__)
freezer = Freezer(app)
app.config["FREEZER_DESTINATION_IGNORE"] = ["CNAME"]
app.config["FREEZER_STATIC_IGNORE"] = ["*.scss"]
app.config["FREEZER_DESTINATION"] = "docs"
app.debug = True
@app.route("/")
def index():
    """Render the home page from the 'index' entry of posts.json."""
    post_properties = {}
    with open("posts.json", "r") as f:
        posts = json.load(f)
    post_properties = posts["index"]
    # Expose the canonical URL to the template.
    post_properties["url"] = request.path
    return render_template("index.html", **post_properties)
@app.route("/summer-of-protocols-resume.html")
def sop_resume():
    """Render the Summer of Protocols resume page.

    BUGFIX: `description` was computed but never handed to the template;
    it is now forwarded so this page gets its meta description like the
    other views (post() even validates that posts carry one).
    """
    description = "The resume of Matt Parrilla for the Summer of Protocols Core Researcher. Matt wants to create a protocol for connecting a wallet to a physical location."
    return render_template("sop_resume.html", url=request.path, description=description)
@app.route("/post/<title>/index.html")
def post(title):
    """Render a single post page from its entry in posts.json.

    Raises ValueError when required metadata is missing so broken pages
    fail at freeze time instead of shipping without title/description.
    """
    post_properties = {}
    with open("posts.json", "r") as f:
        posts = json.load(f)
    post_properties = posts[title]
    post_properties["url"] = request.path
    if not post_properties.get("title", False):
        raise ValueError("Missing title entry for {}".format(title))
    if not post_properties.get("description", False):
        raise ValueError("Missing description entry for {}".format(title))
    return render_template("{}.html".format(title), **post_properties)
@freezer.register_generator
def post():
    """Yield URL parameters for every post page so Frozen-Flask freezes them.

    NOTE(review): reusing the name `post` appears deliberate — Frozen-Flask
    matches generators to endpoints by function name — but it shadows the
    view function at module level; confirm before renaming either one.
    """
    with open("posts.json", "r") as f:
        posts = json.load(f)
    for title in posts.keys():
        if title != "index":
            yield {"title": title}
if __name__ == "__main__":
    # Generate the static site into the configured FREEZER_DESTINATION.
    freezer.freeze()
|
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
def main(request):
    """Render the polls landing page."""
    context = {
        'latest_question_list': "test",
    }
    return render(request, 'polls/main.html', context)
def login(request):
    """Render the login page."""
    context = {
        'latest_question_list': "test",
    }
    return render(request, 'polls/login.html', context)
def qna(request):
    """Render the Q&A page."""
    context = {
        'latest_question_list': "test",
    }
    return render(request, 'polls/qna.html', context)
# Create your views here.
|
import zmq, time
import numpy as np
import copy
import sys, json, pdb, pickle, operator, collections
import inflect
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
import argparse
from random import shuffle
from operator import itemgetter
import matplotlib.pyplot as plt
from nltk.tokenize import word_tokenize
#from classifier import Classifier
import constants
import re
# Debug / analysis switches.
DEBUG = False
ANALYSIS = False
COUNT_ZERO = False

#Global variables
int2tags = constants.int2tags
NUM_RELATIONS = len(int2tags)
# NUM_QUERY_TYPES = NUM_RELATIONS + 1
NUM_QUERY_TYPES = 1
WORD_LIMIT = 1000
CONTEXT_LENGTH = 3
CONTEXT_TYPE = None
# State vector: 4 features per relation plus 2 context windows per relation.
STATE_SIZE = 4 * NUM_RELATIONS + 2 * CONTEXT_LENGTH * NUM_RELATIONS
# Action codes: one per relation, then STOP, then "ignore all".
STOP_ACTION = NUM_RELATIONS
IGNORE_ALL = STOP_ACTION + 1
ACCEPT_ALL = 999 #arbitrary

trained_model = None
tfidf_vectorizer = TfidfVectorizer()
inflect_engine = inflect.engine()
def dd():
    """Picklable defaultdict factory: a fresh empty dict per call."""
    return dict()


def ddd():
    """Picklable factory: a defaultdict whose missing values are empty dicts."""
    return collections.defaultdict(dd)
# Per-split caches keyed by article index (populated elsewhere during loading).
TRAIN_COSINE_SIM = collections.defaultdict(dd)
TRAIN_ENTITIES = collections.defaultdict(ddd)
TRAIN_CONFIDENCES = collections.defaultdict(ddd)
TRAIN_CONTEXT = collections.defaultdict(ddd) #final value will be a vector
TEST_COSINE_SIM = collections.defaultdict(dd)
TEST_ENTITIES = collections.defaultdict(ddd)
TEST_CONFIDENCES = collections.defaultdict(ddd)
TEST_CONTEXT = collections.defaultdict(ddd) #final value will be a vector

# Evaluation accumulators, keyed per relation.
CORRECT = collections.defaultdict(lambda:0.)
GOLD = collections.defaultdict(lambda:0.)
PRED = collections.defaultdict(lambda:0.)
EVALCONF = collections.defaultdict(lambda:[])
EVALCONF2 = collections.defaultdict(lambda:[])
QUERY = collections.defaultdict(lambda:0.)
ACTION = collections.defaultdict(lambda:0.)
CHANGES = 0

evalMode = False
STAT_POSITIVE, STAT_NEGATIVE = 0, 0 #stat. sign.
CONTEXT = None
def splitBars(w):
    """Split a '|'-separated string and strip whitespace from each piece."""
    parts = w.split('|')
    return [piece.strip() for piece in parts]
#Environment for each episode
class Environment:
def __init__(self, newArticles, goldEntities, indx, args, evalMode):
    """Initialize one RL episode over a set of downloaded articles.

    newArticles: extra articles to process, grouped per query list.
    goldEntities: gold annotations for this article.
    indx: index into the global ENTITIES/CONFIDENCES/COSINE_SIM caches.
          NOTE(review): those globals are bound elsewhere (not in this file);
          they presumably alias the TRAIN_*/TEST_* caches above -- confirm.
    args: parsed command-line options controlling reconciliation/evaluation.
    evalMode: True during evaluation (disables article shuffling).
    """
    self.indx = indx
    # self.originalArticle = originalArticle
    self.newArticles = newArticles #extra articles to process
    self.goldEntities = goldEntities
    self.ignoreDuplicates = args.ignoreDuplicates
    self.entity = args.entity
    self.aggregate = args.aggregate
    self.delayedReward = args.delayedReward
    self.shooterLenientEval = args.shooterLenientEval
    self.listNum = 0 #start off with first list
    self.rlbasicEval = args.rlbasicEval
    self.rlqueryEval = args.rlqueryEval
    # Per-list article visit order, optionally shuffled during training.
    # NOTE(review): shuffle(q) mutates q in place, which assumes range()
    # returns a list (Python 2) -- confirm before running under Python 3.
    self.shuffledIndxs = [range(len(q)) for q in self.newArticles]
    if not evalMode and args.shuffleArticles:
        for q in self.shuffledIndxs:
            shuffle(q)
    self.state = [0 for i in range(STATE_SIZE)]
    self.terminal = False
    # Best entity string and confidence per relation found so far.
    self.bestEntities = collections.defaultdict(lambda:'') #current best entities
    self.bestConfidences = collections.defaultdict(lambda:0.)
    for i in range(NUM_RELATIONS):
        self.bestEntities[i] = 'NA'
        self.bestConfidences[i] = 0.02
    self.bestEntitySet = None
    if self.aggregate == 'majority':
        self.bestEntitySet = collections.defaultdict(lambda:[])
    self.bestIndex = (0,0)
    self.prevListNum = 0
    self.prevArticleIndx = 0
    # to keep track of extracted values from previousArticle
    # start off with list 0 always
    ###################################
    # if 0 in ENTITIES[self.indx][0]:
    # NOTE(review): if this cache entry is empty, prevEntities is never bound
    # and the read below raises -- caches appear to be pre-populated upstream.
    if len(ENTITIES[self.indx][0])>0:
        ####
        self.prevEntities, self.prevConfidences = ENTITIES[self.indx][0][0], CONFIDENCES[self.indx][0][0]
    # else:
    #     self.prevEntities, self.prevConfidences = self.extractEntitiesWithConfidences(self.originalArticle)
    #     ENTITIES[self.indx][0][0] = self.prevEntities
    #     CONFIDENCES[self.indx][0][0] = self.prevConfidences
    #store the original entities before updating state
    self.originalEntities = self.prevEntities
    #calculate tf-idf similarities using all the articles related to the original
    # self.allArticles = [originalArticle] + [item for sublist in self.newArticles for item in sublist]
    # self.allArticles = [' '.join(q) for q in self.allArticles]
    ############################################
    # if self.indx not in COSINE_SIM:
    #     # self.tfidf_matrix = TFIDF_MATRICES[0][self.indx]
    #     self.tfidf_matrix = tfidf_vectorizer.fit_transform(self.allArticles)
    #     cnt = 0
    #     for listNum, sublist in enumerate(self.newArticles):
    #         COSINE_SIM[self.indx][listNum] = cosine_similarity(self.tfidf_matrix[0:1], self.tfidf_matrix[cnt:cnt+len(sublist)])[0]
    #         pdb.set_trace()
    #         cnt += len(sublist)
    ############################################
    #update the initial state
    self.stepNum = [0 for q in range(NUM_QUERY_TYPES)]
    # self.updateState(ACCEPT_ALL, 1, self.ignoreDuplicates)
    self.updateState(IGNORE_ALL, 1, self.ignoreDuplicates)
    return
# def extractEntitiesWithConfidences(self, article):
# #article is a list of words
# joined_article = ' '.join(article)
# pred, conf_scores, conf_cnts = predict.predictWithConfidences(trained_model, joined_article, False, helper.cities)
#
# for i in range(len(conf_scores)):
# if conf_cnts[i] > 0:
# conf_scores[i] /= conf_cnts[i]
#
# return pred.split(' ### '), conf_scores
#find the article similarity between original and newArticle[i] (=allArticles[i+1])
    def articleSim(self, indx, listNum, i):
        # Precomputed cosine similarity between original article `indx` and the
        # i-th downloaded article of query type `listNum`.
        # return cosine_similarity(self.tfidf_matrix[0:1], self.tfidf_matrix[i+1:i+2])[0][0]
        return COSINE_SIM[indx][listNum][i]
# update the state based on the decision from DQN
def updateState(self, action, query, ignoreDuplicates=False):
global CONTEXT, CONTEXT_TYPE
#use query to get next article
articleIndx = None
if self.rlbasicEval:
#ignore the query decision from the agent
listNum = self.listNum
self.listNum += 1
if self.listNum == NUM_QUERY_TYPES: self.listNum = 0
else:
listNum = query-1 #convert from 1-based to 0-based
if self.rlqueryEval:
#set the reconciliation action
action = ACCEPT_ALL
if ignoreDuplicates:
nextArticle = None
while not nextArticle and self.stepNum[listNum] < len(self.newArticles[listNum]):
articleIndx = self.shuffledIndxs[listNum][self.stepNum]
if self.articleSim(self.indx, listNum, articleIndx) < 0.95:
nextArticle = self.newArticles[listNum][articleIndx]
else:
self.stepNum[listNum] += 1
else:
#get next article
if self.stepNum[listNum] < len(self.newArticles[listNum]):
articleIndx = self.shuffledIndxs[listNum][self.stepNum[listNum]]
nextArticle = self.newArticles[listNum][articleIndx]
else:
nextArticle = None
if action != STOP_ACTION:
# integrate the values into the current DB state
entities, confidences = self.prevEntities, self.prevConfidences
# all other tags
####################################
# todo: only one relation not one entity
for i in range(NUM_RELATIONS):
if action != ACCEPT_ALL and i != action: continue #only perform update for the entity chosen by agent
self.bestIndex = (self.prevListNum, self.prevArticleIndx) #analysis
if self.aggregate == 'majority':
self.bestEntitySet[i].append((entities[i], confidences[i]))
self.bestEntities[i], self.bestConfidences[i] = self.majorityVote(self.bestEntitySet[i])
else:
if i==0:
#handle shooterName - add to list or directly replace
if not self.bestEntities[i]:
self.bestEntities[i] = entities[i]
self.bestConfidences[i] = confidences[i]
elif self.aggregate == 'always' or confidences[i] > self.bestConfidences[i]:
self.bestEntities[i] = entities[i] #directly replace
# self.bestEntities[i] = self.bestEntities[i] + '|' + entities[i] #add to list
self.bestConfidences[i] = confidences[i]
else:
if not self.bestEntities[i] or self.aggregate == 'always' or confidences[i] > self.bestConfidences[i]:
self.bestEntities[i] = entities[i]
self.bestConfidences[i] = confidences[i]
# print "Changing best Entities"
# print "New entities", self.bestEntities
if DEBUG:
print "entitySet:", self.bestEntitySet
########
if nextArticle and action != STOP_ACTION:
assert(articleIndx != None)
##########
# if (articleIndx+1) in ENTITIES[self.indx][listNum]:
# entities, confidences = ENTITIES[self.indx][listNum][articleIndx+1], CONFIDENCES[self.indx][listNum][articleIndx+1]
entities, confidences = ENTITIES[self.indx][listNum][articleIndx], CONFIDENCES[self.indx][listNum][articleIndx]
# else:
# entities, confidences = self.extractEntitiesWithConfidences(nextArticle)
# ENTITIES[self.indx][listNum][articleIndx+1], CONFIDENCES[self.indx][listNum][articleIndx+1] = entities, confidences
##########
assert(len(entities) == len(confidences))
else:
# print "No next article"
entities, confidences = [""] * NUM_RELATIONS, [0] * NUM_RELATIONS
self.terminal = True
#modify self.state appropriately
# print(self.bestEntities, entities)
#############
if constants.mode == 'Shooter':
matches = map(self.checkEquality, self.bestEntities.values()[1:-1], entities[1:-1]) # map() is a high order function, the first parameter is a pointer of a function
matches.insert(0, self.checkEqualityShooter(self.bestEntities.values()[0], entities[0]))
matches.append(self.checkEqualityCity(self.bestEntities.values()[-1], entities[-1]))
elif constants.mode == "DS":
matches = map(self.checkEquality, self.bestEntities.values(), entities)
elif constants.mode == 'DS02':
matches = map(self.checkEquality, self.bestEntities.values(), entities)
else:
matches = map(self.checkEqualityShooter, self.bestEntities.values(), entities)
#######
# pdb.set_trace()
self.state = [0 for i in range(STATE_SIZE)]
for i in range(NUM_RELATIONS):
self.state[i] = self.bestConfidences[i] #DB state
self.state[NUM_RELATIONS + i] = confidences[i] #IMP: (original) next article state
matchScore = float(matches[i])
if matchScore > 0:
self.state[2 * NUM_RELATIONS + i] = 1
else:
self.state[3 * NUM_RELATIONS + i] = 1
# self.state[2*NUM_ENTITIES+i] = float(matches[i])*confidences[i] if float(matches[i])>0 else -1*confidences[i]
# if nextArticle:
# # print self.indx, listNum, articleIndx
# # print COSINE_SIM[self.indx][listNum]
# self.state[4 * NUM_RELATIONS] = self.articleSim(self.indx, listNum, articleIndx)
# else:
# self.state[4 * NUM_RELATIONS] = 0
#selectively mask states
if self.entity != NUM_RELATIONS:
for j in range(NUM_RELATIONS):
if j != self.entity:
self.state[j] = 0
self.state[NUM_RELATIONS + j] = 0
#TODO: mask matches
#add in context information
if nextArticle and CONTEXT_TYPE != 0:
#######################################
j = 4 * NUM_RELATIONS + 1
self.state[j:j + 2 * CONTEXT_LENGTH] = CONTEXT[self.indx][listNum][articleIndx][0]
j += 2 * CONTEXT_LENGTH
self.state[j:j + 2 * CONTEXT_LENGTH] = CONTEXT[self.indx][listNum][articleIndx][1]
# for q in range(NUM_ENTITIES):
# if self.entity == NUM_ENTITIES or self.entity == q:
# self.state[j:j+2*CONTEXT_LENGTH] = CONTEXT[self.indx][listNum][articleIndx+1][q]
# j += 2*CONTEXT_LENGTH
########################################
# pdb.set_trace()
#update state variables
self.prevEntities = entities
self.prevConfidences = confidences
self.prevListNum = listNum
self.prevArticleIndx = articleIndx
return
# check if two entities are equal. Need to handle city
def checkEquality(self, e1, e2):
# if gold is unknown, then dont count that
return e2!='' and (COUNT_ZERO or e2 != 'zero') and e1.lower() == e2.lower()
def checkEqualityShooter(self, e1, e2):
if e2 == '' or e2=='unknown': return 0.
gold = set(splitBars(e2.lower()))
pred = set(splitBars(e1.lower()))
correct = len(gold.intersection(pred))
prec = float(correct)/len(pred)
rec = float(correct)/len(gold)
if self.shooterLenientEval:
if correct > 0:
return 1.
else:
return 0.
else:
if prec+rec > 0:
f1 = (2*prec*rec)/(prec+rec)
else:
f1 = 0.
return f1
def checkEqualityCity(self, e1, e2):
return e2!='' and e1.lower() == e2.lower()
    def calculateReward(self, oldEntities, newEntities):
        # Reward = per-relation change in correctness w.r.t. gold when moving from
        # oldEntities to newEntities; summed across relations, or sliced to
        # self.entity when training a single-entity agent.
        if constants.mode == 'Shooter':
            # relations 1..n-2 use exact match
            rewards = [int(self.checkEquality(newEntities[1], self.goldEntities[1])) - int(self.checkEquality(oldEntities[1], self.goldEntities[1])),
                       int(self.checkEquality(newEntities[2], self.goldEntities[2])) - int(self.checkEquality(oldEntities[2], self.goldEntities[2]))]
            #add in shooter reward (set-overlap score) only if gold has a shooter
            if self.goldEntities[0]:
                rewards.insert(0, self.checkEqualityShooter(newEntities[0], self.goldEntities[0]) \
                        - self.checkEqualityShooter(oldEntities[0], self.goldEntities[0]))
            else:
                rewards.insert(0, 0.)
            # add in city reward
            rewards.append(self.checkEqualityCity(newEntities[-1], self.goldEntities[-1]) \
                    - self.checkEqualityCity(oldEntities[-1], self.goldEntities[-1]))
        ##########################################
        elif constants.mode == 'DS':
            # single relation
            rewards = [int(self.checkEquality(newEntities[0], self.goldEntities[0])) - int(self.checkEquality(oldEntities[0], self.goldEntities[0]))]
        elif constants.mode == 'DS02':
            # all relations, exact match
            rewards = []
            for i in range(NUM_RELATIONS):
                newAcc = int(self.checkEquality(newEntities[i], self.goldEntities[i]))
                oldAcc = int(self.checkEquality(oldEntities[i], self.goldEntities[i]))
                rewards.append(newAcc-oldAcc)
        ##########################################
        else:
            # EMA-style: overlap score per relation, skipping unknown golds
            rewards = []
            for i in range(len(newEntities)):
                if self.goldEntities[i] != 'unknown':
                    rewards.append(self.checkEqualityShooter(newEntities[i], self.goldEntities[i]) - self.checkEqualityShooter(oldEntities[i], self.goldEntities[i]))
                else:
                    rewards.append(0.)
        if self.entity == NUM_RELATIONS:
            return sum(rewards)
        else:
            return rewards[self.entity]
    def calculateStatSign(self, oldEntities, newEntities):
        # Like calculateReward, but returns the full per-relation delta list
        # (no summing/slicing) for statistical-significance bookkeeping.
        if constants.mode == 'Shooter':
            rewards = [int(self.checkEquality(newEntities[1], self.goldEntities[1])) - int(self.checkEquality(oldEntities[1], self.goldEntities[1])),
                       int(self.checkEquality(newEntities[2], self.goldEntities[2])) - int(self.checkEquality(oldEntities[2], self.goldEntities[2]))]
            #add in shooter reward
            if self.goldEntities[0]:
                rewards.insert(0, self.checkEqualityShooter(newEntities[0], self.goldEntities[0]) \
                        - self.checkEqualityShooter(oldEntities[0], self.goldEntities[0]))
            else:
                rewards.insert(0, 0.)
            # add in city reward
            rewards.append(self.checkEqualityCity(newEntities[-1], self.goldEntities[-1]) \
                    - self.checkEqualityCity(oldEntities[-1], self.goldEntities[-1]))
        else:
            rewards = []
            for i in range(len(newEntities)):
                if self.goldEntities[i] != 'unknown':
                    rewards.append(self.checkEqualityShooter(newEntities[i], self.goldEntities[i]) - self.checkEqualityShooter(oldEntities[i], self.goldEntities[i]))
                else:
                    rewards.append(0.)
        return rewards
def myCalculateStatSign(self, oldEntities, newEntities):
rewards = [int(self.checkEquality(newEntities[0], self.goldEntities[0])) - int(self.checkEquality(oldEntities[0], self.goldEntities[0]))]
return rewards
def myevaluate(self, predEntities, goldEntities, evalOutFile):
gold = goldEntities[0]
pred = predEntities[0]
if gold != 'NA':
GOLD[gold] += 1
if pred != 'NA':
if gold == pred:
CORRECT[pred] += 1
PRED[pred] += 1
if evalOutFile:
evalOutFile.write("--------------------\n")
evalOutFile.write("Gold: "+str(gold)+"\n")
evalOutFile.write("Pred: "+str(pred)+"\n")
# evalOutFile.write("Correct: "+str(correct)+"\n")
def myevaluate02(self, predEntities, goldEntities, evalOutFile):
for i in range(NUM_RELATIONS):
gold = goldEntities[i]
pred = predEntities[i]
if gold != 'NA':
GOLD[gold] += 1
if pred != 'NA':
if gold == pred:
CORRECT[pred] += 1
PRED[pred] += 1
if evalOutFile:
evalOutFile.write("--------------------\n")
evalOutFile.write(str(i)+"\n")
evalOutFile.write("Gold: "+str(gold)+"\n")
evalOutFile.write("Pred: "+str(pred)+"\n")
# evalOutFile.write("Correct: "+str(correct)+"\n")
#evaluate the bestEntities retrieved so far for a single article
#IMP: make sure the evaluate variables are properly re-initialized
    def evaluateArticle(self, predEntities, goldEntities, shooterLenientEval, shooterLastName, evalOutFile):
        # Accumulate precision/recall counts for one article into the global
        # CORRECT/GOLD/PRED counters, keyed by tag name (int2tags).
        # print "Evaluating article", predEntities, goldEntities
        if constants.mode == 'Shooter':
            #shooterName first: only add this if gold contains a valid shooter
            if goldEntities[0]!='':
                if shooterLastName:
                    # compare against the last name only
                    gold = set(splitBars(goldEntities[0].lower())[-1:])
                else:
                    gold = set(splitBars(goldEntities[0].lower()))
                pred = set(splitBars(predEntities[0].lower()))
                correct = len(gold.intersection(pred))
                if shooterLenientEval:
                    # lenient: any overlap counts as a single hit
                    CORRECT[int2tags[0]] += (1 if correct> 0 else 0)
                    GOLD[int2tags[0]] += (1 if len(gold) > 0 else 0)
                    PRED[int2tags[0]] += (1 if len(pred) > 0 else 0)
                else:
                    CORRECT[int2tags[0]] += correct
                    GOLD[int2tags[0]] += len(gold)
                    PRED[int2tags[0]] += len(pred)
            #all other tags: exact string match, skipping 'zero' golds unless COUNT_ZERO
            for i in range(1, NUM_RELATIONS):
                if COUNT_ZERO or goldEntities[i] != 'zero':
                    # gold = set(goldEntities[i].lower().split())
                    # pred = set(predEntities[i].lower().split())
                    # correct = len(gold.intersection(pred))
                    # GOLD[int2tags[i]] += len(gold)
                    # PRED[int2tags[i]] += len(pred)
                    GOLD[int2tags[i]] += 1
                    PRED[int2tags[i]] += 1
                    if predEntities[i].lower() == goldEntities[i].lower():
                        CORRECT[int2tags[i]] += 1
        else:
            #all other tags (EMA-style): set overlap over '|'-delimited values
            for i in range(NUM_RELATIONS):
                if goldEntities[i] != 'unknown':
                    #old eval
                    gold = set(splitBars(goldEntities[i].lower()))
                    pred = set(splitBars(predEntities[i].lower()))
                    # if 'unknown' in pred:
                    #     pred = set()
                    correct = len(gold.intersection(pred))
                    if shooterLenientEval:
                        CORRECT[int2tags[i]] += (1 if correct> 0 else 0)
                        GOLD[int2tags[i]] += (1 if len(gold) > 0 else 0)
                        PRED[int2tags[i]] += (1 if len(pred) > 0 else 0)
                    else:
                        CORRECT[int2tags[i]] += correct
                        GOLD[int2tags[i]] += len(gold)
                        PRED[int2tags[i]] += len(pred)
                    # print i, pred, "###", gold, "$$$", correct
                    #new eval (Adam)
                    # pred = predEntities[i].lower()
                    # gold = goldEntities[i].lower()
                    # if pred in gold:
                    #     CORRECT[int2tags[i]] += 1
                    # GOLD[int2tags[i]] += 1
                    # if pred != 'unknown':
                    #     PRED[int2tags[i]] += 1
        if evalOutFile:
            # NOTE(review): gold/pred/correct here are whatever the last loop
            # iteration left behind, and may be unbound if no branch executed
            # (e.g. Shooter mode with empty gold shooter) — verify.
            evalOutFile.write("--------------------\n")
            evalOutFile.write("Gold: "+str(gold)+"\n")
            evalOutFile.write("Pred: "+str(pred)+"\n")
            evalOutFile.write("Correct: "+str(correct)+"\n")
#TODO:use recall output for now. change this output later.
    def oracleEvaluate(self, goldEntities, entityDic, confDic):
        # the best possible numbers assuming that just the right information is extracted
        # from the set of related articles
        # For each tag, keep the single best extraction across all (stepNum,
        # listNum) pairs and commit its counts to the global counters.
        global PRED, GOLD, CORRECT, EVALCONF, EVALCONF2
        bestPred, bestCorrect = collections.defaultdict(lambda:0.), collections.defaultdict(lambda:0.)
        bestConf = collections.defaultdict(lambda:0.)
        for stepNum, predEntitiesDic in entityDic.items():
            for listNum, predEntities in predEntitiesDic.items():
                if constants.mode == 'Shooter':
                    #shooterName first: only add this if gold contains a valid shooter
                    if goldEntities[0]!='':
                        gold = set(splitBars(goldEntities[0].lower()))
                        pred = set(splitBars(predEntities[0].lower()))
                        correct = 1. if len(gold.intersection(pred)) > 0 else 0
                        # keep the extraction with most correct; ties broken by fewer predictions
                        if correct > bestCorrect[int2tags[0]] or (correct == bestCorrect[int2tags[0]] and len(pred) < bestPred[int2tags[0]]):
                            # print "Correct: ", correct
                            # print "Gold:", gold
                            # print "pred:", pred
                            bestCorrect[int2tags[0]] = correct
                            bestPred[int2tags[0]] = 1 if len(pred) > 0 else 0.
                            bestConf[int2tags[0]] = confDic[stepNum][listNum][0]
                        if stepNum == 0 and listNum == 0:
                            # count gold exactly once (on the original article)
                            GOLD[int2tags[0]] += (1 if len(gold) > 0 else 0)
                            if correct==0:
                                EVALCONF2[int2tags[0]].append(confDic[stepNum][listNum][0])
                    #all other tags
                    for i in range(1, NUM_RELATIONS):
                        if not COUNT_ZERO and goldEntities[i].lower() == 'zero': continue
                        gold = set(goldEntities[i].lower().split())
                        pred = set(predEntities[i].lower().split())
                        correct = len(gold.intersection(pred))
                        if correct > bestCorrect[int2tags[i]] or (correct == bestCorrect[int2tags[i]] and len(pred) < bestPred[int2tags[i]]):
                            bestCorrect[int2tags[i]] = correct
                            bestPred[int2tags[i]] = len(pred)
                            bestConf[int2tags[i]] = confDic[stepNum][listNum][i]
                        if stepNum == 0 and listNum == 0:
                            GOLD[int2tags[i]] += len(gold)
                            if correct==0:
                                EVALCONF2[int2tags[i]].append(confDic[stepNum][listNum][i])
                else:
                    #EMA
                    for i in range(NUM_RELATIONS):
                        if goldEntities[i].lower() == 'unknown': continue
                        gold = set(splitBars(goldEntities[i].lower()))
                        pred = set(splitBars(predEntities[i].lower()))
                        correct = 1. if len(gold.intersection(pred)) > 0 else 0
                        if correct > bestCorrect[int2tags[i]] or (correct == bestCorrect[int2tags[i]] and len(pred) < bestPred[int2tags[i]]):
                            bestCorrect[int2tags[i]] = correct
                            bestPred[int2tags[i]] = 1 if len(pred) > 0 else 0
                            # bestPred[int2tags[i]] = 1 if 'unknown' not in pred else bestPred[int2tags[i]]
                            bestConf[int2tags[i]] = confDic[stepNum][listNum][i]
                            # print "Correct: ", correct
                            # print "Gold:", gold
                            # print "pred:", pred
                        if stepNum == 0 and listNum == 0:
                            GOLD[int2tags[i]] += 1 #len(gold)
                            if correct==0:
                                EVALCONF2[int2tags[i]].append(confDic[stepNum][listNum][i])
        # commit the per-tag best numbers into the global counters
        for i in range(NUM_RELATIONS):
            PRED[int2tags[i]] += bestPred[int2tags[i]]
            CORRECT[int2tags[i]] += bestCorrect[int2tags[i]]
            EVALCONF[int2tags[i]].append(bestConf[int2tags[i]])
#TODO for EMA
    def thresholdEvaluate(self, goldEntities, args):
        #use a tf-idf threshold to select articles to extract from
        # uses Majority aggregation
        global PRED, GOLD, CORRECT, EVALCONF, EVALCONF2
        global ENTITIES, CONFIDENCES
        thres = args.threshold
        bestPred, bestCorrect = collections.defaultdict(lambda:0.), collections.defaultdict(lambda:0.)
        bestConf = collections.defaultdict(lambda:0.)
        bestSim = 0.
        bestEntities = ['','','','']
        # aggEntites[relation][entity value] -> vote count
        aggEntites = collections.defaultdict(lambda:collections.defaultdict(lambda:0.))
        #add in the original entities (weight 1.1 so they win ties against single votes)
        for i in range(NUM_RELATIONS):
            aggEntites[i][self.bestEntities[i]] += 1.1
        for listNum in range(len(self.newArticles)):
            for i in range(len(self.newArticles[listNum])):
                sim = self.articleSim(self.indx, listNum, i)
                # print sim
                # if sim > bestSim:
                #     bestSim = sim
                #     entities, confidences = self.extractEntitiesWithConfidences(self.newArticles[i])
                #     bestEntities = entities
                #     bestConf = confidences
                # print bestSim
                if sim > thres:
                    if (i+1) in ENTITIES[self.indx][listNum]:
                        entities, confidences = ENTITIES[self.indx][listNum][i+1], CONFIDENCES[self.indx][listNum][i+1]
                    else:
                        # NOTE(review): extractEntitiesWithConfidences is commented
                        # out above, so this fallback would raise AttributeError —
                        # verify the caches always contain key (i+1).
                        entities, confidences = self.extractEntitiesWithConfidences(self.newArticles[listNum][i])
                    for j in range(NUM_RELATIONS):
                        if entities[j] and entities[j] != 'unknown':
                            aggEntites[j][entities[j]] += 1
        #choose the best entities now (highest vote count per relation)
        for i in range(NUM_RELATIONS):
            tmp = sorted(aggEntites[i].items(), key=itemgetter(1), reverse=True)
            bestEntities[i] = tmp[0][0]
            print i, tmp
        # pdb.set_trace()
        self.evaluateArticle(bestEntities, goldEntities, args.shooterLenientEval, args.shooterLastName, args.evalOutFile)
#TODO for EMA
    def confEvaluate(self, goldEntities, args):
        # use confidence-based aggregation over tf-idf similarity threshold used to select articles
        # Keeps, per relation, the highest-confidence extraction among the
        # original entities and all sufficiently-similar downloaded articles.
        global PRED, GOLD, CORRECT, EVALCONF, EVALCONF2
        global ENTITIES, CONFIDENCES
        thres = args.threshold
        bestPred, bestCorrect = collections.defaultdict(lambda:0.), collections.defaultdict(lambda:0.)
        bestConf = collections.defaultdict(lambda:0.)
        bestSim = 0.
        bestEntities = ['','','','']
        bestConfidences = [0.,0.,0.,0.]
        aggEntites = collections.defaultdict(lambda:collections.defaultdict(lambda:0.))
        #add in the original entities
        for i in range(NUM_RELATIONS):
            if self.bestConfidences[i] > bestConfidences[i]:
                bestConfidences[i] = self.bestConfidences[i]
                bestEntities[i] = self.bestEntities[i]
        for listNum in range(len(self.newArticles)):
            for i in range(len(self.newArticles[listNum])):
                sim = self.articleSim(self.indx, listNum, i)
                if sim <= thres: continue
                if (i+1) in ENTITIES[self.indx][listNum]:
                    entities, confidences = ENTITIES[self.indx][listNum][i+1], CONFIDENCES[self.indx][listNum][i+1]
                else:
                    # NOTE(review): extractEntitiesWithConfidences is commented out
                    # above — verify the caches always contain key (i+1).
                    entities, confidences = self.extractEntitiesWithConfidences(self.newArticles[listNum][i])
                for j in range(NUM_RELATIONS):
                    if entities[j] != '' and entities[j] != 'unknown':
                        if confidences[j] > bestConfidences[j]:
                            bestConfidences[j] = confidences[j]
                            bestEntities[j] = entities[j]
        self.evaluateArticle(bestEntities, goldEntities, args.shooterLenientEval, args.shooterLastName, args.evalOutFile)
#TODO for EMA
#TODO: use conf or 1 for mode calculation
def majorityVote(self, entityList):
if not entityList: return '',0.
dic = collections.defaultdict(lambda:0.)
confDic = collections.defaultdict(lambda:0.)
cnt = collections.defaultdict(lambda:0.)
ticker = 0
for entity, conf in entityList:
dic[entity] += 1
cnt[entity] += 1
confDic[entity] += conf
if ticker == 0: dic[entity] += 0.1 #extra for original article to break ties
ticker += 1
bestEntity, bestVote = sorted(dic.items(), key=itemgetter(1), reverse=True)[0]
return bestEntity, confDic[bestEntity]/cnt[bestEntity]
#take a single step in the episode
def step(self, action, query):
global CHANGES
oldEntities = copy.copy(self.bestEntities.values())
#update pointer to next article
listNum = query-1
self.stepNum[listNum] += 1
self.updateState(action, query, self.ignoreDuplicates)
newEntities = self.bestEntities.values()
if self.delayedReward == 'True':
reward = self.calculateReward(self.originalEntities, newEntities)
else:
reward = self.calculateReward(oldEntities, newEntities)
# negative per step
reward -= 0.001
return self.state, reward, self.terminal
def loadFile(filename):
    # Load a pickled stream of (article, title, identifier, downloaded_articles)
    # tuples from `filename`, then spell out numeric identifier fields
    # ("2" -> "two") so they match the extractor's textual output.
    # FIX: the bare `except:` previously swallowed every error (including real
    # corruption) — narrowed to the end-of-stream/unpickling exceptions.
    # NOTE: pickle is unsafe on untrusted input; only load trusted files.
    articles, titles, identifiers, downloaded_articles = [], [], [], []
    #load data and process identifiers
    with open(filename, "rb") as inFile:
        while True:
            try:
                a, b, c, d = pickle.load(inFile)
            except (EOFError, pickle.UnpicklingError):
                break  # end of pickled stream
            articles.append(a)
            titles.append(b)
            identifiers.append(c)
            downloaded_articles.append(d)
    identifiers_tmp = []
    for e in identifiers:
        for i in range(NUM_RELATIONS):
            if type(e[i]) == int or e[i].isdigit():
                e[i] = int(e[i])
                e[i] = inflect_engine.number_to_words(e[i])
        identifiers_tmp.append(e)
    identifiers = identifiers_tmp
    return articles, titles, identifiers, downloaded_articles
def baselineEval(articles, identifiers, args):
global CORRECT, GOLD, PRED
CORRECT = collections.defaultdict(lambda:0.)
GOLD = collections.defaultdict(lambda:0.)
PRED = collections.defaultdict(lambda:0.)
for indx in range(len(articles)):
print "INDX:", indx, '/', len(articles)
originalArticle = articles[indx][0] #since article has words and tags
newArticles = [[] for i in range(NUM_QUERY_TYPES)]
goldEntities = identifiers[indx]
env = Environment(originalArticle, newArticles, goldEntities, indx, args, evalMode=True)
env.evaluateArticle(env.bestEntities.values(), env.goldEntities, args.shooterLenientEval, args.shooterLastName, args.evalOutFile)
print "------------\nEvaluation Stats: (Precision, Recall, F1):"
for tag in int2tags:
prec = CORRECT[tag]/PRED[tag]
rec = CORRECT[tag]/GOLD[tag]
f1 = (2*prec*rec)/(prec+rec)
print tag, prec, rec, f1, "########", CORRECT[tag], PRED[tag], GOLD[tag]
def thresholdEval(articles, downloaded_articles, identifiers, args):
global CORRECT, GOLD, PRED
CORRECT = collections.defaultdict(lambda:0.)
GOLD = collections.defaultdict(lambda:0.)
PRED = collections.defaultdict(lambda:0.)
for indx in range(len(articles)):
print "INDX:", indx
originalArticle = articles[indx][0]
newArticles = [[q.split(' ')[:WORD_LIMIT] for q in sublist] for sublist in downloaded_articles[indx]]
goldEntities = identifiers[indx]
env = Environment(originalArticle, newArticles, goldEntities, indx, args, False)
env.thresholdEvaluate(env.goldEntities, args)
print "------------\nEvaluation Stats: (Precision, Recall, F1):"
for tag in int2tags:
prec = CORRECT[tag]/PRED[tag]
rec = CORRECT[tag]/GOLD[tag]
f1 = (2*prec*rec)/(prec+rec)
print tag, prec, rec, f1
def confEval(articles, downloaded_articles, identifiers, args):
global CORRECT, GOLD, PRED
CORRECT = collections.defaultdict(lambda:0.)
GOLD = collections.defaultdict(lambda:0.)
PRED = collections.defaultdict(lambda:0.)
for indx in range(len(articles)):
print "INDX:", indx
originalArticle = articles[indx][0]
newArticles = [[q.split(' ')[:WORD_LIMIT] for q in sublist] for sublist in downloaded_articles[indx]]
goldEntities = identifiers[indx]
env = Environment(originalArticle, newArticles, goldEntities, indx, args, False)
env.confEvaluate(env.goldEntities, args)
print "------------\nEvaluation Stats: (Precision, Recall, F1):"
for tag in int2tags:
prec = CORRECT[tag]/PRED[tag]
rec = CORRECT[tag]/GOLD[tag]
if prec+rec > 0:
f1 = (2*prec*rec)/(prec+rec)
else:
f1 = 0
print tag, prec, rec, f1
def plot_hist(evalconf, name):
    # Save one confidence histogram per key of `evalconf` as "<name>_<key>.png".
    bin_edges = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
    for key in evalconf.keys():
        plt.hist(evalconf[key], bins=bin_edges)
        plt.title("Gaussian Histogram")
        plt.xlabel("Value")
        plt.ylabel("Frequency")
        # plt.show()
        plt.savefig(name+"_"+str(key)+".png")
        plt.clf()
def computeContext(ENTITIES, CONTEXT, ARTICLES, DOWNLOADED_ARTICLES, vectorizer, context=3):
    # calculate the context variables for all articles
    # For each extracted entity, find its first mention in the article and store
    # the tf-idf weights of the `context` tokens on each side (2*context floats)
    # into CONTEXT[indx][listNum][articleNum].
    print "Computing context..."
    vocab = vectorizer.vocabulary_
    for indx, lists in ENTITIES.items():
        print '\r', indx, '/', len(ENTITIES),
        for listNum, articles in lists.items():
            for articleNum, entities in articles.items():
                CONTEXT[indx][listNum][articleNum] = []
                if articleNum == 0:
                    #original article
                    article = [w.lower() for w in ARTICLES[indx][0]] #need only the tokens, not tags
                else:
                    # downloaded article: lowercase, strip non-ASCII, tokenize
                    raw_article = DOWNLOADED_ARTICLES[indx][listNum][articleNum-1].lower()
                    cleaned_article = re.sub(r'[^\x00-\x7F]+',' ', raw_article)
                    article = word_tokenize(cleaned_article)
                for entityNum, entity in enumerate(entities):
                    vec = []
                    phrase = []
                    if entityNum == 0 or constants.mode == 'EMA': #shooter case or EMA mode
                        entity = set(splitBars(entity))
                        for i, word in enumerate(article):
                            if word in entity:
                                # grab `context` tokens before/after the mention,
                                # padding with a sentinel at article boundaries
                                for j in range(1,context+1):
                                    if i-j>=0:
                                        phrase.append(article[i-j])
                                    else:
                                        phrase.append('XYZUNK') #random unseen phrase
                                for j in range(1,context+1):
                                    if i+j < len(article):
                                        phrase.append(article[i+j])
                                    else:
                                        phrase.append('XYZUNK') #random unseen phrase
                                break
                        mat = vectorizer.transform([' '.join(phrase)]).toarray()
                        for w in phrase:
                            feat_indx = vocab.get(w)
                            # NOTE(review): truthiness test also rejects vocabulary
                            # index 0 — probably intended `is not None`; verify.
                            if feat_indx:
                                vec.append(float(mat[0,feat_indx]))
                            else:
                                vec.append(0.)
                    else:
                        # numeric relations: normalize digit tokens to words first
                        for i, word in enumerate(article):
                            # NOTE(review): in Python 2, `word < 100` for a str
                            # `word` compares str-to-int (always False), so the
                            # isdigit conversion may never fire — likely intended
                            # `int(word) < 100`; verify before changing.
                            if type(word) == int or word.isdigit() and word < 100:
                                word = int(word)
                                word = inflect_engine.number_to_words(word)
                            if word in entity:
                                for j in range(1,context+1):
                                    if i-j>=0:
                                        phrase.append(article[i-j])
                                    else:
                                        phrase.append('XYZUNK') #random unseen phrase
                                for j in range(1,context+1):
                                    if i+j < len(article):
                                        phrase.append(article[i+j])
                                    else:
                                        phrase.append('XYZUNK') #random unseen phrase
                                break
                        mat = vectorizer.transform([' '.join(phrase)]).toarray()
                        for w in phrase:
                            feat_indx = vocab.get(w)
                            if feat_indx:
                                vec.append(float(mat[0,feat_indx]))
                            else:
                                vec.append(0.)
                    # take care of all corner cases (entity not found in article)
                    if len(vec) == 0:
                        vec = [0. for q in range(2*context)]
                    #now store the vector
                    CONTEXT[indx][listNum][articleNum].append(vec)
                try:
                    assert(len(CONTEXT[indx][listNum][articleNum]) <= NUM_RELATIONS)
                except:
                    pdb.set_trace()
    print "done."
    return CONTEXT
def splitDict(mapping, start, end):
    # Return the values of `mapping` for integer keys start..end-1 as a list.
    # FIX: parameter renamed from `dict`, which shadowed the builtin; all
    # callers in this file pass it positionally.
    return [mapping[i] for i in range(start, end)]
def main(args):
global ENTITIES, CONFIDENCES, COSINE_SIM, CONTEXT
global TRAIN_ENTITIES, TRAIN_CONFIDENCES, TRAIN_COSINE_SIM, TRAIN_CONTEXT
global TEST_ENTITIES, TEST_CONFIDENCES, TEST_COSINE_SIM, TEST_CONTEXT
global evalMode
global CORRECT, GOLD, PRED, EVALCONF, EVALCONF2
global QUERY, ACTION, CHANGES
global trained_model
global CONTEXT_TYPE
global STAT_POSITIVE, STAT_NEGATIVE
print args
# trained_model = pickle.load( open(args.modelFile, "rb" ) )
#load cached entities (speed up)
####################################
# train_articles, train_titles, train_identifiers, train_downloaded_articles, TRAIN_ENTITIES, TRAIN_CONFIDENCES, TRAIN_COSINE_SIM, CONTEXT1, CONTEXT2 = pickle.load(open(args.trainEntities, "rb"))
train_articles, train_identifiers, train_entities, train_preds, train_confidences, train_contexts1, train_contexts2, train_vec1, train_vec2 = pickle.load(open(args.trainEntities, "rb"))
TRAIN_ENTITIES = train_preds
TRAIN_CONFIDENCES = train_confidences
CONTEXT1 = train_contexts1
CONTEXT2 = train_contexts2
####
if args.contextType == 1:
TRAIN_CONTEXT = CONTEXT1
else:
TRAIN_CONTEXT = CONTEXT2
CONTEXT_TYPE = args.contextType
########################################
# test_articles, test_titles, test_identifiers, test_downloaded_articles, TEST_ENTITIES, TEST_CONFIDENCES, TEST_COSINE_SIM, CONTEXT1, CONTEXT2 = pickle.load(open(args.testEntities, "rb"))
test_articles, test_identifiers, test_entities, test_preds, test_confidences, test_contexts1, test_contexts2, test_vec1, test_vec2 = pickle.load(open(args.testEntities, "rb"))
TEST_ENTITIES = test_preds
TEST_CONFIDENCES = test_confidences
CONTEXT1 = test_contexts1
CONTEXT2 = test_contexts2
####
if args.contextType == 1:
TEST_CONTEXT = CONTEXT1
else:
TEST_CONTEXT = CONTEXT2
print len(train_articles)
print len(test_articles)
#starting assignments
if not args.baselineEval and not args.thresholdEval and not args.confEval:
ENTITIES = TRAIN_ENTITIES
CONFIDENCES = TRAIN_CONFIDENCES
COSINE_SIM = TRAIN_COSINE_SIM
CONTEXT = TRAIN_CONTEXT
articles, identifiers = train_articles,train_identifiers
else:
ENTITIES = TEST_ENTITIES
CONFIDENCES = TEST_CONFIDENCES
COSINE_SIM = TEST_COSINE_SIM
CONTEXT = TEST_CONTEXT
articles, identifiers = test_articles, test_identifiers
if args.baselineEval:
baselineEval(articles, identifiers, args)
return
elif args.classifierEval:
print args.trainEntities
print args.testEntities
m = "TEST"
split_index = 292
if m == "DEV":
CLS_TEST_ENTITIES = splitDict(TRAIN_ENTITIES, split_index, len(TRAIN_ENTITIES) )
CLS_TEST_CONFIDENCES = splitDict(TRAIN_CONFIDENCES, split_index, len(TRAIN_ENTITIES) )
CLS_TEST_COSINE_SIM = splitDict(TRAIN_COSINE_SIM, split_index, len(TRAIN_ENTITIES) )
CLS_TEST_CONTEXT = splitDict(TRAIN_CONTEXT, split_index, len(TRAIN_ENTITIES) )
CLS_test_identifiers = splitDict(train_identifiers, split_index, len(TRAIN_ENTITIES) )
CLS_TRAIN_ENTITIES = splitDict(TRAIN_ENTITIES, 0, split_index )
CLS_TRAIN_CONFIDENCES = splitDict(TRAIN_CONFIDENCES, 0, split_index )
CLS_TRAIN_COSINE_SIM = splitDict(TRAIN_COSINE_SIM, 0, split_index )
CLS_TRAIN_CONTEXT= splitDict(TRAIN_CONTEXT, 0, split_index )
CLS_train_identifiers = splitDict(train_identifiers, 0, split_index )
elif m == "TEST":
CLS_TRAIN_ENTITIES =TRAIN_ENTITIES
CLS_TRAIN_CONFIDENCES =TRAIN_CONFIDENCES
CLS_TRAIN_COSINE_SIM =TRAIN_COSINE_SIM
CLS_TRAIN_CONTEXT =TRAIN_CONTEXT
CLS_TEST_ENTITIES = TEST_ENTITIES
CLS_TEST_CONFIDENCES = TEST_CONFIDENCES
CLS_TEST_COSINE_SIM = TEST_COSINE_SIM
CLS_TEST_CONTEXT= TEST_CONTEXT
CLS_train_identifiers = train_identifiers
CLS_test_identifiers = test_identifiers
# baseline = Classifier(CLS_TRAIN_ENTITIES, CLS_TRAIN_CONFIDENCES, CLS_TRAIN_COSINE_SIM, CLS_TRAIN_CONTEXT,\
# CLS_TEST_ENTITIES, CLS_TEST_CONFIDENCES, CLS_TEST_COSINE_SIM, CLS_TEST_CONTEXT)
#
# baseline.trainAndEval(CLS_train_identifiers, CLS_test_identifiers, args.entity, COUNT_ZERO)
return
articleNum = 0
savedArticleNum = 0
outFile = open(args.outFile, 'w', 0) #unbuffered
outFile.write(str(args)+"\n")
outFile2 = open(args.outFile+'.2', 'w', 0) #for analysis
outFile2.write(str(args)+"\n")
evalOutFile = None
if args.evalOutFile != '':
evalOutFile = open(args.evalOutFile, 'w')
# pdb.set_trace()
#server setup
port = args.port
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:%s" % port)
print "Started server on port", port
#for analysis
stepCnt = 0
# server loop
while True:
# Wait for next request from client
message = socket.recv()
# print "Received request: ", message
if message == "newGame":
# indx = articleNum % 10 #for test
indx = articleNum % len(articles)
if DEBUG: print "INDX:", indx
articleNum += 1
#IMP: make sure downloaded_articles is of form <indx, listNum>
newArticles = [[q.split(' ')[:WORD_LIMIT] for q in sublist] for sublist in articles[indx]]
goldEntities = identifiers[indx][0]
env = Environment(newArticles, goldEntities, indx, args, evalMode)
newstate, reward, terminal = env.state, 0, 'false'
elif message == "evalStart":
CORRECT = collections.defaultdict(lambda:0.)
GOLD = collections.defaultdict(lambda:0.)
PRED = collections.defaultdict(lambda:0.)
QUERY = collections.defaultdict(lambda:0.)
ACTION = collections.defaultdict(lambda:0.)
CHANGES = 0
evalMode = True
savedArticleNum = articleNum
articleNum = 0
stepCnt = 0
STAT_POSITIVE, STAT_NEGATIVE = [0 for i in range(NUM_RELATIONS)], [0 for i in range(NUM_RELATIONS)]
ENTITIES = TEST_ENTITIES
CONFIDENCES = TEST_CONFIDENCES
COSINE_SIM = TEST_COSINE_SIM
CONTEXT = TEST_CONTEXT
articles, identifiers = test_articles, test_identifiers
evalOutFile = None
if args.evalOutFile != '':
evalOutFile = open(args.evalOutFile, 'w')
# print "##### Evaluation Started ######"
elif message == "evalEnd":
print "------------\nEvaluation Stats: (Precision, Recall, F1):"
outFile.write("------------\nEvaluation Stats: (Precision, Recall, F1):\n")
######################################
for tag in int2tags:
if CORRECT[tag] and PRED[tag] and GOLD[tag]:
prec = CORRECT[tag]/PRED[tag]
rec = CORRECT[tag]/GOLD[tag]
f1 = (2*prec*rec)/(prec+rec)
print tag, prec, rec, f1, "########", CORRECT[tag], PRED[tag], GOLD[tag]
outFile.write(' '.join([str(tag), str(prec), str(rec), str(f1)])+'\n')
#########################
# tag = int2tags[0]
# prec = CORRECT[tag]/PRED[tag]
# rec = CORRECT[tag]/GOLD[tag]
# f1 = (2*prec*rec)/(prec+rec)
# print tag, prec, rec, f1, "########", CORRECT[tag], PRED[tag], GOLD[tag]
# outFile.write(' '.join([str(tag), str(prec), str(rec), str(f1)])+'\n')
#########
##########################
correct, pred, gold = 1e-6,1e-6,1e-6
for tag, num in CORRECT.items():
correct += num
for tag,num in PRED.items():
pred += num
for tag, num in GOLD.items():
gold += num
prec = correct/pred
rec = correct/gold
f1 = (2 * prec * rec) / (prec + rec)
print "macro average", prec, rec, f1, "########", correct, pred, gold
outFile.write(' '.join([str(tag), str(prec), str(rec), str(f1)])+'\n')
###########
print "StepCnt (total, average):", stepCnt, float(stepCnt)/len(articles)
outFile.write("StepCnt (total, average): " + str(stepCnt)+ ' ' + str(float(stepCnt)/len(articles)) + '\n')
qsum = sum(QUERY.values())
asum = sum(ACTION.values())
outFile2.write("------------\nQsum: " + str(qsum) + " Asum: " + str(asum)+'\n')
for k, val in QUERY.items():
outFile2.write("Query " + str(k) + ' ' + str(val/qsum)+'\n')
for k, val in ACTION.items():
outFile2.write("Action " + str(k) + ' ' + str(val/asum)+'\n')
outFile2.write("CHANGES: "+str(CHANGES)+ ' ' + str(float(CHANGES)/len(articles))+"\n")
outFile2.write("STAT_POSITIVE, STAT_NEGATIVE "+str(STAT_POSITIVE) + ', ' +str(STAT_NEGATIVE)+'\n')
#for analysis
# pdb.set_trace()
evalMode = False
articleNum = savedArticleNum
ENTITIES = TRAIN_ENTITIES
CONFIDENCES = TRAIN_CONFIDENCES
COSINE_SIM = TRAIN_COSINE_SIM
CONTEXT = TRAIN_CONTEXT
articles, identifiers = train_articles, train_identifiers
# print "##### Evaluation Ended ######"
if args.oracle:
plot_hist(EVALCONF, "conf1")
plot_hist(EVALCONF2, "conf2")
#save the extracted entities
if args.saveEntities:
pickle.dump([TRAIN_ENTITIES, TRAIN_CONFIDENCES, TRAIN_COSINE_SIM], open("train2.entities", "wb"))
pickle.dump([TEST_ENTITIES, TEST_CONFIDENCES, TEST_COSINE_SIM], open("test2.entities", "wb"))
return
else:
# message is "step"
action, query = [int(q) for q in message.split()]
if evalMode:
ACTION[action] += 1
QUERY[query] += 1
if evalMode and DEBUG:
print "State:"
print newstate[:4]
print newstate[4:8]
print newstate[8:]
print "Entities:", env.prevEntities
print "Action:", action, query
newstate, reward, terminal = env.step(action, query)
terminal = 'true' if terminal else 'false'
#remove reward unless terminal
if args.delayedReward == 'True' and terminal == 'false':
reward = 0
if evalMode and DEBUG and reward != 0:
print "Reward:", reward
pdb.set_trace()
if message != "evalStart" and message != "evalEnd":
#do article eval if terminal
if evalMode and articleNum <= len(articles) and terminal == 'true':
if args.oracle:
env.oracleEvaluate(env.goldEntities, ENTITIES[env.indx], CONFIDENCES[env.indx])
else:
#################################
# env.evaluateArticle(env.bestEntities.values(), env.goldEntities, args.shooterLenientEval, args.shooterLastName, evalOutFile)
# env.myevaluate(env.bestEntities.values(), env.goldEntities, evalOutFile)
env.myevaluate02(env.bestEntities.values(), env.goldEntities, evalOutFile)
################################
stepCnt += sum(env.stepNum)
#stat sign
################################################
vals = env.myCalculateStatSign(env.originalEntities, env.bestEntities.values())
############################################
for i, val in enumerate(vals):
if val > 0:
STAT_POSITIVE[i] += val
else:
STAT_NEGATIVE[i] -= val
#for analysis
for entityNum in [0,1,2,3]:
if ANALYSIS and evalMode and env.bestEntities.values()[entityNum].lower() != env.originalEntities[entityNum].lower() and reward > 0:
CHANGES += 1
try:
print "ENTITY:", entityNum
print "Entities:", 'best', env.bestEntities.values()[entityNum], 'orig', env.originalEntities[entityNum], 'gold', env.goldEntities[entityNum]
# print ' '.join(originalArticle)
print "$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
print ' '.join(newArticles[env.bestIndex[0]][env.bestIndex[1]])
print "----------------------------"
except:
pass
#send message (IMP: only for newGame or step messages)
outMsg = 'state, reward, terminal = ' + str(newstate) + ',' + str(reward)+','+terminal
socket.send(outMsg.replace('[', '{').replace(']', '}'))
else:
socket.send("done")
if __name__ == '__main__':
    # Module-level mutable state shared with the ZMQ server loop in main():
    # the current Environment and the last (state, reward, terminal) triple.
    env = None
    newstate, reward, terminal = None, None, None
    argparser = argparse.ArgumentParser(sys.argv[0])
    argparser.add_argument("--port",
                           type = int,
                           default = 5050,
                           help = "port for server")
    argparser.add_argument("--trainFile",
                           type = str,
                           help = "training File")
    argparser.add_argument("--testFile",
                           type = str,
                           default = "",
                           help = "Testing File")
    argparser.add_argument("--outFile",
                           type = str,
                           help = "Output File")
    argparser.add_argument("--evalOutFile",
                           default = "",
                           type = str,
                           help = "Output File for predictions")
    argparser.add_argument("--modelFile",
                           type = str,
                           help = "Model File")
    # NOTE(review): argparse `type=bool` converts with bool(str), so ANY
    # non-empty value -- including the string "False" -- parses as True.
    # These boolean flags can effectively only be switched on from the CLI;
    # confirm this is intended (the --delayedReward option below works around
    # the same problem by using type=str and comparing against 'True').
    argparser.add_argument("--shooterLenientEval",
                           type = bool,
                           default = False,
                           help = "Evaluate shooter leniently by counting any match as right")
    argparser.add_argument("--shooterLastName",
                           type = bool,
                           default = False,
                           help = "Evaluate shooter using only last name")
    argparser.add_argument("--oracle",
                           type = bool,
                           default = False,
                           help = "Evaluate using oracle")
    argparser.add_argument("--ignoreDuplicates",
                           type = bool,
                           default = False,
                           help = "Ignore duplicate articles in downloaded ones.")
    argparser.add_argument("--baselineEval",
                           type = bool,
                           default = False,
                           help = "Evaluate baseline performance")
    argparser.add_argument("--classifierEval",
                           type = bool,
                           default = False,
                           help = "Evaluate performance using a simple maxent classifier")
    argparser.add_argument("--thresholdEval",
                           type = bool,
                           default = False,
                           help = "Use tf-idf similarity threshold to select articles to extract from")
    argparser.add_argument("--threshold",
                           type = float,
                           default = 0.8,
                           help = "threshold value for Aggregation baseline above")
    argparser.add_argument("--confEval",
                           type = bool,
                           default = False,
                           help = "Evaluate with best conf ")
    argparser.add_argument("--rlbasicEval",
                           type = bool,
                           default = False,
                           help = "Evaluate with RL agent that takes only reconciliation decisions.")
    argparser.add_argument("--rlqueryEval",
                           type = bool,
                           default = False,
                           help = "Evaluate with RL agent that takes only query decisions.")
    argparser.add_argument("--shuffleArticles",
                           type = bool,
                           default = False,
                           help = "Shuffle the order of new articles presented to agent")
    argparser.add_argument("--entity",
                           type = int,
                           default = NUM_RELATIONS,
                           help = "Entity num. 4 means all.")
    argparser.add_argument("--aggregate",
                           type = str,
                           default = 'always',
                           help = "Options: always, conf, majority")
    argparser.add_argument("--delayedReward",
                           type = str,
                           default = 'False',
                           help = "delay reward to end")
    argparser.add_argument("--trainEntities",
                           type = str,
                           default = '',
                           help = "Pickle file with extracted train entities")
    argparser.add_argument("--testEntities",
                           type = str,
                           default = '',
                           help = "Pickle file with extracted test entities")
    argparser.add_argument("--numEntityLists",
                           type = int,
                           default = 1,
                           help = "number of different query lists to consider")
    argparser.add_argument("--contextType",
                           type = int,
                           default = 1,
                           help = "Type of context to consider (1 = counts, 2 = tfidf, 0 = none)")
    argparser.add_argument("--saveEntities",
                           type = bool,
                           default = False,
                           help = "save extracted entities to file")
    args = argparser.parse_args()
    main(args)

# sample invocation:
#python server.py --port 7000 --trainEntities consolidated/train+context.5.p --testEntities consolidated/dev+test+context.5.p --outFile outputs/tmp2.out --modelFile trained_model2.p --entity 4 --aggregate always --shooterLenientEval True --delayedReward False --contextType 2
|
# Walkthrough of range(), list construction, simple stats, and comprehensions.

# Print 1..9, one value per line.
for value in range(1, 10):
    print(value)
print("\n")

# range() with explicit lower/upper bounds.
lower = 0
higher = 4
for value in range(lower, higher):
    print(value)
numbers = list(range(lower, higher))
print(numbers)

# Even numbers: use the step argument of range().
even_numbers = list(range(2, 11, 2))
print(even_numbers)

# Build the list of squares with an explicit accumulator variable.
squares = []
for value in range(1, 11):
    square = value ** 2
    squares.append(square)
print(squares)

# Same result without the temporary variable.
squares = list(v ** 2 for v in range(1, 11))
print(squares)

# Simple statistics over a list of digits.
digits = list(range(1, 10))
print(digits)
print(min(digits))
print(max(digits))
print(sum(digits))

# List comprehension: the idiomatic one-liner.
squaresComp = [value ** 2 for value in range(1, 11)]
print(squaresComp)
|
# Label each even number in [0, 10).
for x in range(0, 10):
    if x % 2 != 0:
        continue  # skip odd values
    print(x, "is even")
import numpy as np
import cv2
import matplotlib.pyplot as plt
from statistics import mean, median,variance,stdev
class Mass(object):
    """A connected horizontal run ("mass") of lit pixels in a binary image.

    Starting from `tip` -- assumed to be the (x, y) of the run's leftmost lit
    pixel (TODO confirm; see sakicho()) -- `uneUne` walks right one column at
    a time, following the contour within a +/-20 px vertical window.
    """

    def __init__(self, img, tip):
        # img: binary image (0 = background); tip: (x, y) starting pixel.
        self.x, self.y = self.uneUne(img, tip)
        # Horizontal extent of the run, in pixels.
        self.length = abs(self.x[-1]-self.x[0])
        self.frontEndCoordinates = (self.x[0], self.y[0])
        self.rearEndCoordinates = (self.x[-1], self.y[-1])
        # Midpoint between the two endpoints (not a true center of mass).
        self.centroid = (int((self.x[0]+self.x[-1])/2), int((self.y[0]+self.y[-1])/2))
        # (x, y) of the highest point (smallest y) along the run.
        self.maxOfHeight = self.getMaxofHeight()

    def uneUne(self, img, tip):
        """Trace the run rightwards from `tip`; return parallel x and y lists."""
        x = [tip[0]]
        y = [tip[1]]
        while True:
            pos_x = x[-1]
            pos_y = y[-1]
            # Vertical strip of the NEXT column, centered on the current y.
            scan_line = img[pos_y-20:pos_y+20, pos_x+1]
            for i in range(len(scan_line)):
                if scan_line[i] != 0:
                    # First lit pixel in the window continues the run.
                    x.append(pos_x+1)
                    y.append(pos_y-20+i)
                    break
            else:
                # for/else: no lit pixel in the next column -- run has ended.
                break
        return x, y

    def getBottomLine(self):
        """Return two endpoints of a horizontal baseline under the run.

        The baseline y is the mean of y samples taken near both ends.
        NOTE(review): `rightend_y` actually holds an x coordinate
        (self.x[-1]) -- misleading name, behavior is correct.
        """
        leftend_x = self.x[0]
        rightend_y = self.x[-1]
        sample_left_y = self.y[0:20:2]
        sample_right_y = self.y[-1:-21:-2]
        bottom_y = mean(sample_left_y+sample_right_y)
        return (leftend_x, bottom_y), (rightend_y, bottom_y)

    def getMaxofHeight(self):
        # Image y grows downward, so min(y) is the run's highest point.
        i = self.y.index(min(self.y))
        return self.x[i], self.y[i]
def weldInspection():
    """Run the weld-measurement pipeline over test04.mp4 and write output.avi."""
    capture = cv2.VideoCapture('./test04.mp4')
    codec = cv2.VideoWriter_fourcc(*'MJPG')
    writer = cv2.VideoWriter('output.avi', codec, 30.0, (3840, 2160))
    frame_num = 0
    while capture.isOpened():
        ok, frame = capture.read()
        if not ok:
            break  # end of stream
        writer.write(imageProcessing(frame))
        frame_num += 1
        # Allow aborting with the 'q' key.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    capture.release()
    writer.release()
    cv2.destroyAllWindows()
def imageProcessing(frame):
    """Detect six laser-line segments (massA..massF) in `frame`, draw
    measurement annotations on it, and return the annotated frame.
    """
    #height, width = frame.shape[:2]
    # Color mask: keep pixels whose hue falls in the red/magenta band.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV_FULL)
    lowerRed = np.array([200, 0, 0])
    upperRed = np.array([310, 255, 255])
    img_mask = cv2.inRange(hsv, lowerRed, upperRed)
    # Noise removal: erase connected components with area < 300 px.
    labelStats = cv2.connectedComponentsWithStats(img_mask)
    nLabels, labelImages, masses, center = labelStats
    for mass in masses:
        if mass[4] < 300:  # stats column 4 is the component area
            cv2.rectangle(img_mask, (mass[0], mass[1]),
                          (mass[0] + mass[2], mass[1] + mass[3]), 0, -1)
    # Derivative filter -- currently disabled; the raw mask is used as-is
    # (see the unused filter() helper below).
    img_filtered = img_mask
    # Detect the six segments left-to-right: each search starts one column
    # past the previous segment's right end.
    tip = sakicho(img_filtered, 0)
    massA = Mass(img_filtered, tip)
    tip = sakicho(img_filtered, massA.rearEndCoordinates[0]+1)
    massB = Mass(img_filtered, tip)
    tip = sakicho(img_filtered, massB.rearEndCoordinates[0]+1)
    massC = Mass(img_filtered, tip)
    tip = sakicho(img_filtered, massC.rearEndCoordinates[0]+1)
    massD = Mass(img_filtered, tip)
    tip = sakicho(img_filtered, massD.rearEndCoordinates[0]+1)
    massE = Mass(img_filtered, tip)
    tip = sakicho(img_filtered, massE.rearEndCoordinates[0]+1)
    massF = Mass(img_filtered, tip)
    # Drawing: baselines and lengths for B/C/D/E/F, plus two derived
    # vertical measurements (gap between B and C; height of D's peak).
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.line(frame, massB.getBottomLine()[0], massB.getBottomLine()[1], (0, 255, 0), 2)
    cv2.putText(frame, str(massB.length), (massB.centroid[0], massB.centroid[1]-10), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
    cv2.line(frame, massF.getBottomLine()[0], massF.getBottomLine()[1], (0, 255, 0), 2)
    cv2.putText(frame, str(massF.length), (massF.centroid[0], massF.centroid[1]-10), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
    cv2.line(frame, massC.getBottomLine()[0], massC.getBottomLine()[1], (255, 0, 255), 2)
    cv2.putText(frame, str(massC.length), (massC.centroid[0], massC.centroid[1]-10), font, 1, (255, 0, 255), 2, cv2.LINE_AA)
    cv2.line(frame, massE.getBottomLine()[0], massE.getBottomLine()[1], (255, 0, 255), 2)
    cv2.putText(frame, str(massE.length), (massE.centroid[0], massE.centroid[1]-10), font, 1, (255, 0, 255), 2, cv2.LINE_AA)
    cv2.line(frame, massD.getBottomLine()[0], massD.getBottomLine()[1], (255, 255, 0), 2)
    cv2.putText(frame, str(massD.length), (massD.centroid[0], massD.centroid[1]+30), font, 1, (255, 255, 0), 2, cv2.LINE_AA)
    # Vertical gap between B's right end and C's left end.
    cv2.line(frame, massB.rearEndCoordinates, (massB.rearEndCoordinates[0], massC.frontEndCoordinates[1]), (0, 0, 255), 2)
    cv2.putText(frame, str(massC.frontEndCoordinates[1]-massB.rearEndCoordinates[1]),
                (massB.rearEndCoordinates[0]+10, massB.rearEndCoordinates[1]+40),
                font, 1, (0, 0, 255), 2, cv2.LINE_AA)
    # Height of segment D: from its peak down to its baseline.
    cv2.line(frame, (massD.maxOfHeight[0]-40, massD.maxOfHeight[1]),
             (massD.maxOfHeight[0]+40, massD.maxOfHeight[1]), (255, 255, 0), 2)
    cv2.line(frame, (massD.maxOfHeight[0]+40, massD.maxOfHeight[1]),
             (massD.maxOfHeight[0]+40, massD.getBottomLine()[1][1]), (255, 255, 0), 2)
    cv2.putText(frame, str(massD.getBottomLine()[0][1]-massD.maxOfHeight[1]), (massD.maxOfHeight[0]+60, massD.maxOfHeight[1]+30), font, 1, (255, 255, 0), 2, cv2.LINE_AA)
    # NOTE(review): generateFrameImage() unpacks TWO values from this
    # function, but only `frame` is returned -- confirm which is intended.
    return frame
def filter(img):
    """Apply a 3x3 vertical-difference kernel to `img` (same depth output).

    NOTE: the name shadows the builtin `filter`; kept for compatibility.
    """
    # Kernel: current pixel minus the pixel directly above it.
    kernel = np.zeros((3, 3), np.float32)
    kernel[0, 1] = -1.0
    kernel[1, 1] = 1.0
    return cv2.filter2D(img, -1, kernel)
def sakicho(img, offset):
    """Find the leftmost lit pixel at or right of column `offset`.

    Scans columns left-to-right starting at `offset` and returns (x, y) of
    the topmost pixel equal to 255 in the first column containing one.

    Args:
        img: 2-D uint8 image (binary mask; lit pixels are exactly 255).
        offset: first column index to scan.

    Returns:
        (x, y) tuple of the pixel found.

    Raises:
        ValueError: if no 255 pixel exists at or right of `offset`.
            (The original code left `tip` unbound in that case and died
            with an UnboundLocalError at the return statement.)
    """
    width = img.shape[1]
    for i in range(offset, width):
        hits = np.where(img[:, i] == 255)[0]
        if hits.size != 0:
            # First hit in the column is the topmost (smallest y).
            return (i, hits[0])
    raise ValueError("no lit (255) pixel found at or right of column %d" % offset)
def imshow(img):
    """Debug helper: show `img` in a window, block until a key is pressed."""
    cv2.imshow('frame', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def generateFrameImage():
    """Dump each video frame and its processed version as numbered JPEGs.

    Fixes two crashes in the original:
    - `cap.read()` was never checked, so `frame` was None at end of stream
      and imageProcessing(None) blew up;
    - the result of imageProcessing() was unpacked into two values
      (`img_mask, output`), but it returns a single annotated frame.
    """
    cap = cv2.VideoCapture('./test04.mp4')
    num = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break  # end of stream
        processed = imageProcessing(frame)
        cv2.imwrite('./frame/{}.jpg'.format(num), frame)
        cv2.imwrite('./output/{}.jpg'.format(num), processed)
        num += 1
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
def main():
    # Entry point: run the video-based weld inspection pipeline.
    weldInspection()
    # imageProcessing()
    # generateFrameImage()


if __name__ == '__main__':
    main()
|
import unittest
from katas.beta.decipher_the_cipher import encrypter
class TestDecipherTheCipher(unittest.TestCase):
    """Example-based tests for the `encrypter` kata (fixed substitution cipher)."""

    def test_equal_1(self):
        self.assertEqual(encrypter("amz"), "man")

    def test_equal_2(self):
        self.assertEqual(encrypter("welcome to the organization"), "qibkyai ty tfi yvgmzenmteyz")

    def test_equal_3(self):
        self.assertEqual(encrypter("hello"), "fibby")

    def test_equal_4(self):
        self.assertEqual(encrypter("my name is"), "ao zmai eu")

    def test_equal_5(self):
        self.assertEqual(encrypter("goodbye"), "gyyjloi")
|
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
import os
from typing import ClassVar, Iterable, Sequence
from pants.backend.python.target_types import ConsoleScript, EntryPoint, MainSpecification
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.lockfile_metadata import PythonLockfileMetadata
from pants.backend.python.util_rules.pex import PexRequest
from pants.backend.python.util_rules.pex_requirements import (
EntireLockfile,
LoadedLockfile,
LoadedLockfileRequest,
Lockfile,
PexRequirements,
Resolve,
)
from pants.engine.fs import Digest
from pants.engine.internals.selectors import Get
from pants.option.errors import OptionsError
from pants.option.option_types import StrListOption, StrOption
from pants.option.subsystem import Subsystem
from pants.util.docutil import doc_url, git_url
from pants.util.meta import classproperty
from pants.util.strutil import softwrap
logger = logging.getLogger(__name__)
class PythonToolRequirementsBase(Subsystem):
    """Base class for subsystems that configure a set of requirements for a python tool."""

    # Subclasses must set.
    default_version: ClassVar[str]
    # Subclasses do not need to override.
    default_extra_requirements: ClassVar[Sequence[str]] = []
    # Subclasses may set to override the value computed from default_version and
    # default_extra_requirements.
    # TODO: Once we get rid of those options, subclasses must set this to loose
    # requirements that reflect any minimum capabilities Pants assumes about the tool.
    default_requirements: Sequence[str] = []
    default_interpreter_constraints: ClassVar[Sequence[str]] = ["CPython>=3.7,<4"]
    register_interpreter_constraints: ClassVar[bool] = False
    # (package, resource-path) pointing at the lockfile shipped with Pants.
    default_lockfile_resource: ClassVar[tuple[str, str] | None] = None

    install_from_resolve = StrOption(
        advanced=True,
        default=None,
        help=lambda cls: softwrap(
            f"""\
            If specified, install the tool using the lockfile for this named resolve.
            This resolve must be defined in `[python].resolves`, as described in
            {doc_url("python-third-party-dependencies#user-lockfiles")}.
            The resolve's entire lockfile will be installed, unless specific requirements are
            listed via the `requirements` option, in which case only those requirements
            will be installed. This is useful if you don't want to invalidate the tool's
            outputs when the resolve incurs changes to unrelated requirements.
            If unspecified, and the `lockfile` option is unset, the tool will be installed
            using the default lockfile shipped with Pants.
            If unspecified, and the `lockfile` option is set, the tool will use the custom
            `{cls.options_scope}` "tool lockfile" generated from the `version` and
            `extra_requirements` options. But note that this mechanism is deprecated.
            """
        ),
    )
    requirements = StrListOption(
        advanced=True,
        help=lambda cls: softwrap(
            """\
            If `install_from_resolve` is specified, install these requirements,
            at the versions provided by the specified resolve's lockfile.
            Values can be pip-style requirements (e.g., `tool` or `tool==1.2.3` or `tool>=1.2.3`),
            or addresses of `python_requirement` targets (or targets that generate or depend on
            `python_requirement` targets).
            The lockfile will be validated against the requirements - if a lockfile doesn't
            provide the requirement (at a suitable version, if the requirement specifies version
            constraints) Pants will error.
            If unspecified, install the entire lockfile.
            """
        ),
    )
    _interpreter_constraints = StrListOption(
        # Only registered when the subclass opts in.
        register_if=lambda cls: cls.register_interpreter_constraints,
        advanced=True,
        default=lambda cls: cls.default_interpreter_constraints,
        help="Python interpreter constraints for this tool.",
    )

    def __init__(self, *args, **kwargs):
        # Fail fast on subclass misconfiguration before options are used.
        if (
            self.default_interpreter_constraints
            != PythonToolRequirementsBase.default_interpreter_constraints
            and not self.register_interpreter_constraints
        ):
            raise ValueError(
                softwrap(
                    f"""
                    `default_interpreter_constraints` are configured for `{self.options_scope}`, but
                    `register_interpreter_constraints` is not set to `True`, so the
                    `--interpreter-constraints` option will not be registered. Did you mean to set
                    this?
                    """
                )
            )
        if not self.default_lockfile_resource:
            raise ValueError(
                softwrap(
                    f"""
                    The class property `default_lockfile_resource` must be set. See `{self.options_scope}`.
                    """
                )
            )
        super().__init__(*args, **kwargs)

    @classproperty
    def default_lockfile_url(cls) -> str:
        # URL of the default lockfile in the Pants repo, derived from the
        # dotted package path in `default_lockfile_resource`.
        assert cls.default_lockfile_resource is not None
        return git_url(
            os.path.join(
                "src",
                "python",
                cls.default_lockfile_resource[0].replace(".", os.path.sep),
                cls.default_lockfile_resource[1],
            )
        )

    def pex_requirements(
        self,
        *,
        extra_requirements: Iterable[str] = (),
    ) -> PexRequirements | EntireLockfile:
        """The requirements to be used when installing the tool."""
        description_of_origin = f"the requirements of the `{self.options_scope}` tool"
        if self.install_from_resolve:
            # Install from the user's named resolve; restrict to `requirements`
            # if any were given, otherwise take the resolve's entire lockfile.
            use_entire_lockfile = not self.requirements
            return PexRequirements(
                (*self.requirements, *extra_requirements),
                from_superset=Resolve(self.install_from_resolve, use_entire_lockfile),
                description_of_origin=description_of_origin,
            )
        # Fall back to the default lockfile bundled as a package resource.
        assert self.default_lockfile_resource is not None
        pkg, path = self.default_lockfile_resource
        url = f"resource://{pkg}/{path}"
        origin = f"The built-in default lockfile for {self.options_scope}"
        lockfile = Lockfile(
            url=url,
            url_description_of_origin=origin,
            resolve_name=self.options_scope,
        )
        return EntireLockfile(lockfile)

    @property
    def interpreter_constraints(self) -> InterpreterConstraints:
        """The interpreter constraints to use when installing and running the tool.

        This assumes you have set the class property `register_interpreter_constraints = True`.
        """
        return InterpreterConstraints(self._interpreter_constraints)

    def to_pex_request(
        self,
        *,
        interpreter_constraints: InterpreterConstraints | None = None,
        extra_requirements: Iterable[str] = (),
        main: MainSpecification | None = None,
        sources: Digest | None = None,
    ) -> PexRequest:
        # Build the PexRequest for installing this tool.
        requirements = self.pex_requirements(extra_requirements=extra_requirements)
        if not interpreter_constraints:
            if self.options.is_default("interpreter_constraints") and (
                isinstance(requirements, EntireLockfile)
                or (
                    isinstance(requirements, PexRequirements)
                    and isinstance(requirements.from_superset, Resolve)
                )
            ):
                # If installing the tool from a resolve, and custom ICs weren't explicitly set,
                # leave these blank. This will cause the ones for the resolve to be used,
                # which is clearly what the user intends, rather than forcing the
                # user to override interpreter_constraints to match those of the resolve.
                interpreter_constraints = InterpreterConstraints()
            else:
                interpreter_constraints = self.interpreter_constraints
        return PexRequest(
            output_filename=f"{self.options_scope.replace('-', '_')}.pex",
            internal_only=True,
            requirements=requirements,
            interpreter_constraints=interpreter_constraints,
            main=main,
            sources=sources,
        )
class PythonToolBase(PythonToolRequirementsBase):
    """Base class for subsystems that configure a python tool to be invoked out-of-process."""

    # Subclasses must set.
    default_main: ClassVar[MainSpecification]

    console_script = StrOption(
        advanced=True,
        default=lambda cls: (
            cls.default_main.spec if isinstance(cls.default_main, ConsoleScript) else None
        ),
        help=softwrap(
            """
            The console script for the tool. Using this option is generally preferable to
            (and mutually exclusive with) specifying an `--entry-point` since console script
            names have a higher expectation of staying stable across releases of the tool.
            Usually, you will not want to change this from the default.
            """
        ),
    )
    entry_point = StrOption(
        advanced=True,
        default=lambda cls: (
            cls.default_main.spec if isinstance(cls.default_main, EntryPoint) else None
        ),
        help=softwrap(
            """
            The entry point for the tool. Generally you only want to use this option if the
            tool does not offer a `--console-script` (which this option is mutually exclusive
            with). Usually, you will not want to change this from the default.
            """
        ),
    )

    @property
    def main(self) -> MainSpecification:
        """Resolve which main to use: an explicitly-set console_script or
        entry_point overrides default_main; setting both explicitly is an error.
        """
        is_default_console_script = self.options.is_default("console_script")
        is_default_entry_point = self.options.is_default("entry_point")
        if not is_default_console_script and not is_default_entry_point:
            raise OptionsError(
                softwrap(
                    f"""
                    Both [{self.options_scope}].console-script={self.console_script} and
                    [{self.options_scope}].entry-point={self.entry_point} are configured
                    but these options are mutually exclusive. Please pick one.
                    """
                )
            )
        if not is_default_console_script:
            assert self.console_script is not None
            return ConsoleScript(self.console_script)
        if not is_default_entry_point:
            assert self.entry_point is not None
            return EntryPoint.parse(self.entry_point)
        return self.default_main

    def to_pex_request(
        self,
        *,
        interpreter_constraints: InterpreterConstraints | None = None,
        extra_requirements: Iterable[str] = (),
        main: MainSpecification | None = None,
        sources: Digest | None = None,
    ) -> PexRequest:
        # Same as the base class, but `main` defaults to this tool's main.
        return super().to_pex_request(
            interpreter_constraints=interpreter_constraints,
            extra_requirements=extra_requirements,
            main=main or self.main,
            sources=sources,
        )
async def get_loaded_lockfile(subsystem: PythonToolBase) -> LoadedLockfile:
    """Resolve and load the lockfile the given tool subsystem installs from."""
    reqs = subsystem.pex_requirements()
    if isinstance(reqs, EntireLockfile):
        lockfile = reqs.lockfile
    else:
        # Otherwise the requirements must be drawn from a named resolve.
        assert isinstance(reqs, PexRequirements)
        assert isinstance(reqs.from_superset, Resolve)
        lockfile = await Get(Lockfile, Resolve, reqs.from_superset)
    return await Get(LoadedLockfile, LoadedLockfileRequest(lockfile))
async def get_lockfile_metadata(subsystem: PythonToolBase) -> PythonLockfileMetadata:
    """Return the parsed metadata header of the tool's loaded lockfile."""
    loaded = await get_loaded_lockfile(subsystem)
    metadata = loaded.metadata
    assert metadata is not None
    return metadata
async def get_lockfile_interpreter_constraints(
    subsystem: PythonToolBase,
) -> InterpreterConstraints:
    """If a lockfile is used, will try to find the interpreter constraints used to generate the
    lock.

    This allows us to work around https://github.com/pantsbuild/pants/issues/14912.
    """
    # An explicitly-set option always wins; otherwise fall back to the
    # constraints recorded in the lockfile's metadata header.
    if subsystem.options.is_default("interpreter_constraints"):
        metadata = await get_lockfile_metadata(subsystem)
        return metadata.valid_for_interpreter_constraints
    return subsystem.interpreter_constraints
|
# Demonstrate dict.clear(): removes every entry in place.
a = dict([(1, 'bhavya'), (2, 'komal'), (3, 'khushi')])
a.clear()
print(a)
|
from main import givePrediction
from tkinter import *
# Raw form values. NOTE(review): these are plain strings, but Tk's
# `textvariable` expects a StringVar-like object, so the Entry widgets
# below never update them -- confirm whether StringVar was intended.
passengerID = ""
passengerClass = ""
gender = ""
Sibling = ""
Embarked = ""

# Main application window.
root = Tk()
root.geometry("1100x600")
root.title("Semester Project of Scikit-Learn")
def predictTheTitanicSurvival():
    """Button callback: run the model and show the result in the window.

    NOTE(review): the prediction inputs are hard-coded; the form's entry
    widgets are never read back (they are bound to None -- see the layout
    code below, where `.grid(...)` is chained onto the constructors).

    Fix: removed the dead initial `predict = "Survived"` assignment (it was
    immediately overwritten) and added a fall-through so an unexpected model
    output is still displayed rather than shown as a raw number only by luck.
    """
    predict = givePrediction(100, 2, "Male", 2, "S", "LinearRegression")
    if predict == 0:
        predict = "Passenger will not survive"
    elif predict == 1:
        predict = "Passenger will survive"
    else:
        # Unexpected model output: show it verbatim.
        predict = str(predict)
    label8 = Label(secondFrame, text=predict, font=('arial', 13, 'bold')).grid(row=6, column=0, padx=50, pady=30)
# Header bar with the application title.
topFrame = Frame(root, width = 1097, height = 50, bd = 0, highlightbackground = "black", highlightcolor = "black", highlightthickness = 3)
topFrame.pack(side = TOP)
label = Label(topFrame,text = "Model to predicts which passengers survived the Titanic shipwreck.",font = ('arial', 18, 'bold'))
label.grid(row = 0, column = 0)

# Form area holding the input fields and the predict button.
secondFrame = Frame(root, width = 1097, height =400, bg = "lightblue")
secondFrame.pack()

# NOTE(review): `Widget(...).grid(...)` returns None, so every name below
# (label1, entry1, ...) is bound to None, not to the widget. The entry
# values can therefore never be read back; predictTheTitanicSurvival()
# uses hard-coded inputs instead.
label1 = Label(secondFrame , text = "Passenger ID (1 ~ ...) : ",font = ('arial', 13, 'bold')).grid(row=0 , column = 0 , padx = 50, pady = 30)
entry1 = Entry(secondFrame , font = ('arial', 13, 'bold') , textvariable = passengerID).grid(row = 0 ,column = 1 , padx = 100, pady = 30)
label2 = Label(secondFrame , text = "Passenger Class (1 , 2 , 3) : ",font = ('arial', 13, 'bold')).grid(row=1 , column = 0 , padx = 50, pady = 10)
entry2 = Entry(secondFrame , font = ('arial', 13, 'bold') , textvariable = passengerClass).grid(row = 1 ,column = 1 , padx = 100, pady = 10)
label3 = Label(secondFrame , text = "Gender : ",font = ('arial', 13, 'bold')).grid(row=2 , column = 0 , padx = 50, pady = 10)
# NOTE(review): the check buttons have no associated variables, so the
# chosen gender cannot be read either.
checkButton1 = Checkbutton(secondFrame, text = "Male").grid(row=2 , column = 1 , padx = 50, pady = 10)
checkButton2 = Checkbutton(secondFrame, text = "Female").grid(row=2 , column = 2 , padx = 50, pady = 10)
label4 = Label(secondFrame , text = "Sibling : ",font = ('arial', 13, 'bold')).grid(row=3 , column = 0 , padx = 50, pady = 10)
entry4 = Entry(secondFrame , font = ('arial', 13, 'bold')).grid(row = 3 ,column = 1 , padx = 100, pady = 10)
label5 = Label(secondFrame , text = "Embarked : ",font = ('arial', 13, 'bold')).grid(row=4 , column = 0 , padx = 50, pady = 10)
entry5 = Entry(secondFrame , font = ('arial', 13, 'bold')).grid(row = 4 ,column = 1 , padx = 100, pady = 10)
survival = Button(secondFrame, text = "Click Here to Predict!", command = predictTheTitanicSurvival).grid(row = 5 ,column = 1 , padx = 100, pady = 10)
root.mainloop() # Enter the Tk event loop; blocks until the window is closed.
|
#!/usr/bin/env python
import sys
if __name__ == "__main__":
    # CLI: angSep.py RA1 DEC1 RA2 DEC2  (coordinates in decimal degrees).
    import _angSep

    ra1, dec1, ra2, dec2 = map(float, sys.argv[1:])
    sep = _angSep.angSep(ra1, dec1, ra2, dec2, 1)
    print('coords (%.7f,%.7f) and (%.7f,%.7f) separated by %.2f degrees'
          % (ra1, dec1, ra2, dec2, sep))
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent # noqa: PNT20
import pytest
from pants_explorer.server.graphql.rules import rules
from pants_explorer.server.graphql.setup import create_schema
from pants.backend.docker.target_types import DockerImageTarget
from pants.backend.project_info import peek
from pants.engine.environment import EnvironmentName
from pants.engine.explorer import RequestState
from pants.engine.internals.parser import BuildFileSymbolsInfo
from pants.engine.target import RegisteredTargetTypes
from pants.engine.unions import UnionMembership
from pants.help.help_info_extracter import AllHelpInfo, HelpInfoExtracter
from pants.testutil.rule_runner import RuleRunner
@pytest.fixture(scope="session")
def schema():
    """Session-wide GraphQL schema under test."""
    return create_schema()
@pytest.fixture
def context(all_help_info: AllHelpInfo, rule_runner: RuleRunner) -> dict:
    """GraphQL execution context carrying the Pants request state."""
    state = RequestState(
        all_help_info,
        rule_runner.build_config,
        rule_runner.scheduler,
        env_name=EnvironmentName(None),
    )
    return {"pants_request_state": state}
@pytest.fixture
def all_help_info(rule_runner: RuleRunner) -> AllHelpInfo:
    """Full help-info model extracted from the rule runner's configuration."""

    def fake_consumed_scopes_mapper(scope: str) -> tuple[str, ...]:
        # Stand-in for the real consumed-scopes mapping.
        return ("somescope", f"used_by_{scope or 'GLOBAL_SCOPE'}")

    options = rule_runner.options_bootstrapper.full_options(
        rule_runner.build_config, union_membership=UnionMembership({})
    )
    return HelpInfoExtracter.get_all_help_info(
        options=options,
        union_membership=rule_runner.union_membership,
        consumed_scopes_mapper=fake_consumed_scopes_mapper,
        registered_target_types=RegisteredTargetTypes.create(rule_runner.build_config.target_types),
        build_symbols=BuildFileSymbolsInfo.from_info(),
        build_configuration=rule_runner.build_config,
    )
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Rule runner wired with the explorer GraphQL rules plus `peek`."""
    all_rules = (*peek.rules(), *rules())
    return RuleRunner(rules=all_rules, target_types=(DockerImageTarget,))
@pytest.fixture(scope="session")
def queries() -> str:
    """Shared GraphQL query documents used by the tests in this module."""
    return dedent(
        """\
        query TestRulesQuery($name: String, $limit: Int) {
          rules(query:{nameRe: $name, limit: $limit}) { name }
        }
        query TestTargetsQuery($specs: [String!], $targetType: String, $limit: Int) {
          targets(query:{specs: $specs, targetType: $targetType, limit: $limit}) { targetType }
        }
        query TestTargetTypesQuery($alias: String, $limit: Int) {
          targetTypes(query:{aliasRe: $alias, limit: $limit}) { alias }
        }
        """
    )
|
from datetime import datetime, timedelta
import requests
from constants import uprr_request_token_url, uprr_scrape_url, uprr_data_dict, uprr_headers_dict
class UnionPacific:
    """Client for the Union Pacific (UPRR) container-tracking API."""

    def __get_request_token(self):
        # Client-credentials exchange; endpoint URL, headers and payload
        # come from the `constants` module.
        response = requests.post(uprr_request_token_url, headers=uprr_headers_dict, data=uprr_data_dict)
        token_dict = response.json()
        return token_dict['access_token']

    def get_tracking_dict(self, container_list):
        """Fetch tracking data and return {container_number: {'fields': {...}}}."""
        request_token = self.__get_request_token()
        headers = {'Authorization': 'Bearer {}'.format(request_token)}
        response = requests.get(uprr_scrape_url.format(','.join(container_list)), headers=headers)
        response_list = response.json()
        traced_containers_list = []
        for container in response_list:
            traced_containers_list.append({
                'fields': {
                    'storage_details': container['storageCharges'],
                    'scheduled_events': container['scheduledEvents'],
                    'accomplished_events': container['accomplishedEvents'],
                    'billed_status': container['billedStatus'],
                }
            }
            )
        # Pairs results positionally with the requested numbers; assumes the
        # API answers one entry per requested container, in order -- TODO confirm.
        traced_containers_dict = dict(zip(container_list, traced_containers_list))
        return traced_containers_dict

    def get_last_free_day(self, _dict):
        """Return the last free day ('YYYY-MM-DD'): the day before storage charges begin."""
        storage_charge_date_str = _dict['fields']['storage_details']['storageChargeBegins'].split('T')[0]
        storage_charge_datetime = datetime.strptime(storage_charge_date_str, '%Y-%m-%d')
        last_free_date = storage_charge_datetime - timedelta(days=1)
        # FIX: was '20%y-%m-%d' (literal "20" + 2-digit year), which only
        # works for 20xx years; '%Y' produces the same output correctly.
        return str(datetime.strftime(last_free_date, '%Y-%m-%d'))

    def get_outgate_date(self, _dict):
        """Return 'MM/DD/YY' of the out-gate event, or None if the most recent
        accomplished event is not a truck-line delivery."""
        if 'Delivered to Truck Line' in _dict['fields']['accomplished_events'][0]['name']:
            outgate_date_str = _dict['fields']['accomplished_events'][0]['dateTime'].split('T')[0]
            outgate_date_object = datetime.strptime(outgate_date_str, '%Y-%m-%d')
            return datetime.strftime(outgate_date_object, '%m/%d/%y')

    def check_container_validity(self, container_num, _dict):
        """Print whether the container is in UPRR inventory (no storage data => not UPRR)."""
        if _dict['fields']['storage_details'] is None:
            # FIX: the messages said "BNSF" -- a copy-paste from the BNSF
            # client; this class tracks Union Pacific.
            print('{} not on UPRR, check other rails.'.format(container_num))
        else:
            print('{} is in UPRR inventory'.format(container_num))

    def get_container_eta(self, _dict):
        """Return the date ('YYYY-MM-DD') of the next scheduled event."""
        arrival_eta_str = _dict['fields']['scheduled_events'][0]['dateTime'].split('T')[0]
        arrival_eta_datetime = datetime.strptime(arrival_eta_str, '%Y-%m-%d')
        return datetime.strftime(arrival_eta_datetime, '%Y-%m-%d')

    # FIX: this was defined without `self`, so it only worked when called as
    # UnionPacific.get_uprr_event(d, e); @staticmethod preserves that call
    # style and additionally makes instance calls work.
    @staticmethod
    def get_uprr_event(_dict, event):
        """Format the first 'past' or 'scheduled' event as
        'City, ST<TAB>Name dateTime'; return None when there is no such event."""
        event_dict = {
            'past': 'accomplished_events',
            'scheduled': 'scheduled_events',
        }
        try:
            return '{}, {}\t{} {}'.format(
                _dict['fields'][event_dict[event]][0]['location']['city'],
                _dict['fields'][event_dict[event]][0]['location']['state'],
                _dict['fields'][event_dict[event]][0]['name'],
                _dict['fields'][event_dict[event]][0]['dateTime'])
        except (IndexError, KeyError):
            # No event of that kind (or malformed payload) -> None.
            pass
|
import unittest
from katas.beta.my_first_bug_fixing_kata import foo
class FooTestCase(unittest.TestCase):
    """Tests for the bug-fixing kata: foo() must return the integer 42."""
    def test_equals(self):
        self.assertEqual(foo(), 42)
    def test_instance(self):
        # Guards against e.g. returning the string "42" or a float.
        self.assertIsInstance(foo(), int)
|
from rest_framework import serializers
from sneakers.models import Sneakers
class SneakersSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the Sneakers model."""
    class Meta:
        model = Sneakers
        fields = "__all__"
|
import tkinter as tk
from tkinter.constants import BOTTOM, TOP
import tkinter.font as tkFont
from tkinter import messagebox
from tkinter.messagebox import askokcancel, showinfo, WARNING
from mysql.connector.errors import DatabaseError
import GetImage as GM
import DatabaseConnection as DB;
def main(root,user,id):
    """Owner home screen of the Room Rental app.

    Builds a fixed-size dashboard with buttons for add/remove/update/preview
    room, booking history, tenant records, and logout.

    Args:
        root: Tk root window to populate.
        user: display name of the logged-in owner.
        id:   owner id used for DB lookups (NOTE: shadows the builtin `id`).
    """
    root.title("Home")
    # Fixed 750x600 window, centered on the screen.
    width=750
    height=600
    screenwidth = root.winfo_screenwidth()
    screenheight = root.winfo_screenheight()
    alignstr = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2)
    root.geometry(alignstr)
    root.resizable(width=False, height=False)
    # NOTE(review): all image paths are hard-coded absolute Windows paths;
    # consider making them relative to the project root.
    BgImg = GM.getImage("D:\Programming\Python\Room_Rental\Images\BG.jpg", 744, 592)
    BGLabel=tk.Label(root,image=BgImg)
    # Keep a reference on the widget so Tk's image is not garbage-collected.
    BGLabel.image=BgImg
    BGLabel["justify"] = "center"
    BGLabel.place(x=3,y=1,width=744,height=592)
    # --- "Add Room" button (top-left of the button grid) ---
    img = GM.getImage("D:\Programming\Python\Room_Rental\Images\\addroom.png",160,125)
    AddRoom=tk.Button(root,image=img, compound=TOP)
    AddRoom.image = img
    #AddRoom["bg"] = "#eeefff"
    AddRoom["borderwidth"] = "4px"
    ft = tkFont.Font(family='Times',size=16)
    AddRoom["font"] = ft
    #AddRoom["fg"] = "#2e3436"
    AddRoom["justify"] = "center"
    AddRoom["text"] = "Add Room"
    AddRoom.place(x=70,y=220,width=160,height=155)
    AddRoom["command"] = lambda : AddRoom_command(root,user,id)
    # --- "Remove Room" button ---
    img = GM.getImage("D:\Programming\Python\Room_Rental\Images\\removeroom.png",160,125)
    RemoveRoom=tk.Button(root,image=img,compound=TOP)
    RemoveRoom.image = img
    RemoveRoom["bg"] = "#f6f5f4"
    RemoveRoom["borderwidth"] = "4px"
    ft = tkFont.Font(family='Times',size=16)
    RemoveRoom["font"] = ft
    RemoveRoom["fg"] = "#2e3436"
    RemoveRoom["justify"] = "center"
    RemoveRoom["text"] = "Remove Room"
    RemoveRoom.place(x=300,y=220,width=160,height=155)
    RemoveRoom["command"] = lambda : RemoveRoom_command(root,id)
    # --- "Room Preview" button ('O' = owner view) ---
    img = GM.getImage("D:\Programming\Python\Room_Rental\Images\preview2.jpg",160,125)
    RoomPreview=tk.Button(root,image=img,compound=TOP)
    RoomPreview.image=img
    RoomPreview["bg"] = "#eeeeee"
    RoomPreview["borderwidth"] = "4px"
    ft = tkFont.Font(family='Times',size=16)
    RoomPreview["font"] = ft
    RoomPreview["fg"] = "#2e3436"
    RoomPreview["justify"] = "center"
    RoomPreview["text"] = "Room Preview"
    RoomPreview.place(x=530,y=220,width=160,height=155)
    RoomPreview["command"] = lambda : RoomPreview_command(root,user,'O',id)
    # --- "Booking History" button ---
    img = GM.getImage("D:\Programming\Python\Room_Rental\Images\history.jpg",160,125)
    BookingHistory=tk.Button(root,image=img,compound=TOP)
    BookingHistory.image=img
    BookingHistory["bg"] = "#f6f5f4"
    BookingHistory["borderwidth"] = "4px"
    ft = tkFont.Font(family='Times',size=16)
    BookingHistory["font"] = ft
    BookingHistory["fg"] = "#2e3436"
    BookingHistory["justify"] = "center"
    BookingHistory["text"] = "Booking History"
    BookingHistory.place(x=70,y=410,width=160,height=155)
    BookingHistory["command"] = lambda : BookingHistory_command(root,user,id)
    # --- "Tenant Record" button ---
    img = GM.getImage("D:\Programming\Python\Room_Rental\Images\\record.png",160,125)
    TenantRecord=tk.Button(root,image=img,compound=TOP)
    TenantRecord.image=img
    TenantRecord["bg"] = "#f6f5f4"
    TenantRecord["borderwidth"] = "4px"
    ft = tkFont.Font(family='Times',size=16)
    TenantRecord["font"] = ft
    TenantRecord["fg"] = "#2e3436"
    TenantRecord["justify"] = "center"
    TenantRecord["text"] = "Tenant Record"
    TenantRecord.place(x=300,y=410,width=160,height=155)
    TenantRecord["command"] = lambda : TenantRecord_command(root,user,id)
    # Thin green divider between the header and the button grid.
    Divider=tk.Label(root)
    Divider["bg"] = "#90ee90"
    ft = tkFont.Font(family='Times',size=10)
    Divider["font"] = ft
    Divider["fg"] = "#333333"
    Divider["justify"] = "center"
    Divider["text"] = ""
    Divider.place(x=3,y=180,width=744,height=3)
    # Logo (a Button widget with no command, used as an image holder).
    img = GM.getImage("D:\Programming\Python\Room_Rental\Images\Logo.png",172,100)
    Logo=tk.Button(root,image=img)
    Logo.image=img
    Logo["justify"] = "center"
    Logo.place(x=20,y=70,width=172,height=100)
    # --- "Update Room" button ---
    img = GM.getImage("D:\Programming\Python\Room_Rental\Images\\updateroom.png",160,125)
    UpdateRoom=tk.Button(root,image=img,compound=TOP)
    UpdateRoom.image=img
    UpdateRoom["bg"] = "#eeeeee"
    UpdateRoom["borderwidth"] = "4px"
    ft = tkFont.Font(family='Times',size=16)
    UpdateRoom["font"] = ft
    UpdateRoom["fg"] = "#2e3436"
    UpdateRoom["justify"] = "center"
    UpdateRoom["text"] = "Update Room"
    UpdateRoom.place(x=530,y=410,width=160,height=155)
    UpdateRoom["command"] = lambda : UpdateRoom_command(root,user,id)
    # Title banner greeting the logged-in owner.
    Title=tk.Button(root)
    Title["bg"] = "#47ed63"
    ft = tkFont.Font(family='Times',size=25)
    Title["font"] = ft
    Title["fg"] = "#000000"
    Title["justify"] = "center"
    Title["text"] = "Aspires' Room Rental Services\n\n Welcome "+user
    Title.place(x=210,y=20,width=523,height=150)
    # Logout button (top-left corner).
    Logout=tk.Button(root)
    Logout["bg"] = "#d93636"
    ft = tkFont.Font(family='Times',size=16)
    Logout["font"] = ft
    Logout["fg"] = "#ffffff"
    Logout["justify"] = "center"
    Logout["text"] = "Logout"
    Logout.place(x=20,y=20,width=172,height=43)
    Logout["command"] = lambda : Logout_command(root)
def AddRoom_command(root, user, id):
    """Open the AddRoom form in 'A' (add) mode, but only if this owner
    does not already have a room registered."""
    import DatabaseConnection as DB
    existing_rooms = DB.runQuery2("select count(r_id) from Room where owner_id = " + str(id))[0]
    if existing_rooms != 0:
        # One room per owner: refuse and explain.
        messagebox.showwarning("Cannot add more Rooms !", "You Already have a room ...delete it to add another !")
        return
    import AddRoom as AR
    AR.main(root, user, id, 'A')
def RemoveRoom_command(root,id):
    """Delete the owner's room after an ok/cancel confirmation; warn when
    the owner has no room to delete."""
    import DatabaseConnection as DB ;
    countR = DB.runQuery2("select count(r_id) from Room where owner_id = "+str(id))[0]
    if countR != 0 :
        # Fetch the room's display name for the confirmation dialog.
        room_name = DB.runQuery2("select name from Room where owner_id = "+str(id))[0]
        answer = askokcancel( title='Delete Confirmation', message='Are you sure you want to remove the Room named {} ?'.format(room_name), icon=WARNING)
        if answer :
            # Delete by owner_id (one room per owner).
            status = DB.deletion('Room','owner_id',id)
            if status : messagebox.showwarning("Room Deleted", "Room '{}' Deleted Successfully".format(room_name))
            else : messagebox.showerror("Failed", "Oops! something went wrong.")
    else :
        messagebox.showwarning("Nothing Here !", "Please add a room first !")
def RoomPreview_command(root,user,token,id):
    """Open the room-preview screen for this owner's room.

    `token` is the viewer role flag ('O' when opened from the owner home
    screen -- see the caller in main()).
    """
    import DatabaseConnection as DB
    #countR, room_id = DB.runQuery("select count(r_id),r_id from Room where owner_id = "+str(id))[0]
    countR = DB.runQuery("select count(r_id) from Room where owner_id = "+str(id))[0] #for cloud
    room_id = DB.runQuery("select r_id from Room where owner_id = "+str(id))[0] #for cloud
    if countR != 0 :
        import RoomPreview as RP; RP.main(root,user,token,id,room_id)
    else :
        messagebox.showwarning("Nothing Here","No Preview available, Please add a room")
def BookingHistory_command(root,user,id):
    """Open the owner's booking-history screen; warn when no bookings exist."""
    import DatabaseConnection as DB
    # int(bool(...)) collapses the booking count to 0/1.
    available = int(bool(DB.runQuery("select count(room_id) from Booking where owner_id = "+str(id))[0][0])) # for cloud
    if available != 0 :
        room_name = DB.runQuery2("select name from Room where owner_id = "+str(id))[0]
        import OwnerHistory as OH
        OH.main(root,room_name,user,id)
    else :
        messagebox.showwarning("Nothing Here !", "No Customer found !")
def TenantRecord_command(root,user,id):
    """Open the tenant-record screen; warn when the owner has no bookings."""
    import DatabaseConnection as DB
    # int(bool(...)) collapses the booking count to 0/1.
    available = int(bool(DB.runQuery("select count(room_id) from Booking where owner_id = "+str(id))[0][0])) # for cloud
    if available != 0 :
        room_name = DB.runQuery2("select name from Room where owner_id = "+str(id))[0]
        import TenantRecord as OH
        OH.main(root,room_name,user,id)
    else :
        messagebox.showwarning("Nothing Here !", "No booking found !")
def UpdateRoom_command(root, user, id):
    """Open the AddRoom form in 'U' (update) mode if the owner already has
    a room; otherwise warn that there is nothing to update."""
    import DatabaseConnection as DB
    room_count = DB.runQuery2("select count(r_id) from Room where owner_id = " + str(id))[0]
    if room_count == 0:
        messagebox.showwarning("Nothing Here !", "Please add a room first !")
        return
    import AddRoom as AR
    AR.main(root, user, id, 'U')
def Logout_command(root):
    """Confirm, then return to the Welcome (login) screen in the same window."""
    answer = askokcancel( title='Logout Confirmation', message='Are you sure to logout ?', icon=WARNING)
    if answer :
        import Welcome as welcome
        welcome.App(root)
|
"""
부녀회장이 될테야
"""
"""
이런순서로 올라가게된다.
2 7 16 30 55
1 3 6 10 15
1 2 3 4 5
"""
t = int(input())
data_len = 14
k, n = [], []
dp = [[0] * 14 for _ in range(data_len)]
for _ in range(t):
k.append(int(input()))
n.append(int(input()))
# k = 층수 , n = 호수
# 초기값 만들어놓기
for i in range(data_len):
dp[0][i] = i+1
# 선택된 층이 2층이 아니라면 바로 밑에 호실 *2 와 이전 층의 모든 호실을 더하면된다.
#
# for kk in range(1, data_len):
#
#
# for nn in range(data_len):
# sum = dp[kk-1][nn]
#
# if kk == 1:
#
"""
못품
t = int(input())
for _ in range(t):
floor = int(input()) # 층
num = int(input()) # 호
f0 = [x for x in range(1, num+1)] # 0층 리스트
for k in range(floor): # 층 수 만큼 반복
for i in range(1, num): # 1 ~ n-1까지 (인덱스로 사용)
f0[i] += f0[i-1] # 층별 각 호실의 사람 수를 변경
print(f0[-1]) # 가장 마지막 수 출력
"""
|
from django.shortcuts import render,redirect
from .models import POST
from django.contrib.auth.models import User,auth
from django.contrib import messages
# Create your views here.
def hello(request):
    """Landing page: render index.html with every POST (blog post) entry."""
    # Query all posts from the model.
    data = POST.objects.all()
    return render(request,'index.html',{'posts': data})
def page1(request):
    """Render page1.html with a Thai article title.

    (Removed the unused local `rating = 4`; it was never passed to the
    template context.)
    """
    return render(request,'page1.html',
            {'name':'บทความท่องเที่ยวภาคกลาง'})
def createForm(request):
    """Render the signup form (submitted to addUser)."""
    return render(request,'form.html' )
def addUser(request):
    """Handle the signup form POST: validate uniqueness and password match,
    then create a Django auth user.  Flash messages are in Thai."""
    Username = request.POST['username']
    FirstName = request.POST['firstName']
    LastName = request.POST['lastName']
    Email = request.POST['email']
    Password = request.POST['password']
    Repassword = request.POST['repassword']
    if Password == Repassword :
        if User.objects.filter(username = Username).exists():
            # Thai: "This username is already taken"
            messages.info(request,'Username นี้มีคนใช้แล้ว')
            return redirect('/createForm')
        elif User.objects.filter(email = Email).exists():
            # Thai: "Your email duplicates another user's"
            messages.info(request,'Email ของคุณนั้นซ้ำกับผู้อื่น')
            return redirect('/createForm')
        else:
            # create_user hashes the password before storing it.
            user = User.objects.create_user(
                username=Username,
                password=Password,
                email=Email,
                first_name=FirstName,
                last_name= LastName
            )
            user.save()
            return redirect('/')
    else:
        # Thai: "Passwords do not match !!"
        messages.info(request,'รหัสผ่านไม่ตรงกัน !!')
        return redirect('/createForm')
def loginForm(request):
    """Render the login form (submitted to login)."""
    return render(request,'login.html')
def login(request):
    """Authenticate the posted credentials and start a session on success."""
    Username = request.POST['username']
    Password = request.POST['password']
    # authenticate() returns None for unknown user or wrong password.
    user = auth.authenticate(username = Username , password = Password)
    if user is not None:
        auth.login(request,user)
        return redirect('/')
    else:
        # Thai: "Your username or password is incorrect !!"
        messages.info(request,'Username หรือ รหัสผ่านของคุณไม่ถูกต้อง !!')
        return redirect('/loginForm')
def logout(request):
    """End the session and return to the landing page."""
    auth.logout(request)
    return redirect('/')
|
def is_primary(num):
    """Return True if `num` is prime.

    Bug fix: the original looped over range(2, num//2), which is empty for
    num <= 5 -- so is_primary(4) returned True -- and it also returned True
    for 1, 0 and negatives.  Trial division up to sqrt(num) is sufficient
    and much cheaper for large inputs.
    """
    if num < 2:
        # 0, 1 and negatives are not prime.
        return False
    for i in range(2, int(num ** 0.5) + 1):
        if num % i == 0:
            return False
    return True
def genrate_primary(limit):
    """Sieve of Eratosthenes: return all primes in [2, limit], ascending."""
    composite = [False] * (limit + 1)
    primes = []
    for candidate in range(2, limit + 1):
        if composite[candidate]:
            continue
        primes.append(candidate)
        # Mark every multiple of this prime as composite.
        for multiple in range(2 * candidate, limit + 1, candidate):
            composite[multiple] = True
    return primes
# Read an integer and print its prime factorization, one factor per line,
# in ascending order (a prime input prints itself once).
num = int(input())
result = []
if is_primary(num):
    result.append(num)
else:
    # Every prime factor of a composite num is <= num // 2.
    # NOTE(review): correctness here depends on is_primary handling small
    # composites (e.g. 4) correctly.
    PRIMARY = genrate_primary(num//2)
    for p in PRIMARY:
        if num == 0:
            break
        # Divide out each prime as many times as it occurs.
        while num % p == 0:
            result.append(p)
            num //= p
for rt in result:
    print(rt)
|
from rest_framework import serializers
class LimitOffsetType(object):
    """Plain value object holding pagination/search parameters.

    Bug fix: the original ``__str__`` returned ``str(self).encode('utf-8')``
    and ``__unicode__`` returned ``str(self)`` -- both re-invoked ``__str__``,
    so any attempt to stringify the object recursed until RecursionError
    (and ``__str__`` must return ``str``, not ``bytes``, on Python 3).
    """

    def __init__(self, offset, limit, search, filter_by="all", **kwargs):
        self.offset = offset
        self.limit = limit
        self.filter_by = filter_by
        self.search = search

    def __str__(self):
        return 'LimitOffsetType(offset={}, limit={}, filter_by={}, search={})'.format(
            self.offset, self.limit, self.filter_by, self.search)

    def __unicode__(self):
        # Python 2 compatibility alias; delegates to __str__.
        return self.__str__()

    def __getitem__(self, item):
        # Allow dict-style access to attributes, e.g. obj['limit'].
        return getattr(self, item)
class LimitOffsetSearchSerializer(serializers.Serializer):
    """Validates pagination/search query params and builds a LimitOffsetType."""
    limit = serializers.IntegerField(default=20)
    offset = serializers.IntegerField()
    filter_by = serializers.CharField(allow_null=True, required=False)
    search = serializers.CharField(required=False, allow_blank=True)
    def create(self, validated_data):
        # Read-only serializer: "creating" just wraps the validated params.
        return LimitOffsetType(**validated_data)
|
import json
import boto3
from boto3.dynamodb.conditions import Key, Attr
# Module-level clients are reused across warm Lambda invocations.
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('customer')

def lambda_handler(event, context):
    """Authenticate a customer against the DynamoDB `customer` table.

    Looks up the item by ``event['email']`` and compares its stored password
    with ``event['password']``.  The response's ``data`` field is the full
    customer item on success, or the string "0" on any failure.

    NOTE(review): this returns the stored item (including the password
    attribute) to the caller and compares passwords in plain text -- both
    are security concerns worth revisiting.
    """
    try:
        res = table.get_item(Key={'email': event['email']})
        if res['Item']['password'] == event['password']:
            data = res['Item']
        else:
            data = "0"
    except Exception:
        # A missing 'email'/'Item' key or a DynamoDB client error all mean
        # the login failed.  (Was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; narrowed to Exception.  A leftover
        # debug print was removed.)
        data = "0"
    return {
        'statusCode': 200,
        'state' : True,
        'headers': {
            'Content-Type': 'application/json',
            'Access-Control-Allow-Origin': '*'
        },
        'data' : data
    }
|
# Beginner practice script: five small exercises, each printing its result.
#Que1 - reverse a list in place with list.reverse()
print("to reverse a list")
myList = [1,2,3,4,5]
print("myList: ", myList)
myList.reverse()
print("reversed list :", myList)
print("\n")
#Que2 - extract uppercase letters from a string
print("extract uppercase letters from a string")
myString = "HeLLo WorLd!!"
print(myString)
print("uppercase letters are:")
for i in myString:
    if(i.isupper()) == True:
        print("\t", i)
print("\n")
#Que3 - split comma-separated user input into a list
print("split the user input on commas and the values in a list")
myString = input("enter any string :")
myList = myString.split(',')
print("list formed is :", myList)
print("\n")
#Que4 - palindrome check via slice reversal
print("to check whether the string is palindrome or not")
string = input("enter any string : ")
print(string)
# string[-1::-1] is the reversed string.
x = string[-1::-1]
print(x)
if(string == x):
    print("string is palindrome")
else:
    print("string is not palindrome")
print("\n")
#Que5 - demonstrate shallow vs deep copy
print("deep copy and shallow copy")
import copy
list1 = [1,2,3,4,5]
list2 = copy.copy(list1)
print("original list :",list1)
print("shallow copied list :", list2)
# Appending to list1 does not affect the shallow copy: copy.copy made a
# new top-level list (only nested objects would be shared).
list1.append([6,7])
print("\r")
print("AFTER MAKING CHANGES")
print("original list :", list1)
print("shallow copied list :", list2)
print("\r")
list3 = copy.deepcopy(list1)
print("AFTER MAKING CHANGES")
# Replacing a top-level element of list1 leaves the deep copy unchanged.
list1[3] = 789
print("original list :", list1)
print("deep copied list :", list3)
|
# Generated by Django 3.0.3 on 2020-03-01 09:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the `Livre` (book) table."""
    # First migration of the app, so there are no dependencies.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Livre',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('titre', models.CharField(max_length=50)),
                ('auteur', models.CharField(max_length=50)),
                ('resume', models.TextField(max_length=500)),
                ('couverture', models.ImageField(upload_to='couvertures/')),
                ('note', models.PositiveSmallIntegerField()),
                ('codeBarre', models.CharField(max_length=50)),
                ('isbn', models.CharField(max_length=50)),
                ('edition', models.CharField(max_length=50)),
            ],
        ),
    ]
|
# def calculater(a,b,c):
# if c=="add":
# return(a,"aur",b,"ka addition",a+b,"hai")
# elif c=="multy":
# return(a,"aur",b,"ka multiplication",a*b,"hai")
# elif c=="div":
# return(a,"aur",b,"ka dividion",a%b,"hai")
# elif c=="substract":
# return(a,"aur",b,"ka substrstion",a-b,"hai")
# print(calculater(4,5,"substract"))
# print(calculater(3,4,"div"))
# print(calculater(6,7,"multy"))
# print(calculater(2,4,"add"))
|
import numpy
import os, os.path
from tigger.cluda.kernel import render_prelude, render_template
from tigger.cluda.dtypes import ctype, cast
import tigger.cluda.dtypes as dtypes
from tigger.core.transformation import *
from tigger.core.operation import OperationRecorder
class InvalidStateError(Exception):
    """Raised when a Computation method is called in the wrong lifecycle state."""
    pass
# Computation is not ready for calling overloaded methods from derived classes.
STATE_NOT_INITIALIZED = 0
# Computation is initialized and ready for calling preparations
# or adding transformations.
STATE_INITIALIZED = 1
# Computation is fully prepared and ready to use
STATE_PREPARED = 2
class Computation:
    """
    Creates a computation class and performs basic initialization for the
    :py:class:`~tigger.cluda.api.Context` object ``ctx``.
    Note that the computation is unusable until :py:func:`prepare`
    or :py:func:`prepare_for` is called.
    If ``debug`` is ``True``, a couple of additional checks will be performed in runtime
    during preparation and calls to computation.
    The following methods are for overriding by computations
    inheriting :py:class:`Computation` class.
    .. py:module:: tigger.core
    .. py:method:: _set_argnames(outputs, inputs, scalars)
    Special method to use by computations with variable number of arguments.
    Should be called before any connections and preparations are made.
    .. py:method:: _get_operation_recorder()
    Returns an instance of :py:class:`~tigger.core.operation.OperationRecorder` class
    which is used in :py:meth:`_construct_operations`.
    .. py:method:: _get_argnames()
    Must return a tuple ``(outputs, inputs, scalars)``, where each of
    ``outputs``, ``inputs``, ``scalars`` is a tuple of argument names used by this computation.
    If this method is not overridden, :py:meth:`set_argnames` will have to be called
    right after creating the computation object.
    .. py:method:: _get_argvalues(argnames, basis)
    Must return a dictionary with :py:class:`~tigger.core.ArrayValue` and
    :py:class:`~tigger.core.ScalarValue` objects assigned to the argument names.
    .. py:method:: _get_basis_for(*args, **kwds)
    Must return a dictionary with basis values for the computation working with ``args``,
    given optional parameters ``kwds``.
    If names of positional and keyword arguments are known in advance,
    it is better to use them explicitly in the signature.
    .. py:method:: _construct_operations(basis, device_params)
    Must fill and return the :py:class:`~tigger.core.operation.OperationRecorder`
    object with actions required to execute the computation.
    See the :py:class:`~tigger.core.operation.OperationRecorder` class reference
    for the list of available actions.
    The rest is public methods.
    """
    def __init__(self, ctx, debug=False):
        self._ctx = ctx
        self._debug = debug
        self._state = STATE_NOT_INITIALIZED
        # finish initialization only if the computation has fixed argument list
        if hasattr(self, '_get_argnames'):
            self._argnames = self._get_argnames()
            self._finish_init()
    def _finish_init(self):
        # Build the transformation tree over the base argument names and move
        # to INITIALIZED: connections and preparation are now allowed.
        self._tr_tree = TransformationTree(*self._get_base_names())
        self._state = STATE_INITIALIZED
    def _set_argnames(self, outputs, inputs, scalars):
        # May only be called once, before _finish_init() has run.
        if self._state != STATE_NOT_INITIALIZED:
            raise InvalidStateError("Argument names were already set once")
        self._argnames = (tuple(outputs), tuple(inputs), tuple(scalars))
        self._finish_init()
        return self
    def get_nested_computation(self, cls):
        """
        Calls ``cls`` constructor with the same arguments and keywords
        as were given to its own constructor.
        """
        return cls(self._ctx, debug=self._debug)
    def _get_base_names(self):
        """
        Returns three lists (outs, ins, scalars) with names of base computation parameters.
        """
        return self._argnames
    def _get_base_values(self):
        """
        Returns a dictionary with names and corresponding value objects for
        base computation parameters.
        """
        return self._get_argvalues(self._basis)
    def _basis_needs_update(self, new_basis):
        """
        Tells whether ``new_basis`` has some values differing from the current basis.
        """
        for key in new_basis:
            if self._basis[key] != new_basis[key]:
                return True
        return False
    def _basis_for(self, args, kwds):
        """
        Returns the basis necessary for processing given external arguments.
        """
        pairs = self._tr_tree.leaf_signature()
        if len(args) != len(pairs):
            raise TypeError("Computation takes " + str(len(pairs)) +
                " arguments (" + str(len(args)) + " given)")
        # We do not need our args per se, just their properties (types and shapes).
        # So we are creating mock values to propagate through transformation tree.
        values = {}
        for i, pair_arg in enumerate(zip(pairs, args)):
            pair, arg = pair_arg
            name, value = pair
            if arg is None:
                # No argument supplied: keep a typeless placeholder of the
                # right kind (array vs scalar).
                new_value = ArrayValue(None, None) if value.is_array else ScalarValue(None)
            else:
                new_value = wrap_value(arg)
                if new_value.is_array != value.is_array:
                    raise TypeError("Incorrect type of argument " + str(i + 1))
            values[name] = new_value
        # FIXME: this method is not really supposed to change the object state
        # First pass
        self._tr_tree.propagate_to_base(values)
        basis = AttrDict(self._get_basis_for(*self._tr_tree.base_values(), **kwds))
        base_values = self._get_argvalues(basis)
        # We cannot propagate array types back from base to leaves
        # (this creates ambiguity), but we have to set scalar types to those
        # set by the computation's preparation function, and rerun the basis generation.
        # This will not change results if array types are derived from scalar types
        # (by means of result_type(), for example), but will help set the correct leaf type
        # if the type of the scalar parameter is enforced by the computation
        # (for example, integer inversion value in FFT).
        for name, value in base_values.items():
            if not value.is_array:
                values[name] = value
        # Second pass
        self._tr_tree.propagate_to_base(values)
        return AttrDict(self._get_basis_for(*self._tr_tree.base_values(), **kwds))
    def leaf_signature(self):
        # (name, value) pairs for the external (leaf) parameters, in call order.
        return self._tr_tree.leaf_signature()
    def connect(self, tr, array_arg, new_array_args, new_scalar_args=None):
        """
        Connects a :py:class:`~tigger.core.Transformation` instance to the computation.
        After the successful connection the computation resets to the unprepared state.
        :param array_arg: name of the leaf computation parameter to connect to.
        :param new_array_args: list of the names for the new leaf array parameters.
        :param new_scalar_args: list of the names for the new leaf scalar parameters.
        """
        if self._state != STATE_INITIALIZED:
            raise InvalidStateError(
                "Cannot connect transformations after the computation has been prepared")
        if new_scalar_args is None:
            new_scalar_args = []
        self._tr_tree.connect(tr, array_arg, new_array_args, new_scalar_args)
    def prepare_for(self, *args, **kwds):
        """
        Prepare the computation so that it could run with ``args`` supplied to :py:meth:`__call__`.
        """
        if self._state == STATE_NOT_INITIALIZED:
            raise InvalidStateError("Computation is not fully initialized")
        elif self._state == STATE_PREPARED:
            raise InvalidStateError("Cannot prepare the same computation twice")
        self._basis = self._basis_for(args, kwds)
        self._leaf_signature = self.leaf_signature()
        # Record and optimize the kernel/allocation sequence for this basis.
        self._operations = self._construct_operations(self._basis, self._ctx.device_params)
        self._operations.optimize_execution()
        self._state = STATE_PREPARED
        return self
    def _get_operation_recorder(self):
        return OperationRecorder(
            self._ctx, self._tr_tree.copy(), self._basis, self._get_base_values())
    def signature_str(self):
        """
        Returns a string with the signature of the computation,
        containing argument names, types and shapes (in case of arrays).
        This is primarily a debug method.
        """
        res = []
        for name, value in self._tr_tree.leaf_signature():
            res.append("({argtype}) {name}".format(
                name=name, argtype=str(value)))
        return ", ".join(res)
    def __call__(self, *args, **kwds):
        """
        Execute computation with given arguments.
        The order and types of arguments are defined by the base computation
        and connected transformations.
        The signature can be also viewed by means of :py:meth:`signature_str`.
        """
        if self._state != STATE_PREPARED:
            raise InvalidStateError("The computation must be fully prepared before execution")
        if self._debug:
            # Debug mode: verify the arguments still match the prepared basis.
            new_basis = self._basis_for(args, kwds)
            if self._basis_needs_update(new_basis):
                raise ValueError("Given arguments require different basis")
        else:
            if len(kwds) > 0:
                raise ValueError("Keyword arguments should be passed to prepare_for()")
        if len(args) != len(self._leaf_signature):
            raise TypeError("Computation takes " + str(len(self._leaf_signature)) +
                " arguments (" + str(len(args)) + " given)")
        # Assign arguments to names and cast scalar values
        arg_dict = dict(self._operations.allocations)
        for pair, arg in zip(self._leaf_signature, args):
            name, value = pair
            if not value.is_array:
                # Coerce scalars to the dtype fixed at preparation time.
                arg = cast(value.dtype)(arg)
            assert name not in arg_dict
            arg_dict[name] = arg
        # Call kernels with argument list based on their base arguments
        for operation in self._operations.operations:
            op_args = [arg_dict[name] for name in operation.leaf_argnames]
            operation(*op_args)
|
# -*- coding:utf-8 -*-
""""
桶排序
"""
import numpy as np
def create_array(num):
    """Return `num` random integers drawn uniformly from [0, 50)."""
    return np.random.randint(0, 50, size=num)
def bucket_sort(arr):
    """Sort non-negative integers ascending via LSD radix sort.

    (Despite the name, this is least-significant-digit radix sort: for each
    decimal digit position, elements are distributed into ten buckets and
    the buckets are concatenated in order.)
    """
    digit_count = len(str(np.max(arr)))
    for pos in range(digit_count):
        buckets = [[] for _ in range(10)]
        for value in arr:
            text = str(value)
            if len(text) <= pos:
                # Value has no digit at this position -> treat it as 0.
                buckets[0].append(value)
            else:
                # pos-th digit counted from the right.
                digit = int(text[len(text) - 1 - pos])
                buckets[digit].append(value)
        merged = []
        for bucket in buckets:
            merged.extend(bucket)
        arr = merged
    return arr
if __name__ == '__main__':
    # Demo: sort ten random integers from [0, 50) and show before/after.
    a = create_array(10)
    print(a)
    a = bucket_sort(a)
    print(a)
|
def common_letters(word1, word2):
    """Return 'Common letters: ' followed by each letter of word1 that also
    occurs in word2, comma-separated (duplicates kept, word1 order)."""
    shared = [ch for ch in word1 if ch in word2]
    return "Common letters: " + ','.join(shared)

print(common_letters("house","computers"))
import cv2
import numpy as np
import pyzbar.pyzbar as pyzbar
import webbrowser
def open_url(url):
    """Open `url` in the system default web browser."""
    webbrowser.open(url)
# Read a QR/barcode image, decode every symbol in it, open each decoded
# payload as a URL, then display the image until a key is pressed.
img=cv2.imread('link.png')
decoded=pyzbar.decode(img)
print(decoded)
for data in decoded:
    print("DATA ", data.data)
    # pyzbar returns the payload as bytes; decode to text before use.
    x=data.data
    string=x.decode("utf-8")
    print(string)
    open_url(string)
cv2.imshow("IMAGE",img)
cv2.waitKey(0)
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from datetime import datetime
from decimal import Decimal
from pytz import UTC
from webservice.webmodel.RequestParameters import RequestParameters
from webservice.webmodel.StatsComputeOptions import StatsComputeOptions
class NexusRequestObjectTornadoFree(StatsComputeOptions):
    """Extracts dataset / bounding-box / time-range / output parameters from
    a Tornado-style request handler into plain values, so no Tornado objects
    need to be retained afterwards.
    """

    # Raw strings so the regex escapes are not subject to Python string
    # escape processing (the patterns themselves are unchanged).
    shortNamePattern = re.compile(r"^[a-zA-Z0-9_\-,\.]+$")
    floatingPointPattern = re.compile(r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?')

    def __init__(self, request_handler):
        self.__log = logging.getLogger(__name__)
        if request_handler is None:
            raise Exception("Request handler cannot be null")
        StatsComputeOptions.__init__(self)
        # Parse everything up front; the handler is not stored.
        self._dataset = self._parse_dataset(request_handler)
        self._bounding_box = self._parse_bounding_box(request_handler)
        self._start_time = self._parse_start_time(request_handler)
        self._end_time = self._parse_end_time(request_handler)
        self._nparts = self._parse_nparts(request_handler)
        self._content_type = self._parse_content_type(request_handler)

    def get_dataset(self):
        return self._dataset

    def get_bounding_box(self):
        return self._bounding_box

    def get_start_datetime(self):
        return self._start_time

    def get_end_datetime(self):
        return self._end_time

    def get_nparts(self):
        return self._nparts

    def get_content_type(self):
        return self._content_type

    def _parse_dataset(self, request_handler):
        """Return the validated dataset shortname, or None if absent."""
        ds = request_handler.get_argument(RequestParameters.DATASET, None)
        if ds is not None and not self.__validate_is_shortname(ds):
            raise Exception("Invalid shortname")
        return ds

    def _parse_bounding_box(self, request_handler):
        """Return (min_lon, min_lat, max_lon, max_lat).

        Accepts either a single "b=min_lon,min_lat,max_lon,max_lat" argument
        or the four individual maxLat/minLat/maxLon/minLon arguments.
        """
        b = request_handler.get_argument("b", '')
        if b:
            min_lon, min_lat, max_lon, max_lat = [float(e) for e in b.split(",")]
        else:
            max_lat = request_handler.get_argument("maxLat", 90)
            max_lat = Decimal(max_lat) if self.__validate_is_number(max_lat) else 90
            min_lat = request_handler.get_argument("minLat", -90)
            min_lat = Decimal(min_lat) if self.__validate_is_number(min_lat) else -90
            max_lon = request_handler.get_argument("maxLon", 180)
            max_lon = Decimal(max_lon) if self.__validate_is_number(max_lon) else 180
            # BUG FIX: the default and fallback for the minimum longitude
            # were -90; longitude spans [-180, 180].
            min_lon = request_handler.get_argument("minLon", -180)
            min_lon = Decimal(min_lon) if self.__validate_is_number(min_lon) else -180
        return min_lon, min_lat, max_lon, max_lat

    def _parse_start_time(self, request_handler):
        # Default 0 = the Unix epoch (resolved in _parse_time).
        return self._parse_time(request_handler, RequestParameters.START_TIME, default=0)

    def _parse_end_time(self, request_handler):
        # Default -1 = one second before the epoch, i.e. "unset".
        return self._parse_time(request_handler, RequestParameters.END_TIME, default=-1)

    def _parse_time(self, request_handler, arg_name, default=None):
        """Parse an ISO-8601 'YYYY-MM-DDTHH:MM:SSZ' string, falling back to
        integer epoch seconds; always returns a UTC-aware datetime.

        BUG FIX: when the argument was absent, the integer default reached
        strptime(), which raises TypeError (not ValueError) for non-strings
        and crashed the request; TypeError is now caught as well.
        """
        time_str = request_handler.get_argument(arg_name, default)
        try:
            dt = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=UTC)
        except (ValueError, TypeError):
            dt = datetime.utcfromtimestamp(int(time_str)).replace(tzinfo=UTC)
        return dt

    def _parse_nparts(self, request_handler):
        return int(request_handler.get_argument(RequestParameters.NPARTS, 0))

    def _parse_content_type(self, request_handler):
        return request_handler.get_argument(RequestParameters.OUTPUT, "JSON")

    def __validate_is_shortname(self, v):
        if v is None or len(v) == 0:
            return False
        return self.shortNamePattern.match(v) is not None

    def __validate_is_number(self, v):
        if v is None or (type(v) == str and len(v) == 0):
            return False
        elif type(v) == int or type(v) == float:
            return True
        else:
            return self.floatingPointPattern.match(v) is not None
# coding=utf-8
#
import math
def calc_sqrt_3():
    """Approximate sqrt(3) with Newton's method on f(x) = x^2 - 3.

    Prints each iteration and the final estimate, and returns the converged
    value (the original returned nothing, so the result was unusable).
    Modernized: Python 2 print statements -> print() calls, which run on
    both Python 2 and Python 3.
    """
    f_x = lambda x: float(math.pow(x, 2) - 3)
    f_d_x = lambda x: float(2 * x)
    # One Newton step: x - f(x)/f'(x).
    h_x = lambda x: x - f_x(x) / f_d_x(x)
    xn = 1.0
    round_count = 0
    while True:
        round_count += 1
        # round_count is an integer, so format it with %d (was %f).
        print("round: %d, xn: %f" % (round_count, xn))
        xn1 = h_x(xn)
        if abs(xn - xn1) < 0.000000000001:
            # Converged: successive estimates agree to ~1e-12.
            print(xn1)
            return xn1
        xn = xn1
if __name__ == '__main__':
    # Entry point: sanity print, then run the Newton demo.
    # (print statement -> print() call, valid on Python 2 and 3.)
    print("hello world")
    calc_sqrt_3()
'''
=========================================================================
(c) Danmarks Tekniske Universitet 2020
Script : Processes data extracted from .csv files
Author : Corey Kok
Project : -
========================================================================
Input : - NS
- Prices
- Storage
Output : - Arrays with the values:
- NS (Number of times LP will be solved in simulation)
- t
- s
- Time
- Pi
=========================================================================
'''
# Expects Model, Prices, Storage, Generation and StorPenFun to already be
# in scope (loaded from the .csv inputs by the caller -- see header).
if Model['i_fulltraj'][0] == 1:
    NS = len(Prices) - Model['t_horizon'][0] # Times optimisation needs to be solved
else:
    # Single solve when not simulating the full trajectory.
    NS = 1
# SETS
t = range(1, Model['t_horizon'][0] + 1) # time step array
s = Storage.index # set of storage devices
g = Generation.index # set of generation devices (comment fixed; was "storage")
k = StorPenFun.index # set of piecewise cost functions
sk = StorPenFun['s'] # presumably the storage device each piecewise segment belongs to -- TODO confirm
# PARAMETERS - cover entire optimisation horizon
Time = Prices['Time'] # Time
Pi = Prices['Pi'] # Market price
dem = Prices['dem'] # Demand (comment fixed; was "Market price")
|
# -*- coding: utf-8 -*-
import os
from pypinyin import pinyin, Style
GB2312_path = '../data/GB2312.txt'
def parse(data_path, output_dir):
    """Build pinyin lookup tables from a GB2312 character list.

    Reads one-entry-per-line *data_path*, maps every heteronym pinyin
    (TONE3 style, tones as trailing digits) to the characters producing
    it, then writes into *output_dir*:
      - gb2312_pinyin_dict.txt: "pinyin,characters" per line, sorted
      - bg2312_pinyin_alphabet.txt: the sorted pinyin keys only

    Fix: files are opened with an explicit UTF-8 encoding so the Chinese
    text round-trips identically regardless of the platform's locale
    default.
    """
    with open(data_path, encoding='utf-8') as f:
        data = [l.rstrip() for l in f.readlines() if l.rstrip() != '']
    print(data)
    print('data nums: {}'.format(len(data)))
    # Take the first heteronym list for each entry.
    data_pinyin = [pinyin(i, style=Style.TONE3, heteronym=True)[0] for i in data]
    print(data_pinyin)
    # Tones are marked [1-4] and the neutral tone is left unnumbered;
    # normalise to [1-5] by appending '5' for the neutral tone so every
    # syllable ends in a tone digit.
    data_pinyin = [[w + '5' if w[-1] not in '1234' else w for w in i] for i in data_pinyin]
    print(data_pinyin)
    # Invert: pinyin syllable -> concatenation of characters producing it.
    data_dict = {}
    for i, v in enumerate(data_pinyin):
        for w in v:
            data_dict[w] = data_dict.get(w, '') + data[i]
    print(data_dict)
    print('data_dict len: {}'.format(len(data_dict)))
    with open(os.path.join(output_dir, 'gb2312_pinyin_dict.txt'), 'w', encoding='utf-8') as f:
        f.write('\n'.join([','.join([k, data_dict[k]]) for k in sorted(data_dict.keys())]))
    # NOTE(review): "bg2312" looks like a typo for "gb2312", but the file
    # name is kept as-is because downstream consumers may depend on it.
    with open(os.path.join(output_dir, 'bg2312_pinyin_alphabet.txt'), 'w', encoding='utf-8') as f:
        f.write('\n'.join(sorted(data_dict.keys())))
if __name__ == '__main__':
    # Build the pinyin tables from the bundled GB2312 list into ../output.
    parse(GB2312_path, '../output')
|
from flask import Flask, jsonify, request
from models import setup_db, Plant
from flask_cors import CORS
def create_app(test_config=None):
    """Application factory: configure the Flask app, DB, CORS, and routes.

    Args:
        test_config: optional configuration mapping; currently unused but
            kept for the conventional factory signature.

    Returns:
        The configured Flask application.
    """
    app = Flask(__name__)
    setup_db(app)
    CORS(app)

    PLANTS_PER_PAGE = 10  # page size for the /plants listing

    @app.after_request
    def after_request(response):
        # Allow cross-origin requests with the standard auth headers/verbs.
        response.headers.add('Access-Control-Allow-Headers', 'Content-Type, Authorization')
        response.headers.add('Access-Control-Allow-Methods', 'GET, POST, PATCH, DELETE, OPTIONS')
        return response

    @app.route('/plants')
    def get_plants():
        """Return one page of formatted plants plus the total count."""
        page = request.args.get('page', 1, type=int)
        start = (page - 1) * PLANTS_PER_PAGE
        end = start + PLANTS_PER_PAGE
        plants = Plant.query.all()
        formatted_plants = [plant.format() for plant in plants]
        return jsonify({
            # Defect fixed: the key was misspelled 'suceess'.
            'success': True,
            'plants': formatted_plants[start:end],
            'total_plants': len(formatted_plants)
        })

    return app
#!/usr/bin/python
# -*- coding: utf-8 -*-
from collections import namedtuple
import time
import gc
Item = namedtuple("Item", ['index', 'value', 'weight'])
def solve_it(input_data):
    """Solve one knapsack instance supplied as text.

    input_data: first line "<item_count> <capacity>", then one
    "<value> <weight>" line per item.  Returns the grader-format string:
    best value and a flag on line one (NOTE(review): the trailing 0 is
    presumably an "optimality proven" flag -- confirm against the grader
    spec), then the 0/1 taken vector on line two.

    NOTE(review): this module is Python 2 (print statements below).
    """
    # Modify this code to run your optimization algorithm

    # parse the input
    lines = input_data.split('\n')

    # Get the number of the items in the problem statement
    item_count = int(lines[0].split()[0])
    # Get the maximum capacity of the knapsack
    capacity = int(lines[0].split()[1])

    # Define the list of items in the knapsack
    items = []

    # Parse the value and weight of all the items in the file
    for i in range(1, item_count + 1):
        line = lines[i]
        parts = line.split()
        items.append(Item(i - 1, int(parts[0]), int(parts[1])))

    # Fractional-relaxation upper bound, used only to report quality.
    estimate = maxvalue(items, capacity)[0]

    # print "Total items: {0}".format(item_count)
    # print "Maximum capacity: {0}".format(capacity)
    # print "Theoretical maximum value: {0:.1f}".format(estimate)

    # Attempt several versions of greedy algorithms
    allres = []

    # # 1. Trivial greedy algorithm
    # start_time = time.time() # Get the execution start time
    # res = greedy_trivial(items, capacity)
    # allres = [res]
    # print "Greedy (trivial) ----> value: {1} ({3:.2f}%), weight: {0} run time: {2:.3f} seconds".format(res[1], res[0], time.time() - start_time, 100.0 * res[0] / estimate)
    # gc.collect()
    #
    # # 2. Greedy algorithm that prefers items with higher value
    # start_time = time.time() # Get the execution start time
    # res = greedy_by_value(items, capacity)
    # allres[len(allres):] = [res]
    # print "Greedy (value) ------> value: {1} ({3:.2f}%), weight: {0} run time: {2:.3f} seconds".format(res[1], res[0], time.time() - start_time, 100.0 * res[0] / estimate)
    # gc.collect()
    #
    # # 3. Greedy algorithm that prefers items with lower wight
    # start_time = time.time() # Get the execution start time
    # res = greedy_by_weight(items, capacity)
    # allres[len(allres):] = [res]
    # print "Greedy (weight) -----> value: {1} ({3:.2f}%), weight: {0} run time: {2:.3f} seconds".format(res[1], res[0], time.time() - start_time, 100.0 * res[0] / estimate)
    # gc.collect()
    #
    # # 4. Greedy algorithm that prefers items with high value/weight
    # start_time = time.time() # Get the execution start time
    # res = greedy_by_density(items, capacity)
    # allres[len(allres):] = [res]
    # print "Greedy (density) ----> value: {1} ({3:.2f}%), weight: {0} run time: {2:.3f} seconds".format(res[1], res[0], time.time() - start_time, 100.0 * res[0] / estimate)
    # gc.collect()
    #
    # 5. Dynamic algorithm with bottom up approach
    start_time = time.time() # Get the execution start time
    res = dynamic_bottom_up(items, capacity)
    allres[len(allres):] = [res]
    print "Dynamic (dense) -----> value: {1} ({3:.2f}%), weight: {0} run time: {2:.3f} seconds".format(res[1], res[0], time.time() - start_time, 100.0 * res[0] / estimate)
    gc.collect()

    # 5. Dynamic algorithm with bottom up approach
    # NOTE(review): comment above is stale -- this step runs branch and bound.
    start_time = time.time() # Get the execution start time
    res = branch_and_bound_deep(items, capacity)
    allres[len(allres):] = [res]
    print "Branch Bound (deep) -> value: {1} ({3:.2f}%), weight: {0} run time: {2:.3f} seconds".format(res[1], res[0], time.time() - start_time, 100.0 * res[0] / estimate)
    gc.collect()

    # Select result with the maximum value
    weight = 0
    value = 0
    taken = [0] * len(items)
    index = 0
    # print "Selecting the best solution out of {0} solutions".format(len(allres))
    for i, res in enumerate(allres):
        # Each res is a (value, weight, taken) triple.
        if res[0] > value:
            index = i
            value = res[0]
            taken = res[2]
    # print "best soulution number: {0}".format(index)

    # prepare the solution in the specified output format
    output_data = str(value) + ' ' + str(0) + '\n'
    output_data += ' '.join(map(str, taken))
    return output_data
# define the function that computes the absolute maximum value in the knapsack
def maxvalue(items, capacity):
    """Fractional-relaxation upper bound for the knapsack instance.

    Sorts items by value density, fills greedily, and adds a fractional
    part of the first item that does not fit.

    Returns (value, out_taken, sorted_items, sorted_indices, taken),
    where out_taken holds the 0/1/fractional coefficients in original
    item order and taken holds them in density order.

    Fixes:
      - when every item fits, the old code indexed one past the end of
        sorted_items and raised IndexError; the fractional step is now
        guarded, matching maxvalue_simple;
      - the map() results are materialised as lists so indexing works on
        Python 3 as well (identical behaviour on Python 2).
    """
    # accumulators for the greedy fill
    weight = 0
    value = 0
    taken = [0] * len(items)
    # Sort (original_index, item) pairs by value density, best first.
    sorter = sorted(enumerate(items), key=lambda x: 1.0 * x[1].value / x[1].weight, reverse=True)
    sorted_items = [pair[1] for pair in sorter]
    sorted_indices = [pair[0] for pair in sorter]
    # iterate though the sorted items and take whole items while they fit
    i = 0
    for item in sorted_items:
        if weight + item.weight <= capacity:
            value += item.value
            weight += item.weight
            taken[i] = 1
            i += 1
        else:
            break
    # Add a fraction of the next item to reach the relaxation bound.
    # Guard: if every item fit, there is no next item (bug fix).
    if i < len(sorted_items):
        d = 1.0 * (capacity - weight) / sorted_items[i].weight
        taken[i] = d
        value += d * sorted_items[i].value
    out_taken = [0] * len(sorted_items)
    # Sort taken value coefficients into the original order
    for i in range(len(items)):
        out_taken[i] = taken[sorted_indices.index(i)]
    # return the optimum value
    return value, out_taken, sorted_items, sorted_indices, taken
def maxvalue_simple(items, capacity):
    """Fractional-relaxation value for *items in the given order*.

    Takes whole items until one no longer fits, then adds a fraction of
    that item.  Unlike maxvalue(), the list is NOT sorted by density
    here, so the bound is only meaningful if the caller pre-sorted it.

    Fix: the debug print was a Python 2 print statement; the
    parenthesised form prints identically on Python 2 and is valid on
    Python 3.
    """
    print(items)  # debug output retained from the original
    # define weight and value accumulators
    weight = 0
    value = 0
    # iterate though the items and take whole ones while they fit
    i = 0
    for item in items:
        if weight + item.weight <= capacity:
            value += item.value
            weight += item.weight
            i += 1
        else:
            break
    # add fraction of the next value in the items list to compute the
    # maximum attainable value (guarded: every item may already fit)
    if i < len(items):
        value += (1.0 * (capacity - weight) / items[i].weight) * items[i].value
    # return the optimum value
    return value
def greedy_trivial(items, capacity):
    """Fill the knapsack greedily, scanning items in list order.

    Every item that still fits is taken; items that do not fit are
    skipped (scanning continues).  Returns (value, weight, taken) with
    taken as a 0/1 list indexed by each item's .index field.
    """
    total_value = 0
    total_weight = 0
    chosen = [0] * len(items)
    for candidate in items:
        # Skip anything that would overflow the remaining capacity.
        if total_weight + candidate.weight > capacity:
            continue
        chosen[candidate.index] = 1
        total_value += candidate.value
        total_weight += candidate.weight
    return total_value, total_weight, chosen
def greedy_by_value(items, capacity):
    """Greedy heuristic that considers the most valuable items first."""
    by_value_desc = sorted(items, key=lambda it: it.value, reverse=True)
    return greedy_trivial(by_value_desc, capacity)
def greedy_by_weight(items, capacity):
    """Greedy heuristic that considers the lightest items first."""
    by_weight_asc = sorted(items, key=lambda it: it.weight)
    return greedy_trivial(by_weight_asc, capacity)
def greedy_by_density(items, capacity):
    """Greedy heuristic that considers the best value/weight ratio first."""
    by_density_desc = sorted(items, key=lambda it: 1.0 * it.value / it.weight, reverse=True)
    return greedy_trivial(by_density_desc, capacity)
def dynamic_bottom_up(items, capacity):
    """Dynamic-programming knapsack solver.

    Returns (value, weight, taken) with taken as a 0/1 list in original
    item order.

    NOTE(review): despite the name, the table is filled top-down by
    memoised recursion; depth grows with len(items) and may hit Python's
    recursion limit on large instances.  Entries whose true optimum is 0
    look "uncached" to the memo test below and get recomputed.
    """
    # start_time = time.time() # Get the execution start time

    # Allocate table with the dynamic solution results
    # table[k][j] = best value using the first j items with capacity k
    table = [[0 for x in range(len(items) + 1)] for x in range(capacity + 1)]
    # print("--- %s seconds ---" % (time.time() - start_time))
    # start_time = time.time() # Get the execution start time

    # Define recursive function that populates the table
    def recursive(k, j, items):
        # print "k = {0}\tj = {1}".format(k, j)
        jj = j - 1 # index of the current item in the knapsack
        res = 0 # intermediate result
        # If the number of items in the knapsack is 0 - do nothing and return 0
        if j == 0: return 0
        # If combination of the knapsack capacity and number of items was already visited and
        # added to the table, just return the pre-computed value
        if table[k][j] != 0: return table[k][j]
        # If the weight of current item is less then the capacity of the knapsack
        if items[jj].weight <= k:
            # Select the maximum value of either adding the value of the current item into the knapsack
            # or not adding it
            res = max(recursive(k, j - 1, items),
                      items[jj].value + recursive(k - items[jj].weight, j - 1, items))
        else:
            res = recursive(k, j - 1, items)
        # Buffer up the result in the table
        table[k][j] = res
        return res

    # Compute the maximum value of the items in the knapsack and populate the table in the
    # process of computing
    value = recursive(capacity, len(items), items)

    # Print the table
    # strb = ""
    # for k in xrange(0, capacity+1):
    #     strb += "{0:4}:\t".format(k)
    #     for j in xrange (0, len(table[k])):
    #         strb += "{0:4}\t".format(table[k][j])
    #     strb += "\n"
    # print strb

    # Work backwards through the table and determine the indices of the items that made
    # into the knapsack
    # define the list of taken items
    taken = [-1 for x in range(1, len(items) + 1)]
    # iterate through the columns of the table (items) in reverse order
    k = capacity
    for j in xrange(len(items), 0, -1):
        # Select the maximum value entity in the column
        if table[k][j] == table[k][j - 1]:
            # No change in the value, therefore the j-th item was not added
            taken[j - 1] = 0
        else:
            # Change in the value found - the item was added to the knapsack
            taken[j - 1] = 1
            k -= items[j - 1].weight
    # Compute the total weight of items in the knapsack
    weight = 0
    vlck = 0
    for i, t in enumerate(taken):
        weight += items[i].weight * t
        # vlck += items[i].value * t
    # print "Weight = {0} -> Value = {1}".format(weight, vlck)
    # print("--- %s seconds ---" % (time.time() - start_time))
    return value, weight, taken # return dummy
def branch_and_bound_deep(items, capacity):
    """Branch and bound knapsack solver with depth-first search.

    Returns (best_value, weight, taken) with taken as a 0/1 list in
    original item order.

    NOTE(review): this module is Python 2 (print statements near the
    end, xrange, list-returning map).
    """
    # define class for branch and bound search
    class Searcher:
        # constructor
        def __init__(self):
            self.best = 0      # best total value found so far
            self.taken = []    # taken vector of the best solution
            self.counter = 0   # number of search nodes explored

        def max_value(self, items, capacity):
            """Fractional-relaxation bound; also caches the density-sorted
            order used by the search.

            NOTE(review): if every item fits, i runs past the end and
            self.sorted_items[i] raises IndexError -- the same latent
            bug as the module-level maxvalue().
            """
            # define weight and value accumulators
            weight = 0
            value = 0
            self.capacity = capacity
            self.sorted_taken = [0] * len(items)
            #items = sorted(items, key=lambda x: 1.0 * x.value / x.weight, reverse=True)
            # Sort (original_index, item) pairs by value density, best first.
            sorter = sorted(enumerate(items), key=lambda x: 1.0 * x[1].value / x[1].weight, reverse=True)
            self.sorted_items = map(lambda x: x[1], sorter)
            self.sorted_indices = map(lambda x: x[0], sorter)
            # iterate though the sorted items and compute the maximum value
            i = 0
            for item in self.sorted_items:
                if weight + item.weight <= capacity:
                    value += item.value
                    weight += item.weight
                    self.sorted_taken[i] = 1
                    i += 1
                else:
                    break
            # add fraction of the next value in the sorted items list to compute the
            # maximum attainable value
            d = 1.0 * (capacity - weight) / self.sorted_items[i].weight
            self.sorted_taken[i] = d
            value += d * self.sorted_items[i].value
            self.out_taken = [0] * len(self.sorted_items)
            # Sort taken value coefficients into the original order
            for i in xrange(0, len(items)):
                self.out_taken[i] = self.sorted_taken[self.sorted_indices.index(i)]
            # return the optimum value
            return value

        def recompute_estimate(self, index, items, taken, capacity):
            """Upper bound for the branch where item `index` is NOT taken:
            keep decisions already made at shallower levels, optimistically
            assume every later item is taken, allow one fractional item."""
            #print "-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0-0"
            weight = 0
            value = 0
            exit_loop = False
            # Walk items in density order so the fractional cut is valid.
            for i in xrange(0, len(items)):
                si = self.sorted_indices[i]
                sindex = self.sorted_indices[index]  # NOTE(review): unused
                if si < index:
                    take = taken[si]  # decision fixed at a shallower level
                elif si > index:
                    take = 1          # optimistic: assume later items taken
                else:
                    take = 0          # the item being skipped on this branch
                d = 0
                if take == 1:
                    if weight + items[si].weight <= capacity:
                        d = 1.0
                        weight += items[si].weight
                        value += items[si].value
                    else:
                        # Fractional remainder fills the knapsack exactly.
                        d = 1.0 * (capacity - weight) / items[si].weight
                        weight += d * items[si].weight
                        value += d * items[si].value
                        exit_loop = True
                #print "index = {6} i = {0} sorted i = {1} value = {2:.2f} weight = {3} take = {5} taken = {4} d = {8:.3f} sorted taken = {7}".format(i, si, value, weight, taken, take, index, self.sorted_taken, d)
                if exit_loop:
                    break
            return value

        def recursive(self, items, level, taken, value, capacity, estimate):
            """Depth-first search: decide whether to take items[level];
            prune branches whose bound cannot beat self.best."""
            #print "{6}: level = {0} taken = {1} value = {2} capacity = {3} estimate = {4:.2f} best = {5} taken = {7}".format(level, taken, value, capacity, estimate, self.best, self.counter, self.taken)
            # increment recursive call counter
            self.counter += 1
            # check if any capacity is left in the knapsack
            if capacity >= 0:
                # update the best estimate
                if self.best < value:
                    self.best = value
                    self.taken = taken[:]
                    # weight = 0
                    # for i in xrange(0, len(items)):
                    #     weight += taken[i] * items[i].weight
                    # print "+++@ Updated best result: {0} with {1} (weight = {2})".format(self.best, self.taken, weight)
            else:
                return
            if capacity == 0:
                return
            if level < len(items):
                tl1 = taken[:]
                # Proceed to the take item branch if enough capacity will be left in the knapsack after the item is taken
                #print "Checking capacity if the item {1} is taken... capacity = {0}, item {1} weight = {2}, residual = {3}".format(capacity, level, items[level].weight, capacity - items[level].weight)
                if capacity - items[level].weight >= 0:
                    #print "---> Branching: take item {0}".format(level + 1)
                    tl1[level] = 1
                    self.recursive(items, level+1, tl1, value + items[level].value, capacity - items[level].weight, estimate)
                est = self.recompute_estimate(level, items, taken, self.capacity)
                #print "Checking the estimate if the item {0} with value of {3} is not taken... estimate = {1:.2f}, best value = {2}".format(level, est, self.best, items[level].value)
                #if est - items[level].value > self.best:
                if est > self.best:
                    #print "---> Branching: ignore item {0}".format(level + 1)
                    self.recursive(items, level+1, taken, value, capacity, est)

    # create the searcher
    searcher = Searcher()
    # Initial bound from the fractional relaxation (also sorts the items).
    estimated = searcher.max_value(items, capacity)
    #searcher.recursive(sorted_items, 0, [0] * len(items), 0, capacity, estimated)
    searcher.recursive(items, 0, [0] * len(items), 0, capacity, estimated)

    # Recompute weight and value from the taken vector as a sanity check.
    weight = 0
    chvalue = 0
    for i in xrange(0, len(items)):
        weight += searcher.taken[i] * items[i].weight
        chvalue += searcher.taken[i] * items[i].value
    print "Weight = {0}, Verification value = {1} taken = {2}".format(weight, chvalue, searcher.taken)
    print "Nodes explored: {0}".format(searcher.counter)
    return searcher.best, weight, searcher.taken
import sys

# Default input used when no path is given on the command line.
FOLDER = "C:\\Users\\Nevalyashka\\Documents\\GitHub\\DiscreteOptimization\\knapsack\\data"
FILE = "\\ks_50_0"
SOURCE = FOLDER + "\\" + FILE

if __name__ == '__main__':
    # Fixes: the two branches duplicated the read logic (only the file
    # location differed), and the file handle leaked if reading raised.
    # print(...) with a single argument behaves identically on Python 2.
    file_location = sys.argv[1].strip() if len(sys.argv) > 1 else SOURCE
    with open(file_location, 'r') as input_data_file:
        input_data = ''.join(input_data_file.readlines())
    print(solve_it(input_data))
|
import fcntl, socket, struct, dweepy, time, platform, random
import main
import schedule
import time
import clock
import json
import threading
# Name of the dweet.io "thing" polled for alarm-time updates.
# Fix: the original chained assignment `dweet_controller =
# dweet_controller = ...` was redundant.
dweet_controller = 'talking_alarm_set_time'
# Most recently fetched alarm time, formatted "HH:MM".
globvar = "00:00"
def job():
    """Smoke-test task: prints a message to prove the scheduler fires."""
    message = "I'm working..."
    print(message)
def set_time_manually():
    """Testing helper: push a fixed 18:45 alarm time to dweet.io.

    Performs a live network call via dweepy against the module-level
    dweet_controller thing; set_alarm_time() reads the same thing back.
    """
    dweepy.dweet_for(dweet_controller, {'time_hour': '18','time_minute': '45'})
def set_alarm_time():
    """Poll dweet.io for the latest alarm time and store it in globvar.

    The dweet payload is expected to carry 'time_hour' and 'time_minute'
    fields (see set_time_manually for the producer side).

    Fixes:
      - dropped the json.dumps/json.loads round-trip: dweepy already
        returns parsed JSON, so it was a no-op;
      - removed the duplicated `translatedObject =` chained assignments;
      - zero-pad hour and minute so the result can actually match
        time.strftime("%H:%M") in trigger_alarm() ("6:45" would never
        compare equal to "06:45");
      - print as a function for Python 2/3 compatibility.
    """
    global globvar  # use global variable to store time dweet
    dweet = dweepy.get_latest_dweet_for(dweet_controller)
    content = dweet[0]['content']
    time_hour = str(content['time_hour']).zfill(2)
    time_minute = str(content['time_minute']).zfill(2)
    globvar = time_hour + ":" + time_minute
    print(globvar)
def trigger_alarm():
    """Run the alarm (main.main()) when the current HH:MM equals globvar.

    Otherwise prints both clocks and sleeps one second.
    """
    # Fix: the original format used "%s", a platform-specific
    # epoch-seconds extension that raises ValueError on platforms that
    # do not support it; "%S" is the intended seconds directive.
    year, month, day, hour, minute, seconds = time.strftime("%Y,%m,%d,%H,%M,%S").split(',')
    time_now = hour + ":" + minute
    time_alarm = globvar
    if time_now == time_alarm:
        main.main()
    else:
        # Not alarm time yet: show both clocks and pace the polling.
        print(time_now)
        print(time_alarm)
        time.sleep(1)
# schedule.every(1).minutes.do(set_time_manually)
# Poll dweet.io for a new alarm time once per minute.
schedule.every(1).minutes.do(set_alarm_time)
# NOTE(review): threading.Timer fires trigger_alarm exactly once, 30 s
# after startup -- it does not reschedule itself, so the alarm is only
# ever checked a single time.  Confirm this is intended.
threading.Timer(30.0, trigger_alarm).start()

# Main loop: run any due scheduled jobs, checking once per second.
while 1:
    schedule.run_pending()
    time.sleep(1)
|
import PyLongQt as pylqt

# Build a grid protocol and load its simulation variables from a settings
# file.  NOTE(review): the absolute path is machine-specific; parameterise
# before reuse.
proto = pylqt.Protocols.GridProtocol()
settings = pylqt.Misc.SettingsIO.getInstance()
settings.readSettings(proto,'/home/dgratz/Documents/data112917-1426/simvars.xml')
# Cell info for position (0, 1) backed by a DumbyCell placeholder; the
# grid assignment below is left disabled.
ci = pylqt.Structures.CellInfo(0,1,cell=pylqt.Cells.DumbyCell())
#proto.grid.setCell(ci)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.