text stringlengths 8 6.05M |
|---|
""" Note, sort the input.txt file numerically before running
sort -n input.txt > tmp.txt && mv -f tmp.txt input.txt
"""
import sys
class Project(object):
    """Solve a firewall-ranges puzzle (lowest allowed IP / count of allowed IPs).

    ``fh`` must iterate over lines of the form ``"low-high"`` sorted
    numerically by the low end (see the module docstring's sort command).
    """

    def __init__(self, fh):
        # Iterable of "low-high" range lines, pre-sorted numerically.
        self.input = fh

    def int_to_ip(self, data):
        """Render a 32-bit integer as a dotted-quad IP string."""
        binary = bin(data)[2:].zfill(32)
        sects = []
        while len(binary) > 0:
            sects.append(str(int(binary[0:8], 2)))
            binary = binary[8:]
        return '.'.join(sects)

    def run1(self, ):
        """Return the lowest integer not covered by any blocked range.

        Returns None when the ranges leave no gap.  (The original also
        called int_to_ip on the answer and discarded the result; that
        dead call is removed.)
        """
        greatest = 0
        for line in self.input:
            parts = line.strip().split('-')
            low = int(parts[0])
            high = int(parts[1])
            if low > greatest + 1:
                return greatest + 1
            if high > greatest:
                greatest = high

    def run2(self, ):
        """Count all integers that fall in gaps between the blocked ranges."""
        greatest = 0
        valid = 0
        for line in self.input:
            parts = line.strip().split('-')
            low = int(parts[0])
            high = int(parts[1])
            if low > greatest + 1:
                valid += low - greatest - 1
            if high > greatest:
                greatest = high
        return valid
def run2(self, ):
greatest = 0
valid = 0
for line in self.input:
parts = line.strip().split('-')
low = int(parts[0])
high = int(parts[1])
if low > greatest + 1:
valid += low - greatest - 1
if high > greatest:
greatest = high
return valid
if __name__ == '__main__':
    # Python-3 print() calls replace the original Python-2-only print
    # statements; output text is unchanged.
    with open('input.txt', 'r') as f:
        p = Project(f)
        print("Part 1:", p.run1())
        # Part 1 consumed the file handle; rewind before part 2 re-reads it.
        f.seek(0)
        print("Part 2:", p.run2())
|
import os,sys,re
import json
import operator
import itertools
import common
from common import *
import logging
logger = logging.getLogger('logcat')
class LogcatLogLine(object):
    """Bag-of-attributes record for one parsed logcat line.

    THREADTIME_PATTERN matches the 'threadtime' logcat output format:
    date, time, pid, tid, priority (V/D/I/W/E), tag, message.
    """
    THREADTIME_PATTERN = re.compile(r'(?P<date>\d{4}-\d{2}-\d{2})\s+(?P<time>\d{2}:\d{2}:\d{2}.\d+)\s+(?P<pid>\d+)\s+(?P<tid>\d+)\s+(?P<priority>[VDIWE])\s+(?P<tag>.*?): (?P<message>.*)$')

    def __init__(self, **kwargs):
        # Promote every keyword argument to an instance attribute.
        # .items() works on Python 2 and 3; iteritems() is Python-2-only.
        for key, value in kwargs.items():
            setattr(self, key, value)
def logcat_generator(file_path):
    """Yield the JSON-decoded message of each matching logcat line.

    Lines that do not match THREADTIME_PATTERN, or whose message is not
    valid JSON, are skipped silently.
    """
    with open(file_path) as f:
        for line in f:
            try:
                # Fixed: original referenced misspelled 'LogatLogLine'
                # (NameError on every line, swallowed by the except).
                m = LogcatLogLine.THREADTIME_PATTERN.match(line)
                if m:
                    # Yield inside the match branch: previously a
                    # non-matching line re-yielded the prior payload.
                    yield json.loads(m.group('message'))
            except Exception:
                #logger.warn('Failed to convert line :%s' % (line))
                continue
def logcat_parse(file_path, tag_pattern='.*'):
    """Return JSON payloads of logcat lines whose tag matches tag_pattern.

    Lines that fail the regex or JSON decoding are skipped silently.
    """
    tag_pattern = re.compile(tag_pattern)
    lines = []
    # Text mode: THREADTIME_PATTERN is a str pattern, so matching against
    # bytes from 'rb' mode would raise TypeError on Python 3.
    with open(file_path, 'r') as f:
        for line in f:
            try:
                m = LogcatLogLine.THREADTIME_PATTERN.match(line)
                if m and tag_pattern.match(m.group('tag')):
                    lines.append(json.loads(m.group('message')))
            except Exception:
                #logger.warn('Failed to convert line :%s' % (line))
                continue
    return lines
def find_nearest_power_sync(logcat_power_syncs, timestamp):
    """Return the power-sync dict whose 'powerSyncEnd' minimizes
    (timestamp - powerSyncEnd).

    NOTE(review): the difference is signed, not absolute, so a sync just
    *after* `timestamp` always beats one just before — preserved from the
    original structure; confirm whether absolute distance was intended.
    """
    # Seed with the first entry so nearest_ps is always bound (the
    # original could return an unassigned variable) and fix two typos:
    # 'timesetamp' and ps[0]['powerSyncEnd'] (each ps is itself a dict).
    nearest_ps = logcat_power_syncs[0]
    best_time_diff = timestamp - nearest_ps['powerSyncEnd']
    for ps in logcat_power_syncs:
        time_diff = timestamp - ps['powerSyncEnd']
        if time_diff < best_time_diff:
            best_time_diff = time_diff
            nearest_ps = ps
    return nearest_ps
def find_logcat_edge(loglines):
    """Return the earliest '[pP]owerSync<digit>End' value found in loglines.

    Each logline is a dict; every key matching the pattern contributes its
    value as a candidate edge.  Warns unless exactly one edge was found.
    """
    edges_dbg = []
    edges = []
    pattern = re.compile(r'.*[pP]owerSync\d?End')
    for l in loglines:
        line_edges = []
        for key in l.keys():
            if pattern.match(key):
                line_edges.append({key: l[key]})
                edges.append(l[key])
        # Sort the per-line debug entries by their (single) value.  The
        # original passed key=lambda x: operator.itemgetter(1), which
        # returned a fresh itemgetter object for every element — no
        # meaningful order (and unorderable on Python 3).
        line_edges.sort(key=lambda d: list(d.values())[0])
        edges_dbg.extend(line_edges)
    edges.sort()
    if len(edges) != 1:
        # Message fixed: the condition also fires when zero edges exist.
        logger.warn('Warning: expected exactly 1 power sync in logcat, found %d' % len(edges))
        logger.warn('Returning only the first edge')
    return edges[0]
def get_edge_timestamps(loglines, start_edge, logcat_lines):
    """Return (logcat time origin, battor first-edge timestamp) used to
    align the two time bases."""
    logcat_time_offset = find_logcat_edge(logcat_lines)
    battor_first_edge_timestamp = loglines[start_edge[0]].timestamp
    logger.info('logcat origin: %.2f, battor origin: %.2f' % (logcat_time_offset, battor_first_edge_timestamp))
    logger.info('battor start: %.2f, battor end: %.2f' % (loglines[0].timestamp, loglines[-1].timestamp))
    return logcat_time_offset, battor_first_edge_timestamp
def get_wifi_rssi_values(logcat_lines, start=0, end=None):
    """Return logcat lines carrying wifi RSSI info within [start, end).

    Lines whose 'action' is RSSI_CHANGED or SCAN_RESULTS are collected.
    A falsy `end` means no upper bound (preserved from the original).
    """
    rssi_values = []
    for line in logcat_lines:
        # Skip lines outside the requested time window (guard clauses
        # replace the original nested conditionals; same predicate).
        if line['timestamp'] < start:
            continue
        if end and line['timestamp'] >= end:
            continue
        try:
            if common.ACTION_WIFI_RSSI_CHANGED == line['action']:
                # RSSI CHANGED
                rssi_values.append(line)
            elif common.ACTION_WIFI_SCAN_RESULTS == line['action']:
                # SCAN_RESULTS
                rssi_values.append(line)
        except Exception as e:
            logger.error(e)
            # Bare raise preserves the original traceback ('raise e' did
            # not on Python 2; except-comma syntax was Python-2-only).
            raise
    return rssi_values
def logcat_lines_to_experiments(loglines, start_edge, logcat_lines, names=['wifi', 'cellular'], handover=None):
    """Slice battor loglines into Experiment objects using logcat markers.

    For every logcat line carrying '<name>Begin'/'<name>End' timestamps
    (milliseconds, presumably — confirm), finds the matching battor
    logline window and builds an Experiment.  Returns
    (experiments, colocated_experiments); the latter groups experiments
    of all `names` found on the same logcat line.

    NOTE(review): `names` is a mutable default argument — never mutated
    here, but a tuple would be safer.  Depends on find_nearest_logline
    and Experiment, which are not defined in this file.
    """
    experiments = []
    colocated_experiments = []
    logcat_time_offset, battor_first_edge_timestamp = get_edge_timestamps(loglines, start_edge, logcat_lines)
    # Find all available scan results
    for line in logcat_lines:
        expts = {}
        for name in names:
            try:
                expt_start = line['%sBegin' % (name)]
                expt_end = line['%sEnd' % (name)]
            except Exception, e:
                # This logcat line has no markers for this name; skip it.
                logger.debug('Failed to find key :%s' % (str(e)))
                continue
            try:
                # Convert logcat timestamps to battor-relative seconds.
                start_time = ((expt_start - logcat_time_offset) / 1e3)
                end_time = ((expt_end - logcat_time_offset) / 1e3)
                logger.info('%s exp start: %.2f, exp end: %.2f' % (name, start_time, end_time))
                # 5 appears to be a snap tolerance passed to
                # find_nearest_logline — confirm its semantics.
                start_logline = find_nearest_logline(loglines, (battor_first_edge_timestamp + start_time), 5)
                start_idx = loglines.index(start_logline)
                end_logline = find_nearest_logline(loglines, (battor_first_edge_timestamp + end_time), 5)
                end_idx = loglines.index(end_logline)
                expt_loglines = loglines[start_idx:end_idx]
            except Exception, e:
                logger.warn('Failed to convert edge to logline :%s' % (str(e)))
                continue
            try:
                expt = Experiment(name, loglines[start_idx:end_idx], line)
                expts[name] = expt
                experiments.append(expt)
            except Exception, e:
                logger.warn('Failed to convert to Experiment :%s' % (str(e)))
                continue
            # If cellular, get handover data if available from expt_start to expt_end
            if name == 'cellular' and handover:
                data = []
                try:
                    data = handover.get_data(start=expt_start, end=expt_end)
                except Exception, e:
                    logger.warn('Failed to acquire handover data: %s' % (str(e)))
                expt.handover = data
            # Try to get wifi RSSI values for this experiment
            if name == 'wifi':
                rssi_values = []
                try:
                    # 60000 widens the window by a minute on each side,
                    # consistent with millisecond timestamps.
                    rssi_values = get_wifi_rssi_values(logcat_lines, start=expt_start - 60000, end=expt_end + 60000)
                    logger.debug('Found %d wifi RSSI values' % (len(rssi_values)))
                except Exception, e:
                    logger.warn('Failed to acquire wifi RSSI data: %s' % (str(e)))
                expt.wifi_rssi = rssi_values
        # If expts has values for all names, then this logline has co-located experiments
        if len(expts.keys()) == len(names):
            colocated_experiments.append(expts.values())
    return experiments, colocated_experiments
|
import tensorflow as tf
# Minimal TensorFlow 1.x demo: element-wise addition of two constant
# tensors executed in a Session (TF1 graph mode; tf.Session was removed
# in TensorFlow 2 — tf.compat.v1 would be needed there).
x1 = tf.constant([2, 3, 4])
x2 = tf.constant([4, 0, 1])
y = tf.add(x1, x2)
with tf.Session() as sess:
    print(sess.run(y))  # -> [6 3 5]
|
from game.items import HollowLog
from game.models.model import Tree
from game.skills import SkillTypes
class HollowTree(Tree):
    """Choppable hollow-tree resource node.

    Requires woodcutting level 45, grants 82.5 woodcutting XP, and yields
    HollowLog items.  health=1 presumably means a single successful chop
    fells it — confirm against Tree's semantics.
    """
    name = 'Hollow Tree'
    health = 1
    xp = {SkillTypes.woodcutting: 82.5}
    skill_requirement = {SkillTypes.woodcutting: 45}
    resource = HollowLog
|
from django.shortcuts import render
from django.conf import settings
from django.http import HttpResponse , HttpResponseBadRequest,HttpResponseForbidden
from django.views.decorators.csrf import csrf_exempt
from linebot import LineBotApi,WebhookParser
from linebot.exceptions import InvalidSignatureError ,LineBotApiError
from linebot.models import MessageEvent ,TextSendMessage , ImageSendMessage,StickerSendMessage,LocationSendMessage,QuickReply,QuickReplyButton,MessageAction,TextMessage, PostbackEvent
from .module.func import *
from urllib.parse import parse_qsl
line_bot_api = LineBotApi(settings.LINE_CHANNEL_ACCESS_TOKEN)
parser = WebhookParser(settings.LINE_CHANNEL_SECRET)
@csrf_exempt
def callback(request):
    """LINE webhook endpoint: verify the signature, then dispatch each
    text command to its handler; unknown text gets a canned reply."""
    if request.method != 'POST':
        return HttpResponseBadRequest()
    signature = request.META['HTTP_X_LINE_SIGNATURE']
    body = request.body.decode('utf-8')
    print(body)
    try:
        events = parser.parse(body, signature)
    except InvalidSignatureError:
        return HttpResponseForbidden()
    except LineBotApiError:
        return HttpResponseBadRequest()
    # Text command -> handler dispatch table (replaces the if/elif chain).
    handlers = {
        '@按鈕樣板': sendButton,
        '@購買披薩': sendPizza,
        '@圖片地圖': sendImgmap,
        '@日期時間': sendDatetime,
        '@傳送大溪豆乾活動': sendText,
        '@傳送通識活動': sendImage,
        '@傳送石門水庫熱氣球活動': sendText2,
        '@傳送貼圖': sendStick,
        '@金師獎資訊': sendMulti,
        '@聯絡方式與地址': sendPosition,
        '@快速選單': sendQuickreply,
        '@大溪景點': sendCarousel,
        '@大溪名產': sendCarouselImg,
    }
    for event in events:
        if isinstance(event, MessageEvent):
            if isinstance(event.message, TextMessage):
                mtext = event.message.text
                handler = handlers.get(mtext)
                if handler is not None:
                    handler(event)
                else:
                    output = "RRRR 快去買!!!"
                    line_bot_api.reply_message(event.reply_token, TextSendMessage(text=output))
        if isinstance(event, PostbackEvent):  # triggered by PostbackTemplateAction
            backdata = dict(parse_qsl(event.postback.data))  # decode the Postback payload
            if backdata.get('action') == 'buy':
                sendBack_buy(event, backdata)
    return HttpResponse()
# def callback(request):
# if request.method =='POST':
# signature = request.META['HTTP_X_LINE_SIGNATURE']
# body = request.body.decode('utf-8')
# try:
# events = parser.parse(body,signature)
# except InvalidSignatureError:
# return HttpResponseForbidden()
# except LineBotApiError:
# return HttpResponseBadRequest()
# for event in events:
# if isinstance(event , MessageEvent):
# mtext = event.message.text
# if mtext =="@傳送文字":
# sendText(event)
# elif mtext =="@傳送圖片":
# sendImage(event)
# elif mtext =="@傳送貼圖":
# sendStick(event)
# elif mtext =="@多項傳送":
# sendMulti(event)
# elif mtext =="@傳送位置":
# sendPosition(event)
# elif mtext =="@快速選單":
# sendQuickreply(event)
# else:
# output = "RRRR 我聽不懂!!!"
# line_bot_api.reply_message(event.reply_token,TextSendMessage(text = output))
# return HttpResponse()
# else:
# return HttpResponseBadRequest()# Create your views here.
|
# Cinema revenue: ticket price depends on screening type; total income
# assumes a full house (rows * cols seats).
film_type = input().lower()
rows = int(input())
cols = int(input())
total_seats = rows * cols
# Price lookup; unknown screening types price at 0.0, as in the original.
prices = {"premiere": 12.0, "normal": 7.50, "discount": 5.0}
price_Of_Billet = prices.get(film_type, 0.0)
money_earned = price_Of_Billet * total_seats
print("{0:.2f} leva".format(money_earned))
|
# -*- coding: utf-8 -*-
import tkinter as tk  # Tkinter must be imported before use

# Step 1: instantiate the main window object
window = tk.Tk()
# Step 2: give the window a title
window.title('My Window')
# Step 3: set the window size (width x height)
window.geometry('500x300')  # note: lowercase 'x' between the dimensions
# Step 4: lay out a 3x3 grid of labels with the grid geometry manager
for i in range(3):
    for j in range(3):
        tk.Label(window, text=1).grid(row=i, column=j, padx=10, pady=10, ipadx=10, ipady=10)
# Step 5: enter the main event loop so the window is displayed
window.mainloop()
|
from selenium import webdriver
import time
import os
# Open a local test page containing a modal dialog and exercise it.
# NOTE(review): find_element_by_* is the Selenium 3 API, removed in
# Selenium 4 (use driver.find_element(By.ID, ...) there).
driver = webdriver.Chrome()
file_path = 'file:///'+os.path.abspath("C:\\课件\\我的课件\\测试\\selenium2\\locateElement\\selenium2html\\modal.html")
driver.get(file_path)
driver.maximize_window()
# Click the button that opens the modal
driver.find_element_by_id("show_modal").click()
time.sleep(5)
# Click the "click me" element inside the modal body
ddiv = driver.find_element_by_class_name("modal-body")
ddiv.find_element_by_id("click").click()
time.sleep(5)
# Dismiss the alert via the first button on the page
buttons = driver.find_elements_by_tag_name("button")
buttons[0].click()
time.sleep(5)
driver.quit()
|
from surveymonkey.calls.base import Call
class Collectors(Call):
    """SurveyMonkey 'create collector' API call wrapper."""

    def __create_collector(self, survey_id, collector, **kwargs):
        # Build the call parameters and delegate to Call.make_call.
        params = {
            'survey_id': survey_id,
            'collector': collector
        }
        # Guard: this wrapper only supports weblink collectors.
        assert collector.get('type') == 'weblink', \
            "Only supported collector type for this call is 'weblink'"
        # NOTE(review): passes the bound method itself as the first
        # argument — presumably make_call reads .allowed_params off it;
        # confirm against Call.make_call's signature.
        return self.make_call(self.__create_collector, params, kwargs)
    # Parameter whitelist consumed by make_call, attached to the function.
    __create_collector.allowed_params = [
        'survey_id', 'collector'
    ]
    # Public alias for the name-mangled implementation.
    create_collector = __create_collector
|
# connection to database is done by this file
# has to be included in main.py before importing the views
# as we are using mongo-db as our database
# the database name should be lunchbox and collection should be same view names
# I'm guessing we are using pymongo for now
# import necessary libraries & just create a client that is global for now
from pymongo import MongoClient

# Global MongoDB handle: local server on the default port, database
# 'lunchbox' (collections are named after the view modules).
client = MongoClient('127.0.0.1', 27017)
db = client['lunchbox']
|
import numpy as np
import pandas as pd
import h5py
import json
import os
# Build a PRAS HDF5 reliability case from the NREL-Seams MISO workbook.
os.chdir('C:/Users/llavin/Desktop/PRAS')
from HDF5_utils import load_seams,clean_tx,clean_gen,add_gen,create_gen_failure_recovery_cols,HDF5Case
#data loads
#pickle these eventually to speed up code
seams_transmission_df = load_seams('Transmission', wb="NREL-Seams Model (MISO).xlsx")
seams_generation_df = load_seams('Generation', wb="NREL-Seams Model (MISO).xlsx")
seams_load_df = load_seams('Load', wb="NREL-Seams Model (MISO).xlsx")
seams_mapping_df = load_seams('Mapping', wb="NREL-Seams Model (MISO).xlsx")
#double load at just the one bus
#print(seams_load_df.MEC.values)
#seams_load_df.MEC = 2*seams_load_df.MEC.values
#print(seams_load_df.MEC.values)
#additional gens to throw in
# Each row: [name, type, bus name, bus id, MW, region, 4 extra params]
# NOTE(review): column meanings inferred from add_gen usage — confirm.
additional_gen = ['Solar1','Solar','MEC_33',33,100,'MISO-9',0.0,0.0,0.0,0.0]
additional_gen2 = ['Wind1','Wind','MEC_33',33,250,'MISO-9',0.0,0.0,0.0,0.0]
additional_gen3 = ['Wind2','Wind','CBPC-NIPCO_7',7,250,'MISO-9',0.0,0.0,0.0,0.0]
#and select which to use
gens_to_add = [additional_gen2]
#need also shape for solar,wind
# Hourly capacity-factor profiles, repeated for 365 days.
solar_shape = [0.5,0.5,0.5,0.5,0.5,0.5,.05,.1,.2,.4,.6,.8,.9,.8,.6,.4,.3,.2,.05,0.5,0.5,0.5,0.5,0.5]*365
wind_shape = [1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.]*365
#wind_shape = [.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5,.5]*365
#print(len(wind_shape))
#clean tx
retain_cols = ['Line','From','To','FW','BW','Area From','Area To']
seams_transmission_df = clean_tx(seams_transmission_df,retain_cols)
#clean gen
seams_generation_df = clean_gen(seams_generation_df,seams_mapping_df)
#seams_generation_df = add_gen(seams_generation_df,additional_gen)
for g_list in gens_to_add:
    seams_generation_df = add_gen(seams_generation_df,g_list)
seams_generation_df = create_gen_failure_recovery_cols(seams_generation_df)
#define case metadata
vgbool = True
metadata = {'pras_dataversion':'v0.5.0',
            'start_timestamp':'2012-01-01T00:00:00-05:00',
            'timestep_count':24,
            'timestep_length':1,
            'timestep_unit':'h',
            'power_unit':'MW',
            'energy_unit':'MWh'}
#create and export case
case = HDF5Case(seams_transmission_df,seams_generation_df,seams_load_df,seams_mapping_df,
                metadata['timestep_count'],solar_shape,wind_shape,include_vg=vgbool)
case.create_all()
case.write_HDF5('perfect_vg_test_allzones.pras',**metadata)
import gmpy2
import rsa

# Known RSA parameters (CTF-style challenge with leaked p and q).
p = 275127860351348928173285174381581152299
q = 319576316814478949870590164193048041239
n = 87924348264132406875276140514499937145050893665602592992418171647042491658461
e = 65537
# Private exponent: modular inverse of e modulo phi(n) = (p-1)(q-1).
d = int(gmpy2.invert(e , (p-1) * (q-1)))
privatekey = rsa.PrivateKey(n , e , d , p , q)  # build the private key from the known parameters
with open("flag.enc" , "rb") as f:
    print(rsa.decrypt(f.read(), privatekey).decode())  # decrypt the ciphertext with the private key and print it
import math
def _iterate_first_quartor(N):
y = x = 0
# The quartor should include the right boarder
for y in range(0, (N+1)//2):
# The quartor does not include the bottom boarder
for x in range(0, N//2):
yield (y, x)
def _rotate_coordinate(y, x, N):
new_y = x
new_x = N - 1 - y
return new_y, new_x
def rotate(matrix):
    """Rotate a square matrix 90 degrees clockwise, in place.

    Raises ValueError if the matrix is not square.  Returns the same
    (mutated) matrix object for convenience.
    """
    size = len(matrix)
    if any(len(row) != size for row in matrix):
        raise ValueError
    # Walk one quarter of the grid; cycle each 4-element orbit in place
    # (quarter iteration and the coordinate map are inlined here).
    for y0 in range((size + 1) // 2):
        for x0 in range(size // 2):
            y, x = y0, x0
            carried = matrix[y][x]
            for _ in range(4):
                # 90-degree clockwise coordinate map.
                y, x = x, size - 1 - y
                matrix[y][x], carried = carried, matrix[y][x]
    return matrix
if __name__ == '__main__':
    # Smoke tests: empty, 1x1, 2x2 and 3x3 clockwise rotations.
    assert rotate([]) == []
    assert rotate([[1]]) == [[1]]
    assert (
        rotate([[1, 2],
                [3, 4]]) ==
        [[3, 1],
         [4, 2]]
    )
    assert (
        rotate([[1, 2, 3],
                [4, 5, 6],
                [7, 8, 9]]) ==
        [[7, 4, 1],
         [8, 5, 2],
         [9, 6, 3]]
    )
    print('test passed...')
|
from django.apps import AppConfig
class PhishingDetectionConfig(AppConfig):
    """Django application configuration for the phishing_detection app."""
    name = 'phishing_detection'
|
from preprocess_rnn import get_data
from keras.models import Sequential
from keras.layers import Bidirectional, Masking
import h5py
from keras.layers.core import Dense, Dropout
from keras.layers.recurrent import LSTM
from keras.layers.normalization import BatchNormalization
feature_size = 39
import numpy as np
def create_model():
    """Build the BiLSTM sequence model.

    Input: zero-padded sequences of shape (777, feature_size); the
    Masking layer skips all-zero timesteps.  The final layer is
    Dense(64) with relu — NOTE(review): train() compiles with
    categorical_crossentropy, which normally expects a softmax output;
    confirm this is intentional.
    """
    model = Sequential()
    model.add(Masking(mask_value=0, input_shape=(777, feature_size)))
    model.add(Bidirectional(LSTM(150, return_sequences = True, dropout = 0.1, kernel_initializer='normal')))
    model.add(BatchNormalization())
    model.add(LSTM(100, return_sequences = True, dropout = 0.1, kernel_initializer='normal'))
    model.add(BatchNormalization())
    model.add(Dense(units = 100, activation = 'relu', kernel_initializer = 'normal'))
    model.add(Dropout(0.2))
    model.add(Dense(units = 100, activation = 'relu', kernel_initializer = 'normal'))
    model.add(Dropout(0.2))
    model.add(Dense(units =64, activation = 'relu', kernel_initializer = 'normal'))
    print('model created')
    return model
def train():
    """Load features, train the BiLSTM model, and save it to model.h5."""
    X,Y,ids,x_len, y_len = get_data()
    print(np.array(X).shape)
    model = create_model()
    model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
    # 10% of the data is held out for validation during training.
    model.fit(X, Y, batch_size = 30, epochs=30, validation_split = 0.1)
    # NOTE(review): evaluates on the training data itself.
    score = model.evaluate(X, Y, batch_size=100)
    print(score)
    model.save("model.h5")
if __name__ == "__main__":
    print('here we go!')
    train()
|
import csv
from get_data import GetData
from lineup import Lineup
from opt import Opt
def get_credentials():
    """Read login credentials from ./data/credentials.csv.

    Expects lines of the form ``label,value``; the second field of the
    first two such lines become username and password.

    Returns a {'user': ..., 'pass': ...} dict, or False when both values
    are not present (preserves the original falsy contract).
    """
    with open('./data/credentials.csv', "r") as f:
        reader = csv.reader(f, delimiter="\t")
        credentials = []
        for line in reader:
            # Guard against blank rows (line == []) before indexing.
            if line and ',' in line[0]:
                credentials.append(line[0].split(",")[1])
    # Require both entries: the original indexed credentials[1] whenever
    # the list was non-empty, crashing with IndexError on a single entry.
    if len(credentials) >= 2:
        return {'user': credentials[0], 'pass': credentials[1]}
    return False
def opt(opt_p, budget, desired, remove, n=None):
    """Run a transfer (n given) or wildcard squad simulation optimizing opt_p.

    Relies on the module-level global `processed_data` being set before the
    call (it is assigned inside the `if creds:` block below).
    """
    if n:
        squad = Opt.transfer_simulation(opt_p, processed_data, n, budget, desired, remove)
    else:
        squad = Opt.wildcard_simulation(opt_p, processed_data, budget, desired, remove)
    # prob.status == 1 presumably means the solver found an optimal
    # solution — confirm against Opt's solver library.
    if squad.prob.status == 1:
        player, subs = squad.extract_subs()
        old, new = squad.calculate_improvement()
        print('\n\nSubstitution simulation complete (', opt_p, '),', sep='')
        print('previous ', opt_p, ' - ', round(old, 1), '\n', 'new ', opt_p, ' - ', round(new, 1), sep='')
        # NOTE(review): prints "improvement of -<value>"; the '-' reads as
        # a minus sign — confirm whether a dash was intended.
        print('That\'s an improvement of -', round(new - old, 1))
        print('\nOptimal subs:')
        for p, s in zip(player, subs):
            print(p['web_name'], '>', s['web_name'])
        print('\nBest team/formation is as follows:')
        Lineup(squad.squad, param=opt_p).print_lineup()
    else:
        print('Unable to find solution with specified parameters. Simulation status code:', squad.prob.status)
# Candidate player metrics the optimizer can maximize over.
params = ['bonus',
          'bps',
          'creativity',
          'dreamteam_count',
          'ep_next',
          'ep_this',
          'event_points',
          'form',
          'goals_scored',
          'ict_index',
          'influence',
          'KPI',
          'points_per_game',
          # 'price_change',
          'selected_by_percent',
          'threat',
          'top_50_count',
          'total_points',
          'transfers_in',
          'transfers_in_event',
          'value_form',
          'value_season',
          ]
# Metric to optimize ('ep_next' = expected points next gameweek).
opt_param = 'ep_next'
# Number of transfers for the transfer simulation.
num_subs = 1
# Players the squad must keep / must drop.
desired = ['Lundstram', 'De Bruyne', 'Maddison', 'Vardy', 'Abraham']
remove = []
creds = get_credentials()
if creds:
    # Fetch and process FPL data for the authenticated account.
    processed_data = GetData(creds['user'], creds['pass'], reduce=False, refresh=False).data
    budget = round(processed_data.account_data['total_balance'], 1)
    # transfer sim
    # opt(opt_param, budget, desired, remove, num_subs)
    # wildcard sim
    opt(opt_param, budget, desired, remove)
ender = True
|
#from sympy.logic.utilities.dimacs import load_file,load
from os.path import join
import math
import re
# Directory holding the pre-generated sudoku rule files.
sudoku_rules_path = "input"
# Board size -> DIMACS rules file for that size.
sudoku_rules = {
    4 : "sudoku-rules-4x4.txt",
    9 : "sudoku-rules-9x9.txt",
    16 : "sudoku-rules-16x16.txt"
}
# For SUDOKU-16, 10-16 become A-E
def letter_gen(x):
    """Encode a cell coordinate/value digit: 1-9 stay numeric, 10 and up
    map to 'A', 'B', ... (needed for 16x16 sudokus)."""
    return chr(ord('A') + x - 10) if x >= 10 else str(x)
# Converts one line of dot format (one puzzle) into DIMACS
def get_dimacs_string(line):
    """Convert one dot-format puzzle line into DIMACS unit clauses.

    Every given (alphanumeric) cell produces a clause "<row><col><value> 0";
    '.' placeholders only advance the cell counter.  The board size is the
    integer square root of the line length.
    """
    size = math.isqrt(len(line))
    clauses = []
    for position, token in enumerate(line, start=1):
        if token.isalnum():
            row = letter_gen((position - 1) // size + 1)
            col = letter_gen(position % size if position % size != 0 else size)
            clauses.append(row + col + token + " 0\n")
    return "".join(clauses)
# Gets the SUDOKU rules corresponding to the size as CNF clause
def parse_sudoku_rules(sudoku_size):
return load_dimacs_file(join(sudoku_rules_path, sudoku_rules[sudoku_size]))
# Gets the puzzles from the file as SAT CNF clauses
def parse_sudoku_puzzles(puzzles_file):
puzzles = []
all_predicates = set()
line = puzzles_file.readline()
clauses, predicates = dimacs_to_cnf(get_dimacs_string(line))
puzzles.append(clauses)
all_predicates = all_predicates.union(predicates)
puzzle_size = math.isqrt(len(line))
for line in puzzles_file.readline():
clauses, predicates = dimacs_to_cnf(get_dimacs_string(line))
puzzles.append(clauses)
all_predicates = all_predicates.union(predicates)
return puzzle_size, puzzles, all_predicates
# Converts a string in DIMACS format to a CNF as a list of sets
# Does not validate DIMACS format, assumes input is correct
def dimacs_to_cnf(dimacs_string):
    """Parse a DIMACS string into (clauses, predicates).

    clauses is a list of sets of signed literal strings; predicates is the
    set of unsigned variable names seen.  Comment ('c ...') and problem
    ('p cnf ...') lines are skipped.  Input is assumed well-formed; no
    validation is performed.
    """
    skip = re.compile('(c.*|p\s*cnf\s*(\d*)\s*(\d*))')
    clauses = []
    predicates = set()
    for row in dimacs_string.split('\n'):
        if skip.match(row):
            continue
        # Drop the trailing 0 clause terminator, then split into literals.
        clause = set(row.rstrip('0').split())
        predicates.update(lit.lstrip('-') for lit in clause)
        if clause:
            clauses.append(clause)
    return clauses, predicates
# Reads a DIMACS from a file into a CNF expression as a list of sets
def load_dimacs_file(filename):
f = open(filename)
content = f.read()
f.close()
return dimacs_to_cnf(content)
|
import xml.etree.cElementTree as ET
import re
import csv
import codecs
import cerberus
from unittest import TestCase
import a_audit_data
import c_create_schema
# Tag key with a lowercase namespace prefix, e.g. "addr:street".
lower_case_colon = re.compile(r'^([a-z]|_)+:([a-z]|_)+')
# Characters that make a tag key unusable downstream.
problematic = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
# Input OSM extract and the five output CSV paths.
OSM_PATH = "data/bengaluru_india.osm"
NODE_TAGS_PATH = "data/bi_nodes_tags.csv"
NODES_PATH = "data/bi_nodes.csv"
WAYS_PATH = "data/bi_ways.csv"
WAY_TAGS_PATH = "data/bi_ways_tags.csv"
WAY_NODES_PATH = "data/bi_ways_nodes.csv"
SCHEMA = c_create_schema.schema
# CSV column orders for each output file.
node_fields = ['id', 'lat', 'lon', 'user', 'uid', 'version', 'changeset', 'timestamp']
node_tag_fields = ['id', 'key', 'value', 'type']
bi_way_fields = ['id', 'user', 'uid', 'version', 'changeset', 'timestamp']
bi_way_tag_fields = ['id', 'key', 'value', 'type']
bi_way_node_fields = ['id', 'node_id', 'position']
def _tag_value(child):
    """Value for a tag element, running street names through the audit cleanup."""
    # NOTE(review): the audit predicate is fed the key ('addr:street'),
    # not the value — preserved from the original; confirm whether
    # street_audit_2 was meant to inspect child.attrib['v'] instead.
    if child.attrib['k'] == "addr:street" and a_audit_data.street_audit_2(child.attrib['k']):
        return a_audit_data.name_update(child.attrib['v'], a_audit_data.tomap)
    return child.attrib['v']


def _build_tag(elem, child, default_tag_type, problem_chars):
    """Shape one <tag> child into a dict, or None if its key is problematic.

    This helper replaces four near-identical copies of the same logic in
    the original node/way branches.
    """
    key = child.attrib['k']
    tag = {}
    if lower_case_colon.match(key):
        # Split "namespace:rest" once; namespace becomes the tag type.
        tag['type'], tag['key'] = key.split(':', 1)
    elif problem_chars.match(key):
        return None
    else:
        tag['type'] = default_tag_type
        tag['key'] = key
    tag['id'] = elem.attrib['id']
    tag['value'] = _tag_value(child)
    return tag


def shape(elem, node_attr_fields=node_fields, way_attr_fields=bi_way_fields,
          problem_chars=problematic, default_tag_type='regular'):
    """Shape an OSM <node> or <way> element into csv-ready dicts.

    Returns {'node': ..., 'node_tags': ...} for nodes and
    {'way': ..., 'w_nodes': ..., 'way_tags': ...} for ways; tags whose key
    contains problematic characters are dropped.

    Fixes vs. the original: node_attr_fields / way_attr_fields /
    problem_chars / default_tag_type are now actually honored (the body
    previously ignored them in favor of module-level globals — identical
    behavior with the default arguments).
    """
    tags = []
    if elem.tag == 'node':
        attributes = {a: elem.attrib[a] for a in elem.attrib if a in node_attr_fields}
        for child in elem:
            tag = _build_tag(elem, child, default_tag_type, problem_chars)
            if tag is not None:
                tags.append(tag)
        return {'node': attributes, 'node_tags': tags}
    elif elem.tag == 'way':
        attributes = {a: elem.attrib[a] for a in elem.attrib if a in way_attr_fields}
        w_nodes = []
        position = 0
        for child in elem:
            if child.tag == 'tag':
                tag = _build_tag(elem, child, default_tag_type, problem_chars)
                if tag is not None:
                    tags.append(tag)
            elif child.tag == 'nd':
                # Preserve the order of way-node references.
                w_nodes.append({'id': elem.attrib['id'],
                                'node_id': child.attrib['ref'],
                                'position': position})
                position += 1
        return {'way': attributes, 'w_nodes': w_nodes, 'way_tags': tags}
def get(osm_file, tags=('node', 'way', 'relation')):
    """Incrementally yield completed top-level elements of the given tags.

    Clears the root after each yield so memory stays bounded on large files.
    """
    context = ET.iterparse(osm_file, events=('start', 'end'))
    _, root = next(context)
    for event, element in context:
        if event != 'end' or element.tag not in tags:
            continue
        yield element
        root.clear()
def validate_ele(elem, validator, schema=SCHEMA):
    """Raise cerberus.ValidationError with a readable message when elem
    does not satisfy the schema; return silently otherwise."""
    if validator.validate(elem, schema) is True:
        return
    field, errors = next(iter(validator.errors.items()))
    message_string = "\nElement of type '{0}' has the following errors:\n{1}"
    error_strings = (
        "{0}: {1}".format(k, v if isinstance(v, str) else ", ".join(v))
        for k, v in errors.items()
    )
    raise cerberus.ValidationError(
        message_string.format(field, "\n".join(error_strings))
    )
class UnicodeDictWriter(csv.DictWriter, object):
    """DictWriter kept for API compatibility with the Python-2 version.

    The original encoded every str value to UTF-8 bytes before writing;
    on Python 3 that makes csv emit ``b'...'`` literals into the output
    files, so values are now written as text and encoding is left to the
    underlying file object.
    """

    def writerow(self, row):
        # Delegate straight to DictWriter; no per-value encoding.
        super(UnicodeDictWriter, self).writerow(row)

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
def process(file_in, validate):
    """Stream the OSM file, shape each element, and write the five CSVs.

    When `validate` is True every shaped element is checked against
    SCHEMA first (slower, but catches malformed rows early).
    """
    with codecs.open(NODES_PATH, 'w') as nodes_file, \
         codecs.open(NODE_TAGS_PATH, 'w') as nodes_tags_file, \
         codecs.open(WAYS_PATH, 'w') as ways_file, \
         codecs.open(WAY_NODES_PATH, 'w') as way_nodes_file, \
         codecs.open(WAY_TAGS_PATH, 'w') as way_tags_file:
        nodes_writer = UnicodeDictWriter(nodes_file, node_fields)
        node_tags_writer = UnicodeDictWriter(nodes_tags_file, node_tag_fields)
        ways_writer = UnicodeDictWriter(ways_file, bi_way_fields)
        way_nodes_writer = UnicodeDictWriter(way_nodes_file, bi_way_node_fields)
        way_tags_writer = UnicodeDictWriter(way_tags_file, bi_way_tag_fields)
        nodes_writer.writeheader()
        node_tags_writer.writeheader()
        ways_writer.writeheader()
        way_nodes_writer.writeheader()
        way_tags_writer.writeheader()
        validator = cerberus.Validator()
        # Only node and way elements are shaped; relations are skipped.
        for elem in get(file_in, tags=('node', 'way')):
            el = shape(elem)
            if el:
                if validate is True:
                    validate_ele(el, validator)
                if elem.tag == 'node':
                    nodes_writer.writerow(el['node'])
                    node_tags_writer.writerows(el['node_tags'])
                elif elem.tag == 'way':
                    ways_writer.writerow(el['way'])
                    way_nodes_writer.writerows(el['w_nodes'])
                    way_tags_writer.writerows(el['way_tags'])
if __name__ == '__main__':
    # Validation slows processing but catches malformed rows early.
    process(OSM_PATH, validate=True)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
``__hash__(self)`` 定义了 hash(obj)的行为。
"""
class MyClass(object):
    """Demonstrates __hash__/__eq__: equal-valued instances collapse in sets.

    ``__hash__(self)`` defines the behavior of ``hash(obj)``.
    """

    def __init__(self, a):
        self.a = a

    def __hash__(self):
        # Delegate to the wrapped value so equal objects hash equally.
        return hash(self.a)

    def __eq__(self, other):
        # Return NotImplemented for foreign types instead of raising
        # AttributeError on other.a (the original crashed on e.g. m == 1).
        if not isinstance(other, MyClass):
            return NotImplemented
        return self.a == other.a
if __name__ == "__main__":
    m1 = MyClass("a")
    m2 = MyClass("a")
    s = set()
    s.add(m1)
    # When adding an element, the set uses __hash__ and then equality to
    # check whether an equal element already exists, so s.add(m2) does
    # not insert m2 again.
    s.add(m2)
    assert len(s) == 1
|
'''
Created on Nov 16, 2014
@author: Jigar
'''
from __future__ import division
import numpy
import timeit

# Wall-clock timing: start negative, add the end time at the bottom.
time = -timeit.default_timer()
path = r'fv3.txt'
file1 = open(path, 'r')
content = file1.read()
data = content.split("\n")
# Drop the trailing empty chunk, then the header line.
data = data[:-1]
data = data[1:]
fv = []
# Binarize each feature vector: any count > 1 is clamped to 1.
for datas in data:
    arr = eval(datas)  # NOTE(review): eval on file contents — trusted input only
    for i in range(0, len(arr)):
        if arr[i] > 1:
            arr[i] = 1
    fv.append(numpy.array(arr))
# outputFile1=open(r"NumpyJaccardMatrix.txt","w+")
# Pairwise Jaccard similarity; initialized to ones and only the upper
# triangle (j >= i) is computed — the lower triangle keeps the 1.0
# default, so the saved matrix is NOT symmetric. Confirm if intended.
jac = numpy.ones((len(fv), len(fv)))
# jaccard = []
# for i in range(0,len(fv)):
#     jaccard.append([])
for i in range(0, len(fv) - 1):
    for j in range(i, len(fv)):
        # |A∩B| via dot product of 0/1 vectors; |A∪B| = |A|+|B|-|A∩B|.
        f11 = numpy.dot(fv[i], fv[j])
        f = numpy.dot(fv[i], fv[i]) + numpy.dot(fv[j], fv[j]) - numpy.dot(fv[i], fv[j])
        jacc = 0.0
        jacc = f11/f
        jac[i][j] = jacc
numpy.savetxt("NumpyJaccard.txt", jac)
time += timeit.default_timer()
print "Time to compute", time
# outputFile1.write(str(jacc))
# outputFile1.write('\n')
|
import re
from collections import Counter
# Using Viterbi Algorithm to find the hidden states in the given text
# Viterbi-style dynamic program: most probable segmentation of `text`.
def viterbiAlgorithm(text):
    """Return (words, probability) for the best segmentation of text."""
    probs = [1.0]   # probs[i]: best probability for the prefix text[:i]
    lasts = [0]     # lasts[i]: start index of the word ending at i
    for end in range(1, len(text) + 1):
        best_prob = 0
        best_start = 0
        # Candidate word starts, bounded by the longest dictionary word.
        for start in range(max(0, end - max_word_length), end):
            candidate = probs[start] * word_prob(text[start:end])
            if candidate > best_prob:
                best_prob = candidate
                best_start = start
        probs.append(best_prob)
        lasts.append(best_start)
    # Backtrack from the end to recover the word boundaries.
    words = []
    pos = len(text)
    while pos > 0:
        words.append(text[lasts[pos]:pos])
        pos = lasts[pos]
    words.reverse()
    return words, probs[-1]
# find the frequency probability of a word from the Dictionary
# find the frequency probability of a word from the dictionary
def word_prob(word):
    """Relative corpus frequency of `word`; single non-letter characters
    (punctuation, digits) score 1 so they never block a segmentation."""
    lowered = word.lower()
    if len(word) == 1 and not wordPattern.match(lowered):
        return 1
    return dictionary[lowered] / total
def words(text):
    """Extract every maximal run of letters from `text`, lowercased."""
    lowered = text.lower()
    return re.findall('[a-z]+', lowered)
# Pattern for purely-alphabetic tokens (used to skip punctuation/digits).
wordPattern = re.compile('[a-z]+')
# Frequency dictionary of words built from the corpus file Seg.txt.
dictionary = Counter(words(open('Seg.txt').read()))
# Length of the longest dictionary word (bounds the DP search window).
max_word_length = max(map(len, dictionary))
# Total token count, as float so probability division is exact.
total = float(sum(dictionary.values()))
# The size of the longest word
def TextSegmentation(message):
    """Segment `message` into words and return them space-separated
    (with a trailing space, matching the original output)."""
    segmented = viterbiAlgorithm(message)[0]
    return "".join(str(word) + " " for word in segmented)
'''
applyVertibi3 = viterbiAlgorithm('Letusmeetthisafternoon')
# applyVertibi = Viterbi.viterbi_segment('itseasyformetosplitlongruntogetherblocks')
#
# print applyVertibi
Vert = ""
V = applyVertibi3[0]
for x in V :
Vert = Vert + str(x) +" "
print Vert
'''
# Example: segment a run-together sentence.
x = TextSegmentation('Letusmeetthisafternoon')
#print(x)
|
# all endpoints related to institution view
from flask_app import *
from db_connector import *
from common import *
from bson.json_util import dumps
import json
#Signup
@app.route('/api/v1/institution/signup', methods = [ 'POST' ] )
def signup():
    """Register a new institution together with its admin user account.

    Allocates fresh iid/uid values from the metadata counters and sets
    long-lived identification cookies on the 201 response.
    """
    if request.method == 'POST':
        i_name = request.json.get('i_name')
        username = request.json.get('username')
        # NOTE(review): password is stored upper-cased and in plain text —
        # presumably a placeholder for real hashing; confirm.
        password = request.json.get('password').upper()
        #print(json.loads(dumps(db['metadata'].find())))
        metadata = db['metadata'].find_one()
        last_iid = metadata['last_iid']
        last_uid = metadata['last_uid']
        iid = last_iid + 1
        uid = last_uid + 1
        # NOTE(review): read-then-update of the counters is not atomic —
        # concurrent signups can race; confirm single-writer assumption.
        db['metadata'].update( { 'last_iid': last_iid, 'last_uid':last_uid}, { "$set": { 'last_iid':iid, 'last_uid':uid }} )
        iid = 'i' + str(iid)
        uid = 'u' + str(uid)
        #print(password)
        i_collection = db['institutions']
        u_collection = db['users']
        i_collection.insert_one({ "iid": iid, "uid": uid, "i_name": i_name, "caterers": {} })
        u_collection.insert_one({ "uid": uid, "username": username, "password": password, "account_type": "Institution" , "iid":iid, "name" :i_name })
        resp = make_response(jsonify({"success": "created"}), 201)
        # Two-year cookies identifying the user, institution and role.
        resp.set_cookie('uid',value=uid, max_age=60*60*24*365*2)
        resp.set_cookie('iid',value=iid, max_age=60*60*24*365*2)
        resp.set_cookie('user_type',value="Institution",max_age=60*60*24*365*2)
        return resp
#Add customer/canteen
@app.route('/api/v1/institution/add/<user_type>', methods = [ 'PUT' ])
def add(user_type):
    """Bulk-create Customer or Canteen accounts under the caller's institution.

    Expects a JSON array of user objects; the institution id is read from
    the 'iid' cookie.  Returns 201 after all entries are inserted.
    """
    if request.method == 'PUT':
        if user_type == 'Customer':
            for request_one in request.json:
                #print(len(request.json))
                username = request_one['username']
                # NOTE(review): passwords stored upper-cased, unhashed.
                password = request_one['password'].upper()
                name = request_one['name']
                iid = request.cookies.get('iid')
                user_collection = db['users']
                metadata = db['metadata'].find_one() #CHECK
                last_uid = metadata['last_uid']
                uid = last_uid + 1
                # Non-atomic counter bump (same race caveat as signup).
                db['metadata'].update({'last_uid':last_uid}, { "$set": {'last_uid':uid }})
                uid = 'u' + str(uid)
                # New customers start with a wallet balance of 5000.
                user_collection.insert_one({ "uid": uid, "name":name, "username": username, "password": password, "account_type": "Customer", "wallet": 5000, "iid":iid })
            return jsonify ( { "success":'Created' }), 201
        elif user_type == 'Canteen':
            for request_one in request.json:
                username = request_one['username']
                password = request_one['password'].upper()
                establishment_name = request_one['establishment_name']
                owner = request_one['owner']
                iid = request.cookies.get('iid')
                metadata = db['metadata'].find_one()
                last_uid = metadata['last_uid']
                last_can_id = metadata['last_can_id']
                uid = last_uid + 1
                can_id = last_can_id + 1
                db['metadata'].update({'last_uid':last_uid, 'last_can_id':last_can_id }, { "$set" :{'last_uid':uid, 'last_can_id':can_id }})
                uid = 'u' + str(uid)
                can_id = 'can' + str(can_id)
                user_collection = db['users']
                # A canteen is a user account plus a canteens-collection record.
                user_collection.insert_one({ "uid": uid, "username": username, "password": password, "account_type": "Canteen", "iid":iid })
                can_collection = db['canteens']
                can_collection.insert_one({ 'can_id': can_id, 'uid': uid, 'establishment_name': establishment_name, 'owner': owner })
            return jsonify ( { "success":'created' }), 201
@app.route('/api/v1/institution/add_cat', methods = [ 'PUT' ])
def add_cat():
    """Link an existing caterer (by 'cat_id' from the JSON body) to the
    current institution by pushing it onto the 'caterers' array.

    Returns 201 on success.
    """
    if request.method == 'PUT':
        iid = request.cookies.get('iid')
        i_collection = db['institutions']
        cat_id = request.json.get('cat_id')
        # NOTE(review): 'found' is never used -- possibly a leftover
        # existence check that was meant to guard the update below.
        found = i_collection.find_one({'iid': iid })
        i_collection.update( { 'iid' : iid }, { "$push": { "caterers": cat_id }} )
        return jsonify ( { "success":'created' }), 201
@app.route('/api/v1/institution/view/<user_type>', methods = [ 'GET' ])
def view(user_type):
    """List the institution's customers, canteens or caterers.

    GET /api/v1/institution/view/<user_type>; the institution id comes
    from the 'iid' cookie. Returns 200 with a JSON array of records.

    NOTE(review): the response key 'estabishment_name' is misspelled but
    is part of the public API schema -- clients must be migrated before
    it can be corrected.
    """
    if request.method == 'GET':
        if(user_type == 'Customer'):
            return_list = []
            iid = request.cookies.get('iid')
            user_collection = db['users']
            for record in user_collection.find({ 'iid':iid, "account_type":"Customer" }):
                #record = dumps(record)
                # Copy only the whitelisted fields (drops _id and password).
                record_json={}
                record_json["uid"] = record["uid"]
                record_json["username"] = record["username"]
                record_json["wallet"] = record["wallet"]
                record_json["iid"] = record["iid"]
                record_json["name"] = record["name"]
                return_list.append(record_json)
            return jsonify(return_list), 200
        elif(user_type == 'Canteen'):
            return_list = []
            iid = request.cookies.get('iid')
            user_collection = db['users']
            can_collection = db['canteens']
            for record in user_collection.find({ 'iid':iid, "account_type":"Canteen" }):
                #record = dumps(record)
                print(dumps(record))
                record_json={}
                record_json["uid"] = record["uid"]
                uid_temp = record_json["uid"]
                # Join the login user with its canteen record via uid.
                record2 = can_collection.find_one({ "uid": uid_temp })
                record_json["can_id"] = record2["can_id"]
                record_json["estabishment_name"] = record2["establishment_name"]
                record_json["owner"] = record2["owner"]
                return_list.append(record_json)
            return jsonify(return_list), 200
        elif(user_type == 'Caterer'):
            return_list = []
            iid = request.cookies.get('iid')
            i_collection = db['institutions']
            # Caterers are global; the institution only stores their ids.
            record_2 = i_collection.find_one({'iid':iid})
            cat_list = record_2['caterers']
            cat_collection = db['caterers']
            for value in cat_list:
                #record = dumps(record)
                record = cat_collection.find_one({ 'cat_id':value })
                record_json={}
                record_json["cat_id"] = record["cat_id"]
                record_json["uid"] = record["uid"]
                record_json["estabishment_name"] = record["establishment_name"]
                record_json["location"] = record["location"]
                record_json["owner"] = record["owner"]
                return_list.append(record_json)
            return jsonify(return_list), 200
@app.route('/api/v1/institution/view_all_cat', methods = [ 'GET' ])
def view_all_cat():
    """Return every caterer in the system (not filtered by institution).

    Returns 200 with a JSON array. NOTE(review): the misspelled key
    'estabishment_name' matches the /view endpoint's public schema.
    """
    cat_collection = db['caterers']
    return_list = []
    for record in cat_collection.find():
        #record = dumps(record)
        record_json={}
        record_json["cat_id"] = record["cat_id"]
        record_json["uid"] = record["uid"]
        record_json["estabishment_name"] = record["establishment_name"]
        record_json["location"] = record["location"]
        record_json["owner"] = record["owner"]
        return_list.append(record_json)
    return jsonify(return_list), 200
@app.route('/api/v1/institution/update_can', methods = ['POST'] )
def update_can():
    """Update a canteen's details in both the canteens and users collections.

    POST with JSON: establishment_name, can_id, owner, username.
    Returns 200 on success.
    """
    if request.method == 'POST':
        establishment_name = request.json.get('establishment_name')
        can_id = request.json.get('can_id')
        owner = request.json.get('owner')
        username = request.json.get('username')
        db.canteens.update( { "can_id": can_id }, { "$set" :{ "establishment_name":establishment_name, "owner":owner } })
        # Resolve the linked login user through the canteen record.
        record=db.canteens.find_one({"can_id":can_id})
        uid = record['uid']
        # NOTE(review): this also writes establishment_name/owner onto the
        # user document, fields the insert path never sets -- confirm intent.
        db.users.update({ "uid": uid }, { "$set" :{ "username":username, "establishment_name":establishment_name, "owner":owner } })
        return jsonify ( { "success":'updated' }), 200
@app.route('/api/v1/institution/remove_user/<user_type>',methods = ['DELETE'])
def remove_user(user_type):
    """Delete a customer, canteen, or an institution's caterer link.

    DELETE /api/v1/institution/remove_user/<user_type>; the target id is
    taken from the JSON body ('uid', 'can_id' or 'cat_id').
    Returns 200 on success. NOTE(review): Collection.remove is the legacy
    pymongo API (delete_one/delete_many in modern versions).
    """
    if request.method == 'DELETE':
        if user_type == 'Customer':
            uid = request.json.get('uid')
            db.users.remove( { 'uid': uid })
        elif user_type == 'Canteen':
            # A canteen spans two collections: resolve its login user,
            # then delete both records.
            can_id = request.json.get('can_id')
            record=db.canteens.find_one({"can_id":can_id})
            uid = record['uid']
            db.canteens.remove( { 'can_id': can_id } )
            db.users.remove({ 'uid':uid })
        elif user_type == 'Caterer':
            # Caterers are global records; only the institution's
            # 'caterers' link array is edited, not the caterer itself.
            cat_id = request.json.get('cat_id')
            iid = request.cookies.get('iid')
            record = db.institutions.find_one({ 'iid': iid })
            cat_list = record['caterers']
            print(cat_list)
            cat_list.remove(cat_id)
            db.institutions.update( { "iid":iid }, { "$set" : { "caterers": cat_list }})
        return jsonify ( { "success":'deleted' }), 200
@app.route('/api/v1/institution/count', methods = ['GET'])
def count():
    """Return the number of users of a given account type.

    GET /api/v1/institution/count?user_type=<type>

    Query params:
        user_type: 'account_type' value to count (e.g. 'Customer').

    Returns 200 with {"success": "return", "count": <int>}.
    """
    if request.method == 'GET':
        user_type = request.args.get('user_type')
        # Count on the server instead of fetching every matching document
        # and iterating the cursor (the previous version was O(n) in
        # transferred documents). Requires pymongo >= 3.7.
        cnt = db.users.count_documents({ 'account_type' : user_type })
        return jsonify( { "success": "return", "count": cnt } ), 200
'''
Created on 02.09.2018
@author: FM
'''
import unittest
import unittest.mock as mock
from test.testing_tools import mock_scandir_gen, mock_assert_msg, mock_assert_many_msg
import CLI
from CLI import detect_file_sets
# Module-level mocks shared by every test; reset between tests in tearDown.
mock_scandir = mock.MagicMock(name='scandir')
mock_FileSet = mock.MagicMock(name='FileSet')
@mock.patch('CLI.os.scandir', new=mock_scandir)
@mock.patch('CLI.FileSet', new=mock_FileSet)
class DetectFileSetsTests(unittest.TestCase): #TODO: this doesn't actually test whether the FileSets are really packed into the file_set_cache in the end. Only whether they're correctly created
    """Tests for CLI.detect_file_sets: directory scanning is mocked and the
    FileSet constructor calls are asserted against expected patterns."""
    def tearDown(self):
        # Clear recorded calls so assertions don't leak between tests.
        mock_scandir.reset_mock()
        mock_FileSet.reset_mock()
    def test_simple_set_alone(self):
        """The CLI should be able to recognize and create a simple file set sitting alone in the directory."""
        test_files = [('test (0).jpg', True), ('test (1).jpg', True), ('test (2).jpg', True), ('test (3).jpg', True), ('test (4).jpg', True)]
        mock_scandir.return_value = mock_scandir_gen(test_files)
        detect_file_sets()
        mock_assert_msg(
            mock_FileSet.assert_called_once_with,
            [('test (', ')'), ['test (0).jpg', 'test (1).jpg', 'test (2).jpg', 'test (3).jpg', 'test (4).jpg']],
            "The CLI fails to find a lonely file set in a directory."""  # NOTE(review): trailing "" is a typo (implicit string concat)
        )
    def test_simple_set_dirt_files(self):
        """The CLI should be able to recognize and create a simple file set even if it's surrounded in unrelated files."""
        test_files = [('TEPPYZGM.png', True), ('test (0).gif', True), ('dirt.gif', True), ('test (1).mp4', True), ('VzC.pdf', True), ('test (2).pdf', True),
                      ('dirt.m4a', True), ('test (3).gif', True), ('test (4).m4a', True), ('test (5).jpg', True), ('test (6).m4a', True), ('test (7).mp4', True)]
        mock_scandir.return_value = mock_scandir_gen(test_files)
        detect_file_sets()
        mock_assert_msg(
            mock_FileSet.assert_called_once_with,
            [('test (', ')'), ['test (0).gif', 'test (1).mp4', 'test (2).pdf', 'test (3).gif', 'test (4).m4a', 'test (5).jpg', 'test (6).m4a', 'test (7).mp4']],
            "The CLI fails to find the correct files and the correct file set if there are dirt files around."
        )
    def test_various_sets(self):
        """The CLI should be able to recognize and create all available file sets in the directory."""
        test_files = [('TEPPYZG09M.png', True), ('test (0).gif', True), ('dirt41.gif', True), ('test (1).mp4', True), ('V57zC.pdf', True), ('test (2).pdf', True),
                      ('dirt90.m4a', True), ('test (3).gif', True), ('test (4).m4a', True), ('test (5).jpg', True), ('test (6).m4a', True), ('test (7).mp4', True)]
        mock_scandir.return_value = mock_scandir_gen(test_files)
        detect_file_sets()
        # Each expected (pattern, files) pair must have been constructed.
        assertion_calls = [
            (mock_FileSet.assert_any_call, [('test (', ')'), ['test (0).gif', 'test (1).mp4', 'test (2).pdf', 'test (3).gif', 'test (4).m4a', 'test (5).jpg', 'test (6).m4a', 'test (7).mp4']]),
            (mock_FileSet.assert_any_call, [('TEPPYZG', 'M'), ['TEPPYZG09M.png']]),
            (mock_FileSet.assert_any_call, [('dirt', ''), ['dirt41.gif', 'dirt90.m4a']]),
            (mock_FileSet.assert_any_call, [('V', 'zC'), ['V57zC.pdf']])
        ]
        mock_assert_many_msg(assertion_calls, "The CLI fails to find all existent FileSets in a directory.")
    def test_multi_extension_types(self):
        """The CLI should correctly identify a set's pattern even if it's files contain multiple extensions (e.g. .tar.gz)."""
        test_files = [('test (0).mp4', True), ('test (1).png', True), ('test (2).gif', True), ('test (3).tar.gz', True), ('test (4).p.n.g', True), ('test (5).jp.gz', True)]
        mock_scandir.return_value = mock_scandir_gen(test_files)
        detect_file_sets()
        mock_assert_msg(
            mock_FileSet.assert_called_once_with,
            [('test (', ')'), ['test (0).mp4', 'test (1).png', 'test (2).gif', 'test (3).tar.gz', 'test (4).p.n.g', 'test (5).jp.gz']],
            "The CLI fails to deal with files that have a multi-extension."
        )
    def test_hidden_file_set(self):
        """The CLI should correctly identify a hidden set that starts with a dot."""
        test_files = [('.hidden0.jpg', True), ('.hidden1.jpg', True), ('.hidden2.jpg', True), ('.hidden3.jpg', True), ('.hidden4.jpg', True)]
        mock_scandir.return_value = mock_scandir_gen(test_files)
        detect_file_sets()
        mock_assert_msg(
            mock_FileSet.assert_called_once_with,
            [('.hidden', ''), ['.hidden0.jpg', '.hidden1.jpg', '.hidden2.jpg', '.hidden3.jpg', '.hidden4.jpg']],
            "The CLI fails to detect and create a hidden file set (i.e. one which's pattern starts with a dot)."
        )
    def test_detect_remove_file_set(self):
        """The CLI should correctly identify a file set that is used for the remove operation by default and set it as a global attribute accordingly."""
        test_files = [('test (0).jpg', True), ('test (1).jpg', True), ('test (2).jpg', True), ('test (3).jpg', True),
                      ('RMVD0.jpg', True), ('RMVD1.jpg', True), ('RMVD2.jpg', True), ('RMVD3.jpg', True)]
        mock_scandir.return_value = mock_scandir_gen(test_files)
        detect_file_sets()
        assertion_calls = [
            (mock_FileSet.assert_any_call, [('test (', ')'), ['test (0).jpg', 'test (1).jpg', 'test (2).jpg', 'test (3).jpg']]),
            (mock_FileSet.assert_any_call, [('RMVD', ''), ['RMVD0.jpg', 'RMVD1.jpg', 'RMVD2.jpg', 'RMVD3.jpg']])
        ]
        mock_assert_many_msg(assertion_calls, "The CLI fails to recognize and create the two file sets.")
        # The RMVD set should also be registered as the module-level default.
        self.assertNotEqual(CLI.default_remove_set, None, "The CLI fails to recognize and set the default remove set after stumbling upon it.")
# Script entry point: run the test suite when executed directly.
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
#!/usr/bin/env python
# ROS dynamic_reconfigure .cfg script: declares the runtime-tunable
# parameters of asr_direct_search_manager and generates the
# DynamicParameters config headers at build time.
PACKAGE="asr_direct_search_manager"
from dynamic_reconfigure.parameter_generator_catkin import *
gen = ParameterGenerator()
# Name, Type, Reconfiguration level,
# Description,
# Default, Min, Max
gen.add("fovH", double_t, 0, "",
        15.0, 0.0, 1000.0)
gen.add("fovV", double_t, 0, "",
        10.0, 0.0, 1000.0)
gen.add("clearVision", double_t, 0, "",
        30.0, 0.0, 1000.0)
gen.add("directSearchMode", int_t, 0,
        "The mode defining which poses to take. 1: grid_manager, 2: recording_manager, 3: grid_initialisation",
        1, 1, 3)
gen.add("distanceFunc", int_t, 0,
        "The disctance function to use. 1: GetDistance from next_best_view (accurate, slow), 2: euclidean distance (approximative, fast)",
        2, 1, 2)
gen.add("reorderPosesByNBV", bool_t, 0,
        "If true the poses will be reordered by nbv, so that poses which have a higher chance to detect an object will be searched first",
        True)
gen.add("reorderPosesByTSP", bool_t, 0,
        "If the poses of the robot_states should be reordered with TSP (nearest_neighbour and two_opt)",
        True)
gen.add("viewCenterPositionDistanceThreshold", double_t, 0, "The threshold when two positions of viewcenter_poses will be seen as approx_equale for filtering already seen viewports",
        0.02, 0.0, 1000.0)
gen.add("filterMinimumNumberOfDeletedNormals", int_t, 0,
        "Remove all robot_states which have not at least this number of normals deleted while the poses were recorded",
        1, 1, 1000)
gen.add("filterIsPositionAllowed", bool_t, 0,
        "Remove all robot_states which the robot can not reach",
        True)
gen.add("concatApproxEqualsPoses", bool_t, 0,
        "Concatenate robot_poses which are approx_equale to one with multiple PTU_tuples",
        False)
gen.add("concatRobotPosePositionDistanceThreshold", double_t, 0, "The threshold when two positions of robot poses will be seen as approx_equale for concatenating two robot poses",
        0.02, 0.0, 1000.0)
gen.add("concatRobotPoseOrientationRadDistanceThreshold", double_t, 0, "The threshold when two orientations (in rad) of robot poses will be seen as approx_equale for concatenating two robot poses",
        5.0, 0.0, 1000.0)
gen.add("gridFilePath", str_t, 0, "The config generated by the grid_creator", "PATH/rsc/config.xml")
gen.add("initializedGridFilePath", str_t, 0, "The initialized grid generated by grid_init_sm in scene_exploration", "PATH/rsc/initializedGrid.xml")
gen.add("recordFilePath", str_t, 0, "The cropbox recording generated by cropbox_record in scene_exploration", "PATH/rsc/cropBoxWholeRoomRecording.xml")
# exit() with the generator's return code is the dynamic_reconfigure
# convention for .cfg scripts.
exit(gen.generate(PACKAGE, "asr_direct_search_manager", "DynamicParameters"))
from datetime import datetime, timedelta
import pymysql.cursors
from pymysql import MySQLError, converters
import lab4.rozwiazanie.nbp as nbp
# Connect to the database
# Override pymysql's DATE converter (type code 10) so dates come back as
# plain strings -- get_sales_list_from_db compares them against str dates.
conv = converters.conversions.copy()
conv[10] = str
connection = pymysql.connect(host='localhost',
                             user='root',
                             password='',
                             db='classicmodels',
                             charset='utf8mb4',
                             cursorclass=pymysql.cursors.DictCursor,
                             conv=conv)
def create_table_with_exchange_rates():
    """Create the usd_pln table and load NBP USD/PLN exchange rates
    for 2003-01-01 .. 2005-12-30 into it.

    Relies on the module-level `connection`; MySQL errors are caught
    and printed rather than raised.
    """
    start_date = '2003-1-1'
    end_date = '2005-12-30'
    # nbp returns parallel lists: rates[0] = values, rates[1] = dates.
    rates = nbp.get_currency_rates_from_date_range('usd', start_date, end_date)
    merged_list = [[rates[1][i], round(rates[0][i], 2)] for i in range(0, len(rates[0]))]
    try:
        with connection.cursor() as cursor:
            # usd defaults to 1 so each row expresses PLN per 1 USD.
            sql = "CREATE TABLE IF NOT EXISTS usd_pln (date DATE UNIQUE, usd REAL DEFAULT 1 , pln REAL)"
            cursor.execute(sql)
        connection.commit()
        with connection.cursor() as cursor:
            sql = "INSERT INTO usd_pln (date, pln) VALUES (%s, %s)"
            cursor.executemany(sql, merged_list)
        connection.commit()
    except MySQLError as e:
        print('Got error {!r}, errno is {}'.format(e, e.args[0]))
def get_sales_list_from_db(start_date, end_date):
    """Return daily order totals in USD and PLN for a date range.

    Args:
        start_date, end_date: 'YYYY-MM-DD' strings (end exclusive in the
            gap-filling below, inclusive in the SQL BETWEEN).

    Returns:
        [dates, usd_totals, pln_totals] with a 0 entry inserted for days
        that had no orders, or None on a MySQL error (printed).
    """
    sd = datetime.strptime(start_date, "%Y-%m-%d").date()
    ed = datetime.strptime(end_date, "%Y-%m-%d").date()
    # Every calendar day in the range; used to pad days with no sales.
    all_dates = [str(sd + timedelta(days=i)) for i in range((ed - sd).days)]
    try:
        with connection.cursor() as cursor:
            # usd_pln.pln converts priceEach (USD) into PLN per order date.
            sql = """SELECT o.orderDate, ROUND(SUM(od.quantityOrdered * (od.priceEach * usdpl.pln)),2) cenapl, ROUND(SUM(od.quantityOrdered * (od.priceEach * usdpl.usd) ),2) cenausd
                    FROM orders o
                    JOIN orderdetails od ON o.orderNumber = od.orderNumber
                    JOIN usd_pln usdpl ON o.orderDate = usdpl.date
                    WHERE o.orderDate BETWEEN %s AND %s
                    GROUP BY o.orderDate"""
            cursor.execute(sql, (start_date, end_date))
            rows = cursor.fetchall()
            result = [[], [], []]
            for row in rows:
                # orderDate arrives as str thanks to the conv[10] override.
                result[0].append(row['orderDate'])
                result[1].append(row['cenausd'])
                result[2].append(row['cenapl'])
            # Insert zero rows for days without orders so the three lists
            # stay aligned with the full calendar.
            for i in range(len(all_dates)):
                if i >= len(result[0]) or all_dates[i] != result[0][i]:
                    result[0].insert(i, all_dates[i])
                    result[1].insert(i, 0)
                    result[2].insert(i, 0)
            return result
    except MySQLError as e:
        print('Got error {!r}, errno is {}'.format(e, e.args[0]))
|
from __future__ import division
import os
import numpy as np
import scipy.io as sio
from imageio import imread
import torch
import torch.utils.data as data
from datasets import pms_transforms
from . import util
# Fix the module-level RNG for reproducible sampling.
np.random.seed(0)
class UpsDiLiGenTDataset(data.Dataset):
    """DiLiGenT photometric-stereo benchmark dataset (uncalibrated setup).

    Loads per-object image stacks, light directions/intensities, the
    object mask and the ground-truth normal map from args.bm_dir.
    """
    def __init__(self, args, split='train'):
        self.root = os.path.join(args.bm_dir)
        self.split = split
        self.args = args
        # objects.txt / names.txt list object folders and image filenames;
        # order matters, so sorting is disabled.
        self.objs = util.read_list(os.path.join(self.root, 'objects.txt'), sort=False)
        self.names = util.read_list(os.path.join(self.root, 'names.txt'), sort=False)
        self.l_dir = util.light_source_directions()
        print('[%s Data] \t%d objs %d lights. Root: %s' %
                (split, len(self.objs), len(self.names), self.root))
        # Per-object light intensities, one row per light source.
        self.ints = {}
        ints_name = 'light_intensities.txt'
        print('Files for intensity: %s' % (ints_name))
        for obj in self.objs:
            self.ints[obj] = np.genfromtxt(os.path.join(self.root, obj, ints_name))
    def _get_mask(self, obj):
        """Load the object mask as (H, W, 1) floats in [0, 1]."""
        mask = imread(os.path.join(self.root, obj, 'mask.png'))
        if mask.ndim > 2: mask = mask[:,:,0]
        mask = mask.reshape(mask.shape[0], mask.shape[1], 1)
        return mask / 255.0
    def __getitem__(self, index):
        # Seed per index so any augmentation is deterministic per object.
        np.random.seed(index)
        obj = self.objs[index]
        select_idx = range(len(self.names))
        img_list = [os.path.join(self.root, obj, self.names[i]) for i in select_idx]
        # Inverse-intensity diagonal matrices used to normalize each image.
        ints = [np.diag(1 / self.ints[obj][i]) for i in select_idx]
        dirs = self.l_dir[select_idx]
        normal_path = os.path.join(self.root, obj, 'Normal_gt.mat')
        normal = sio.loadmat(normal_path)
        normal = normal['Normal_gt']
        imgs = []
        for idx, img_name in enumerate(img_list):
            img = imread(img_name).astype(np.float32) / 255.0
            # Undo the light-intensity scaling unless intensity augmentation
            # is enabled (in which case the raw image is kept).
            if not self.args.int_aug:
                img = np.dot(img, ints[idx])
            imgs.append(img)
        # Stack all light images along the channel axis.
        img = np.concatenate(imgs, 2)
        mask = self._get_mask(obj)
        if self.args.test_resc:
            img, normal = pms_transforms.rescale(img, normal, [self.args.test_h, self.args.test_w])
            mask = pms_transforms.rescale_single(mask, [self.args.test_h, self.args.test_w], 0)
        img = img * mask.repeat(img.shape[2], 2)
        normal = pms_transforms.normalize_to_unit_len(normal, dim=2)
        normal = normal * mask.repeat(3, 2)
        item = {'normal': normal, 'img': img, 'mask': mask}
        # Pad/crop so H and W are divisible by the network's downsample factor.
        downsample = 4
        for k in item.keys():
            item[k] = pms_transforms.imgsize_to_factor_of_k(item[k], downsample)
        proxys = pms_transforms.get_proxy_features(self.args, normal, dirs)
        for k in proxys:
            item[k] = proxys[k]
        for k in item.keys():
            item[k] = pms_transforms.array_to_tensor(item[k])
        # Light directions/intensities as (N*3, 1, 1) tensors for broadcasting.
        item['dirs'] = torch.from_numpy(dirs).view(-1, 1, 1).float()
        item['ints'] = torch.from_numpy(self.ints[obj][select_idx]).view(-1, 1, 1).float()
        item['obj'] = obj
        item['path'] = os.path.join(self.root, obj)
        return item
    def __len__(self):
        return len(self.objs)
|
# Demo script exercising Python set operations (add, update, remove,
# discard, pop, copy). Output order varies: sets are unordered and
# pop() removes an arbitrary element.
myset = {"apple", "banana", "cherry"}
print(myset)
for x in myset:
    print(x)
myset.add("orange")
print(myset)
# update() adds every element of an iterable.
myset.update(["grapes","mango"])
print(myset)
myset.remove("banana")
#myset.remove("banana1") #KeyError: 'banana1'
print(myset)
# discard() is like remove() but silent when the element is absent.
myset.discard("cherry")
myset.discard("cherry1")
print(myset)
myset.pop()
print(myset)
#myset.clear()
#del myset #name 'myset' is not definedx
print(myset)
myset1 = set(("apple", "banana", "cherry")) # note the double round-brackets
print(myset1)
print("copy to another set")
# copy() makes a shallow copy; mutating one set won't affect the other.
myset2 = myset1.copy()
print(myset2)
|
# -*- coding: UTF-8 -*-
# Copyright 2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""
A user interface for Lino applications that uses FaceBooks React JS framework.
.. autosummary::
:toctree:
views
renderer
models
"""
from lino.api.ad import Plugin
class Plugin(Plugin):
    """Lino plugin for the React front end.

    Subclasses (and intentionally shadows) the imported ``lino.api.ad.Plugin``
    base, following the Lino convention for UI plugins.
    """
    # ui_label = _("React")
    ui_handle_attr_name = 'react_handle'
    needs_plugins = ['lino.modlib.jinja', 'lino.modlib.memo']
    disables_plugins = ['tinymce', 'extensible']
    url_prefix = 'react'
    media_name = 'react'
    # media_root = None
    # media_base_url = "http://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/"
    def on_ui_init(self, kernel):
        """Attach this plugin's renderer to the kernel at UI startup."""
        from .renderer import Renderer
        self.renderer = Renderer(self)
        # ui.bs3_renderer = self.renderer
        # The kernel still expects its renderer under the extjs name.
        kernel.extjs_renderer = self.renderer
    def get_patterns(self):
        """Return the Django URL patterns served by the React UI."""
        from django.conf.urls import url
        from django.urls import path
        from . import views
        rx = '^'
        self.renderer.build_site_cache()
        urls = [
            url(rx + r'$', views.Index.as_view()),
            url(rx + r'user/settings', views.UserSettings.as_view()),
            url(rx + r'auth$', views.Authenticate.as_view()),
            url(rx + r"null/", views.Null.as_view()),
            url(rx + r'api/main_html$', views.MainHtml.as_view()),
            path('dashboard/<int:index>', views.DashboardItem.as_view()),
            # To be fased out
            url(rx + r'restful/(?P<app_label>\w+)/(?P<actor>\w+)$',
                views.ApiList.as_view()),
            url(rx + r'restful/(?P<app_label>\w+)/(?P<actor>\w+)/(?P<pk>.+)$',
                views.ApiElement.as_view()),
            # From extjs
            url(rx + r'api/(?P<app_label>\w+)/(?P<actor>\w+)$',
                views.ApiList.as_view()),
            url(rx + r'api/(?P<app_label>\w+)/(?P<actor>\w+)/(?P<pk>[^/]+)$',
                views.ApiElement.as_view()),
            url(rx + r'api/(?P<app_label>\w+)/(?P<actor>\w+)/(?P<pk>[^/]+)/(?P<field>\w+)/suggestions$',
                views.Suggestions.as_view()),
            url(rx + r'choices/(?P<app_label>\w+)/(?P<actor>\w+)$',
                views.Choices.as_view()),
            # Fix: continuation pattern strings must be raw too -- a plain
            # '\w' is an invalid escape (DeprecationWarning, and a
            # SyntaxWarning from Python 3.12).
            url(rx + r'choices/(?P<app_label>\w+)/(?P<actor>\w+)/'
                r'(?P<field>\w+)$',
                views.Choices.as_view()),
            url(rx + r'apchoices/(?P<app_label>\w+)/(?P<actor>\w+)/'
                r'(?P<an>\w+)/(?P<field>\w+)$',
                views.ActionParamChoices.as_view()),
            # For generating views
            # url(rx + r'callbacks/(?P<thread_id>[\-0-9a-zA-Z]+)/'
            #     '(?P<button_id>\w+)$',
            #     views.Callbacks.as_view()),
            #
            url(rx+ r'choicelists/',
                views.ChoiceListModel.as_view()),
        ]
        return urls
    def get_detail_url(self, ar, actor, pk, *args, **kw):
        """Build the hash-routed detail URL for a database row."""
        return self.build_plain_url(
            "#",
            "api",
            actor.actor_id.replace(".", "/"),
            str(pk), *args, **kw)
    def get_used_libs(self, html=False):
        """Yield (name, version, url) of the JS libraries this UI uses."""
        if html is not None:
            yield ("React", '16.6', "https://reactjs.org/")
|
import unittest
from PageObjModelDemo.tests.googleSearchTest import GoogleSearch
from PageObjModelDemo.tests.newLogin import NewLoginTests
from PageObjModelDemo.tests.loginTests import LoginTests
from PageObjModelDemo.Utilities.Utils import Utils
# Build one suite per test class, then group them into named suites.
testCase1 = unittest.TestLoader().loadTestsFromTestCase(GoogleSearch)
testCase2 = unittest.TestLoader().loadTestsFromTestCase(NewLoginTests)
testCase3 = unittest.TestLoader().loadTestsFromTestCase(LoginTests)
smokeTest = unittest.TestSuite([testCase1, testCase2])
regressionTest = unittest.TestSuite([testCase1, testCase2, testCase3])
# Which suite to run is chosen by the 'suiteName' key in the config file.
u = Utils()
config = u.propertiesFileReader("../Configuration/config.properties")
suiteToRun = config["suiteName"]
# unittest.TextTestRunner().run(regressionTest)
if suiteToRun == 'smoke':
    print("Running Smoke Suite")
    unittest.TextTestRunner().run(smokeTest)
if suiteToRun == "regression":
    print("Running Regression Suite")
    unittest.TextTestRunner().run(regressionTest)
|
"""Unit-test module. Checks the Flask response code and the test page."""
import unittest
import os
import sys
import inspect
# Put the project root (two levels up from this test file) on sys.path
# so web_view can be imported without installing the package.
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(os.path.dirname(current_dir))
sys.path.insert(0, parent_dir)
import web_view as tested_app
class FlaskAppTests(unittest.TestCase):
    """Unit-test case class for the Flask application."""
    def setUp(self):
        """Set up: enable TESTING mode and create a test client."""
        tested_app.app.config['TESTING'] = True
        self.app = tested_app.app.test_client()
    def test_get_hello_endpoint(self):
        """Check that the /unit_test page is reachable and returns its body."""
        resp = self.app.get('/unit_test')
        self.assertEqual(resp.data, b'Hello World!')
    def test_post_hello_endpoint(self):
        """Check the response code: the main page must return 200 when
        Flask has started successfully."""
        resp = self.app.get('/')
        self.assertEqual(resp.status_code, 200)
if __name__ == '__main__':
    unittest.main()
|
"""
Flatten
¯¯¯¯¯¯¯
This function can be used to flatten a message, whose elements are deeply nested in groups.
"""
import collections
import collections.abc
import json
def flatten(d, parent_key='', sep='_'):
    """
    source: https://stackoverflow.com/a/6027615
    :param d: The dict, representing the json message
    :param parent_key: Auxiliary accumulator
    :param sep: A string that is used for separating the accumulated key
    :return: a flattened dict
    """
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        # Fix: the collections.MutableMapping alias was deprecated in
        # Python 3.3 and removed in 3.10; collections.abc is the
        # canonical location.
        if isinstance(v, collections.abc.MutableMapping):
            # Recurse into nested mappings, accumulating the key path.
            items.extend(flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
def handle(req):
    """Flatten the 'data' section of a JSON-encoded message.

    Args:
        req (str): request body -- a JSON object with a 'data' key
            whose nested groups are flattened in place.

    Returns:
        str: the message re-serialised as JSON with 'data' flattened.
    """
    message = json.loads(req)
    message['data'] = flatten(message.get('data'))
    return json.dumps(message)
|
from PIL import Image
from tools.image import sliding_window, draw_red_square, create_dump_folder_for_images, convert_image_to_array, get_percentage_of_white
from tools.classifier import get_trained_classifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
import sys
import pyttsx3
def main():
    """CLI entry point: parse arguments, train/load the chosen classifier
    (svc / knn / ann, defaulting to ANN) and run OCR on the target image."""
    arguments = sys.argv[1:]
    """
    --- Help option
    """
    if "-h" in arguments or "--help" in arguments:
        print(f"""
        This program uses machine learning methods to predict written letters of different fonts, sizes, rotations, etc.
        Authors: johannkv, hmrydber, paaledwl
        Usage: python ocr.py [METHOD] [--FLAGS]
        Available commands: \n
        svc: Use SVC.
        ann: Use ANN.
        knn: Use KNN.\n
        --help / -h: Display this text. \n
        --no-save: Do not pickle and save trained classifier. \n
        --train: Ignore existing pickles and retrain classifier. \n
        --use-tts: Uses text-to-speech to read the perceived text. \n
        --no-dump: Do not save output image to file. \n
        --image: File name of an image in detection-images which will be used for the prediction.
        """)
        return
    """
    --- End help option
    """
    ### SVC classification ###
    # Each training callback fits its model and reports a sanity-check
    # prediction on the first training sample.
    def SVC_training_method(x_training, x_testing, y_training, y_testing):
        classifier_SVC = SVC(gamma="scale", verbose=False, probability=False)
        classifier_SVC.fit(x_training, y_training)
        print(f"\n\nUsing SVC algorithm:\nClassifying: {y_training[0]} and got {classifier_SVC.predict([x_training[0]])}\n")
        return classifier_SVC
    ### K-nearest neighbors classification ###
    def KNN_training_method(x_training, x_testing, y_training, y_testing):
        classifier_KN = KNeighborsClassifier(n_neighbors=6, weights="distance")
        classifier_KN.fit(x_training, y_training)
        print(f"\n\nUsing K-nearest neighbor algorithm:\nClassifying: {y_training[0]} and got {classifier_KN.predict([x_training[0]])}\n")
        return classifier_KN
    ### ANN classification ###
    def ANN_training_method(x_training, x_testing, y_training, y_testing):
        classifier_ANN = MLPClassifier(solver="adam", alpha=0.0001, learning_rate_init=0.001, max_iter=20000, activation="logistic", learning_rate="adaptive")
        classifier_ANN.fit(x_training, y_training)
        print(f"\n\nUsing neural network algorithm:\nClassifying: {y_training[0]} and got {classifier_ANN.predict([x_training[0]])}\n")
        return classifier_ANN
    # Testing with different classifiers
    if not arguments:
        print("No method specified from command line, using ANN as default")
        check_windows_in_image_with_classifier(classifier = get_trained_classifier("ann.pkl", ANN_training_method, True, True))
    elif arguments[0] == "svc":
        print("Using SVC")
        check_windows_in_image_with_classifier(classifier = get_trained_classifier("svc.pkl", SVC_training_method, True, True))
    elif arguments[0] == "knn":
        print("Using KNN")
        check_windows_in_image_with_classifier(classifier = get_trained_classifier("knn.pkl", KNN_training_method, True, True))
    elif arguments[0] == "ann":
        print("Using ANN")
        check_windows_in_image_with_classifier(classifier = get_trained_classifier("ann.pkl", ANN_training_method, True, True))
    else:
        print("Did not recognize method specified from command line, using ANN as default")
        check_windows_in_image_with_classifier(classifier = get_trained_classifier("ann.pkl", ANN_training_method, True, True))
# Scan the target image for areas in the close vicinity of the current
# window; if a better candidate (less white) is found, return it along
# with its position and white percentage.
def scan_image_for_area_with_less_white(x, y, image, white_percentage = 1):
    """Search the 30x30 neighbourhood around (x, y) for the 20x20 crop
    with the smallest white fraction below the given baseline.

    Returns (crop, white_fraction, (x, y)) of the winning crop, or
    (None, baseline, None) when nothing beats the baseline.
    """
    lowest_white = white_percentage
    winning_crop = None
    winning_coords = None
    for cand_x in range(x - 15, x + 15):
        for cand_y in range(y - 15, y + 15):
            crop = image.crop([cand_x, cand_y, cand_x + 20, cand_y + 20])
            crop_white = get_percentage_of_white(crop)
            if crop_white < lowest_white:
                lowest_white = crop_white
                winning_crop = crop
                winning_coords = (cand_x, cand_y)
    return winning_crop, lowest_white, winning_coords
# Cache of squares already checked. Whenever a new candidate window is
# produced, we look for a previously recorded square whose 20x20 area
# (with a 15px margin) contains the candidate's coordinates. On a hit the
# prediction tally for that square is incremented; otherwise a fresh
# square entry is started. This way red squares are only drawn once per
# most-probable letter at the end, and the printed text contains one
# character per square.
def update_window_cache(window_cache, candidate_coords, prediction):
    cand_x, cand_y = candidate_coords
    # Try to fold the candidate into the first overlapping known square.
    for (known_x, known_y), tally in window_cache.items():
        inside_x = (known_x - 15) < cand_x < (known_x + 20)
        inside_y = (known_y - 15) < cand_y < (known_y + 20)
        if inside_x and inside_y:
            tally[prediction] = tally.get(prediction, 0) + 1
            return window_cache
    # No overlapping square known yet: start a new tally for these coords.
    window_cache[candidate_coords] = {prediction: 1}
    return window_cache
def check_windows_in_image_with_classifier(classifier, image_path = "./dataset/detection-images/detection-2.jpg"):
    """Slide a 20x20 window over the image, classify promising windows,
    aggregate predictions per square, draw red boxes and save/show the
    annotated result.

    Args:
        classifier: fitted sklearn-style classifier with .predict().
        image_path: image to scan; overridable with the --image CLI flag.
    """
    # Override default image path if specified in arguments
    if "--image" in sys.argv[1:]:
        index_of_file_path = sys.argv.index("--image")
        index_of_image_name = index_of_file_path + 1
        # --image was given as the last token with no filename after it.
        if index_of_image_name > (len(sys.argv) -1):
            print("Please specify the name of an image in the 'dataset/detection-images/'-folder when using --image.")
            print("Stopping due to error above...")
            return
        image_path = f"./dataset/detection-images/{sys.argv[index_of_image_name]}"
    print(f"Running OCR on {image_path}")
    try:
        img = Image.open(image_path)
    except FileNotFoundError:
        print("\nERROR!")
        print(f"{image_path} did not match any files in the 'dataset/detection-images/'-folder")
        return
    # Red squares are drawn on an RGB copy; classification uses the original.
    imgCopy = img.convert(mode = "RGB")
    winHeight = 20
    winWidth = 20
    string = ""
    checked_squares = {}
    for (x, y, window) in sliding_window(img, stepSize = 8, windowSize=(winHeight, winWidth)):
        white_percentage = get_percentage_of_white(window)
        # If more than 70 percent of the image is white, it is highly probable that the classifier will be incorrect
        # and that a better candidate probably will be found by the sliding window technique.
        if white_percentage > 0.7:
            continue
        # Check for other candidates in the vicinity of the window
        best_candidate, best_white, best_cand_coords = scan_image_for_area_with_less_white(x, y, img, white_percentage)
        # If a better candidate is found set this to be the selected window crop.
        if best_candidate:
            window = best_candidate
        # If image has passed criterias, prepare it for prediction
        img_array = convert_image_to_array(window, use_hog = True, expand_inverted = False)
        # Skip empty images that passed previous tests.
        if len(img_array) == 0:
            continue
        # Predict based on the current window
        predicted = classifier.predict(img_array.reshape(1, -1))
        # If no better candidate was found, use x,y
        best_cand_coords = best_cand_coords if best_cand_coords else (x, y)
        # Init cache if empty
        if len(checked_squares.keys()) == 0:
            checked_squares[best_cand_coords] = {predicted[0]: 1}
        else:
            checked_squares = update_window_cache(window_cache = checked_squares, candidate_coords = best_cand_coords, prediction = predicted[0])
    """
    Sort through the cache and construct a sentence of the letters found.
    If, for a given area, more letters have been suggested, use the letter predicted most times.
    """
    cache_prediction = ""
    for predictions in checked_squares.values():
        most_probable_prediction = max(predictions.keys(), key=lambda key: predictions[key])
        cache_prediction += most_probable_prediction
    print(f"Most probable single solution: {cache_prediction}")
    # For every box registered with a prediction, draw a red square.
    for (x1, y1) in checked_squares.keys():
        imgCopy = draw_red_square(x = x1, y = y1, target_image = imgCopy)
    # Text-to-speech, a funny addition for entertainment purposes.
    if len(sys.argv) > 1 and "--use-tts" in sys.argv[1:]:
        try:
            tts_engine = pyttsx3.init()
            tts_engine.setProperty('rate', 20)
            tts_engine.say(cache_prediction)
            tts_engine.runAndWait()
        except OSError:
            print("Failed to access local text-to-speech method for this device.")
    # Save output image to dump folder
    create_dump_folder_for_images()
    if not "--no-dump" in sys.argv[1:]:
        img_name = image_path.split("/")[-1].split(".")[0]
        imgCopy.save(f"./dump/output_{img_name}.png", "PNG")
    imgCopy.show()
    print("\nIf the result image did not automatically open, it can be found in the dump folder.")
# Script entry point: parse CLI args and run the OCR pipeline.
if __name__ == "__main__":
    main()
|
from django.core.urlresolvers import reverse
from django.core.exceptions import MultipleObjectsReturned
from django.utils.safestring import mark_safe
from django.http import HttpResponse
def export_csv(modeladmin, request, queryset):
    """Django admin action: export the selected queryset as a UTF-8 CSV
    attachment (one header row of field names, one row per object).

    NOTE(review): uses the Python 2 `unicode` builtin -- this module is
    Python 2 only (see also the django.core.urlresolvers import above).
    """
    import csv
    from django.utils.encoding import smart_str
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=mymodel.csv'
    writer = csv.writer(response, csv.excel)
    response.write(u'\ufeff'.encode('utf8')) # BOM (optional...Excel needs it to open UTF-8 file properly)
    raw_field_names = queryset.model._meta.fields
    field_names = [f.name for f in raw_field_names]
    writer.writerow(field_names)
    for obj in queryset:
        # Re-uses the field_names variable for the row values.
        field_names = [getattr(obj, f.name) for f in raw_field_names]
        writer.writerow([unicode(f).encode('utf8') for f in field_names])
    return response
def add_link_field(target_model=None, field='', app='', field_name='link',
                link_text=unicode, short_description=None):
    """
    Class decorator for a ModelAdmin: adds a read-only column whose cell links
    to a related model instance's admin change page.

    Inspired by http://stackoverflow.com/questions/9919780/

    :param target_model: lowercase model name (or model) to link to; defaults
        to the admin's own model name
    :param field: attribute on the instance holding the link target (the
        instance itself is used when the attribute is missing or None)
    :param app: app label; defaults to the instance's own app label
    :param field_name: name of the generated admin field
    :param link_text: callable mapping the target object to the link text
    :param short_description: column header (defaults to the model name)
    :return: the decorated ModelAdmin class
    """
    def add_link(cls):
        reverse_name = target_model or cls.model.__name__.lower()
        def link(self, instance):
            app_name = app or instance._meta.app_label
            reverse_path = "admin:%s_%s_change" % (app_name, reverse_name)
            # Fall back to the instance itself when the named field is absent/None.
            link_obj = getattr(instance, field, None) or instance
            # manyrelatedmanager with one result?
            if link_obj.__class__.__name__ == "RelatedManager":
                try:
                    link_obj = link_obj.get()
                except MultipleObjectsReturned:
                    return u"multiple, can't link"
                except link_obj.model.DoesNotExist:
                    return u""
            url = reverse(reverse_path, args = (link_obj.id,))
            return mark_safe(u"<a href='%s'>%s</a>" % (url, link_text(link_obj)))
        link.allow_tags = True
        link.short_description = short_description or (reverse_name)
        setattr(cls, field_name, link)
        # Generated link columns must be registered as read-only in the admin.
        cls.readonly_fields = list(getattr(cls, 'readonly_fields', [])) + \
            [field_name]
        return cls
    return add_link
return add_link |
# coding: utf-8
# ##A. match_ends
# Given a list of strings, return the count of the number of strings where the string length is 2 or more and the first and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
# In[22]:
def match_ends(words):
    # Count the words of length >= 2 whose first and last characters match.
    return sum(1 for w in words if len(w) >= 2 and w[0] == w[-1])
# In[31]:
# NOTE(review): `list` shadows the builtin of the same name; later cells
# (e.g. `print front_x(list)`) read this binding, so it is kept as-is.
list=["letter","abcbca","test","winter","xanadu","xax"]
print match_ends(list)
# ##B. front_x
# Given a list of strings, return a list with the strings in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them before combining them.
# In[35]:
def front_x(words):
    # Sorted copy of words, except that words starting with 'x' (or 'X')
    # are grouped first; each group is alphabetized independently.
    starts_with_x = sorted(w for w in words if w[0] == "x" or w[0] == "X")
    the_rest = sorted(w for w in words if not (w[0] == "x" or w[0] == "X"))
    return starts_with_x + the_rest
# In[36]:
# Demo: x-words first, each group alphabetized (uses the `list` cell above).
print front_x(list)
# ##C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element form each tuple.
#
# In[47]:
def key(tuple):
    # Last element of the tuple (negative indexing instead of len()-1).
    return tuple[-1]
def sort_last(tuples):
    # Sort by each tuple's final element (sorted() is stable for ties).
    return sorted(tuples, key=lambda t: t[-1])
# In[48]:
# Sample from the exercise prompt: sorted by each tuple's last element.
list2= [(1, 7), (1, 3), (3, 4, 5), (2, 2)]
print sort_last(list2)
# ##Testing the A,B and C fuctions :
# In[49]:
def test(got, expected):
    # Tiny harness from Google's Python class: prints one OK/X line per case.
    if got == expected:
        prefix = ' OK '
    else:
        prefix = ' X '
    print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
def main():
    # Exercise the A and B solutions against the course's expected outputs.
    print 'match_ends'
    test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
    test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
    test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
    print
    print 'front_x'
    test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
         ['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
    test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
         ['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
    test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
         ['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
main()
# ##D. remove_adjacent
# Given a list of numbers, return a list where all adjacent == elements have been reduced to a single element, so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or modify the passed in list.
# In[53]:
def remove_adjacent(nums):
    """
    Collapse each run of equal *adjacent* elements to a single element:
    [1, 2, 2, 3] -> [1, 2, 3], but [1, 2, 2, 1] -> [1, 2, 1].

    BUGFIX: the original tested `i not in new_nums`, which removed ALL
    duplicates anywhere in the list (so [1, 2, 2, 1] wrongly became [1, 2]).
    Only the immediately preceding element should be compared.
    """
    result = []
    for value in nums:
        if not result or result[-1] != value:
            result.append(value)
    return result
# In[54]:
# All duplicates in this sample are adjacent, so the expected result is [1, 2, 3].
list3=[1,1,1,2,3,3]
print remove_adjacent(list3)
# ##E. linear_merge
# Given two lists sorted in increasing order, create and return a merged list of all the elements in sorted order. You may modify the passed in lists. Ideally, the solution should work in "linear" time, making a single pass of both lists.
#
# Note: the solution above is kind of cute, but unforunately list.pop(0) is not constant time with the standard python list implementation, so the above is not strictly linear time. An alternate approach uses pop(-1) to remove the endmost elements from each list, building a solution list which is backwards. Then use reversed() to put the result back in the correct order. That solution works in linear time, but is more ugly.
#
# In[68]:
def linear_merge(list1, list2):
    """
    Merge two lists that are sorted in increasing order into one sorted list,
    in a single linear pass (as the exercise requires).

    BUGFIX: the original appended everything to list2 and re-sorted it -
    O(n log n) and it mutated the caller's list2. This version is a classic
    two-pointer merge: O(len(list1) + len(list2)), no mutation, and stable
    (ties take the list1 element first, matching a stable sort's output).
    """
    merged = []
    i = j = 0
    while i < len(list1) and j < len(list2):
        if list1[i] <= list2[j]:
            merged.append(list1[i])
            i += 1
        else:
            merged.append(list2[j])
            j += 1
    # At most one of these extends with anything.
    merged.extend(list1[i:])
    merged.extend(list2[j:])
    return merged
# In[66]:
# NOTE(review): list4 is NOT sorted ("zer" > "diu"), which violates
# linear_merge's stated precondition of two increasing-order inputs.
list4=["abc","zer","diu"]
list5=["abc","kjt"]
print linear_merge(list4,list5)
# ##Testing the D and E functions
# In[69]:
def test(got, expected):
    # OK/X print harness (redefined identically for this notebook cell).
    if got == expected:
        prefix = ' OK '
    else:
        prefix = ' X '
    print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
def main():
    # Exercise the D and E solutions against the course's expected outputs.
    print 'remove_adjacent'
    test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
    test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
    test(remove_adjacent([]), [])
    print
    print 'linear_merge'
    test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
         ['aa', 'aa', 'aa', 'bb', 'bb'])
main()
# In[ ]:
|
#!/usr/bin/python
# -*- coding: utf8 -*-
"""
AstroRobot
Exceptions
"""
from __future__ import unicode_literals
class InitError(Exception):
    """
    Raised when the observing situation has been set up incorrectly.
    """
    pass
class DownloadingError(Exception):
    """
    Raised when a download / HTTP fetch fails.
    """
pass |
import cStringIO, gzip
''' Helper function to gzip JSON data (used in data API views)'''
def gzip_data(json):
    """
    Gzip-compress the given payload (bytes, or a Py2 str) and return the
    compressed bytes. Used to shrink data-API responses.

    Note: the parameter name `json` shadows the stdlib module name but is
    kept for backward compatibility with keyword callers.
    """
    # io.BytesIO works on both Python 2 and 3 (cStringIO is Py2-only);
    # local import keeps this a drop-in change for the module header.
    import io
    gzip_buffer = io.BytesIO()
    # `with` guarantees the gzip trailer is flushed even if write() raises.
    with gzip.GzipFile(mode='wb', compresslevel=6, fileobj=gzip_buffer) as gzip_file:
        gzip_file.write(json)
    return gzip_buffer.getvalue()
|
'''
author: juzicode
address: www.juzicode.com
公众号: juzicode/桔子code
date: 2020.6.2
'''
print('\n')
print('-----欢迎来到www.juzicode.com')
print('-----公众号: juzicode/桔子code\n')
print('list类型例程:')
print('\nlist添加元素-extend:')
# append() adds its argument as ONE element, so l1 gains a nested list.
l1 = [1,2,3]
print('l1:',l1)
l1.append([4,5,6])
print('l1经过append后:',l1)
# extend() splices the argument's elements onto the end one by one.
l2 = [1,2,3]
print('l2:',l2)
l2.extend([4,5,6])
print('l2经过extend后:',l2)
|
#coding=utf-8
#1. Study how to get text by attrs
# <span class="body"> xxx </span> ==> tag.find(attrs="body")
#<div class="content"> ==> tag.find(attrs="content").get_text()[1:]
# <div id="qiushi_tag_118914192" class="article block untagged mb15"> ==> soup.find_all(attrs="article block untagged mb15"), tag['id']
#2. Study to use raw_input control
import urllib2
from bs4 import BeautifulSoup
def getHtml(url):
    # Fetch the page at `url`, presenting a desktop-browser User-Agent,
    # and return the raw response body.
    ua_headers = {"User-Agent":'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.101 Safari/537.36'}
    request = urllib2.Request(url, headers=ua_headers)
    return urllib2.urlopen(request).read()
#get url from source page
def get_tag_html(html):
soup = BeautifulSoup(html,'lxml')
all_tag = soup.find_all(attrs="article block untagged mb15")
Num = 1
for tag in all_tag:
#Get article
full_content = tag.find(attrs="content").get_text()[1:]
if len(full_content) > 50:
print str(Num) + ": " + full_content[:50]
print full_content[51:]
else:
print str(Num) + ": " + full_content
#Get comments
href = 'http://www.qiushibaike.com/article/' + tag['id'].strip()[-9:]
commpage = getHtml(href)
commentFloor =1
soup = BeautifulSoup(commpage, 'lxml')
for comments in soup.find_all(attrs="body"): #
print " ",commentFloor," reply ",comments.get_text()
commentFloor +=1
Num +=1
articleUrl = "http://www.qiushibaike.com/textnew/page/%d"
commentUrl = "http://www.qiushibaike.com/article/%s"
page = 0
while True:
raw = raw_input("Print Enter or exit,pls make your choice:")
if raw == "exit":
break
page += 1
html = getHtml(articleUrl % page)
get_tag_html(html) |
"""
thing module
"""
class Thing(object):
    """A minimal placeholder entity; carries no state or behavior yet."""

    def __init__(self):
        """Nothing to initialize yet."""
        pass
|
# stdlib
from functools import wraps

# Django
from django.http import HttpResponseRedirect
from django.http import JsonResponse
from django.urls import reverse
def login_required(f):
    """
    Method decorator: redirect anonymous users to the login page.

    Expects the wrapped callable to be a view *method* (self, request, ...).
    """
    # functools.wraps preserves the view's __name__/__doc__ for introspection
    # and URL/debug tooling (the original wrapper hid them).
    @wraps(f)
    def wrap(self, request, *args, **kwargs):
        if not request.user:
            return HttpResponseRedirect(reverse("login"))
        return f(self, request, *args, **kwargs)
    return wrap
def login_required_json(f):
    """
    Method decorator: return a 401 JSON body for anonymous users.

    Expects the wrapped callable to be a view *method* (self, request, ...).
    """
    # functools.wraps preserves the view's metadata, mirroring login_required.
    @wraps(f)
    def wrap(self, request, *args, **kwargs):
        if not request.user:
            return JsonResponse({
                "status" : "unauthorized"
            }, status=401)
        return f(self, request, *args, **kwargs)
    return wrap
# BUCKET-SORT(A)
# Sorts the array A containing values that are distributed over the interval [0, 1) given by some given uniform distribution
# X. The interval [0, 1) is divided into n-equal sized buckets (or subintervals). Since the inputs are unformaly distributed,
# bucket sizes should be small. We then simply sort each bucket (using a stable sorting algorithm)
# Analysis:
# Time Complexity: O(n)
# Chapter 8.4, Page 201
from algo_crate.sorting.quicksort import quicksort
from math import floor
def bucket_sort(a):
    """
    Sort values assumed uniformly distributed over [0, 1) by scattering them
    into n equal-width buckets, sorting each bucket, and concatenating.
    Average-case O(n) for uniform input (CLRS 8.4).
    """
    n = len(a)
    # BUGFIX: the original used `[[]] * n`, which creates n references to ONE
    # shared list - every append landed in every "bucket", so the result was
    # n concatenated copies of the whole input.
    buckets = [[] for _ in range(n)]
    for value in a:
        buckets[floor(n * value)].append(value)
    result = []
    for bucket in buckets:
        # list.sort is a stable sort, matching the header comment's
        # requirement (the imported quicksort is not stable).
        bucket.sort()
        result += bucket
    return result
|
'''
Permutations without Dups: Write a method to compute all permutations of a string
of unique characters.
'''
def getPerm(str):
    # Guard: empty string or None has no permutations to report.
    if not str:
        return None
    return getPermHelper(str)
def getPermHelper(str):
    """
    Return every permutation of `str` (base case: [""] for the empty string).

    BUGFIX: the original returned `results + remainingPerm`, leaking all the
    shorter sub-permutations into the answer (getPerm("sb") gave
    ['sb', 'bs', 's', 'b', '']). The spec asks for full-length permutations
    only, so just the insertions of the first character are returned.
    """
    if len(str) == 0:
        return [""]
    firstChar = str[0]
    results = []
    # Insert firstChar into every slot of every permutation of the tail.
    for perm in getPermHelper(str[1:]):
        for index in range(0, len(perm) + 1):
            results.append(perm[:index] + firstChar + perm[index:])
    return results
if __name__ == '__main__':
    # Smoke test: permutations of a 2-character string.
    print(getPerm("sb"))
print(getPerm("sbc")) |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @author: Tangxiaocu
def talk_with_daddy():
def main():
who = '糖小醋的老妈 '
good_description = "西双版纳大白菜"
is_cheap = False
good_price = 2 #每降低一元,多买一斤
reasonable_price = 5
buy_amount = 2 #每降低一元,多买一斤
print '%s上街看到了%s, 卖 %d 元/斤' % (who, good_description, good_price)
if good_price <= reasonable_price:
print '她认为便宜'
is_cheap = True
#解决老妈最终买几斤的问题
#5-2 4-3 3-4 2-4
buy_amount = (reasonable_price - good_price)+2
if buy_amount >= 4:
print '她买了 4 斤'
else :
print '她买了 %d 斤' % (buy_amount)
else:
print '她认为贵了'
is_cheap = False
print '她并没有买,扬长而去'
#show time
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
"""Variable Scope - Global variables, varaibles that are visible inside and outside of functions. NOTE: careful with assigning names to globals, as you could 'expose' this to other functions in your workspace/namespace"""
__author__ = 'Saul Moore (sm5911@imperial.ac.uk)'
__version__ = '0.0.1'
_a_global = 10 # module-level ("global") variable used by the demos below
def a_function(): # 'def' defines a function within the module, in this case, 'a_function'
    _a_global = 5 # no 'global' statement, so this creates a *local* that shadows the module-level _a_global
    _a_local = 4 # _a_local variable, assigned as 4
    print "Inside the function, the value is ", _a_global
    print "Inside the function, the value is ", _a_local
    return None
a_function()
# The module-level _a_global is untouched by the call above (it was shadowed).
print "Outside the function, the value is ", _a_global
_a_global = 10 # Global variable re-assigned as 10
def a_function():
    global _a_global # rebinds the *module-level* name instead of creating a local
    _a_global = 5
    _a_local = 4
    print "Inside the function, the value is ", _a_global
    print "Inside the function, the value is ", _a_local
    return None
a_function()
# This time the call really did rebind the global, so this prints 5.
print "Outside the function, the value is ", _a_global
|
import logging
from math import sqrt
from configparser import ConfigParser
class Data_Processing(object):
def __init__(self, comm):
self.logger = logging.getLogger('COMM')
self.logger.info("[+] Initializing Data Processor")
self.config = ConfigParser()
self.config.read('config.ini')
self.frame_rate = self.config['DataProc'].getint('frame_rate')
self.comm = comm
    def run(self):
        # Process loop: poll/validate/average forever (never returns).
        while True:
            self.update()
def update(self):
"""
Do the averaging and deviation checking in this function
"""
try:
# Check to make sure none of the sensors have stopped sending data
packet1 = self.comm.controller1.get(timeout=self.timeout)
packet2 = self.comm.controller2.get(timeout=self.timeout)
packet3 = self.comm.controller3.get(timeout=self.timeout)
packet4 = self.comm.controller4.get(timeout=self.timeout)
packet5 = self.comm.controller5.get(timeout=self.timeout)
except: # queue.Empty exception
self.logger.critical("[+] Microcontroller timed out!")
self.sm.on_event('estop')
return
# System checks (Parameters to this are going to be the packets)
HPS_check(packet1, packet2, packet3, packet4)
VPS_check(packet1, packet2, packet3, packet4)
IMU_check(packet1, packet2, packet3, packet4)
BMS_check(packet5)
sum_of_diffs = 0
mean_of_diffs = 0
new_sum = 0
sum_count = 0
original_mean_imu_accel_x = 0
standard_dev_imu_accel_x = 0
nominal_mean_imu_accel_x = 0
original_mean_imu_accel_x = packet1["accelerometer"]["acceleration"]["x"] + packet2["accelerometer"]["acceleration"]["x"] + packet3["accelerometer"]["acceleration"]["x"] + packet4["accelerometer"]["acceleration"]["x"]
diff_1 = (original_mean_imu_accel_x - packet1["accelerometer"]["acceleration"]["x"])*(original_mean_imu_accel_x - packet1["accelerometer"]["acceleration"]["x"])
diff_2 = (original_mean_imu_accel_x - packet2["accelerometer"]["acceleration"]["x"])*(original_mean_imu_accel_x - packet2["accelerometer"]["acceleration"]["x"])
diff_3 = (original_mean_imu_accel_x - packet3["accelerometer"]["acceleration"]["x"])*(original_mean_imu_accel_x - packet3["accelerometer"]["acceleration"]["x"])
diff_4 = (original_mean_imu_accel_x - packet4["accelerometer"]["acceleration"]["x"])*(original_mean_imu_accel_x - packet4["accelerometer"]["acceleration"]["x"])
sum_of_diffs = diff_1 + diff_2 + diff_3 + diff_4
mean_of_diffs = sum_of_diffs / 4
standard_dev_imu_accel_x = sqrt(mean_of_diffs)
if packet1["accelerometer"]["acceleration"]["x"] < (original_mean_imu_accel_x - standard_dev_imu_accel_x) or packet1["accelerometer"]["acceleration"]["x"] > (original_mean_imu_accel_x + standard_dev_imu_accel_x):
""" calc new mean if nums are out of 1 SD """
packet1["accelerometer"]["acceleration"]["x"] = False
else:
new_sum = new_sum + packet1["accelerometer"]["acceleration"]["x"]
sum_count = sum_count + 1
if packet2["accelerometer"]["acceleration"]["x"] < (original_mean_imu_accel_x - standard_dev_imu_accel_x) or packet2["accelerometer"]["acceleration"]["x"] > (original_mean_imu_accel_x + standard_dev_imu_accel_x):
""" calc new mean if nums are out of 1 SD """
packet2["accelerometer"]["acceleration"]["x"] = False
else:
new_sum = new_sum + packet2["accelerometer"]["acceleration"]["x"]
sum_count = sum_count + 1
if packet3["accelerometer"]["acceleration"]["x"] < (original_mean_imu_accel_x - standard_dev_imu_accel_x) or packet3["accelerometer"]["acceleration"]["x"] > (original_mean_imu_accel_x + standard_dev_imu_accel_x):
""" calc new mean if nums are out of 1 SD """
packet3["accelerometer"]["acceleration"]["x"] = False
else:
new_sum = new_sum + packet3["accelerometer"]["acceleration"]["x"]
sum_count = sum_count + 1
if packet4["accelerometer"]["acceleration"]["x"] < (original_mean_imu_accel_x - standard_dev_imu_accel_x) or packet4["accelerometer"]["acceleration"]["x"] > (original_mean_imu_accel_x + standard_dev_imu_accel_x):
""" calc new mean if nums are out of 1 SD """
packet4["accelerometer"]["acceleration"]["x"] = False
else:
""" calc new mean when no nums are out of 1 SD """
new_sum = new_sum + packet4["accelerometer"]["acceleration"]["x"]
sum_count = sum_count + 1
nominal_mean_imu_accel_x = new_sum / sum_count
original_mean_imu_accel_y = 0
standard_dev_imu_accel_y = 0
nominal_mean_imu_accel_y = 0
original_mean_imu_accel_y = packet1["accelerometer"]["acceleration"]["y"] + packet2["accelerometer"]["acceleration"]["y"] + packet3["accelerometer"]["acceleration"]["y"] + packet4["accelerometer"]["acceleration"]["y"]
diff_1 = (original_mean_imu_accel_y - packet1["accelerometer"]["acceleration"]["y"])*(original_mean_imu_accel_y - packet1["accelerometer"]["acceleration"]["y"])
diff_2 = (original_mean_imu_accel_y - packet2["accelerometer"]["acceleration"]["y"])*(original_mean_imu_accel_y - packet2["accelerometer"]["acceleration"]["y"])
diff_3 = (original_mean_imu_accel_y - packet3["accelerometer"]["acceleration"]["y"])*(original_mean_imu_accel_y - packet3["accelerometer"]["acceleration"]["y"])
diff_4 = (original_mean_imu_accel_y - packet4["accelerometer"]["acceleration"]["y"])*(original_mean_imu_accel_y - packet4["accelerometer"]["acceleration"]["y"])
sum_of_diffs = diff_1 + diff_2 + diff_3 + diff_4
mean_of_diffs = sum_of_diffs / 4
standard_dev_imu_accel_y = sqrt(mean_of_diffs)
if packet1["accelerometer"]["acceleration"]["y"] < (original_mean_imu_accel_y - standard_dev_imu_accel_y) or packet1["accelerometer"]["acceleration"]["y"] > (original_mean_imu_accel_y + standard_dev_imu_accel_y):
""" calc new mean if nums are out of 1 SD """
packet1["accelerometer"]["acceleration"]["y"] = False
else:
new_sum = new_sum + packet1["accelerometer"]["acceleration"]["y"]
sum_count = sum_count + 1
if packet2["accelerometer"]["acceleration"]["y"] < (original_mean_imu_accel_y - standard_dev_imu_accel_y) or packet2["accelerometer"]["acceleration"]["y"] > (original_mean_imu_accel_y + standard_dev_imu_accel_y):
""" calc new mean if nums are out of 1 SD """
packet2["accelerometer"]["acceleration"]["y"] = False
else:
new_sum = new_sum + packet2["accelerometer"]["acceleration"]["y"]
sum_count = sum_count + 1
if packet3["accelerometer"]["acceleration"]["y"] < (original_mean_imu_accel_y - standard_dev_imu_accel_y) or packet3["accelerometer"]["acceleration"]["y"] > (original_mean_imu_accel_y + standard_dev_imu_accel_y):
""" calc new mean if nums are out of 1 SD """
packet3["accelerometer"]["acceleration"]["y"] = False
else:
new_sum = new_sum + packet3["accelerometer"]["acceleration"]["y"]
sum_count = sum_count + 1
if packet4["accelerometer"]["acceleration"]["y"] < (original_mean_imu_accel_y - standard_dev_imu_accel_y) or packet4["accelerometer"]["acceleration"]["y"] > (original_mean_imu_accel_y + standard_dev_imu_accel_y):
""" calc new mean if nums are out of 1 SD """
packet4["accelerometer"]["acceleration"]["y"] = False
else:
""" calc new mean when no nums are out of 1 SD """
new_sum = new_sum + packet4["accelerometer"]["acceleration"]["y"]
sum_count = sum_count + 1
nominal_mean_imu_accel_y = new_sum / sum_count
original_mean_imu_accel_z = 0
standard_dev_imu_accel_z = 0
nominal_mean_imu_accel_z = 0
original_mean_imu_accel_z = packet1["accelerometer"]["acceleration"]["z"] + packet2["accelerometer"]["acceleration"]["z"] + packet3["accelerometer"]["acceleration"]["z"] + packet4["accelerometer"]["acceleration"]["z"]
diff_1 = (original_mean_imu_accel_z - packet1["accelerometer"]["acceleration"]["z"])*(original_mean_imu_accel_z - packet1["accelerometer"]["acceleration"]["z"])
diff_2 = (original_mean_imu_accel_z - packet2["accelerometer"]["acceleration"]["z"])*(original_mean_imu_accel_z - packet2["accelerometer"]["acceleration"]["z"])
diff_3 = (original_mean_imu_accel_z - packet3["accelerometer"]["acceleration"]["z"])*(original_mean_imu_accel_z - packet3["accelerometer"]["acceleration"]["z"])
diff_4 = (original_mean_imu_accel_z - packet4["accelerometer"]["acceleration"]["z"])*(original_mean_imu_accel_z - packet4["accelerometer"]["acceleration"]["z"])
sum_of_diffs = diff_1 + diff_2 + diff_3 + diff_4
mean_of_diffs = sum_of_diffs / 4
standard_dev_imu_accel_z = sqrt(mean_of_diffs)
if packet1["accelerometer"]["acceleration"]["z"] < (original_mean_imu_accel_z - standard_dev_imu_accel_z) or packet1["accelerometer"]["acceleration"]["z"] > (original_mean_imu_accel_z + standard_dev_imu_accel_z):
""" calc new mean if nums are out of 1 SD """
packet1["accelerometer"]["acceleration"]["z"] = False
else:
new_sum = new_sum + packet1["accelerometer"]["acceleration"]["z"]
sum_count = sum_count + 1
if packet2["accelerometer"]["acceleration"]["z"] < (original_mean_imu_accel_z - standard_dev_imu_accel_z) or packet2["accelerometer"]["acceleration"]["z"] > (original_mean_imu_accel_z + standard_dev_imu_accel_z):
""" calc new mean if nums are out of 1 SD """
packet2["accelerometer"]["acceleration"]["z"] = False
else:
new_sum = new_sum + packet2["accelerometer"]["acceleration"]["z"]
sum_count = sum_count + 1
if packet3["accelerometer"]["acceleration"]["z"] < (original_mean_imu_accel_z - standard_dev_imu_accel_z) or packet3["accelerometer"]["acceleration"]["z"] > (original_mean_imu_accel_z + standard_dev_imu_accel_z):
""" calc new mean if nums are out of 1 SD """
packet3["accelerometer"]["acceleration"]["z"] = False
else:
new_sum = new_sum + packet3["accelerometer"]["acceleration"]["z"]
sum_count = sum_count + 1
if packet4["accelerometer"]["acceleration"]["z"] < (original_mean_imu_accel_z - standard_dev_imu_accel_z) or packet4["accelerometer"]["acceleration"]["z"] > (original_mean_imu_accel_z + standard_dev_imu_accel_z):
""" calc new mean if nums are out of 1 SD """
packet4["accelerometer"]["speed"]["z"] = False
else:
""" calc new mean when no nums are out of 1 SD """
new_sum = new_sum + packet4["accelerometer"]["speed"]["z"]
sum_count = sum_count + 1
nominal_mean_imu_accel_z = new_sum / sum_count
original_mean_imu_speed = 0
standard_dev_imu_speed = 0
nominal_mean_imu_speed = 0
original_mean_imu_speed = (packet1["accelerometer"]["speed"]["x"] + packet2["accelerometer"]["speed"]["x"] + packet3["accelerometer"]["speed"]["x"] + packet4["accelerometer"]["speed"]["x"])/4
diff_1 = (original_mean_imu_speed - packet1["accelerometer"]["speed"]["x"])*(original_mean_imu_speed - packet1["accelerometer"]["speed"]["x"])
diff_2 = (original_mean_imu_speed - packet2["accelerometer"]["speed"]["x"])*(original_mean_imu_speed - packet2["accelerometer"]["speed"]["x"])
diff_3 = (original_mean_imu_speed - packet3["accelerometer"]["speed"]["x"])*(original_mean_imu_speed - packet3["accelerometer"]["speed"]["x"])
diff_4 = (original_mean_imu_speed - packet4["accelerometer"]["speed"]["x"])*(original_mean_imu_speed - packet4["accelerometer"]["speed"]["x"])
sum_of_diffs = diff_1 + diff_2 + diff_3 + diff_4
mean_of_diffs = sum_of_diffs / 4
standard_dev_imu_speed = sqrt(mean_of_diffs)
if packet1["accelerometer"]["speed"]["x"] < (original_mean_imu_speed - standard_dev_imu_speed) or packet1["accelerometer"]["speed"]["x"] > (original_mean_imu_speed + standard_dev_imu_speed):
""" calc new mean if nums are out of 1 SD """
packet1["accelerometer"]["speed"]["x"] = False
else:
new_sum = new_sum + packet1["accelerometer"]["speed"]["x"]
sum_count = sum_count + 1
if packet2["accelerometer"]["speed"]["x"] < (original_mean_imu_speed - standard_dev_imu_speed) or packet2["accelerometer"]["speed"]["x"] > (original_mean_imu_speed + standard_dev_imu_speed):
""" calc new mean if nums are out of 1 SD """
packet2["accelerometer"]["speed"]["x"] = False
else:
new_sum = new_sum + packet2["accelerometer"]["speed"]["x"]
sum_count = sum_count + 1
if packet3["accelerometer"]["speed"]["x"] < (original_mean_imu_speed - standard_dev_imu_speed) or packet3["accelerometer"]["speed"]["x"] > (original_mean_imu_speed + standard_dev_imu_speed):
""" calc new mean if nums are out of 1 SD """
packet3["accelerometer"]["speed"]["x"] = False
else:
new_sum = new_sum + packet3["accelerometer"]["speed"]["x"]
sum_count = sum_count + 1
if packet4["accelerometer"]["speed"]["x"] < (original_mean_imu_speed - standard_dev_imu_speed) or packet4["accelerometer"]["speed"]["x"] > (original_mean_imu_speed + standard_dev_imu_speed):
""" calc new mean if nums are out of 1 SD """
packet4["accelerometer"]["speed"]["x"] = False
else:
""" calc new mean when no nums are out of 1 SD """
new_sum = new_sum + packet4["accelerometer"]["speed"]["x"]
sum_count = sum_count + 1
nominal_mean_imu_speed = new_sum / sum_count
original_mean_imu_pos = 0
standard_dev_imu_pos = 0
nominal_mean_imu_pos = 0
original_mean_imu_pos = (packet1["accelerometer"]["position"]["x"] + packet2["accelerometer"]["position"]["x"] + packet3["accelerometer"]["position"]["x"] + packet4["accelerometer"]["position"]["x"])/4
diff_1 = (original_mean_imu_pos - packet1["accelerometer"]["position"]["x"])*(original_mean_imu_pos - packet1["accelerometer"]["position"]["x"])
diff_2 = (original_mean_imu_pos - packet2["accelerometer"]["position"]["x"])*(original_mean_imu_pos - packet2["accelerometer"]["position"]["x"])
diff_3 = (original_mean_imu_pos - packet3["accelerometer"]["position"]["x"])*(original_mean_imu_pos - packet3["accelerometer"]["position"]["x"])
diff_4 = (original_mean_imu_pos - packet4["accelerometer"]["position"]["x"])*(original_mean_imu_pos - packet4["accelerometer"]["position"]["x"])
sum_of_diffs = diff_1 + diff_2 + diff_3 + diff_4
mean_of_diffs = sum_of_diffs / 4
standard_dev_imu_pos = sqrt(mean_of_diffs)
if packet1["accelerometer"]["position"]["x"] < (original_mean_imu_pos - standard_dev_imu_pos) or packet1["accelerometer"]["position"]["x"] > (original_mean_imu_pos + standard_dev_imu_pos):
""" calc new mean if nums are out of 1 SD """
packet1["accelerometer"]["position"]["x"] = False
else:
new_sum = new_sum + packet1["accelerometer"]["position"]["x"]
sum_count = sum_count + 1
if packet2["accelerometer"]["position"]["x"] < (original_mean_imu_pos - standard_dev_imu_pos) or packet2["accelerometer"]["position"]["x"] > (original_mean_imu_pos + standard_dev_imu_pos):
""" calc new mean if nums are out of 1 SD """
packet2["accelerometer"]["position"]["x"] = False
else:
new_sum = new_sum + packet2["accelerometer"]["position"]["x"]
sum_count = sum_count + 1
if packet3["accelerometer"]["position"]["x"] < (original_mean_imu_pos - standard_dev_imu_pos) or packet3["accelerometer"]["position"]["x"] > (original_mean_imu_pos + standard_dev_imu_pos):
""" calc new mean if nums are out of 1 SD """
packet3["accelerometer"]["position"]["x"] = False
else:
new_sum = new_sum + packet3["accelerometer"]["position"]["x"]
sum_count = sum_count + 1
if packet4["accelerometer"]["position"]["x"] < (original_mean_imu_pos - standard_dev_imu_pos) or packet4["accelerometer"]["position"]["x"] > (original_mean_imu_pos + standard_dev_imu_pos):
""" calc new mean if nums are out of 1 SD """
packet4["accelerometer"]["position"]["x"] = False
else:
""" calc new mean when no nums are out of 1 SD """
new_sum = new_sum + packet4["accelerometer"]["position"]["x"]
sum_count = sum_count + 1
nominal_mean_imu_pos = new_sum / sum_count
original_mean_vps = 0
standard_dev_vps = 0
nominal_mean_vps = 0
original_mean_vps = (packet1["vertical"]["position"]["y"] + packet2["vertical"]["position"]["y"] + packet3["vertical"]["position"]["y"] + packet4["vertical"]["position"]["y"])/4
diff_1 = (original_mean_vps_pos - packet1["vertical"]["position"]["y"])*(original_mean_vps_pos - packet1["vertical"]["position"]["y"])
diff_2 = (original_mean_vps_pos - packet2["vertical"]["position"]["y"])*(original_mean_vps_pos - packet2["vertical"]["position"]["y"])
diff_3 = (original_mean_vps_pos - packet3["vertical"]["position"]["y"])*(original_mean_vps_pos - packet3["vertical"]["position"]["y"])
diff_4 = (original_mean_vps_pos - packet4["vertical"]["position"]["y"])*(original_mean_vps_pos - packet4["vertical"]["position"]["y"])
sum_of_diffs = diff_1 + diff_2 + diff_3 + diff_4
mean_of_diffs = sum_of_diffs / 4
standard_dev_vps_pos = sqrt(mean_of_diffs)
if packet1["vertical"]["position"]["x"] < (original_mean_vps_pos - standard_dev_vps_pos) or packet1["vertical"]["position"]["x"] > (original_mean_vps_pos + standard_dev_vps_pos):
""" calc new mean if nums are out of 1 SD """
packet1["vertical"]["position"]["x"] = False
else:
new_sum = new_sum + packet1["vertical"]["position"]["x"]
sum_count = sum_count + 1
if packet2["vertical"]["position"]["x"] < (original_mean_vps_pos - standard_dev_vps_pos) or packet2["vertical"]["position"]["x"] > (original_mean_vps_pos + standard_dev_vps_pos):
""" calc new mean if nums are out of 1 SD """
packet2["vertical"]["position"]["x"] = False
else:
new_sum = new_sum + packet2["vertical"]["position"]["x"]
sum_count = sum_count + 1
if packet3["vertical"]["position"]["x"] < (original_mean_vps_pos - standard_dev_vps_pos) or packet3["vertical"]["position"]["x"] > (original_mean_vps_pos + standard_dev_vps_pos):
""" calc new mean if nums are out of 1 SD """
packet3["vertical"]["position"]["x"] = False
else:
new_sum = new_sum + packet3["vertical"]["position"]["x"]
sum_count = sum_count + 1
if packet4["vertical"]["position"]["x"] < (original_mean_vps_pos - standard_dev_vps_pos) or packet4["vertical"]["position"]["x"] > (original_mean_vps_pos + standard_dev_vps_pos):
""" calc new mean if nums are out of 1 SD """
packet4["vertical"]["position"]["x"] = False
else:
""" calc new mean when no nums are out of 1 SD """
new_sum = new_sum + packet4["vertical"]["position"]["x"]
sum_count = sum_count + 1
nominal_mean_vps_pos = new_sum / sum_count
original_mean_hps = 0
standard_dev_hps = 0
nominal_mean_hps = 0
original_mean_hps = (packet1["horizontal"]["position"]["z"] + packet2["horizontal"]["position"]["z"] + packet3["horizontal"]["position"]["z"] + packet4["horizontal"]["position"]["z"])/4
diff_1 = (original_mean_hps_pos - packet1["horizontal"]["position"]["y"])*(original_mean_hps_pos - packet1["horizontal"]["position"]["y"])
diff_2 = (original_mean_hps_pos - packet2["horizontal"]["position"]["y"])*(original_mean_hps_pos - packet2["horizontal"]["position"]["y"])
diff_3 = (original_mean_hps_pos - packet3["horizontal"]["position"]["y"])*(original_mean_hps_pos - packet3["horizontal"]["position"]["y"])
diff_4 = (original_mean_hps_pos - packet4["horizontal"]["position"]["y"])*(original_mean_hps_pos - packet4["horizontal"]["position"]["y"])
sum_of_diffs = diff_1 + diff_2 + diff_3 + diff_4
mean_of_diffs = sum_of_diffs / 4
standard_dev_hps_pos = sqrt(mean_of_diffs)
if packet1["horizontal"]["position"]["x"] < (original_mean_hps_pos - standard_dev_hps_pos) or packet1["horizontal"]["position"]["x"] > (original_mean_hps_pos + standard_dev_hps_pos):
""" calc new mean if nums are out of 1 SD """
packet1["horizontal"]["position"]["x"] = False
else:
new_sum = new_sum + packet1["horizontal"]["position"]["x"]
sum_count = sum_count + 1
if packet2["horizontal"]["position"]["x"] < (original_mean_hps_pos - standard_dev_hps_pos) or packet2["horizontal"]["position"]["x"] > (original_mean_hps_pos + standard_dev_hps_pos):
""" calc new mean if nums are out of 1 SD """
packet2["horizontal"]["position"]["x"] = False
else:
new_sum = new_sum + packet2["horizontal"]["position"]["x"]
sum_count = sum_count + 1
if packet3["horizontal"]["position"]["x"] < (original_mean_hps_pos - standard_dev_hps_pos) or packet3["horizontal"]["position"]["x"] > (original_mean_hps_pos + standard_dev_hps_pos):
""" calc new mean if nums are out of 1 SD """
packet3["horizontal"]["position"]["x"] = False
else:
new_sum = new_sum + packet3["horizontal"]["position"]["x"]
sum_count = sum_count + 1
if packet4["horizontal"]["position"]["x"] < (original_mean_hps_pos - standard_dev_hps_pos) or packet4["horizontal"]["position"]["x"] > (original_mean_hps_pos + standard_dev_hps_pos):
""" calc new mean if nums are out of 1 SD """
packet4["horizontal"]["position"]["x"] = False
else:
""" calc new mean when no nums are out of 1 SD """
new_sum = new_sum + packet4["horizontal"]["position"]["x"]
sum_count = sum_count + 1
nominal_mean_hps_pos = new_sum / sum_count
def HPS_check(self, packet1, packet2, packet3, packet4):
    """
    HPS error checking.

    Counts horizontal-position sensors whose "error" flag is non-zero and
    triggers an emergency stop once two or more of the four have failed.
    """
    self.hpsfailcount = 0
    # BUG FIX: the original logged "Error with HPS #3" for the FOURTH
    # sensor (copy-paste); deriving the number from the position fixes it.
    for idx, packet in enumerate((packet1, packet2, packet3, packet4), 1):
        if packet["horizontal"]["error"] != 0:
            self.hpsfailcount += 1
            self.logger.info("[!!!] Error with HPS #%d", idx)
    if self.hpsfailcount >= 2:
        self.sm.on_event('estop')
def VPS_check(self, packet1, packet2, packet3, packet4):
    """
    VPS error checking.

    Tallies vertical-position sensors reporting a non-zero error flag and
    fires an emergency stop when at least two of the four have failed.
    """
    self.vpsfailcount = 0
    for idx, packet in enumerate((packet1, packet2, packet3, packet4), 1):
        if packet["vertical"]["error"] != 0:
            self.vpsfailcount += 1
            self.logger.info("[!!!] Error with VPS #" + str(idx))
    if self.vpsfailcount >= 2:
        self.sm.on_event('estop')
def IMU_check(self, packet1, packet2, packet3, packet4):
    """
    IMU error checking.

    Counts accelerometer packets carrying a non-zero error flag; two or
    more failed IMUs trigger an emergency stop.
    """
    self.imufailcount = 0
    for idx, packet in enumerate((packet1, packet2, packet3, packet4), 1):
        if packet["accelerometer"]["error"] != 0:
            self.imufailcount += 1
            self.logger.info("[!!!] Error with IMU #" + str(idx))
    if self.imufailcount >= 2:
        self.sm.on_event('estop')
def BMS_check(self, BMS_packet):
    """
    BMS error checking.

    A non-zero top-level "error" field is fatal and triggers an immediate
    emergency stop. Otherwise every nested sensor entry's "error" field is
    counted, and an estop fires once self.bms_allowed_errors is reached.
    """
    # BUG FIX: the body referenced the undefined names packet5/packet4;
    # the parameter is BMS_packet (the original comment itself noted the
    # loop "won't work").
    if BMS_packet["error"] != 0:
        self.logger.critical("[+] Microcontroller five, error code: %d", BMS_packet["error"])
        self.sm.on_event('estop')
        return
    self.bms_failcount = 0
    for group_name, group in BMS_packet.items():
        # Skip scalar top-level fields such as "error" itself -- calling
        # .items() on them was the crash in the original nested loop.
        if not isinstance(group, dict):
            continue
        for sensor_name, sensor in group.items():
            if sensor["error"] != 0:
                self.bms_failcount += 1
                self.logger.critical("[+] Microcontroller five, error: %s", sensor_name)
                if self.bms_failcount >= self.bms_allowed_errors:
                    self.logger.critical("[+] Microcontroller five error. Too many errors")
                    self.sm.on_event('estop')
                    return
|
__author__='alberto'
|
from tabula import read_pdf, convert_into
# tabula can convert a PDF into CSV or JSON.
filename = "name of file"
result = convert_into(filename, "test.csv", output_format="csv", pages="all")
|
from django.db import connection
def get_data(self):
    """Return the top-10 death-manner counts (manner != 'Unknown'),
    most common first."""
    sql = "SELECT count(character_id) FROM death_manner WHERE manner!='Unknown' GROUP BY manner ORDER BY count(character_id) DESC LIMIT 10;"
    with connection.cursor() as cursor:
        cursor.execute(sql)
        return cursor.fetchall()
def get_providers(self):
    """Return the top-10 death-manner names (manner != 'Unknown'),
    ordered by frequency, aligned with get_data()."""
    sql = "SELECT manner FROM death_manner WHERE manner!='Unknown' GROUP BY manner ORDER BY count(character_id) DESC LIMIT 10;"
    with connection.cursor() as cursor:
        cursor.execute(sql)
        return cursor.fetchall()
|
"""
Sample Controller File
A Controller should be in charge of responding to a request.
Load models to interact with the database and load views to render them to the client.
Create a controller using this template
"""
from system.core.controller import *
import oauth2 as oauth
import json
import os
class Welcome(Controller):
    """Controller for the welcome/home page.

    Builds an OAuth1 Twitter client from environment variables and renders
    the authenticated user's home timeline.
    """
    def __init__(self, action):
        super(Welcome, self).__init__(action)
        # Credentials come from the environment; a KeyError here means the
        # variables are not exported in the current shell.
        CONSUMER_KEY = os.environ['CONSUMER_KEY']
        CONSUMER_SECRET = os.environ['CONSUMER_SECRET']
        ACCESS_KEY = os.environ['ACCESS_KEY']
        ACCESS_SECRET = os.environ['ACCESS_SECRET']
        consumer = oauth.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET)
        access_token = oauth.Token(key=ACCESS_KEY, secret=ACCESS_SECRET)
        # NOTE(review): the client is published as a module-level global so
        # index() can reach it; consider storing it on self instead.
        global client
        client = oauth.Client(consumer, access_token)
        self.load_model('WelcomeModel')
        self.db = self._app.db
    def index(self):
        # Fetch the authenticated user's home timeline and render it.
        timeline_endpoint = "https://api.twitter.com/1.1/statuses/home_timeline.json"
        response, data = client.request(timeline_endpoint)
        tweets = json.loads(data)
        return self.load_view('index.html', tweets=tweets)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Replaces the boolean `invoice_generated` flag on Case with a nullable
    # foreign key to invoice.Invoice. Auto-generated Django migration; do
    # not edit the operations by hand once it has been applied.
    dependencies = [
        ('invoice', '0001_initial'),
        ('case', '0015_auto_20170706_0105'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='case',
            name='invoice_generated',
        ),
        migrations.AddField(
            model_name='case',
            name='invoice',
            field=models.ForeignKey(blank=True, to='invoice.Invoice', null=True),
        ),
    ]
|
# -*- coding: UTF-8 -*-
from nltk.util import ngrams
import sys
import operator
# n-gram order, taken from the second CLI argument (parsed but unused below).
n = int(sys.argv[2])
temp = []
# Punctuation marks padded with spaces so they split out as their own tokens.
marks = ['.', ',', ':', ';', '{', '}', '[', ']', '!',
         '+', '-', '"', '*', '/', '=', '(', ')', '>', '<']
# First CLI argument is the input file name.
# BUG FIX: the Py2-only file() builtin is gone in Python 3; `with open`
# also guarantees both handles are closed even when an exception occurs.
# (Unused locals i and mydict from the original were dropped.)
with open(sys.argv[1], "r") as file_input, open("Data_Output.txt", "w") as file_output:
    for line in file_input:
        line = line.replace('\n', ' ')
        for mark in marks:
            line = line.replace(mark, ' ' + mark + ' ')
        temp.extend(line.split())
    file_output.write('token:')
    file_output.write('\ntoken:'.join(temp))
#!/usr/bin/env python
distance_file = open('distance.txt', 'r+')
snmp_file = open('host2_log.txt', 'r+')
#signal_file = open('wlan0_output.txt', 'r+')
distance = distance_file.readlines()
snmp = snmp_file.readlines();
#signal = signal_file.readlines();
final_product = ""
for snmp_line in snmp:
combine_line = ""
snmp_time = int(snmp_line[0:6])
print "snmp_time: " + str(snmp_time)
for dist_line in distance:
dist_time = int(dist_line[0:6])
print "dist_time: " + str(dist_time)
if dist_time == snmp_time:
print "Match found with distance"
#for sig_line in signal:
#sig_time = int(sig_line[0:6])
#print "sig_time: " + str(sig_time)
#if sig_time == dist_time == snmp_time:
#print "Match found with signal"
combine_line = dist_line[7:-1] + "," + snmp_line[22:-1]
print combine_line
final_product = final_product + combine_line + "\n"
#break
print "Next Loop"
output = open('data_set.txt', 'r+')
output.write(final_product)
|
import numpy as np
import matplotlib.pyplot as plt
# Plot y = x^2 and y = x over 1..100, plus 100 uniform random scatter points.
x_values = np.array(range(1, 101))
squared = x_values * x_values
plt.plot(x_values, squared, label='xsquared')
plt.plot(x_values, x_values, label='straight', color='green')
# plt.savefig('lecture1.png')
random_values = np.random.randint(1, 1000, 100)
plt.scatter(x_values, random_values, label='random')
plt.legend()
plt.show()
import requests
import sys, getopt
from network import FixerAPI
import database_api
from database import ExchangeRateModel
from datetime import datetime,date
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database import ExchangeRateModel, Base
engine = create_engine('sqlite:///currency_tool.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
def main(argv):
    """Command-line entry point for the currency tool.

    Parses -b/--base, -c/--currency and -d/--date options, then prints the
    requested exchange rate, preferring the local DB cache over the API.

    argv -- command-line arguments without the program name.
    """
    # initialization with default values
    base = 'EUR'
    currency = 'USD'
    date = ''
    # Command line interface
    try:
        opts, args = getopt.getopt(argv, "hb:c:d:", ["base", "currency", "date"])
    except getopt.GetoptError:
        print('app.py -b <base> -c <currency> -d <date>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('''Welcome to Fixer Api\n
usage: app.py [option] ... [-b] [-c] [-d] [arg] ..\n
-b <base> : Quote against a different currency by setting the base parameter.\n
-c <currency> : specific exchange rates by setting the currency parameter.\n
-d <date> : Format YYYY-MM-DD Get historical rates for any day since 1999.\n''')
            sys.exit()
        elif opt in ("-d", "--date"):
            date = arg
        elif opt in ("-b", "--base"):
            base = arg.upper()
        elif opt in ("-c", "--currency"):
            currency = arg.upper()
    fixerAPI = FixerAPI()
    # Check if the user entered a specific date
    if date:
        dateQuery = formatDate(date)
        # Query the database to see if we cached the same request before.
        rate = database_api.getRateForDate(session=session, base=base, currency=currency, date=dateQuery)
        if rate:
            print(rate.rate)
        else:
            # Otherwise hit the API for the data.
            rate = fixerAPI.getExchangeRate(base=base, currency=currency, date=date)
            if rate['rates']:
                print(rate['rates'][currency])
                insertRate(rate)
            else:
                print("Error : %s Currency not found" % currency)
    else:
        # No date given: request the latest available rate.
        rate = fixerAPI.getExchangeRate(base=base, currency=currency)
        if rate['rates']:
            # BUG FIX: this read rate['rate'][currency], which raised
            # KeyError -- the API response keys rates under 'rates'
            # (see the dated branch above).
            print(rate['rates'][currency])
            insertRate(rate)
        else:
            print("Error : %s Currency not found" % currency)
def insertRate(rate):
    """Cache a Fixer API response in the database unless already stored."""
    # Normalized YYYY-MM-DD date used as the cache key.
    dateQuery = formatDate(rate['date'])
    base = rate['base']
    currencies = list(rate['rates'].keys())
    if currencies:
        currency = currencies[0]
        rateValue = rate['rates'][currency]
        cached = database_api.getRateForDate(session=session, base=base, currency=currency, date=dateQuery)
        if not cached:
            # First time we see this (base, currency, date): persist it.
            row = ExchangeRateModel(base=base, currency=currency, date=dateQuery, rate=rateValue)
            database_api.insert(session=session, model=row)
def formatDate(date):
    """Parse an ISO 'YYYY-MM-DD' string into a datetime.date."""
    parsed = datetime.strptime(date, '%Y-%m-%d')
    return parsed.date()
if __name__ == "__main__":
    # Run the CLI only when executed directly, not when imported.
    main(sys.argv[1:])
|
# coding: utf-8
from me.tool import Data
from numpy import *
import operator
from math import log
import pickle
# Decide a leaf node's class by majority vote.
def majorityCnt(classList):
    """Return the most frequent label in classList."""
    classCount = {}
    for vote in classList:
        classCount[vote] = classCount.get(vote, 0) + 1
    # PORTABILITY FIX: iteritems() is Python-2-only; items() works on both.
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
# Compute the (weight-scaled) Shannon entropy of the data set.
def calcShannonEnt(dataSet, classLabels, subD):
    """Weighted Shannon entropy of ``dataSet``.

    dataSet     -- list of feature rows (only its length is used here)
    classLabels -- class label per row, aligned with dataSet
    subD        -- numpy matrix column vector (n x 1) of AdaBoost sample
                   weights for the rows
    Each class's -p*log2(p) term is additionally scaled by the total
    sample weight of that class.
    """
    numEntries = len(dataSet)
    labelCounts = set(classLabels)
    shannonEnt = 0.0
    for item in labelCounts:
        # Indicator vector: 1.0 for rows belonging to class `item`.
        numItem = ones(numEntries)
        for i in range(numEntries):
            if classLabels[i] != item:
                numItem[i] = 0
        # (1 x n) * (n x 1) -> 1x1 matrix: total weight of this class.
        weights = mat(numItem)
        weights = weights * subD
        weight = (weights.getA())[0][0]
        # Number of rows in this class (count of 1.0 entries).
        numTemp = numItem.tolist().count(1)
        prob = numTemp / float(numEntries)
        if (prob != 0.0):
            # Entropy contribution, log base 2, scaled by class weight.
            shannonEnt -= prob * weight * log(prob, 2)
    return shannonEnt
# Split the data set on rows where feature `axis` equals `value`
# (the feature column itself is removed from the returned rows).
def splitDataSet(dataSet, classLabels, axis, value, D):
    """Return (rows, weights, labels) for the subset matching the split.

    dataSet     -- list of feature rows
    classLabels -- labels aligned with dataSet
    axis, value -- feature index and value to select on
    D           -- numpy matrix column vector (n x 1) of sample weights
    """
    retDataSet = []
    tempD = ((D.T).getA())[0]
    resultD = []
    resultLabels = []
    # BUG FIX: the original used dataSet.index(featVec), which returns the
    # FIRST matching row -- duplicate rows picked up the wrong weight and
    # label (and cost O(n) per row). enumerate keeps the true row index.
    for rowIdx, featVec in enumerate(dataSet):
        if (featVec[axis] == value):
            reduceFeatVec = featVec[:axis]
            reduceFeatVec.extend(featVec[axis + 1:])
            retDataSet.append(reduceFeatVec)
            resultD.append(tempD[rowIdx])
            resultLabels.append(classLabels[rowIdx])
    return retDataSet, mat(resultD).T, resultLabels
# Choose the feature whose split yields the largest information gain.
def chooseBestFeatureToSplit(dataSet, classLabels, D):
    """Return the index of the best feature to split on (-1 if none helps)."""
    featureTotal = len(dataSet[0])
    baseEntropy = calcShannonEnt(dataSet, classLabels, D)
    bestGain, bestFeature = 0.0, -1
    for featIdx in range(featureTotal):
        # Distinct values this feature takes across the data set.
        candidateValues = {row[featIdx] for row in dataSet}
        splitEntropy = 0.0
        for candidate in candidateValues:
            subSet, subWeights, subLabels = splitDataSet(dataSet, classLabels, featIdx, candidate, D)
            fraction = len(subSet) / float(len(dataSet))
            splitEntropy += fraction * calcShannonEnt(subSet, subLabels, subWeights)
        gain = baseEntropy - splitEntropy
        if gain > bestGain:
            bestGain, bestFeature = gain, featIdx
    return bestFeature
def createTree(trainData, classLabels, featName, depth, D):
    """Recursively build a weighted ID3-style decision tree.

    featName is consumed in place (the chosen feature name is deleted), so
    callers pass a copy. Returns either a class label (leaf) or a nested
    dict {featureName: {featureValue: subtree_or_label}}.
    """
    # Stop splitting when every remaining sample has the same class.
    tempSet = set(classLabels)
    if (len(tempSet) == 1):
        return classLabels[0]
    # When the rows cannot be split further, or the depth budget is spent,
    # fall back to the majority class.
    if len(trainData) == 1:
        return majorityCnt(classLabels)
    if depth == 0:
        return majorityCnt(classLabels)
    bestFeat = chooseBestFeatureToSplit(trainData, classLabels, D)
    bestFeatName = featName[bestFeat]
    myTree = {bestFeatName: {}}
    del (featName[bestFeat])
    featValues = [example[bestFeat] for example in trainData]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        # Fresh copy of the remaining feature names for each branch.
        subFeat = featName[:]
        subDataSet, subD, subClassLabels = splitDataSet(trainData, classLabels, bestFeat, value, D)
        myTree[bestFeatName][value] = createTree(
            subDataSet, subClassLabels, subFeat, depth - 1, subD
        )
    return myTree
def classify(inputTree, featLabels, testVec):
    """Walk a decision tree and return the predicted label for testVec.

    inputTree  -- {featureName: {value: subtree_or_label}}, possibly with
                  an extra 'alpha' key added by the AdaBoost trainer
    featLabels -- feature names aligned with testVec's columns
    Returns the stored leaf label, or 1 when no branch matches.
    """
    # PORTABILITY FIX: dict.keys() is a view on Python 3, which has no
    # .remove(); materialize it as a list (same behavior on Python 2).
    keyList = list(inputTree.keys())
    if len(keyList) >= 2:
        # A second key can only be the boosting weight 'alpha'.
        keyList.remove('alpha')
    firstStr = keyList[0]
    secondDict = inputTree[firstStr]
    featIndex = featLabels.index(firstStr)
    classLabel = 1
    for key in secondDict.keys():
        if testVec[featIndex] == key:
            if type(secondDict[key]).__name__ == 'dict':
                classLabel = classify(secondDict[key], featLabels, testVec)
            else:
                classLabel = secondDict[key]
    return classLabel
def buildDecTree(trainData, classLabels, featName, depth, D):
    """Train one weighted decision tree and score it on the training set.

    Returns (decTree, training_error_rate, per_sample_estimates).
    """
    featNameLabels = featName[:]
    decTree = createTree(trainData, classLabels, featNameLabels, depth, D)
    # PORTABILITY FIX: parenthesized single-argument print is identical on
    # Python 2 and valid on Python 3 (the bare statement form is Py2-only).
    print('train one dec tree successfully')
    classEst = []
    dataNum = len(trainData)
    errorNum = 0
    for i in range(dataNum):
        testResult = classify(decTree, featName, trainData[i])
        classEst.append(testResult)
        if int(testResult) != int(classLabels[i]):
            errorNum += 1
    error = errorNum / float(dataNum)
    return decTree, error, classEst
def adaBoostTrainDT(trainData, classLabels, featName, depth, numIt=40):
    """Train an AdaBoost ensemble of depth-limited decision trees.

    trainData   -- list of feature rows
    classLabels -- +1/-1 label per row
    featName    -- feature names aligned with the columns of trainData
    depth       -- max tree depth handed to buildDecTree
    numIt       -- number of boosting rounds (weak learners)
    Returns the list of weak trees, each carrying its 'alpha' weight.
    """
    weakClassArr = []
    m = shape(trainData)[0]
    # D: per-sample weight column vector, initially uniform.
    D = mat(ones((m, 1)) / m)
    aggClassEst = mat(zeros((m, 1)))
    for i in range(numIt):
        decTree, error, classEst = buildDecTree(trainData, classLabels, featName, depth, D)
        # print "D:", D.T
        # Classifier weight; max(error, 1e-16) guards division by zero.
        alpha = float(0.5 * log((1.0 - error) / max(error, 1e-16)))
        decTree['alpha'] = alpha
        weakClassArr.append(decTree)
        print 'depth: ' + str(depth) + '—num:' + str(numIt) + '第' + str(i) + '个完成'
        # print "classEst:", classEst
        # Recompute D for the next iteration.
        # multiply() here is element-wise multiplication.
        expon = multiply(-1 * alpha * mat(classLabels), classEst)
        # exp(x) is the element-wise exponential.
        D = multiply(D, exp(expon.T))
        D = D / D.sum()
        # Accumulate the weighted ensemble estimate and its error rate.
        aggClassEst += alpha * mat(classEst).T
        # print "aggClassEst: ", aggClassEst.T
        aggErrors = multiply(sign(aggClassEst) != mat(classLabels).T, ones((m, 1)))
        errorRate = aggErrors.sum() / m
        print "total error: ", errorRate, "\n"
        # if errorRate <= 0.13: break
    return weakClassArr
def getDataAndLabel(fileName):
    """Load a data file and split it into (feature rows, labels) lists.

    Columns 0..29 are features; column 30 holds the label.
    """
    raw = array(Data.getDataFromFile(fileName))
    labelColumn = (raw[:, 30:]).T
    featureRows = raw[:, :30]
    return featureRows.tolist(), labelColumn[0].tolist()
def getData(fileName):
    """Thin wrapper: load raw rows from fileName via Data."""
    return Data.getDataFromFile(fileName)
def getFeatureName():
    """Return the 31 phishing-dataset column names (30 features + 'Result').

    Spellings (e.g. 'Shortining_Service', 'popUpWidnow') match the data
    files and must not be corrected here.
    """
    featName = [
        'having_IP_Address', 'URL_Length', 'Shortining_Service',
        'having_At_Symbol', 'double_slash_redirecting', 'Prefix_Suffix',
        'having_Sub_Domain', 'SSLfinal_State', 'Domain_registeration_length',
        'Favicon', 'port', 'HTTPS_token', 'Request_URL', 'URL_of_Anchor',
        'Links_in_tags', 'SFH', 'Submitting_to_email', 'Abnormal_URL',
        'Redirect', 'on_mouseover', 'RightClick', 'popUpWidnow', 'Iframe',
        'age_of_domain', 'DNSRecord', 'web_traffic', 'Page_Rank',
        'Google_Index', 'Links_pointing_to_page', 'Statistical_report',
        'Result',
    ]
    return featName
def storeTree(inputTree, fileName):
    """Serialize inputTree to fileName with pickle.

    BUG FIX: the file is opened in binary mode ('wb') -- pickle output is
    bytes, and text mode corrupts it on Python 3 (and on Windows under
    Python 2). `with` also guarantees the handle closes on error.
    """
    with open(fileName, 'wb') as fw:
        pickle.dump(inputTree, fw)
def grabTree(filename):
    """Load a pickled object from filename.

    BUG FIX: opens in binary mode ('rb') as pickle requires, and uses
    `with` so the handle is closed (the original leaked it).
    """
    with open(filename, 'rb') as fr:
        return pickle.load(fr)
def trainAda(dataFile, storeFile, depth, num):
    """Train an AdaBoost forest from dataFile and pickle it to storeFile."""
    features, labels = getDataAndLabel(dataFile)
    forest = adaBoostTrainDT(features, labels, getFeatureName(), depth, num)
    storeTree(forest, storeFile)
    return 0
def classifySelf(inputTree, featLabels, testVec):
    """Alpha-weighted vote over all weak trees; returns +1 or -1."""
    total = 0.0
    for weakTree in inputTree:
        total = total + weakTree.get('alpha') * classify(weakTree, featLabels, testVec)
    return 1 if total >= 0 else -1
def testAda(treeFile, testFile):
    """Evaluate a stored AdaBoost model on a test file.

    Prints one line with the overall error rate and the rate of -1 samples
    that were misclassified.
    NOTE(review): raises ZeroDivisionError when the test set contains no
    -1 labels (fnNum + tpNum == 0) -- confirm the test files always do.
    """
    tree = grabTree(treeFile)
    testData, testLabel = getDataAndLabel(testFile)
    featName = getFeatureName()
    errorNum = 0
    allNum = len(testData)
    fnNum = 0  # -1 samples classified incorrectly
    tpNum = 0  # -1 samples classified correctly
    for i in range(len(testData)):
        classRet = classifySelf(tree, featName, testData[i])
        if classRet != testLabel[i]:
            errorNum += 1
            if (testLabel[i] == -1):
                fnNum += 1
        else:
            if (testLabel[i] == -1):
                tpNum += 1
    # The Chinese message reports: error rate, misjudgement rate.
    print treeFile + '|||||' + '错误率为: %f, 误判率为: %f' % (float(errorNum) / allNum, fnNum / float(fnNum + tpNum))
def trainAllFuc(numArray, depthArray, algorithmName, trainArray):
    """Train one model per (depth, boosting rounds, fold) combination."""
    for depth in depthArray:
        for num in numArray:
            for fold in (1, 2, 3, 4):
                # Store path mirrors the naming scheme used by testAllFuc.
                storePath = "{0}/{1}_{2}_{3}.txt".format(depth, algorithmName, num, fold)
                trainAda(trainArray[fold - 1], storePath, depth, num)
def testAllFuc(numArray, depthArray, algorithmName, testArray):
    """Evaluate every stored (depth, rounds, fold) model on its test fold."""
    for depth in depthArray:
        for num in numArray:
            for fold in (1, 2, 3, 4):
                # Must match the store path produced by trainAllFuc.
                storePath = "{0}/{1}_{2}_{3}.txt".format(depth, algorithmName, num, fold)
                testAda(storePath, testArray[fold - 1])
def mainFunc():
    """Train the ada_3000 AdaBoost models across the four data folds."""
    algo = 'ada_3000'
    folds_train = ['train_3000_1.txt',
                   'train_3000_2.txt',
                   'train_3000_3.txt',
                   'train_3000_4.txt']
    folds_test = ['test_3000_1.txt',
                  'test_3000_2.txt',
                  'test_3000_3.txt',
                  'test_3000_4.txt']
    trainAllFuc([10, 30, 60], [10], algo, folds_train)
    # Evaluation pass kept disabled, as in the original:
    # testAllFuc([10, 30, 60], [3, 5], algo, folds_test)
    return 0


mainFunc()
|
from book import Book
from recipe import Recipe
# Demo script exercising the Book / Recipe API.
cookbook = Book("cookbook")
recipe1 = Recipe("cake", 2, 60, ["eggs","tet"], "dessert")
# (translated from French) don't the `self`s get filled in automatically????
cookbook.add_recipe("plop")
cookbook.get_recipes_by_types("dessert")
cookbook.get_recipe_by_name("cake")
#!/usr/bin/env python3
__version__ = "0.3.0"
__copyright__ = """
Copyright (c) 2009-2021 Bogdan Tataroiu
"""
__license__ = """
All source code available in this repository is covered by a GPLv2 license.
"""
import argparse
import copy
import logging
import pathlib
import shutil
import subprocess
import tempfile
from mplayer_backend import MPlayerBackend
from gstreamer_backend import GStreamerBackend
from util import add_app_path_arg, font_file_arg, safe_int_log
# Returns a humanized string for a given amount of seconds
def time_format(seconds):
    """Format a non-negative duration in seconds as H:MM:SS."""
    total = int(seconds)
    minutes, secs = divmod(total, 60)
    hours, minutes = divmod(minutes, 60)
    return "%d:%02d:%02d" % (hours, minutes, secs)
# Returns a humanized string for a given amount of bytes
def file_size_format(bytes, precision=2):
    """Format a byte count with binary units (1 KB = 1024 B)."""
    bytes = int(bytes)
    if not bytes:
        return '0 B'
    unit_index = safe_int_log(bytes, 1024)
    units = ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]
    scaled = bytes / (1024.0 ** unit_index)
    return "%.*f%s" % (precision, scaled, units[unit_index])
# Maps the --backend CLI choice to its capture backend class.
backends = {
    "mplayer": MPlayerBackend,
    "gstreamer": GStreamerBackend
}
class CLIMain:
def __init__(self):
# Build command line arguments parser
parser = argparse.ArgumentParser(
usage="%(prog)s [options] FILE [FILE ...]",
description=(
"Cross-platform python tool which generates a video's "
"index preview with multiple screen capture thumbnails."),
add_help=False)
parser.add_argument(
"--version",
action="version",
version=f"%(prog)s {__version__}")
# Custom help flag which adds flags from all backends
class CustomHelpAction(argparse.Action):
def __init__(self, *args, **kwargs):
super().__init__(*args, nargs=0, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
for backend in backends.values():
argument_group = backend.get_argument_parser_group(parser)
if argument_group:
parser.add_argument_group(argument_group)
parser.print_help()
parser.exit()
parser.add_argument(
"-h", "--help",
action=CustomHelpAction,
help="show this help message and exit")
logging_group = parser.add_mutually_exclusive_group()
logging_group.set_defaults(logging_level=logging.INFO)
logging_group.add_argument(
"-v", "--verbose",
help="Print more detalied information",
action="store_const",
const=logging.DEBUG,
dest="logging_level")
logging_group.add_argument(
"-q", "--quiet",
help="Refrain from outputing anything",
action="store_const",
const=logging.CRITICAL,
dest="logging_level")
# Add options to specify paths for each needed application
self.app_list = ("convert", "montage")
for app in self.app_list:
add_app_path_arg(parser, app=app)
# Add options related to the resulting thumbnail such as
# number of rows or columns, width and height of the thumbnails,
# the space between them etc
capture_args = parser.add_argument_group("Capture options")
capture_args.add_argument(
"-r", "--rows",
help=(
"Number of rows the generated grid "
"should contain (default %(default)s)."),
type=int,
dest="grid_rows",
default=6)
capture_args.add_argument(
"-c", "--cols", "--columns",
help=(
"Number of columns the generated grid "
"should contain (default %(default)s)."),
type=int,
dest="grid_cols",
default=4)
capture_args.add_argument(
"-t", "--title",
help="Title for the thumbnail (video's name is default).",
dest="title",
default=None)
capture_args.add_argument(
"-W", "--width",
help="The width of a single image in the grid in pixels.",
type=int,
dest="thumbnail_width",
default=None)
capture_args.add_argument(
"-H", "--height",
help=(
"The height of a single image in the grid in pixels. "
"If only one of the width and height argument are "
"specified, the other one will be determined so that the "
"aspect ratio of the movie is preserved."),
type=int,
dest="thumbnail_height",
default=None)
capture_args.add_argument(
"-S", "--spacing",
help=(
"The space between images in the grid in pixels. "
"(default %(default)s)"),
type=int,
dest="grid_spacing",
default=4)
capture_args.add_argument(
"--focus",
help=(
"Focus on the beginning or the ending of the movie. That "
"means a greater number of thumbnails will be generated "
"in the specified area than in the other part. For "
"example if the focus is on the beginning of the movie, "
"the frequency of captures drops as time goes by. "
"Possible values are 'begin', 'end' and 'none'. (default "
"is '%(default)s')"),
choices=("begin", "end", "none"),
dest="capture_focus",
default="none")
# Add style related options
style_args = parser.add_argument_group("Style options")
style_args.add_argument(
"--background",
help="Background color (e.g. '#00ff00')",
dest="background",
default="#2f2f2f")
style_args.add_argument(
"--font-family",
help=(
"Font used for text, either as a .ttf file or as a "
"fontconfig pattern (default '%(default)s')"),
dest="font_family",
type=font_file_arg,
default="DejaVu Sans")
style_args.add_argument(
"--font-size",
help="Size of text in pixels",
type=int,
dest="font_size",
default=12)
style_args.add_argument(
"--font-color",
help="Color of the text (e.g. 'black', '#000000')",
dest="font_color",
default="#eeeeee")
style_args.add_argument(
"--heading-font-family",
help=(
"Font used for heading, either as a .ttf file or as a "
"fontconfig pattern (default '%(default)s')"),
dest="heading_font_family",
type=font_file_arg,
default="DejaVu Sans:style=bold")
style_args.add_argument(
"--heading-font-size",
help="Size of heading in pixels",
type=int,
dest="heading_font_size",
default=24)
style_args.add_argument(
"--heading-font-color",
help="Color of the heading (e.g. 'black', '#000000')",
dest="heading_color",
default="#575757")
parser.add_argument(
"files",
nargs="+",
metavar="FILE",
type=pathlib.Path)
# Add backend options
parser.add_argument(
"-b", "--backend",
help=(
"Backend used to capture images from video. Possible "
"values are 'gstreamer' (default) and 'mplayer'. The "
"gstreamer backend is recommended because it is faster, "
"has better support for video formats and more correctly "
"determines thumbnail timestamps."),
choices=list(backends.keys()),
dest="backend",
default="gstreamer")
# Obtain backend, add backend argument group and reparse
args, _ = parser.parse_known_args()
backend = backends[args.backend]
backend_argument_group = backend.get_argument_parser_group(parser)
if backend_argument_group:
parser.add_argument_group(backend_argument_group)
args = parser.parse_args()
self.args = args
self.backend = backend
self.files = args.files
def start(self):
    """Run the CLI: configure logging, then process every input file."""
    logging.basicConfig(level=self.args.logging_level)
    # Create temporary directory
    with tempfile.TemporaryDirectory(prefix="video_previewer") as tmp_dir:
        self.tmp_dir = pathlib.Path(tmp_dir)
        # self.backend held the backend CLASS until here; replace it with
        # an instance bound to the parsed args and tmp dir.
        self.backend = self.backend(self.args, self.tmp_dir)
        # Start working
        for file in self.files:
            if not file.is_file():
                logging.error(f"File '{file}' does not exist.")
                continue
            self.process_file(file)
# Generate thumbnail for a video
def process_file(self, file):
logging.info(f"Started processing file '{file}'")
info = self.backend.load_file(file)
width = self.args.thumbnail_width
height = self.args.thumbnail_height
# If neither width nor height is specified in the options, determine
# the values so that both of the sizes are greater than 150 and the
# movie's aspect ratio is preserved
if width is None and height is None:
if info["height"] < info["width"]:
height = 150
else:
width = 150
# If one of width or height is specified in options and the other is
# not, determine the value for the other one that will preserve the
# movie's aspect ratio
if width is not None and height is None:
height = int(width * info["height"] / info["width"])
if height is not None and width is None:
width = int(height * info["width"] / info["height"])
logging.info(
f"Individual thumbnails will be resized to "
f"{width}x{height}")
# Determine list of capture times to pass along back to the backend
logging.debug("Calculating frame capture times.")
frame_count = self.args.grid_rows * self.args.grid_cols
part_length = (
float(
info["duration"] - 2 * self.backend.frame_capture_padding)
/ (frame_count + 1))
if self.args.capture_focus == "none":
# All the time intervals between two frames should be equal length.
frame_times = [part_length + self.backend.frame_capture_padding]
last = frame_times[0]
for i in range(1, frame_count):
last = last + part_length
frame_times.append(last)
else:
# The list of time intervals between two frames for the 'end' case
# should look something like this:
# (N = number of intervals == frame_count + 1)
# base + delta * (N - 1); base + delta * (N - 2); ...; base
# The interval is simmetrical for the 'begin' case
# Their sum must equal (duration - 2 * padding):
# base * N + delta * ((N - 1) * N / 2) == (duration - 2 * padding)
# base + delta * (N - 1) / 2 == (duration - 2 * padding) / N
base = part_length * 0.2
duration = (
info["duration"] - 2 * self.backend.frame_capture_padding)
delta = (duration / (frame_count + 1) - base) * 2 / frame_count
# Calculate frame times for "begin" and convert them if focus is at
# "end"
frame_times = [base + self.backend.frame_capture_padding]
last = frame_times[0]
for i in range(1, frame_count):
last = last + base + delta * i
frame_times.append(last)
if self.args.capture_focus == "end":
for i in range(frame_count):
frame_times[i] = info["duration"] - frame_times[i]
frame_times.reverse()
# Capture frames
frame_files = self.backend.capture_frames(frame_times)
count = 0
for frame_file, time in frame_files:
count += 1
logging.debug(f"Resizing and annotating frame {count}.")
self.resize_and_annotate_frame(
frame_file,
width,
height,
self.backend.capture_time_to_seconds(time))
logging.info("Finished capturing frames. Creating montage.")
montage_file = self.create_montage(
file, info, self.tmp_dir, frame_files)
if montage_file:
destination = file.with_suffix(".png")
shutil.move(str(montage_file), str(destination))
logging.info(f"Saving final thumbnail to '{destination}'")
# Cleanup
self.backend.unload_file()
for frame_file, _time in frame_files:
frame_file.unlink()
# Transform a captured frame into a thumbnail by resizing it and annotating
# it's timestamp
def resize_and_annotate_frame(self, file, width, height, time):
    """Modify `file` in place via ImageMagick convert: force-resize to
    width x height, stamp the timestamp top-right over a semi-transparent
    underlay, and draw a 1px border."""
    process = subprocess.Popen(
        [str(self.args.path_convert), str(file),
         "-resize", f"{width}x{height}!",
         "-fill", self.args.font_color,
         # Background color plus "80" alpha suffix for the text underlay.
         "-undercolor", f"{self.args.background}80",
         "-font", self.args.font_family,
         "-pointsize", str(self.args.font_size),
         "-gravity", "NorthEast",
         "-annotate", "+0+0", f" {time_format(time)} ",
         "-bordercolor", self.args.font_color,
         "-border", "1x1",
         str(file)],
        shell=False)
    process.wait()
# Create a montage of all frame captures from the tmp directory
def create_montage(self, file, info, tmp_dir, frame_files):
    """Tile the captured frames into one grid image and prepend the title
    and header text.

    Returns the montage path inside tmp_dir, or None when the `montage`
    tool failed to produce a file.
    """
    rows = self.args.grid_rows
    cols = self.args.grid_cols
    if len(frame_files) != rows * cols:
        # BUG FIX: this branch called math.ceil(), but `math` is never
        # imported in this module, so a partial capture set raised
        # NameError. Integer ceiling division needs no import.
        rows = (len(frame_files) + cols - 1) // cols
        logging.info(
            f"Only {len(frame_files)} captures, so the "
            f"grid will be {rows} by {cols}")
    montage_file = tmp_dir / "montage.png"
    process = subprocess.Popen(
        [str(self.args.path_montage),
         "-geometry",
         f"+{self.args.grid_spacing}+{self.args.grid_spacing}",
         "-background", self.args.background,
         "-fill", self.args.font_color,
         "-tile", f"{cols}x{rows}"]
        + [str(frame_file) for frame_file, _time in frame_files]
        + [str(montage_file)],
        shell=False)
    process.wait()
    if not montage_file.is_file():
        logging.error("Error creating montage.")
        return None
    # Annotate montage with title and header
    title = self.args.title or self.backend.info.get("title", None)
    if title is None:
        title = file.name
    header = self.get_header_text(file, info)
    process = subprocess.Popen(
        [str(self.args.path_convert),
         "-background", self.args.background,
         "-bordercolor", self.args.background,
         # Title
         "-fill", self.args.heading_color,
         "-font", self.args.heading_font_family,
         "-pointsize", str(self.args.heading_font_size),
         f"label:{title}",
         # Header
         "-fill", self.args.font_color,
         "-font", self.args.font_family,
         "-pointsize", str(self.args.font_size),
         f"label:{header}",
         # Border for title and header
         "-border", f"{self.args.grid_spacing}x0",
         # Montage
         str(montage_file),
         # Border for montage
         "-border", f"{self.args.grid_spacing}x{self.args.grid_spacing}",
         "-append",
         str(montage_file)],
        shell=False)
    process.wait()
    return montage_file
# Determine what will be written to the thumbnail's header
def get_header_text(self, file, info):
    """Build the multi-line header (size, length, video/audio details).

    Only fields present in `info` are included; sizes and durations are
    formatted with the module-level helpers.
    """
    file_size = file.stat().st_size
    text = f"Size : {file_size_format(file_size)} ({file_size} bytes)\n"
    formatted_duration = time_format(
        self.backend.capture_time_to_seconds(info["duration"]))
    text += f"Length : {formatted_duration}\n"
    video_info = []
    if "width" in info and "height" in info:
        video_info.append(f"{info['width']}x{info['height']}")
    if "video_codec" in info:
        video_info.append(f"{info['video_codec']}")
    if "video_framerate" in info:
        video_info.append("%.2f frames/sec" % info["video_framerate"])
    if "video_bitrate" in info:
        video_info.append("%.2f kb/sec" % (info["video_bitrate"] / 1024.0))
    if "video_interlaced" in info and info["video_interlaced"]:
        video_info.append("interlaced")
    if len(video_info):
        text += "Video : " + ", ".join(video_info) + "\n"
    audio_info = []
    if "audio_channels" in info:
        audio_info.append(f"{info['audio_channels']} channel(s)")
    if "audio_codec" in info:
        audio_info.append(f"{info['audio_codec']}")
    if "audio_rate" in info:
        audio_info.append("%.2f kHz" % (info["audio_rate"] / 1000.0))
    if "audio_bitrate" in info:
        audio_info.append("%.2f kb/sec" % (info["audio_bitrate"] / 1024.0))
    if len(audio_info):
        text += "Audio : " + ", ".join(audio_info)
    logging.debug(f"Created image header text:\n{text}")
    return text
if __name__ == "__main__":
    # BUG FIX: the CLI previously ran at import time; guard it so the
    # module can be imported (e.g. for testing) without side effects.
    main = CLIMain()
    main.start()
|
"""
This stores Title Author ISBN and year of the book
We can Add Select delete and update Entry.
"""
from tkinter import *
from DbOperations import Database
database=Database()
class Window(object):
    """Tkinter front end for the bookstore database.

    Builds the entry form, action buttons, result list and scrollbar, and
    wires each button to the matching Database operation.

    BUG FIX: the class previously defined __init__ twice; the first copy
    bound the bare name `row_id` (a NameError at selection time).  Only the
    working definition, which binds self.row_id, is kept.
    """
    def __init__(self,window):
        self.window=window
        self.window.wm_title("Abhijeet's Bookstore")
        # --- entry form: labels + inputs bound to Tk variables ---
        title_label = Label(window,text="Title")
        title_label.grid(row=0, column=0)
        self.title_value=StringVar()
        self.title_input = Entry(window,textvariable=self.title_value)
        self.title_input.grid(row=0, column=1)
        author_label = Label(window,text="Author")
        author_label.grid(row=0,column=2)
        self.author_value=StringVar()
        self.author_input=Entry(window, textvariable=self.author_value)
        self.author_input.grid(row=0, column=3)
        year_label = Label(window,text="Year")
        year_label.grid(row=1, column=0)
        self.year_value=IntVar()
        self.year_input = Entry(window,textvariable=self.year_value)
        self.year_input.grid(row=1, column=1)
        isbn_label=Label(window,text="ISBN")
        isbn_label.grid(row=1,column=2)
        self.isbn_value=IntVar()
        self.isbn_input=Entry(window,textvariable=self.isbn_value)
        self.isbn_input.grid(row=1,column=3)
        pages_label = Label(window,text="Page Count")
        pages_label.grid(row=2, column=0)
        self.pages_value=IntVar()
        self.pages_input = Entry(window,textvariable=self.pages_value)
        self.pages_input.grid(row=2, column=1)
        genre_label=Label(window,text="Genre")
        genre_label.grid(row=2,column=2)
        self.genre_value=StringVar()
        self.genre_input=Entry(window,textvariable=self.genre_value)
        self.genre_input.grid(row=2,column=3)
        # --- action buttons ---
        view_button=Button(window,text="View All",width=15,command=self.view_all)
        view_button.grid(row=3,column=3)
        search_button=Button(window,text="Search Entry",command=self.search,width=15)
        search_button.grid(row=4,column=3)
        add_button=Button(window,text="Add Entry", command=self.insert,width=15)
        add_button.grid(row=5,column=3)
        update_button=Button(window,text="Update Selected",command=self.update,width=15)
        update_button.grid(row=6,column=3)
        delete_button=Button(window,text="Delete Selected",command=self.delete,width=15)
        delete_button.grid(row=7,column=3)
        reset_button=Button(window,text="Reset",command=self.reset,width=15)
        reset_button.grid(row=8,column=3)
        close_button=Button(window,text="Close",command=self.window.destroy,width=15)
        close_button.grid(row=9,column=3)
        # --- result list, one-line status box, scrollbar ---
        self.display_area=Listbox(window,height=9, width=50)
        self.display_area.grid(row=4,column=1,rowspan=7,columnspan=1)
        self.action_box=Listbox(window,height=1, width=50)
        self.action_box.grid(row=3,column=1)
        sb=Scrollbar(window)
        sb.grid(row=4,column=2,rowspan=7,columnspan=1)
        self.display_area.configure(yscrollcommand=sb.set)
        sb.configure(command=self.display_area.yview)
        self.display_area.bind('<<ListboxSelect>>',self.row_id)
        # Id of the currently selected record; set by row_id().
        self.id=None
    def view_all(self):
        """Show every record in the display area."""
        self.display_area.delete(0,END)
        records=database.display_all()
        self.action_box.delete(0,END)
        self.action_box.insert(END,"View all records!!")
        for row in records:
            self.display_area.insert(END,row)
    def insert(self):
        """Add a new record from the form; numeric fields must be > 0."""
        try:
            if int(self.year_value.get()) > 0 and int(self.isbn_value.get()) >0 and int(self.pages_value.get()) > 0:
                database.insert_record(self.title_value.get(),self.author_value.get(),self.year_value.get(),self.isbn_value.get(),self.pages_value.get(),self.genre_value.get())
                self.display_area.delete(0,END)
                self.action_box.delete(0,END)
                self.action_box.insert(END,"Record Added!!")
                for row in database.display_all():
                    self.display_area.insert(END,row)
            else:
                self.display_area.delete(0,END)
                self.display_area.insert(0,"Enter year, isbn and pages as integer and greater than 0")
        except TclError:
            # IntVar.get() raises TclError on non-numeric entry text.
            self.display_area.delete(0,END)
            self.display_area.insert(0,"Enter year, isbn and page count as integer")
    def search(self):
        """Search with the form values; a numeric field left at 0 is a wildcard.

        BUG FIX: two of the original branches referenced bare names
        (display_area, action_box, isbn_value, ...) and raised NameError;
        all accesses are now qualified with self, and the redundant
        per-field branches (each of which passed the same effective
        arguments) are collapsed into one call.
        """
        try:
            year = int(self.year_value.get())
            isbn = int(self.isbn_value.get())
            pages = int(self.pages_value.get())
            if year == 0 or isbn == 0 or pages == 0:
                records = database.search_record(self.title_value.get(),self.author_value.get(),year,isbn,pages,self.genre_value.get())
                self.display_area.delete(0,END)
                self.action_box.delete(0,END)
                self.action_box.insert(END,"Displaying Matching results if any!!")
                for row in records:
                    self.display_area.insert(END,row)
            else:
                self.display_area.delete(0,END)
                self.display_area.insert(0,"Set year, isbn or page count to 0 to search")
        except TclError:
            self.display_area.delete(0,END)
            self.display_area.insert(0,"Enter year, isbn and Page count as integer or enter 0")
    def row_id(self,event):
        """Listbox selection callback: load the chosen row into the form."""
        try:
            index=self.display_area.curselection()[0]
            self.selected_row=self.display_area.get(index)
            # BUG FIX: the record id was only kept in a local variable, so
            # delete()/update() could never see it; store it on the instance.
            self.id=self.selected_row[0]
            self.title_value.set(self.selected_row[1])
            self.author_value.set(self.selected_row[2])
            self.year_value.set(self.selected_row[3])
            self.isbn_value.set(self.selected_row[4])
            self.pages_value.set(self.selected_row[5])
            self.genre_value.set(self.selected_row[6])
        except IndexError:
            # Selection events also fire with an empty selection; ignore.
            pass
    def delete(self):
        """Delete the record selected in the list box.

        BUG FIX: previously called database.delete_record(id), which passed
        the *builtin* id function because no record id was ever stored.
        """
        database.delete_record(self.id)
        self.display_area.delete(0,END)
        self.action_box.delete(0,END)
        self.action_box.insert(0,"Record Deleted!!")
        for row in database.display_all():
            self.display_area.insert(END,row)
    def reset(self):
        """Clear the form and both list boxes."""
        self.display_area.delete(0,END)
        self.action_box.delete(0,END)
        self.action_box.insert(END,"Reset successful")
        self.title_value.set("Enter a title")
        self.author_value.set("Enter an Author")
        self.year_value.set(0)
        self.isbn_value.set(0)
        self.pages_value.set(0)
        # Genre is a StringVar; set("") instead of set(0) (which showed "0").
        self.genre_value.set("")
    def update(self):
        """Update the selected record with the current form values."""
        try:
            # BUG FIX: was `int(self.pages_value.get() > 0)`, i.e. int() of a
            # boolean, so any page count (including 0) passed the check.
            if int(self.year_value.get()) > 0 and int(self.isbn_value.get()) > 0 and int(self.pages_value.get()) > 0:
                database.update_record(self.id,self.title_value.get(),self.author_value.get(),self.year_value.get(),self.isbn_value.get(), self.pages_value.get(),self.genre_value.get())
                self.display_area.delete(0,END)
                self.action_box.delete(0,END)
                self.action_box.insert(0,"Record Updated!!")
                for row in database.display_all():
                    self.display_area.insert(END,row)
            else:
                self.display_area.delete(0,END)
                self.display_area.insert(0,"Enter year and isbn as integer and greater than 0")
        except TclError:
            self.display_area.delete(0,END)
            self.display_area.insert(0,"Enter year and isbn as integer")
window=Tk()  # root Tk window
Window(window)  # build the bookstore UI on it
window.mainloop()  # run the Tk event loop until the window is closed
#!/usr/bin/env python
from __future__ import division
import argparse
import random
import pickle
import queue
"""
===============================================================================
Please complete the following function.
===============================================================================
"""
"""
Item Heuristics
Takes in resale value as resale, the cost of the item as cost, the weight of the item as weight
Returns a desirability score: profit per unit of weight plus profit per unit of cost
"""
def calcItemHeuristic(resale, cost, weight):
    """Score an item's desirability.

    The score is the profit (resale - cost) spread over both budgets:
    profit per unit weight plus profit per unit cost.  The +1 terms guard
    against division by zero for free or weightless items.
    """
    gain = resale - cost
    return gain / (weight + 1) + gain / (cost + 1)
def recalcHeuristic(resale, cost, weight, P, M):
    """Re-score an item against the remaining budgets.

    Same base score as calcItemHeuristic, penalized by the fraction of the
    remaining weight budget P and money budget M the item would consume.
    """
    gain = resale - cost
    money_frac = cost / M
    weight_frac = weight / P
    return gain / (weight + 1) + gain / (cost + 1) - money_frac - weight_frac
"""
Random Greedy Knapsack Solver 2
Takes in list of tuples as items; weight constraint as P, cost constraint as M
Returns list of items that makes approximately the most profit
Uses recalcHeuristic
"""
def greedyKnapsack2(items, P, M):
    """Randomized greedy knapsack solver using recalcHeuristic.

    items: list of (name, class, weight, cost, resale) tuples; P is the
    weight budget, M the money budget.  Returns a list of chosen item
    names.  Note: consumes entries from *items* in place.
    """
    chosen = set()
    weight_floor = P * .7
    money_floor = M * .7
    # Phase 1: spend up to ~30% of each budget on uniformly random picks.
    while P > weight_floor and M > money_floor and items:
        candidate = random.choice(items)
        if candidate[2] < P and candidate[3] < M:
            chosen.add(candidate[0])
            P = P - candidate[2]
            M = M - candidate[3]
        items.remove(candidate)
    # Phase 2: score the leftovers, then repeatedly take the best-scoring
    # item that still fits, rescoring the rest after each acceptance.
    for entry in items:
        assert type(entry) is tuple
    scores = {entry: calcItemHeuristic(entry[4], entry[3], entry[2]) for entry in items}
    while scores:
        best = max(scores, key=scores.get)
        del scores[best]
        if P > 0 and best[2] < P:
            if M > 0 and best[3] < M:
                chosen.add(best[0])
                P = P - best[2]
                M = M - best[3]
                for other in scores:
                    scores[other] = recalcHeuristic(other[4], other[3], other[2], P, M)
    return list(chosen)
"""
Greedy Knapsack Solver 1
Takes in list of tuples as items; weight constraint as P, cost constraint as M
Returns list of items that makes approximately the most profit
Uses calcItemHeuristic only
"""
def greedyKnapsack(items, P, M):
    """Greedy knapsack solver driven by calcItemHeuristic.

    items: list of (name, class, weight, cost, resale) tuples; P is the
    weight budget, M the money budget.  Returns a list of chosen item
    names.  Note: consumes entries from *items* in place.
    """
    import heapq  # local import; stdlib
    solution = set()
    p70 = P * .7
    m70 = M * .7
    # Random phase: spend up to ~30% of each budget on random picks.
    while P > p70 and M > m70 and len(items) > 0:
        random_item = random.choice(items)
        if random_item[2] < P and random_item[3] < M:
            solution.add(random_item[0])
            P = P - random_item[2]
            M = M - random_item[3]
        items.remove(random_item)
    # BUG FIX: queue.PriorityQueue.put(item, priority) does NOT take a
    # priority argument — the second positional is `block`, so the computed
    # heuristic was silently ignored and items came out in tuple order.
    # Use a heap keyed on the negated heuristic so the best item pops first
    # (the index breaks ties without comparing item tuples).
    heap = []
    for i, item in enumerate(items):
        assert type(item) is tuple
        heapq.heappush(heap, (-calcItemHeuristic(item[4], item[3], item[2]), i, item))
    while heap:
        _, _, item = heapq.heappop(heap)
        if P > 0 and item[2] < P:
            if M > 0 and item[3] < M:
                solution.add(item[0])
                P = P - item[2]
                M = M - item[3]
    return list(solution)
"""
# Random greedy algorithm
# Picks an independent (compatible) set of classes from problem id.
"""
def pickSet(id, r=False):
    """Pick an independent (constraint-compatible) set of classes for problem *id*.

    Loads the class-conflict graph pickle and the problem instance, drops
    items that cannot be profitable or cannot fit, then selects classes:
    the first 5 picks are random, the rest greedy by a value/cost heuristic.
    Returns the items belonging to the chosen classes.
    """
    with open("problem_graphs/" + str(id) + ".pickle", 'rb') as handle:
        class_constraint_map = pickle.load(handle)
    P, M, N, C, items, constraints = read_input("project_instances/problem" + str(id) + ".in")
    # Prune: no profit, too heavy, or too expensive to ever include.
    itemscopy = list(items)
    for item in itemscopy:
        if item[4] - item[3] <= 0 or item[2] > P or item[3] > M:
            items.remove(item)
    # Group surviving item indices by class id.
    classes = dict()
    for i, item in enumerate(items):
        if item[1] in classes:
            classes[item[1]].add(i)
        else:
            classes[item[1]] = {i}
    # Per-class aggregate features used by the heuristic.
    totalValue = lambda c: sum([items[item][4] for item in classes[c]])
    totalWeight = lambda c: sum([items[item][2] for item in classes[c]])
    totalCost = lambda c: sum([items[item][3] for item in classes[c]])
    def heuristic(c):
        # Net value per unit cost (+1 avoids division by zero).
        return (totalValue(c) - totalCost(c)) / (totalCost(c) + 1)
    result = []
    numClasses = len(classes)
    classesPicked = 0
    while len(classes) > 0:
        # BUG FIX: both phases previously indexed `classes` without
        # re-checking emptiness, so removing the last class inside the
        # random phase crashed random.choice() (and then max()).
        while classesPicked < 5 and classes:
            # Random phase: the first 5 picks are uniform over classes.
            next_class = random.choice(list(classes.keys()))
            for neighbor in class_constraint_map[next_class]:
                if neighbor in classes:
                    del classes[neighbor]
            class_items = classes[next_class]
            del classes[next_class]
            for it in class_items:
                result.append(items[it])
            classesPicked += 1
        if not classes:
            break
        # Greedy phase: take the best remaining class by heuristic.
        next_class = max(classes.keys(), key=lambda c: heuristic(c))
        for neighbor in class_constraint_map[next_class]:
            if neighbor in classes:
                del classes[neighbor]
        class_items = classes[next_class]
        del classes[next_class]
        for it in class_items:
            result.append(items[it])
        classesPicked += 1
    print("problem", str(id) + ": ", classesPicked, "classes picked.")
    return result
"""
Scorer takes in id, and output_file
"""
def scorer(id, item_list):
    """Score *item_list* for problem *id*.

    Returns 0 as soon as any class conflict or budget overflow is found;
    otherwise returns total value plus the leftover money budget.
    """
    P, M, N, C, items, constraints = read_input("project_instances/problem" + str(id) + ".in")
    # BUG FIX: the pickle files were opened without ever being closed;
    # context managers close them deterministically.
    with open("problem_graphs/" + str(id) + ".pickle", "rb") as fh:
        class_constraint_map = pickle.load(fh)
    with open("item_map/" + str(id) + ".pickle", "rb") as fh:
        item_map = pickle.load(fh)
    incompatibles = set()
    score = 0
    weight = 0
    cost = 0
    for item in item_list:
        itemObj = item_map[item]
        clas = itemObj[1]
        # Reject the whole solution on any violated constraint.
        if clas in incompatibles or weight + itemObj[2] > P or cost + itemObj[3] > M:
            return 0
        score += itemObj[4]
        weight += itemObj[2]
        cost += itemObj[3]
        for neighbor in class_constraint_map[clas]:
            incompatibles.add(neighbor)
    print("value of items:", score)
    # Leftover money counts toward the final score.
    return score + M - cost
def solve(id):
    """Run one randomized solve for problem *id*.

    Persists the solution and updates outputs/best_scores.txt only when the
    new score beats the recorded best for this problem.
    """
    # Context managers instead of manual open/close pairs.
    with open("outputs/best_scores.txt") as f:
        bestScores = f.readlines()
    P, M, N, C, items, constraints = read_input(generateFilePath(id))
    indSet = pickSet(id)
    picked_items = greedyKnapsack2(indSet, P, M)
    this_score = round(scorer(id, picked_items), 2)
    if this_score > float(bestScores[id-1]):
        write_output("outputs/problem" + str(id) + ".out", picked_items)
        print("got better score!", this_score, "for problem", id, "whose best score was previously", bestScores[id-1])
        bestScores[id-1] = str(this_score)+"\n"
        with open("outputs/best_scores.txt", 'w') as f:
            f.writelines(bestScores)
    else:
        print("got worse score", this_score, "for problem", id, "whose best score was", bestScores[id-1])
def generateFilePath(id):
    """Return the input-file path for problem *id*."""
    parts = ("project_instances/problem", str(id), ".in")
    return "".join(parts)
"""
===============================================================================
Read Input
Write Output
===============================================================================
"""
def read_input(filename):
    """Parse a problem instance file.

    Returns (P, M, N, C, items, constraints) where
      P: float weight budget
      M: float money budget
      N: int number of items
      C: int number of constraints
      items: list of (name, class:int, weight:float, cost:float, value:float)
      constraints: list of sets of class ids
    """
    import ast  # stdlib; only needed here
    with open(filename) as f:
        P = float(f.readline())
        M = float(f.readline())
        N = int(f.readline())
        C = int(f.readline())
        items = []
        constraints = []
        for i in range(N):
            name, clas, weight, cost, val = f.readline().split(";")
            items.append((name, int(clas), float(weight), float(cost), float(val)))
        for i in range(C):
            # SECURITY FIX: eval() executes arbitrary code from the input
            # file; ast.literal_eval accepts the same set-literal syntax
            # (e.g. "{1, 2}") safely.  Assumes constraint lines are literal
            # sets — TODO confirm no input uses "set()" for an empty set.
            constraint = set(ast.literal_eval(f.readline()))
            constraints.append(constraint)
    return P, M, N, C, items, constraints
def write_output(filename, items_chosen):
    """Write one chosen item name per line to *filename*."""
    with open(filename, "w") as out:
        lines = ["{0}\n".format(name) for name in items_chosen]
        out.writelines(lines)
# coding=utf-8
# ===============================================================
# Author: Óscar García Martínez
# Email: racsodev93@gmail.com
#
# ABOUT COPYING OR USING PARTIAL INFORMATION:
# This script was originally created by Oscar Garcia. Any
# explicit usage of this script or its contents is granted
# according to the license provided and its conditions.
# ===============================================================
import logging
import sqlite3
from datetime import datetime
from shutil import rmtree
from sqlite3 import Error

from telegram import InlineKeyboardButton, InlineKeyboardMarkup, ReplyKeyboardRemove
from telegram.ext import Updater, InlineQueryHandler, CommandHandler, CallbackQueryHandler, ConversationHandler, RegexHandler, MessageHandler, Filters

from credentials import TELEGRAM_TOKEN
# Section id currently being operated on; set by the inline-button callbacks.
SELECTEDSECTION = ""
# Conversation states used by the ConversationHandler in main().
MAIN, ADDSECTION, DELETESECTION, ADDNOTE, ADDNOTE2, REMOVENOTE, REMOVENOTE2, GETNOTE = range(8)
# Enable logging
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.INFO)
logger = logging.getLogger(__name__)
def safe_str(obj):
    """Best-effort string conversion.

    Falls back to UTF-8 encoding when str() raises UnicodeEncodeError
    (a Python 2 legacy case for unicode objects).
    """
    try:
        return str(obj)
    except UnicodeEncodeError:
        return obj.encode('utf8')
def sql_connection():
    """Open the bot's SQLite database.

    Returns a connection, or None when the connect fails.
    """
    try:
        con = sqlite3.connect('/home/pi/TelegramBot/bot_DB.db')
        return con
    except Error as e:
        # BUG FIX: previously printed the imported Error *class* object,
        # not the exception instance, hiding the actual failure reason.
        print(e)
        return None
def ayuda(bot, update):
    """Reply with the command guide (/ayuda)."""
    texto = 'Puedes utilizar los siguientes comandos : \n\n/ayuda - Guia para utilizar el bot. \n/start - Inicializa el bot. \n/addSeccion - Añade una nueva sección. \n/eliminarSeccion - Elimina una sección. \n/addNota - Añade una nota a una sección. \n/verNotas - Muestra todas las notas de una sección. \n/eliminarNota - Elimina una nota de una sección.\n\n'
    update.message.reply_text(texto)
def start(bot, update):
    """/start: register the chat as a user if new and send a greeting."""
    con = sql_connection()
    cursorObj = con.cursor()
    # SECURITY FIX: parameterized query instead of string concatenation.
    cursorObj.execute('SELECT * FROM Users WHERE Name=?', (safe_str(update.message.chat_id),))
    rows = cursorObj.fetchall()
    if (len(rows) == 0):
        cursorObj.execute("INSERT INTO Users (Name, Password) values (?, ?)", (safe_str(update.message.chat_id), ""))
        con.commit()
    con.close()
    update.message.reply_text('Bienvenido @' + update.message.from_user.username + '. Si necesitas ayuda para utilziar este bot, pruebe con el comando /ayuda. Un saludo.\n')
    return MAIN
def addSeccion(bot, update):
    """/addSeccion: prompt for the new section's name."""
    prompt = 'Escribe el nombre de la sección que quieres crear.'
    update.message.reply_text(prompt)
    return ADDSECTION
def textoAddSeccion(bot, update):
    """Receive the typed section name and store it for this user."""
    idUser = safe_str(update.message.chat_id)
    texto = update.message.text
    if " " in texto:
        # Section names must be a single word.
        update.message.reply_text('Una sección sólo puede nombrarse con una palabra. Si quieres que se llame \"Lista de la compra\" pon \"ListaDeLaCompra\".')
    else:
        con = sql_connection()
        cursorObj = con.cursor()
        cursorObj.execute("INSERT INTO Sections (User, Name) values (?, ?)", (idUser, texto))
        con.commit()
        con.close()
        update.message.reply_text('Sección añadida.')
    return MAIN
def eliminarSeccion(bot, update):
    """/eliminarSeccion: offer the user's sections as delete buttons."""
    idUser = safe_str(update.message.chat_id)
    con = sql_connection()
    cursorObj = con.cursor()
    # SECURITY FIX: parameterized query instead of string concatenation.
    cursorObj.execute('SELECT Name FROM Sections WHERE User=?', (idUser,))
    rows = cursorObj.fetchall()
    if (len(rows) == 0):
        bot.send_message(chat_id=update.message.chat_id, text="No hay ninguna sección para eliminar.")
    else:
        lista = []
        for row in rows:
            var = safe_str(row[0])
            lista.append([InlineKeyboardButton("🗑 " + var, callback_data=var)])
        lista.append([InlineKeyboardButton("🔙 " + 'Cancelar', callback_data='Cancelar')])
        reply_markup = InlineKeyboardMarkup(lista)
        update.message.reply_text('Elige una sección:',reply_markup=reply_markup)
    con.close()
    return DELETESECTION
def botonEliminarSeccion(bot, update):
    """Delete the section chosen via inline button ('Cancelar' aborts)."""
    con = sql_connection()
    cursorObj = con.cursor()
    query = update.callback_query
    idUser = safe_str(query.message.chat_id)
    sectionName = safe_str(query.data)
    if (sectionName == 'Cancelar'):
        con.close()  # BUG FIX: the connection leaked on the cancel path
        bot.edit_message_text(text="Cancelado", chat_id=query.message.chat_id, message_id=query.message.message_id)
        return MAIN
    cursorObj.execute('''DELETE FROM Sections WHERE User=? AND Name=?''', (idUser,sectionName))
    con.commit()
    bot.edit_message_text(text="Sección eliminada", chat_id=query.message.chat_id, message_id=query.message.message_id)
    con.close()
    return MAIN
def addNota(bot, update):
    """/addNota: ask which section the new note belongs to."""
    idUser = safe_str(update.message.chat_id)
    con = sql_connection()
    cursorObj = con.cursor()
    # SECURITY FIX: parameterized query instead of string concatenation.
    cursorObj.execute('SELECT Id,Name FROM Sections WHERE User=?', (idUser,))
    rows = cursorObj.fetchall()
    if (len(rows) == 0):
        update.message.reply_text('No hay ninguna sección para seleccionar.')
    else:
        lista = []
        for row in rows:
            idSect = safe_str(row[0])
            nameSect = safe_str(row[1])
            lista.append([InlineKeyboardButton(nameSect, callback_data=idSect)])
        lista.append([InlineKeyboardButton("🔙 " + 'Cancelar', callback_data='-1')])
        reply_markup = InlineKeyboardMarkup(lista)
        update.message.reply_text('¿En qué sección quieres añadir la nota?',reply_markup=reply_markup)
    con.close()
    return ADDNOTE
def botonAddNota(bot, update):
    """Handle the section choice for /addNota; callback data '-1' cancels."""
    global SELECTEDSECTION
    query = update.callback_query
    chat_id = query.message.chat_id
    msg_id = query.message.message_id
    SELECTEDSECTION = safe_str(query.data)
    if SELECTEDSECTION == '-1':
        bot.edit_message_text(text="Cancelado", chat_id=chat_id, message_id=msg_id)
        return MAIN
    bot.edit_message_text(text="Escribe la nota.", chat_id=chat_id, message_id=msg_id)
    return ADDNOTE2
def textoAddNota(bot, update):
    """Store the typed note in the previously selected section."""
    global SELECTEDSECTION
    texto = update.message.text
    con = sql_connection()
    cursorObj = con.cursor()
    cursorObj.execute("INSERT INTO Notes (Section, Note) values (?, ?)", (SELECTEDSECTION, texto))
    con.commit()
    con.close()  # BUG FIX: the connection was never closed
    update.message.reply_text('Nota añadida.')
    return MAIN
def verNotas(bot, update):
    """/verNotas: ask which section's notes to display."""
    idUser = safe_str(update.message.chat_id)
    con = sql_connection()
    cursorObj = con.cursor()
    # SECURITY FIX: parameterized query instead of string concatenation.
    cursorObj.execute('SELECT Id,Name FROM Sections WHERE User=?', (idUser,))
    rows = cursorObj.fetchall()
    if (len(rows) == 0):
        update.message.reply_text('No hay ninguna sección que elegir.')
    else:
        lista = []
        for row in rows:
            nameSect = safe_str(row[1])
            idSect = safe_str(row[0])
            lista.append([InlineKeyboardButton(nameSect, callback_data=idSect)])
        lista.append([InlineKeyboardButton("🔙 " + 'Cancelar', callback_data='-1')])
        reply_markup = InlineKeyboardMarkup(lista)
        update.message.reply_text('¿Selecciona la sección?',reply_markup=reply_markup)
    con.close()
    return GETNOTE
def imprimirNotas(bot, update):
    """Show every note of the chosen section as one Markdown message."""
    query = update.callback_query
    SELECTEDDATA = safe_str(query.data)
    if (SELECTEDDATA == '-1'):
        bot.edit_message_text(text="Cancelado", chat_id=query.message.chat_id, message_id=query.message.message_id)
        return MAIN
    con = sql_connection()
    cursorObj = con.cursor()
    # SECURITY FIX: parameterized queries instead of string concatenation.
    cursorObj.execute('SELECT Note FROM Notes WHERE Section=?', (SELECTEDDATA,))
    rows = cursorObj.fetchall()
    if (len(rows) == 0):
        bot.edit_message_text(text="No hay ninguna nota en esta sección.", chat_id=query.message.chat_id, message_id=query.message.message_id)
    else:
        cursorObj.execute('SELECT Name FROM Sections WHERE id=?', (SELECTEDDATA,))
        rows2 = cursorObj.fetchall()
        notas = "*" + safe_str(rows2[0][0]) + "*:\n"
        for row in rows:
            notas += "` " + safe_str(row[0]) + "`\n"
        bot.edit_message_text(text=notas, chat_id=query.message.chat_id, message_id=query.message.message_id, parse_mode= 'Markdown')
    con.close()  # BUG FIX: the connection was never closed
    return MAIN
def eliminarNota(bot, update):
    """/eliminarNota: ask which section holds the note to delete."""
    idUser = safe_str(update.message.chat_id)
    con = sql_connection()
    cursorObj = con.cursor()
    # SECURITY FIX: parameterized query instead of string concatenation.
    cursorObj.execute('SELECT Id,Name FROM Sections WHERE User=?', (idUser,))
    rows = cursorObj.fetchall()
    if (len(rows) == 0):
        update.message.reply_text('No hay ninguna sección que elegir.')
    else:
        lista = []
        for row in rows:
            nameSect = safe_str(row[1])
            idSect = safe_str(row[0])
            lista.append([InlineKeyboardButton(nameSect, callback_data=idSect)])
        lista.append([InlineKeyboardButton("🔙 " + 'Cancelar', callback_data='-1')])
        reply_markup = InlineKeyboardMarkup(lista)
        update.message.reply_text('¿En qué sección está la nota que quieres eliminar?',reply_markup=reply_markup)
    con.close()
    return REMOVENOTE
def botonSeccionEliminarNota(bot, update):
    """List the notes of the chosen section as delete buttons."""
    global SELECTEDSECTION
    query = update.callback_query
    SELECTEDSECTION = safe_str(query.data)
    if (SELECTEDSECTION == '-1'):
        bot.edit_message_text(text="Cancelado", chat_id=query.message.chat_id, message_id=query.message.message_id)
        return MAIN
    con = sql_connection()
    cursorObj = con.cursor()
    # SECURITY FIX: parameterized query instead of string concatenation.
    cursorObj.execute('SELECT Id,Note FROM Notes WHERE Section=?', (SELECTEDSECTION,))
    rows = cursorObj.fetchall()
    if (len(rows) == 0):
        con.close()  # BUG FIX: the connection leaked on this early return
        bot.edit_message_text(text="No hay ninguna nota en esta sección.", chat_id=query.message.chat_id, message_id=query.message.message_id)
        return MAIN
    else:
        lista = []
        for row in rows:
            nameNote = safe_str(row[1])
            idNote = safe_str(row[0])
            lista.append([InlineKeyboardButton("🗑 " + nameNote, callback_data=idNote)])
        lista.append([InlineKeyboardButton("🔙 " + 'Cancelar', callback_data='-1')])
        reply_markup = InlineKeyboardMarkup(lista)
        bot.edit_message_text(text="¿Qué nota quieres eliminar?", chat_id=query.message.chat_id, message_id=query.message.message_id, reply_markup=reply_markup)
    con.close()
    return REMOVENOTE2
def botonEliminarNota(bot, update):
    """Delete the note chosen via inline button ('-1' cancels)."""
    query = update.callback_query
    con = sql_connection()
    cursorObj = con.cursor()
    idNote = safe_str(query.data)
    if (idNote == '-1'):
        con.close()  # BUG FIX: the connection leaked on the cancel path
        bot.edit_message_text(text="Cancelado", chat_id=query.message.chat_id, message_id=query.message.message_id)
        return MAIN
    # SECURITY FIX: parameterized query instead of string concatenation.
    cursorObj.execute('DELETE FROM Notes WHERE id=?', (idNote,))
    con.commit()
    bot.edit_message_text(text="Nota eliminada", chat_id=query.message.chat_id, message_id=query.message.message_id)
    con.close()
    return MAIN
def cancel(bot, update):
    """Fallback /cancel handler: end the conversation politely."""
    user = update.message.from_user
    # BUG FIX: `loggin` was a typo (NameError at runtime); use the module
    # logger.  ReplyKeyboardRemove is imported at the top of the file.
    logger.info("User %s canceled the conversation." % user.first_name)
    update.message.reply_text('Bye! I hope we can talk again some day.',
                              reply_markup=ReplyKeyboardRemove())
    return ConversationHandler.END
def error(bot, update, error):
    """Log errors raised while processing an update."""
    # BUG FIX: `loggin.warn` was a typo (NameError); warning() is also the
    # non-deprecated spelling of warn().
    logger.warning('Update "%s" caused error "%s"' % (update, error))
def main():
    """Wire up the conversation handler and poll until interrupted."""
    # BUG FIX: startup banner previously said "Strating".
    print("\nStarting " + str(datetime.now()))
    updater = Updater(TELEGRAM_TOKEN)
    dp = updater.dispatcher
    # One conversation: /start enters MAIN; each command transitions to the
    # state whose handler collects the follow-up button press or message.
    conv_handler = ConversationHandler(
        entry_points=[CommandHandler('start',start)],
        states={
            MAIN: [CommandHandler('addSeccion', addSeccion),
                   CommandHandler('eliminarSeccion', eliminarSeccion),
                   CommandHandler('addNota', addNota),
                   CommandHandler('verNotas', verNotas),
                   CommandHandler('eliminarNota', eliminarNota),
                   CommandHandler('ayuda', ayuda)],
            ADDSECTION: [MessageHandler(Filters.text, textoAddSeccion)],
            DELETESECTION: [CallbackQueryHandler(botonEliminarSeccion)],
            ADDNOTE: [CallbackQueryHandler(botonAddNota)],
            ADDNOTE2: [MessageHandler(Filters.text, textoAddNota)],
            REMOVENOTE: [CallbackQueryHandler(botonSeccionEliminarNota)],
            REMOVENOTE2: [CallbackQueryHandler(botonEliminarNota)],
            GETNOTE: [CallbackQueryHandler(imprimirNotas)]
        },
        fallbacks=[CommandHandler('cancel', cancel)]
    )
    dp.add_handler(conv_handler)
    dp.add_error_handler(error)
    updater.start_polling()
    updater.idle()  # block until Ctrl-C / SIGTERM
if __name__ == '__main__':
    main()
from time import sleep
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
def check_element_exists(driver):
    """Return the "View more comments" link inside *driver* (an element or
    a webdriver), or None when it cannot be found.

    BUG FIX: the original ignored its parameter and searched the global
    `parent_card`, so the function broke if called before that global
    existed and could not be reused on any other element.
    """
    try:
        return driver.find_element_by_link_text("View more comments")
    except Exception:
        # Selenium raises NoSuchElementException when the link is absent;
        # any lookup failure is treated as "not present".
        return None
driver = webdriver.Chrome(executable_path="chromedriver.exe")
# Shared explicit wait (10 s) for elements that load asynchronously.
wait = WebDriverWait(driver, 10)
# Open the target Facebook post.
driver.get("https://www.facebook.com/CNC3Television/posts/10158767485687996")
# Dismiss the sign-in popup.
element = wait.until(EC.element_to_be_clickable((By.LINK_TEXT, 'Not Now')))
element.click()
parent_card = driver.find_element_by_id("contentArea")
# Load initial set of comments.
parent_card.find_element_by_partial_link_text("Comments").click()
# Switch the comment ordering away from "Most Relevant".
sleep(2)
parent_card.find_element_by_link_text("Most Relevant").click()
driver.find_elements_by_class_name("_54ni")[2].click()
# Remove the fixed header so it cannot intercept clicks while scrolling.
js = "var aa=document.getElementById('headerArea');aa.parentNode.removeChild(aa)"
driver.execute_script(js)
sleep(1)
# Keep clicking "View more comments" until every comment is loaded.
while check_element_exists(parent_card):
    parent_card.find_element_by_link_text("View more comments").click()
    sleep(3)
# Scrape author/text out of each comment list item.
comments = []
elements = parent_card.find_elements_by_tag_name("li")
for element in elements:
    comment_data = {}
    # BUG FIX: an XPath starting with "//" searches the *whole document*
    # even when called on an element, so every <li> yielded the first
    # comment on the page.  A leading "." anchors the search to the
    # current element.
    comment_data['text'] = element.find_element_by_xpath(".//span[@dir]").text
    comment_data['author'] = element.find_element_by_xpath(".//span[@dir]/preceding-sibling::*").text
    comments.append(comment_data)
print(comments)
driver.close()
import wx
import wx.lib.dragscroller
import colorsys
from math import cos, sin, radians
#----------------------------------------------------------------------
BASE = 80.0 # sizes used in shapes drawn below
BASE2 = BASE/2
BASE4 = BASE/4
USE_BUFFER = ('wxMSW' in wx.PlatformInfo) # use buffered drawing on Windows
class TestPanel(wx.ScrolledWindow):
    def __init__(self, parent):
        """A scrollable panel that pans with left-drag and zooms with Ctrl+wheel."""
        wx.ScrolledWindow.__init__(self, parent, -1)
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_MOUSEWHEEL, self.OnWheel)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.Bind(wx.EVT_MOTION, self.OnMotion)
        self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
        self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeave)
        if USE_BUFFER:
            # NOTE(review): self._buffer is only created by OnSize/InitBuffer;
            # assumes an initial EVT_SIZE fires before OnWheel needs the
            # buffer — confirm.
            self.Bind(wx.EVT_SIZE, self.OnSize)
        self.panning = False  # True while a left-drag pan is in progress
        self.offsetx = 300  # drawing-origin offset applied in Draw()
        self.offsety = 300
        self.scale = 1.0  # zoom factor adjusted by Ctrl+wheel
        self.SetScrollbars(1, 1, 1200, 1200, 300, 300)
def OnLeftDown(self, evt):
self.x, self.y = evt.GetPositionTuple()
self.scrollx, self.scrolly = self.GetViewStart()
self.panning = True
def OnMotion(self, evt):
if self.panning:
xx, yy = evt.GetPositionTuple()
self.Scroll(self.scrollx + self.x - xx, self.scrolly + self.y - yy)
def OnLeftUp(self, evt):
if self.panning:
self.panning = False
def OnLeave(self, evt):
if self.panning: self.OnLeftUp(evt)
    def OnWheel(self, evt):
        """Ctrl+wheel zooms in/out by 12.5% steps and re-renders the buffer."""
        if evt.ControlDown():
            rot = evt.GetWheelRotation()
            # Wheel-up zooms in, wheel-down zooms out.
            mult = 0.125 if rot > 0 else -0.125
            self.scale += mult
            # # Adjust scroll so it appears zoom is occuring around mouse point
            # xx, yy = evt.GetPositionTuple()
            # vw, vh = self.GetClientSize()
            # scrollx, scrolly = self.GetViewStart()
            # sx = int((vw+self.offsetx)*mult/2.0)
            # sy = int((vh+self.offsety)*mult/2.0)
            # print sx,sy
            # scrollx -= sx
            # scrolly -= sy
            # self.Scroll(scrollx, scrolly)
            # Re-render the scene into the off-screen buffer, then invalidate.
            # NOTE(review): uses self._buffer, which only exists when
            # USE_BUFFER is true (created via OnSize/InitBuffer) — confirm
            # this handler cannot fire first on non-buffered platforms.
            dc = wx.MemoryDC(self._buffer)
            dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
            dc.Clear()
            gc = self.MakeGC(dc)
            self.Draw(gc)
            self.Refresh()
    def OnSize(self, evt):
        """Recreate the off-screen buffer to match the new window size."""
        # When there is a size event then recreate the buffer to match
        # the new size of the window.
        self.InitBuffer()
        evt.Skip()
def OnPaint(self, evt):
if USE_BUFFER:
# The buffer already contains our drawing, so no need to
# do anything else but create the buffered DC. When this
# method exits and dc is collected then the buffer will be
# blitted to the paint DC automagically
dc = wx.BufferedPaintDC(self, self._buffer, wx.BUFFER_VIRTUAL_AREA)
else:
# Otherwise we need to draw our content to the paint DC at
# this time.
dc = wx.PaintDC(self)
gc = self.MakeGC(dc)
self.Draw(gc)
def InitBuffer(self):
sz = self.GetVirtualSize()
sz.width = max(1, sz.width)
sz.height = max(1, sz.height)
self._buffer = wx.EmptyBitmap(sz.width, sz.height, 32)
dc = wx.MemoryDC(self._buffer)
dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
dc.Clear()
gc = self.MakeGC(dc)
self.Draw(gc)
    def MakeGC(self, dc):
        """Create a wx.GraphicsContext for *dc*.

        Returns None (after drawing an explanatory message) when this
        wxPython build lacks GraphicsContext support.
        """
        try:
            # Deliberately dead branch kept as documentation of how to force
            # the Cairo backend; flip the condition to True to use it.
            if False:
                # If you want to force the use of Cairo instead of the
                # native GraphicsContext backend then create the
                # context like this. It works on Windows so far, (on
                # wxGTK the Cairo context is already being used as the
                # native default.)
                gcr = wx.GraphicsRenderer.GetCairoRenderer
                gc = gcr() and gcr().CreateContext(dc)
                if gc is None:
                    wx.MessageBox("Unable to create Cairo Context.", "Oops")
                    gc = wx.GraphicsContext.Create(dc)
            else:
                # Otherwise, creating it this way will use the native
                # backend, (GDI+ on Windows, CoreGraphics on Mac, or
                # Cairo on GTK).
                gc = wx.GraphicsContext.Create(dc)
        except NotImplementedError:
            dc.DrawText("This build of wxPython does not support the wx.GraphicsContext "
                        "family of classes.",
                        25, 25)
            return None
        return gc
def Draw(self, gc):
    """Render the demo scene into graphics context *gc*.

    Three sections: the compound path shown stroked/filled/drawn, a column
    of progressively scaled copies (with a clipping demo), and a ring of
    rotated, color-cycled copies. Heavily stateful: relies on matched
    PushState/PopState pairs, so statement order matters.
    """
    print("drawing")  # debug trace left in by the author
    gc.Translate(self.offsetx, self.offsety) # apply before scale!
    gc.Scale(self.scale, self.scale)
    font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
    font.SetWeight(wx.BOLD)
    gc.SetFont(font)
    # make a path that contains a circle and some lines, centered at 0,0
    path = gc.CreatePath()
    path.AddCircle(0, 0, BASE2)
    # path.MoveToPoint(0, -BASE2)
    # path.AddLineToPoint(0, BASE2)
    # path.MoveToPoint(-BASE2, 0)
    # path.AddLineToPoint(BASE2, 0)
    # path.CloseSubpath()
    path.AddRectangle(-BASE4, -BASE4/2, BASE2, BASE4)
    path.AddRectangle(-7,-7,14,14)
    path.AddRectangle(-4,-4,8,8)
    # Now use that path to demonstrate various capbilites of the grpahics context
    gc.PushState() # save current translation/scale/other state
    gc.Translate(60, 75) # reposition the context origin
    gc.SetPen(wx.Pen("navy", 1))
    gc.SetBrush(wx.Brush("pink"))
    # show the difference between stroking, filling and drawing
    for label, PathFunc in [("StrokePath", gc.StrokePath),
                            ("FillPath", gc.FillPath),
                            ("DrawPath", gc.DrawPath)]:
        w, h = gc.GetTextExtent(label)
        gc.DrawText(label, -w/2, -BASE2-h-4)
        PathFunc(path)
        gc.Translate(2*BASE, 0)  # step right for the next variant
    gc.PopState() # restore saved state
    gc.PushState() # save it again
    gc.Translate(60, 200) # offset to the lower part of the window
    gc.DrawText("Scale", 0, -BASE2)
    gc.Translate(0, 20)
    # for testing clipping
    gc.Clip(0, 0, 100, 100)
    rgn = wx.RegionFromPoints([ (0,0), (75,0), (75,25,), (100, 25),
                                (100,100), (0,100), (0,0) ])
    gc.ClipRegion(rgn)
    gc.ResetClip()  # clip calls above are exercised then immediately undone
    gc.SetBrush(wx.Brush(wx.Colour(178, 34, 34, 128))) # 128 == half transparent
    for cnt in range(8):
        gc.Scale(1.08, 1.08) # increase scale by 8%
        gc.Translate(5,5)
        gc.DrawPath(path)
    gc.PopState() # restore saved state
    gc.PushState() # save it again
    gc.Translate(400, 200)
    gc.DrawText("Rotate", 0, -BASE2)
    # Move the origin over to the next location
    gc.Translate(0, 75)
    # draw our path again, rotating it about the central point,
    # and changing colors as we go
    for angle in range(0, 360, 30):
        gc.PushState() # save this new current state so we can
        # pop back to it at the end of the loop
        r, g, b = [int(c * 255) for c in colorsys.hsv_to_rgb(float(angle)/360, 1, 1)]
        gc.SetBrush(wx.Brush(wx.Colour(r, g, b, 64)))
        gc.SetPen(wx.Pen(wx.Colour(r, g, b, 128)))
        # use translate to artfully reposition each drawn path
        gc.Translate(1.5 * BASE2 * cos(radians(angle)),
                     1.5 * BASE2 * sin(radians(angle)))
        # use Rotate to rotate the path
        gc.Rotate(radians(angle))
        # now draw it
        gc.DrawPath(path)
        gc.PopState()
    gc.PopState()
#----------------------------------------------------------------------
class DoodleFrame(wx.Frame):
    """Top-level frame that hosts the drawing panel."""
    def __init__(self, parent=None):
        super(DoodleFrame, self).__init__(parent, title="Doodle Frame",
                                          size=(800,600),
                                          style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE)
        # doodle = DragScrollerExample(self)
        # reference intentionally unused: the panel parents itself to the frame
        doodle = TestPanel(self)
if __name__ == '__main__':
    # Standard wx bootstrap: create the app, show the frame, run the loop.
    app = wx.App()
    frame = DoodleFrame()
    frame.Show()
    app.MainLoop()
|
from flask import Flask, render_template, request
from datetime import datetime
from algo import algorithm, algo_dict, not_learnt
app = Flask(__name__)
@app.route('/')
def index():
    """Render the landing page with placeholder results.

    No algorithm has been chosen yet, so the template receives sentinel
    values (`not_learnt`, "No_score") plus a prompt asking the user to
    pick an algorithm key.
    """
    st = datetime.now()
    print("\n\n\n---------------------------------\nindex started at {}".format(st))
    print("rendering the index page")
    weekday_name = datetime.today().strftime('%A')
    algo = "no_algo_selected"
    res = not_learnt  # placeholder result imported from the algo module
    score = "No_score"
    print("serving empty result_")
    # BUG FIX: corrected typo in the user-facing prompt ("sellect" -> "select")
    msg = "please select algorithm: DT, LR, RF, KNN, NB"
    et = datetime.now()
    tt = et - st  # elapsed handler time, shown on the page
    print("index process done in {}\n---------------------------------".format(tt))
    return render_template('script.html', week_day=weekday_name, time=tt,
                           msg=msg, algo=algo, res=res, score=score)
@app.route('/', methods=['POST', 'GET'])
def task():
    """Handle the algorithm-selection form and render its prediction.

    Reads the `algo` form field, runs the selected algorithm, and renders
    the result page; a ValueError from the pipeline falls through to an
    error rendering. Both routes share the '/' rule — Flask dispatches
    GET to index() (registered first) and POST here.
    """
    st = datetime.now()
    print('\n\n\n---------------------------------\nplease provide algo key')
    try:
        t = datetime.now()
        print("\ntask started at {}".format(t))
        Weekday_name = datetime.today().strftime('%A')
        algo = request.form['algo']  # only ValueError is handled below; a KeyError here would propagate
        print('using: {}'.format(algo))
        res, score = algorithm(algo)
        print("serving algorithm result")
        msg = """your prediction is ready"""
        et = datetime.now()
        tt = et - st
        print("task executed successfully in {}".format(tt))
        return render_template('script.html', week_day = Weekday_name, time=tt, msg=msg, algo=algo, res = res, score = score)
    except ValueError as ve:
        # Error path: re-render the page with an explanatory message.
        t = datetime.now()
        print("\nexception caught at {}".format(t))
        Weekday_name = datetime.today().strftime('%A')
        algo = """Algo not learnt"""
        print("Incorrect found by me : {}".format(ve))
        print('error using: {}'.format(algo))
        # NOTE(review): calls algorithm() again with the literal string
        # "Algo not learnt" — presumably algo_dict maps unknown keys to a
        # default result; confirm this call cannot itself raise.
        res, score = algorithm(algo)
        msg = """Please check all the fields"""
        et = datetime.now()
        tt = et - st
        print("exception processed in {}\n---------------------------------".format(tt))
        return render_template('script.html', week_day = Weekday_name, time=tt, msg=msg, algo=algo, res = res, score = score)
if __name__ == '__main__':
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
import sys
from PyQt5.uic import loadUi
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtWidgets import QDialog, QApplication, QWidget
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QApplication, QWidget, QDesktopWidget
from camUtils import VideoThread
from imutils.video import WebcamVideoStream
from imutils.video import FPS
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QThread
import numpy as np
import cv2
import requests
from processImage import Process
from client import sio
class LoginScreen(QDialog):
    """Login dialog (login.ui): authenticates against the local server and,
    on success, swaps the stacked widget over to the Home screen."""
    def __init__(self, widget):
        super(LoginScreen, self).__init__()
        loadUi("login.ui",self)
        # `widget` is the QStackedWidget owning this dialog
        self.closeBtn.clicked.connect(lambda:widget.close())
        self.loginBtn.clicked.connect(lambda:self.login(widget))
    def login(self, widget):
        """Validate the form, authenticate, and open the Home screen.

        Returns True on success, False on validation or auth failure.
        """
        username = self.username.text()
        password = self.password.text()
        if len(username)==0 or len(password)==0:
            self.error.setText("Please input all fields.")
            return False
        elif not self.authenticate(username, password):
            self.error.setText("Wrong Username or Password")
            return False
        print("Succesful")
        # authenticate() stored loggedUsername/session_id on self
        home = Home(self.loggedUsername, self.session_id)
        widget.addWidget(home)
        widget.setCurrentIndex(widget.currentIndex()+1)
        # Restyle/resize the container for the (larger) home screen.
        widget.setStyleSheet('#wind{background-image: url(pyBack.png); background-position: center;}')
        widget.setFixedHeight(958)
        widget.setFixedWidth(1525)
        self.centerWidgetOnScreen(widget)
        widget.setWindowFlags(QtCore.Qt.Window)
        widget.show()
        return True
    def centerWidgetOnScreen(self, widget):
        """Move *widget* so its frame is centered on the primary screen."""
        centerPoint = QtGui.QScreen.availableGeometry(QtWidgets.QApplication.primaryScreen()).center()
        fg = widget.frameGeometry()
        fg.moveCenter(centerPoint)
        widget.move(fg.topLeft())
    def authenticate(self, username, password):
        """POST credentials to the local server; on success fetch a socket
        token and remember loggedUsername/session_id. Returns True/False."""
        curSession = requests.Session()
        #print(username)
        #print(password)
        response = curSession.post("http://localhost:8080/login", data={'username': username, 'password': password})
        #print(response.status_code)
        #print(response.text)
        if response.status_code == 401:
            self.error.setText("Invalid username or password")
            return False
        # The session cookie from the login POST authorizes this call.
        response2 = curSession.get("http://localhost:8080/socket_token/")
        #print(response2.status_code)
        response2 = response2.json()
        #print(response2)
        self.loggedUsername = response2['username']
        self.session_id = response2['session_id']
        #self.loggedUsername = 'adfads'
        #self.session_id = 'asdfsadfasd'
        self.error.setText("")
        return True
class Home(QDialog):
    """Main screen (home.ui): connects the socket.io client, starts the
    camera thread, and streams processed frames into the image label."""
    def __init__(self, username, session_id):
        super(Home, self).__init__()
        loadUi("home.ui",self)
        # auth payload for the socket.io handshake
        self.authe = {'username' : username, 'session_id' : session_id, 'profession': 'student'}
        #print(username)
        sio.connect('http://localhost:8080', auth = self.authe)
        self.thread = VideoThread()
        # frames arrive on the thread's signal and land in update_image()
        self.thread.change_pixmap_signal.connect(self.update_image)
        # start the thread
        self.thread.start()
        self.username = username
        self.session_id = session_id
        self.process = Process(sio, self.thread, self)
        self.process.startStream()
    def closeEvent(self, event):
        # Stop the camera thread before the dialog closes.
        self.thread.stop()
        event.accept()
    @pyqtSlot(np.ndarray)
    def update_image(self, cv_img):
        """Updates the image_label with a new opencv image"""
        qt_img = self.convert_cv_qt(cv_img)
        self.image_label.setPixmap(qt_img)
    def convert_cv_qt(self, cv_img):
        """Convert from an opencv image to QPixmap"""
        rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape  # assumes a 3-channel BGR frame — TODO confirm
        bytes_per_line = ch * w
        convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)
        # scale to fit the 400x400 label, preserving aspect ratio
        p = convert_to_Qt_format.scaled(400, 400, Qt.KeepAspectRatio)
        return QPixmap.fromImage(p)
# main — build the stacked-widget shell, show the login screen, run the app
app = QApplication(sys.argv)
widget = QtWidgets.QStackedWidget()
welcome = LoginScreen(widget)
widget.addWidget(welcome)
widget.setWindowFlags(QtCore.Qt.FramelessWindowHint)
widget.setAttribute(QtCore.Qt.WA_TranslucentBackground)
widget.setFixedHeight(800)
widget.setFixedWidth(1000)
welcome.centerWidgetOnScreen(widget)
widget.show()
try:
    exit_code = app.exec_()
finally:
    # BUG FIX: the original wrapped sys.exit(app.exec_()) in a bare
    # `except:`, which swallowed SystemExit — the process always exited
    # with code 0 and cleanup only ran via the exception path. A finally
    # block guarantees the socket is torn down, then the real Qt exit
    # code is propagated.
    print("Exiting")
    try:
        sio.disconnect()
    except Exception:
        pass  # best-effort cleanup; the client may never have connected
sys.exit(exit_code)
# Generated by Django 3.0.3 on 2020-02-12 12:33
from decimal import Decimal
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Setting model, a typed key/value
    store with one nullable/blank value column per supported type
    (the `value_type` column selects which one is meaningful).

    Auto-generated by Django — do not edit by hand beyond comments.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Setting',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='(e.g. SETTING_NAME)', max_length=50, unique=True, verbose_name='Name')),
                ('value_type', models.CharField(choices=[('bool', 'bool'), ('date', 'date'), ('datetime', 'datetime'), ('decimal', 'decimal'), ('email', 'email'), ('file', 'file'), ('float', 'float'), ('image', 'image'), ('int', 'int'), ('string', 'string'), ('text', 'text'), ('time', 'time'), ('url', 'url')], max_length=20, verbose_name='Type')),
                ('value_bool', models.BooleanField(default=False, verbose_name='Value')),
                ('value_date', models.DateField(blank=True, null=True, verbose_name='Value')),
                ('value_datetime', models.DateTimeField(blank=True, null=True, verbose_name='Value')),
                ('value_decimal', models.DecimalField(blank=True, decimal_places=10, default=Decimal('0.0'), max_digits=19, verbose_name='Value')),
                ('value_email', models.EmailField(blank=True, max_length=254, verbose_name='Value')),
                ('value_file', models.FileField(blank=True, upload_to='files', verbose_name='Value')),
                ('value_float', models.FloatField(blank=True, default=0.0, verbose_name='Value')),
                ('value_image', models.FileField(blank=True, upload_to='images', verbose_name='Value')),
                ('value_int', models.IntegerField(blank=True, default=0, verbose_name='Value')),
                ('value_string', models.CharField(blank=True, max_length=50, verbose_name='Value')),
                ('value_text', models.TextField(blank=True, verbose_name='Value')),
                ('value_time', models.TimeField(blank=True, null=True, verbose_name='Value')),
                ('value_url', models.URLField(blank=True, verbose_name='Value')),
            ],
            options={
                'verbose_name': 'Setting',
                'verbose_name_plural': 'Settings',
                'ordering': ['name'],
            },
        ),
    ]
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Author: shoumuzyq@gmail.com
# https://shoumu.github.io
# Created on 2015/12/17 16:35
def gray_code(n):
    """Return the n-bit reflected Gray-code sequence as a list of 2**n ints.

    Built by the classic mirror construction: at each step, append the
    reversed sequence so far with the new high bit set.
    """
    codes = [0]
    for bit in range(n):
        high_bit = 1 << bit
        # list comprehension is fully built before += extends `codes`
        codes += [high_bit | value for value in reversed(codes)]
    return codes
print(gray_code(2))
|
# exception-handling demo: divide two user-supplied integers
try:
    x=int(input("Enter number:"))
    y=int(input("Enter number:"))
    print(x/y)
except ValueError:
    # non-integer input to int()
    print("Invalid input(Enter integer only)")
except ZeroDivisionError:
    print("Division by 0 not possible")
except Exception:
    # BUG FIX: was a bare `except:`, which would also swallow
    # SystemExit and KeyboardInterrupt; catch only real errors.
    print("Error!!!")
else:
    # runs only when no exception occurred
    print("Bye Bye")
finally:
    # always runs, error or not
    print("Hello")
|
# !/usr/bin/python
# coding=utf-8
#
# @Author: LiXiaoYu
# @Time: 2013-10-17
# @Info: Server Library.
import os, sys
from optparse import OptionParser
from configparser import ConfigParser
class ParseConfig():
    """Read settings from an INI file via configparser.

    Loads the framework-level Conf/Config.ini, or an app-specific
    App/<app_name>/Conf/Config.ini when an app name is given.
    """

    # parsed ConfigParser instance (assigned in parser())
    __config = ""
    # default configuration file name
    __config_file = "Config.ini"

    def __init__(self, app_name=""):
        """Locate the config file relative to this module and parse it.

        :param app_name: when non-empty, load the app-specific config
            instead of the framework-level one.
        """
        # directory containing this source file
        cur_path = os.path.normpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
        config_path = cur_path+"/../../Conf/"+self.__config_file
        if app_name != "":
            config_path = cur_path+"/../../../App/"+app_name+"/Conf/"+self.__config_file
        # parse the chosen configuration file
        self.parser(config_path)

    def parser(self, file_name=""):
        """Parse the given INI file; exit with a message if it is missing."""
        if not os.path.isfile(file_name):
            # BUG FIX: reworded the previously garbled error message
            print("config file does not exist or is not a file: %s" % file_name)
            sys.exit()
        self.__config = ConfigParser()
        # BUG FIX: readfp() is deprecated (removed in Python 3.12) and the
        # open() handle was never closed; use read_file() in a context manager.
        with open(file_name) as fh:
            self.__config.read_file(fh)

    def get(self, param_name=""):
        """Return the value for "section.option" (bare "option" reads the
        "default" section)."""
        parts = param_name.split(".")
        if len(parts) == 2:
            node, field = parts
        else:
            node = "default"
            field = parts[0]
        return self.__config.get(node, field)

    def getSection(self, name="default"):
        """Return the (option, value) pairs of one section."""
        return self.__config.items(name)

    def getSections(self):
        """Return the list of section names."""
        return self.__config.sections()

    def getValues(self):
        """Return every section as {section_name: {option: value}}."""
        values = {}
        for section in self.__config.values():
            values[section.name] = {}
            for k, v in section.items():
                values[section.name][k] = v
        return values
if __name__ == "__main__":
    # Smoke test: read the framework config, then the 'Queue' app config,
    # and print the merged section dictionaries (dict2 wins on clashes).
    c = ParseConfig()
    print(c.getSections())
    dict1 = c.getValues()
    c = ParseConfig('Queue')
    print(c.getSections())
    dict2 = c.getValues()
    print(dict(dict1, **dict2))
|
"""Advent of Code Day 25 - Four Dimensional Adventure"""
class FixedPoint:
    """A point as a disjoint-set (union-find) node.

    Every node starts as the root of its own singleton set: it is its own
    parent with rank 0.
    """
    def __init__(self, num):
        self.num = num      # identifier of the point
        self.parent = self  # self-parented == set root
        self.rank = 0       # upper bound on tree height, for union by rank
def find(point):
    """Return the root of *point*'s set, compressing the path on the way.

    Iterative two-pass variant: first walk up to the root, then repoint
    every node on the path directly at it (same full compression as the
    recursive form, without recursion depth limits).
    """
    root = point
    while root.parent is not root:
        root = root.parent
    while point is not root:
        point.parent, point = root, point.parent
    return root
def union(point_1, point_2):
    """Merge the sets containing the two points using union by rank."""
    root_a = find(point_1)
    root_b = find(point_2)
    # Already in the same set — nothing to merge.
    if root_a == root_b:
        return
    # Attach the shallower tree under the deeper one so tree height
    # stays logarithmic instead of growing unchecked.
    if root_a.rank < root_b.rank:
        root_a, root_b = root_b, root_a
    root_b.parent = root_a
    if root_a.rank == root_b.rank:
        root_a.rank += 1
def manhattan_distance(point_1, point_2):
    """Return the Manhattan (L1) distance between two coordinate sequences."""
    total = 0
    for a, b in zip(point_1, point_2):
        total += abs(a - b)
    return total
def main():
    """Use disjoint-set structure to turn points into constellations."""
    with open('input.txt') as f:
        lines = [[int(x) for x in l.strip().split(',')] for l in f.readlines()]
    # Initialise each point and check against previous to see if union needed
    points = {}
    for i, line in enumerate(lines):
        coords = tuple(line)
        # Insert first, then compare against every point so far; the new
        # point is included (distance 0 unions it with itself, a no-op).
        points[coords] = FixedPoint(i)
        for point in points:
            if manhattan_distance(coords, point) <= 3:
                union(points[coords], points[point])
    # Set comprehension of find() for each FixedPoint gives unique roots
    constellations = len({find(x) for x in points.values()})
    # Answer One
    print("Number of constellations:", constellations)
# Script entry point.
if __name__ == '__main__':
    main()
|
from typing import Any
import qrcode
import platform
def qr_terminal_str(data: Any, version: Any = None) -> str:
    """
    Render *data* as a QR code suitable for printing in a terminal.

    :param data: qrcode data
    :param version: 1-40 or None (None lets the library pick a fitting size)
    :return: the QR code as text, with a one-module white border on every side
    """
    # Windows consoles historically lack ANSI colour support, so use a
    # block glyph there; elsewhere paint cells with ANSI background codes.
    if platform.system() == "Windows":
        white_block = '▇'
        black_block = ' '
        new_line = '\n'
    else:
        white_block = '\033[0;37;47m  '
        black_block = '\033[0;37;40m  '
        new_line = '\033[0m\n'  # reset attributes at the end of each row
    qr = qrcode.QRCode(version)
    qr.add_data(data)
    if version:
        qr.make()
    else:
        # no explicit version: let the library grow the code to fit
        qr.make(fit=True)
    # top border row (modules_count + 2 accounts for the side borders)
    output = white_block * (qr.modules_count + 2) + new_line
    # qr.modules is the matrix of booleans — True means a dark module
    for mn in qr.modules:
        output += white_block
        for m in mn:
            if m:
                output += black_block
            else:
                output += white_block
        output += white_block + new_line
    # bottom border row
    output += white_block * (qr.modules_count + 2) + new_line
    return output
def draw(data: Any, version: Any = None) -> None:
    """Render *data* as a terminal QR code and print it to stdout."""
    print(qr_terminal_str(data, version))
|
import conn
db = conn.cursor
def get_vak(db):
    """Return every row of the `vak` table.

    :param db: a DB-API cursor (provides execute()/fetchall()).
    :return: list of row tuples.
    """
    # Cleanup: the original built an unused `vak` list and copied the
    # result through a redundant `resp` variable.
    db.execute("SELECT * from vak")
    return db.fetchall()
def add_vak(db, naam):
    """Insert a new subject name into `vak` and commit.

    :param db: a DB-API cursor.
    :param naam: the subject name to store.

    SECURITY FIX: the original concatenated `naam` straight into the SQL
    string, which allowed SQL injection; use a parameterized query so the
    driver escapes the value.
    """
    db.execute("INSERT INTO vak(vak_naam) VALUES (%s)", (naam,))
    conn.db.commit()
def latest_vak(db):
    """Return the most recently inserted `vak` row (highest vak_id).

    :param db: a DB-API cursor (provides execute()/fetchall()).
    :return: a single-row list of tuples.
    """
    # Cleanup: dropped the unused `vak` list and the redundant `resp` copy.
    db.execute("SELECT * from vak ORDER BY vak_id DESC LIMIT 1")
    return db.fetchall()
# 导入相关数据包
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
from scipy import stats
from scipy.stats import norm
# Dataset location on the analysis host (Kaggle house-prices competition).
root_path = '/opt/data/kaggle/getting-started/house-prices'
train = pd.read_csv('%s/%s' % (root_path, 'train.csv'))
test = pd.read_csv('%s/%s' % (root_path, 'test.csv'))
|
from django.contrib import admin
from django.urls import path, include
from user.views import initial_view
# URL routing: each role-specific app owns its own sub-tree; the bare
# root serves the initial view from the user app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('manager/', include('manager.urls')),
    path('chef/', include('chef.urls')),
    path('runner/', include('runner.urls')),
    path('', initial_view),
    path('user/', include('user.urls')),
]
|
import datetime
import mock
import datetime
from unittest import TestCase
from flask_oauthlib.client import OAuthException
from .. import linkedin
from ..models import User, db, UserLinkedinInfo
from .test_models import DbTestCase
from .test_views import ViewTestCase
# Canned OAuth token-exchange response shared by the tests below.
FAKE_AUTHORIZED_RESPONSE = {
    'access_token': 'blorp',
    'expires_in': 10000
}
# Token-expiry timestamps safely outside any test's runtime window.
FAR_IN_THE_PAST = datetime.datetime.now() - datetime.timedelta(days=50)
FAR_IN_THE_FUTURE = datetime.datetime.now() + datetime.timedelta(days=50)
class LinkedinDisabledTests(ViewTestCase):
    """With no LINKEDIN setting configured, the feature flag is absent
    and the OAuth routes 404."""
    def test_linkedin_is_disabled_if_setting_not_present(self):
        self.assertTrue('LINKEDIN_ENABLED' not in self.app.jinja_env.globals)
    def test_authorize_is_404(self):
        res = self.client.get('/linkedin/authorize')
        self.assert404(res)
class UserLinkedinInfoTests(TestCase):
    """Unit tests for the linkedin helper functions and the
    UserLinkedinInfo model properties (no database required)."""
    def test_get_user_info_raises_exception_if_no_access_token(self):
        user = User(email='foo@bar')
        with self.assertRaisesRegexp(OAuthException,
                                     'Access token unavailable or expired'):
            linkedin.get_user_info(user)
    def test_get_user_info_raises_exception_if_token_is_invalid(self):
        info = UserLinkedinInfo(access_token='b',
                                access_token_expiry=FAR_IN_THE_FUTURE)
        user = User(linkedin=info)
        with self.assertRaisesRegexp(OAuthException,
                                     'Server returned HTTP 401.*blah'):
            # Simulate LinkedIn rejecting the stored token.
            mock_response = mock.Mock(status=401, data={'blah': 1})
            with mock.patch.object(linkedin.linkedin, 'get',
                                   return_value=mock_response):
                linkedin.get_user_info(user)
    def test_retrieve_access_token_is_none_if_linkedin_is_none(self):
        user = User()
        self.assertEqual(linkedin.retrieve_access_token(user), None)
    def test_retrieve_access_token_is_none_if_token_is_expired(self):
        info = UserLinkedinInfo(access_token='b',
                                access_token_expiry=FAR_IN_THE_PAST)
        user = User(linkedin=info)
        self.assertEqual(linkedin.retrieve_access_token(user), None)
    def test_retrieve_access_token_works(self):
        info = UserLinkedinInfo(access_token='b',
                                access_token_expiry=FAR_IN_THE_FUTURE)
        user = User(linkedin=info)
        self.assertEqual(linkedin.retrieve_access_token(user), ('b', ''))
    def test_profile_url_is_none_if_not_available(self):
        info = UserLinkedinInfo()
        self.assertEqual(info.profile_url, None)
    def test_profile_url_works(self):
        info = UserLinkedinInfo(user_info={'publicProfileUrl': 'http://u'})
        self.assertEqual(info.profile_url, 'http://u')
    def test_picture_url_is_high_res_if_available(self):
        # pictureUrls (plural) holds the high-res variants; the first wins.
        info = UserLinkedinInfo(user_info={
            'pictureUrls': {
                'values': ['http://a', 'http://b']
            },
            'pictureUrl': 'http://c'
        })
        self.assertEqual(info.picture_url, 'http://a')
    def test_picture_url_falls_back_to_low_res(self):
        info = UserLinkedinInfo(user_info={'pictureUrl': 'http://c'})
        self.assertEqual(info.picture_url, 'http://c')
    def test_picture_url_is_none_if_not_available(self):
        info = UserLinkedinInfo()
        self.assertEqual(info.picture_url, None)
class LinkedinDbTests(DbTestCase):
    """Database-backed tests: token storage lifecycle and importing
    profile fields onto the User row."""
    def setUp(self):
        super(LinkedinDbTests, self).setUp()
        self.user = User(email=u'a@example.org', password='a', active=True)
        db.session.add(self.user)
        db.session.commit()
    def test_linkedin_is_deleted_with_user(self):
        # The linkedin-info row must be cascade-deleted with its user.
        linkedin.store_access_token(self.user, FAKE_AUTHORIZED_RESPONSE)
        self.assertEqual(UserLinkedinInfo.query.count(), 1)
        db.session.delete(self.user)
        db.session.commit()
        self.assertEqual(UserLinkedinInfo.query.count(), 0)
    def test_only_one_linkedin_per_user_is_created(self):
        # Storing twice must update, not duplicate, the info row.
        self.assertEqual(UserLinkedinInfo.query.count(), 0)
        linkedin.store_access_token(self.user, FAKE_AUTHORIZED_RESPONSE)
        linkedin.store_access_token(self.user, FAKE_AUTHORIZED_RESPONSE)
        self.assertEqual(UserLinkedinInfo.query.count(), 1)
    def test_position_and_org_are_imported_from_profile(self):
        # When a current position exists, it beats the headline.
        linkedin.update_user_fields_from_profile(self.user, {
            u'positions': {
                u'_total': 1,
                u'values': [{u'company': {u'name': u'Self-Employed'},
                             u'id': 768900354,
                             u'isCurrent': True,
                             u'location': {},
                             u'startDate': {u'month': 7, u'year': 2015},
                             u'summary': u'I currently freelance.',
                             u'title': u'Tinkerer'}],
            },
            u'headline': 'Do not use this'
        })
        self.assertEqual(self.user.position, 'Tinkerer')
        self.assertEqual(self.user.organization, 'Self-Employed')
    def test_headline_is_imported_from_profile_when_position_is_empty(self):
        linkedin.update_user_fields_from_profile(self.user, {
            u'positions': {u'_total': 0},
            u'headline': 'Awesome Person'
        })
        self.assertEqual(self.user.position, 'Awesome Person')
    def test_unrecognized_country_name_in_profile_is_ignored(self):
        linkedin.update_user_fields_from_profile(self.user, {
            u'location': {u'country': {u'code': u'lolol'}},
        })
        self.assertEqual(self.user.country, None)
    def test_location_is_imported_from_profile(self):
        linkedin.update_user_fields_from_profile(self.user, {
            u'location': {u'country': {u'code': u'us'},
                          u'name': u'Greater New York City Area'},
        })
        self.assertEqual(self.user.city, 'Greater New York City Area')
        self.assertEqual(self.user.country, 'US')
class LinkedinViewTests(ViewTestCase):
    """Integration tests for the LinkedIn OAuth views, with the feature
    enabled through the LINKEDIN app config."""
    BASE_APP_CONFIG = ViewTestCase.BASE_APP_CONFIG.copy()
    BASE_APP_CONFIG['LINKEDIN'] = dict(
        consumer_key='ckey',
        consumer_secret='csecret',
    )
    def test_linkedin_is_enabled_if_setting_present(self):
        self.assertTrue(self.app.jinja_env.globals['LINKEDIN_ENABLED'])
    @mock.patch.object(linkedin, 'gen_salt', return_value='boop')
    def test_authorize_redirects_to_linkedin(self, gen_salt):
        self.login()
        res = self.client.get('/linkedin/authorize')
        self.assertEqual(res.status_code, 302)
        self.assertEqual(
            res.headers['location'],
            'https://www.linkedin.com/uas/oauth2/authorization?'
            'response_type=code&client_id=ckey&'
            'redirect_uri=http%3A%2F%2Flocalhost%2Flinkedin%2Fcallback&'
            'scope=r_basicprofile&state=boop'
        )
        gen_salt.assert_called_with(10)
    def get_callback(self, fake_response):
        # Helper: hit /linkedin/callback with the token exchange mocked
        # out so no network traffic is needed; returns the response.
        self.login()
        with self.client.session_transaction() as sess:
            sess['linkedin_state'] = 'b'
        with mock.patch.object(
            linkedin.linkedin, 'authorized_response',
            return_value=fake_response
        ) as authorized_response:
            res = self.client.get('/linkedin/callback?state=b',
                                  follow_redirects=True)
            authorized_response.assert_called_once_with()
        return res
    def test_callback_with_mismatched_state_fails(self):
        # CSRF protection: query state must match the session state.
        self.login()
        with self.client.session_transaction() as sess:
            sess['linkedin_state'] = 'hi'
        res = self.client.get('/linkedin/callback?state=blorp')
        self.assertEqual(res.data, 'Invalid state')
    def test_callback_with_no_session_state_fails(self):
        self.login()
        res = self.client.get('/linkedin/callback?state=blorp')
        self.assertEqual(res.data, 'Invalid state')
    def test_callback_with_no_state_fails(self):
        self.login()
        res = self.client.get('/linkedin/callback')
        self.assertEqual(res.data, 'Invalid state')
    def test_failed_callback_flashes_error(self):
        res = self.get_callback(fake_response=None)
        self.assert200(res)
        assert "Connection with LinkedIn canceled" in res.data
    @mock.patch.object(linkedin, 'update_user_info')
    def test_successful_callback_works(self, update_user_info):
        res = self.get_callback(fake_response=FAKE_AUTHORIZED_RESPONSE)
        self.assert200(res)
        user = self.last_created_user
        self.assertEqual(user.linkedin.access_token, 'blorp')
        # delta tolerates clock drift between request and assertion
        self.assertAlmostEqual(user.linkedin.expires_in.total_seconds(),
                               10000, delta=120)
        assert update_user_info.called
        assert "Connection to LinkedIn established" in res.data
    def test_deauthorize_works(self):
        self.login()
        linkedin.store_access_token(self.last_created_user,
                                    FAKE_AUTHORIZED_RESPONSE)
        res = self.client.get('/linkedin/deauthorize')
        self.assertRedirects(res, '/me')
        self.assertEqual(self.last_created_user.linkedin, None)
    def test_deauthorize_requires_login(self):
        self.assertRequiresLogin('/linkedin/deauthorize')
    def test_authorize_requires_login(self):
        self.assertRequiresLogin('/linkedin/authorize')
    def test_callback_requires_login(self):
        self.assertRequiresLogin('/linkedin/callback')
    def test_connect_to_linkedin_is_shown_in_edit_profile(self):
        self.login()
        res = self.client.get('/me')
        self.assert200(res)
        assert 'href="/linkedin/authorize"' in res.data
        assert 'href="/linkedin/deauthorize"' not in res.data
    def test_disconnect_from_linkedin_is_shown_in_edit_profile(self):
        self.login()
        linkedin.store_access_token(self.last_created_user,
                                    FAKE_AUTHORIZED_RESPONSE)
        res = self.client.get('/me')
        self.assert200(res)
        assert 'href="/linkedin/authorize"' not in res.data
        assert 'href="/linkedin/deauthorize"' in res.data
    def test_linkedin_profile_url_is_shown_in_public_profile(self):
        self.login()
        linkedin.store_access_token(self.last_created_user,
                                    FAKE_AUTHORIZED_RESPONSE)
        self.last_created_user.linkedin.user_info = {
            'publicProfileUrl': 'http://linkedin.com/blarg'
        }
        res = self.client.get('/user/%d' % self.last_created_user.id)
        self.assert200(res)
        assert 'href="http://linkedin.com/blarg"' in res.data
        assert 'LinkedIn' in res.data
    def test_linkedin_is_not_shown_in_public_profile(self):
        self.login()
        res = self.client.get('/user/%d' % self.last_created_user.id)
        self.assert200(res)
        assert 'LinkedIn' not in res.data
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from enum import Enum
class MessageProperty(Enum):
    """System message-property keys: enum member name -> the short string
    key used on the wire / in the message property map."""
    TRACE_SWITCH = "TRACE_ON"
    MSG_REGION = "MSG_REGION"
    KEYS = "KEYS"
    TAGS = "TAGS"
    WAIT_STORE_MSG_OK = "WAIT"
    DELAY_TIME_LEVEL = "DELAY"
    RETRY_TOPIC = "RETRY_TOPIC"
    REAL_TOPIC = "REAL_TOPIC"
    REAL_QUEUE_ID = "REAL_QID"
    TRANSACTION_PREPARED = "TRAN_MSG"
    PRODUCER_GROUP = "PGROUP"
    MIN_OFFSET = "MIN_OFFSET"
    MAX_OFFSET = "MAX_OFFSET"
    BUYER_ID = "BUYER_ID"
    ORIGIN_MESSAGE_ID = "ORIGIN_MESSAGE_ID"
    TRANSFER_FLAG = "TRANSFER_FLAG"
    CORRECTION_FLAG = "CORRECTION_FLAG"
    MQ2_FLAG = "MQ2_FLAG"
    RECONSUME_TIME = "RECONSUME_TIME"
    UNIQ_CLIENT_MESSAGE_ID_KEYIDX = "UNIQ_KEY"
    MAX_RECONSUME_TIMES = "MAX_RECONSUME_TIMES"
    CONSUME_START_TIMESTAMP = "CONSUME_START_TIME"
|
from abc import ABC, abstractmethod
class ObservableEngine(Engine):
    """Наблюдаемый класс -> Observable engine: keeps a set of subscribers
    and pushes every message to them.

    NOTE(review): base class `Engine` is not imported in this file — it is
    presumably provided by the course/project environment; confirm.
    """
    def __init__(self):
        super().__init__()
        # observers registered via subscribe()
        self.subscribers = set()
    def subscribe(self, subscriber):
        """Register an observer (must provide an update(message) method)."""
        self.subscribers.add(subscriber)
    def unsbuscribe(self, subscriber):
        # NOTE(review): method name keeps the original typo ("unsbuscribe")
        # so existing callers don't break.
        self.subscribers.discard(subscriber)
    def notify(self, message):
        """Push *message* to every current subscriber."""
        for subscriber in self.subscribers:
            subscriber.update(message)
class AbstractObserver(ABC):
    """Interface for observers that receive messages from an observable."""
    @abstractmethod
    def update(self, message):
        """Handle a notification message.

        BUG FIX: the original abstract method had no body at all (a
        SyntaxError) and took no message argument even though notify()
        passes one.
        """
class ShortNotificationPrinter(AbstractObserver):
    """Observer that keeps each distinct notification it has received.

    BUG FIX: the original body was unfinished — `__init__` assigned to
    `self.subscriber` but `update` read `self.subscribers`, and the loop
    body was missing entirely (a SyntaxError). Messages are now stored in
    a set, so duplicates collapse to one entry.
    """
    def __init__(self, subscriber=None):
        # `subscriber` kept (now optional) for signature compatibility;
        # the draft never used it meaningfully — TODO confirm intent.
        self.subscriber = subscriber
        self.messages = set()
    def update(self, message):
        """Record a notification (deduplicated)."""
        self.messages.add(message)
class FullNotificationPrinter(AbstractObserver):
    """Observer that keeps the full history of notifications, in order.

    BUG FIX: the original `update(self)` took no message argument even
    though the observable's notify() passes one, and did nothing.
    """
    def __init__(self, subscriber=None):
        # `subscriber` kept (now optional) for signature compatibility;
        # the draft never used it meaningfully — TODO confirm intent.
        self.subscriber = subscriber
        self.messages = list()
    def update(self, message):
        """Record a notification, preserving order and duplicates."""
        self.messages.append(message)
#!/usr/bin/python
from funcs import Func,Plot
import scipy.optimize
import sys
# Plotting bounds for the two-dimensional search domain.
xmin= [-1.,-1.]
xmax= [2.,3.]
# Which test function to minimize: first CLI argument, defaulting to 1.
fkind= int(sys.argv[1]) if len(sys.argv)>1 else 1
f= lambda x:Func(x,fkind)
# Minimize the function f
# (Python 2 script: `print` statements; starts from the origin.)
res= scipy.optimize.minimize(f,[0.0,0.0])
print res
print 'Result=',res.x
# Plot the function over the box with the located minimum marked.
Plot(xmin,xmax,f,x_points=[res.x])
|
#! /usr/bin/env python
import sys
from twisted.internet import reactor
from example_secrets import get_page_title, main
def print_page_title(url):
    """Fetch *url*'s page title asynchronously, print it (or the failure),
    then stop the reactor either way. (Python 2 / Twisted script.)"""
    print "fetching", url
    d = get_page_title(url)
    def got_info(title):
        # success callback
        print "title:", title
        reactor.stop()
    def fail_info(f):
        # errback: f is a twisted Failure wrapping the exception
        print "failed!", f.type, f.value
        reactor.stop()
    d.addCallbacks(got_info, fail_info)
# Kick off the fetch with the URL from the command line; main() is
# expected to start the reactor.
main(print_page_title, sys.argv[1])
|
from flask import Flask, request, jsonify
from daos.book_dao_postgres import BookDaoPostgres
from entities.book import Book
from exceptions.book_unavailable_error import BookUnavailableError
from exceptions.not_found_exception import ResourceNotFoundError
from services.book_service_impl import BookServiceImpl
import logging
app: Flask = Flask(__name__)
# NOTE(review): the f-prefix on the format string is a no-op here —
# there are no Python placeholders, only logging %-directives.
logging.basicConfig(filename="records.log", level=logging.DEBUG, format=f'%(asctime)s %(levelname)s %(message)s')
# Handler methods create your WEB API layer
# They are responsible for handling HTTP request and giving back responses
# Parsing and generating JSONs, giving back status codes as appropriate
# They SHOULD NOT be directly responsible for CRUD operations or Bussiness Logic
# Your Handler should use services. THEY SHOULD NOT uses DAOs directly
book_dao = BookDaoPostgres()
book_service = BookServiceImpl(book_dao) # Dependency Injection
@app.route("/books", methods=["POST"])
def create_book():
    """Create a book from the JSON request body; responds 201 on success."""
    body = request.json  # parsed JSON body as a python dictionary
    book = Book(body["bookId"], body["title"], body["author"], body["available"], body["quality"], body["returnDate"])
    book_service.add_book(book)  # heavier logic lives in the service layer
    # BUG FIX: missing space in the response message ("id{...}" -> "id {...}")
    return f"Created book with id {book.book_id}", 201  # 201 = resource created
@app.route("/books/<book_id>", methods=["GET"])
def get_book_by_id(book_id: str):
    """Fetch a single book as JSON; responds 404 when the id is unknown."""
    try:
        found = book_service.retrieve_book_by_id(int(book_id))
        return jsonify(found.as_json_dict())
    except ResourceNotFoundError as err:
        return str(err), 404
@app.route("/books", methods=["GET"])
def get_all_books():
    """List all books as JSON, optionally filtered by a `title` query param."""
    title = request.args.get("title")  # None when the parameter is absent
    if title is None:
        books = book_service.retrieve_all_books()
    else:
        # NOTE(review): the service method name contains a typo ("tile");
        # it is defined elsewhere, so it is called as-is here.
        books = book_service.find_books_by_tile_containing(title)
    return jsonify([b.as_json_dict() for b in books])
@app.route("/books/<book_id>", methods=["PUT"])
def update_book(book_id: str):
    """Replace the book identified by the URI id with the request body."""
    body = request.json  # parsed JSON body as a python dictionary
    replacement = Book(body["bookId"], body["title"], body["author"], body["available"], body["quality"], body["returnDate"])
    # The id in the URI always wins over any id supplied in the body.
    replacement.book_id = int(book_id)
    book_service.update_book(replacement)
    return "updated successfully"
@app.route("/books/<book_id>", methods=["DELETE"])
def delete_book(book_id: str):
    """Delete a book; responds 404 when it does not exist."""
    try:
        book_service.remove_book(int(book_id))
        return "Deleted successfully", 200
    except ResourceNotFoundError:
        return "The resource could not be found", 404
@app.route("/books/<book_id>/checkout", methods=["PATCH"])
def checkout_book(book_id: str):
    """Check out a book; responds 422 when no copy is available."""
    try:
        book_service.checkout_book(int(book_id))
        return f"The book with id {book_id} was successfully checked out"
    except BookUnavailableError as err:
        # 422: the request is well-formed but cannot be processed
        return str(err), 422
if __name__ == '__main__':
    # Run the Flask development server.
    app.run()
|
# -*- coding: utf-8 -*-
from dash import Dash, dcc, html, dash_table, Input, Output, State
import dash_cytoscape as cyto
import dash_bootstrap_components as dbc
import pandas as pd
# Dash app with the Bootstrap theme; browser tab title "HuBNet".
app = Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP],title = "HuBNet")
server = app.server  # underlying Flask server, exposed for WSGI deployment
df = pd.read_csv("data/network_all.csv").iloc[:,2:]  # drop the first two (index) columns
# Specify the layout of the navigation bar at the top
navbar = dbc.Navbar(
dbc.Container(
[
html.A(
dbc.Row(
[
dbc.Col(html.Img(src = "assets/hubnet_logo.png", height = "50px")),
dbc.Col(dbc.NavbarBrand("HuBNet", className="ms-2"))
],
align="center",
className="g-0"
),
href="#",
style={"textDecoration":"none"},
),
html.Span(
[
dbc.NavbarToggler(id="navbar-toggler", n_clicks=0),
dbc.Collapse(
[
dbc.NavItem(
dbc.NavLink("Systems Biology Group", href="http://sb.cs.cmu.edu/", style = {"color":"#adadad"}),
),
dbc.NavItem(
dbc.NavLink("HuBMAP Project", href="https://portal.hubmapconsortium.org/", style = {"color":"#adadad"}),
)
],
id="navbar-collapse",
is_open = False,
navbar = True,
),
],
className = "ml-auto"
)
],
),
color="dark",
dark=True,
fixed="top"
)
# Specify the layout of the Query tab
query = dbc.Container(
[
dbc.Row(
[
dcc.Markdown(
"""
##### **How to use**
""",
style = {"marginTop":10,"marginLeft":25}
),
dcc.Markdown(
"""
-----
""",
style = {"marginTop":10,"marginLeft":25,"color":"#b0b0b0"}
),
dcc.Markdown(
"""
- Enter TF/target names (separated by semicolons).
- Select a tissue.
- If you need to retrive all TFs/targets for the given targets/TFs, simply leave the boxes empty.
- Query results will be shown in the table on the left.
- The corresponding network will be shown on the right.
""",
style = {"marginTop":10,"marginLeft":25}
),
dcc.Markdown(
"""
-----
""",
style = {"marginLeft":25,"color":"#b0b0b0"}
),
dbc.Row(
[
"Enter TF name(s): ",
dbc.Input(id='tf-name-input', value = 'FOS;JUN', type='text'),
],
style = {"width":400,"marginTop":20,"marginLeft":25}
),
dbc.Row(
[
"Enter target name(s): ",
dbc.Input(id='target-name-input',value = 'ARHGEF40;HES4;WDR86-AS1;S100A10;FCRL5;CASP4LP;RGCC;PLAAT4;ZBTB38;TNF;ARHGEF3;CEMIP2;HBA1;OTULIN-DT;ID3;KLRF1;COLQ;FASLG', type = 'text'),
],
style = {"width":400,"marginTop":20,"marginLeft":25}
),
dbc.Row(
[
"Select a tissue:",
dcc.Dropdown(
id = 'tissue-name-input',
options = [
'Liver',
'Heart',
'Spleen',
'Left kidney',
'Right Lung',
'Large intestine'
],
value = 'Spleen',
),
],
style = {"width":200,"marginTop":20,"marginLeft":10}
),
],
),
dbc.Row(
align = "start",
children = [
dbc.Col(
id = "table-container",
children = [],
sm = 12,
md = 4,
style = {"width":550,"height":600,'overflowY': 'scroll', "marginLeft":25,"marginTop":25}
),
dbc.Col(
id = "network-vis-container",
align = "end",
children = [],
sm = 12,
md = 4,
style = {"width": 550, "height":600,'overflowY':'hidden',"marginLeft":25, "marginTop":25}
)
],
style = {"marginTop":25}
),
dcc.Markdown(
"""
-----
""",
style = {"marginTop":40,"marginLeft":25,"color":"#b0b0b0"}
),
dcc.Markdown(
"""
Systems Biology Group · School of Computer Science · Carnegie Mellon University
5000 Forbes Avenue · Pittsburgh, PA 15213
© HuBNet v0.1; Created by Qi (Alex) Song
""",
style = {"marginTop":25,"marginLeft":25,"text-align":"center","color":"#949494"}
)
],
style = {"paddingBottom":25,"paddingRight":50}
)
# Specify the layout of all tabs
tabs = dbc.Container(
dbc.Card(
[
dbc.CardHeader(
dbc.Tabs(
[
dbc.Tab(
tab_id = "query_tab",
label = "Query",
label_style = {"font-weight":"bold","font-size":"120%","color":"#4d4d4d"},
tab_class_name = "flex-grow-1 text-center"
),
dbc.Tab(
tab_id = "statistics_tab",
label = "Statistics",
label_style = {"font-weight":"bold","font-size":"120%","color":"#4d4d4d"},
tab_class_name = "flex-grow-1 text-center"
),
dbc.Tab(
tab_id = "about_tab",
label = "About",
label_style = {"font-weight":"bold","font-size":"120%","color":"#4d4d4d"},
tab_class_name = "flex-grow-1 text-center"
),
dbc.Tab(
tab_id = "download_tab",
label = "Download Data",
label_style = {"font-weight":"bold","font-size":"120%","color":"#4d4d4d"},
tab_class_name = "flex-grow-1 text-center"
),
],
id = "tabs",
active_tab = "query_tab"
)
),
dbc.CardBody(
id = "tab-content",
children = []
),
],
style = {"marginTop":80}
)
)
# Specify the layout of the entire app
app.layout = html.Div(
id = "main-container",
children = [
navbar,
tabs,
dcc.Store(id = "table-data")
],
style = {"background-color":"#f7f7f7"}
)
# add callback for toggling the collapse on small screens
@app.callback(
    Output("navbar-collapse", "is_open"),
    [Input("navbar-toggler", "n_clicks")],
    [State("navbar-collapse", "is_open")],
)
def toggle_navbar_collapse(n, is_open):
    """Flip the navbar collapse state on every toggler click (small screens)."""
    return (not is_open) if n else is_open
# Generate different content for different tab page
@app.callback(
    Output("tab-content","children"),
    [Input("tabs","active_tab")]
)
def generate_tab_content(active_tab):
    """Return the body for the currently selected tab.

    "query_tab" returns the pre-built query layout; "statistics_tab" and
    "about_tab" are placeholders; "download_tab" offers the raw CSV.
    Returns None for an unknown tab id (same as before).
    """
    if active_tab == "query_tab":
        return query
    if active_tab in ("statistics_tab", "about_tab"):
        # Placeholder pages until these tabs are implemented.
        # BUGFIX: user-facing typo "Comming soon!" -> "Coming soon!"
        return dbc.Container(
            [html.P("Coming soon!", className="card-text", style={"marginLeft": 25})]
            + _tab_footer(),
            style={"marginTop": 20}
        )
    if active_tab == "download_tab":
        return dbc.Container(
            [
                dbc.Button(
                    "Download",
                    id="download-btn",
                    className="me-1",
                    href="/static/network_all.csv",
                    external_link=True,
                    style={"background-color": "#f0f0f0", "border-color": "#d1d1d1",
                           "color": "#4d4d4d", "font-weight": "bold", "marginLeft": 25}
                ),
            ] + _tab_footer(),
            style={"marginTop": 20}
        )

def _tab_footer():
    """Shared divider + copyright footer (was copy-pasted into every tab)."""
    return [
        dcc.Markdown(
            """
            -----
            """,
            style={"marginTop": 40, "marginLeft": 25, "color": "#b0b0b0"}
        ),
        dcc.Markdown(
            """
            Systems Biology Group · School of Computer Science · Carnegie Mellon University
            5000 Forbes Avenue · Pittsburgh, PA 15213
            © HuBNet v0.1; Created by Qi (Alex) Song
            """,
            style={"marginTop": 25, "marginLeft": 25, "text-align": "center", "color": "#949494"}
        ),
    ]
@app.callback(
    Output('network-vis-container','children'),
    Input("table-data",'data')
)
def generate_network(data):
    """Redraw the Cytoscape TF->target network from the jsonified query table.

    Fires whenever the "table-data" store changes; returns a one-element list
    (the container's children) holding the Cytoscape component.
    """
    # Convert json data to dataframe
    query_df = pd.read_json(data, orient = "columns")
    # Create gene names to ids mapping and node data for network visualization
    node_data = []
    name2id = dict()
    for idx,name in enumerate(pd.concat([query_df["TF name"],query_df["Target name"]]).unique()):
        name2id[name] = str(idx)
        node_data.append({'data':{'id':str(idx), 'label':name}})
    # Go through edges and generate edge data for network visualization
    # NOTE(review): values coming back from read_json are NaN rather than None
    # when missing, so this "is not None" guard may never filter -- confirm.
    edge_data = []
    for idx,row in query_df.iterrows():
        if row["TF name"] is not None and row['Target name'] is not None:
            edge_data.append({'data':{'source':name2id[row["TF name"]],'target':name2id[row["Target name"]]}})
    return[cyto.Cytoscape(
        id="network_vis",
        layout={"name": "cose"},
        #style={"width": "500px", "height": "500px"},
        elements= node_data + edge_data,
        #stylesheet=def_stylesheet,
        minZoom=0.5,
        maxZoom=3,
        zoom=1
    )]
@app.callback(
    Output('table-container','children'),
    Output('table-data','data'),
    Input('tf-name-input','value'),
    Input('target-name-input','value'),
    Input('tissue-name-input','value')
)
def generate_table(tf_names, target_names, tissue_name):
    """Filter the global edge table by TF names, target names and tissue.

    Returns (DataTable component list, jsonified selection) -- the json is
    stored in "table-data" and consumed by generate_network.
    """
    df.columns = ["TF name", "Target name", "TF Marker", "Score", "Tissue"]
    # Normalize user input: trim surrounding spaces; gene symbols are upper-case.
    tf_names = tf_names.strip().upper()
    target_names = target_names.strip().upper()
    # An empty box means "no restriction": fall back to the tissue filter alone.
    if tf_names == "":
        select1 = df["Tissue"] == tissue_name
    else:
        select1 = df["TF name"].isin(tf_names.split(";"))
    if target_names == "":
        select2 = df["Tissue"] == tissue_name
    else:
        # BUGFIX: was the duplicated assignment "select2 = select2 = ..."
        select2 = df["Target name"].isin(target_names.split(";"))
    select3 = df["Tissue"] == tissue_name
    query_df = df.loc[select1 & select2 & select3, ]
    # Jsonify the selection and store it in "table-data" for the network callback.
    query_js = query_df.loc[:, ["TF name", "Target name", "Score"]].to_json(orient="columns")
    return [dash_table.DataTable(
        id="table",
        sort_action="native",
        filter_action="native",
        row_deletable=True,
        style_data={"whiteSpace": "normal"},
        style_cell={
            "padding": "15px",
            # BUGFIX: was "midWidth", an invalid CSS property name.
            "minWidth": "0px",
            "width": "25%",
            "textAlign": "center",
            "border": "grey",
        },
        style_data_conditional=[
            {"if": {"row_index": "even"}, "backgroundColor": "#EFEFEF"}
        ],
        columns=[{"name": i, "id": i} for i in df.columns],
        # BUGFIX: the "rows" alias was removed in pandas 1.0; "records" is the
        # list-of-row-dicts orient that DataTable expects.
        data=query_df.to_dict("records"),
    )], query_js
if __name__ == '__main__':
    # Dash development server; disable debug for deployment.
    app.run_server(debug=True)
# Network protocol constants
# Keys and values used in the wire messages exchanged with the robot/device.
TYPE = "type"            # key: what kind of device a message addresses
TYPE_JAG = "jag"         # presumably a Jaguar speed controller -- confirm
TYPE_VIC = "vic"         # presumably a Victor speed controller -- confirm
TYPE_SOLENOID = "sol"    # solenoid output
TYPE_DIGITALOUT = "do"   # digital output channel
ARGS = "args"            # key: payload arguments of a command
NUM = "num"              # key: channel/port number
TYPE_SENSOR = "s"        # message concerns a sensor
SENSOR_TYPE = "st"       # key: which sensor kind follows
SENSOR_DIGITAL_IN = "d"  # digital input sensor
SENSOR_ANALOG_IN = "a"   # analog input sensor
import os

def rename_files(save_path="C:/Users/HODOR/Desktop/rename_files_new"):
    """Strip all ASCII digits from every file name inside *save_path*.

    Args:
        save_path: directory whose entries are renamed in place; defaults to
            the original hard-coded folder for backward compatibility.
    """
    # BUGFIX: str.translate(None, "0123456789") is Python 2 only; on Python 3
    # a delete-table built with str.maketrans is required.
    digit_table = str.maketrans("", "", "1234567890")
    file_list = os.listdir(save_path)
    print(file_list)
    for file_name in file_list:
        print("Renaming file: " + file_name + " in Directory: " + save_path)
        # Join paths explicitly instead of os.chdir(), which mutated global state.
        os.rename(os.path.join(save_path, file_name),
                  os.path.join(save_path, file_name.translate(digit_table)))

if __name__ == "__main__":
    # Guarded so importing this module no longer renames files as a side effect.
    rename_files()
|
import sys
from functools import lru_cache

@lru_cache(maxsize=None)
def minCost(i, j, n, m):
    """Minimum cost path from cell (i, j) to (n-1, m-1) in the global `cost`
    grid, moving right, down, or diagonally down-right.

    Memoized with lru_cache: the naive recursion revisited each cell an
    exponential number of times. (Cache assumes `cost` is not mutated between
    calls, which holds for this script.)
    """
    if i == n - 1 and j == m - 1:
        return cost[i][j]
    if i >= n or j >= m:
        return sys.maxsize  # sentinel: off-grid cells are unreachable
    return cost[i][j] + min(minCost(i, j + 1, n, m),
                            minCost(i + 1, j, n, m),
                            minCost(i + 1, j + 1, n, m))

cost = [[1, 5, 11], [8, 13, 12], [2, 3, 7], [15, 16, 18]]
ans = minCost(0, 0, 4, 3)
print(ans)
|
from django.conf.urls import url
from . import views

# Route the app root ("") to the htmdata view.
urlpatterns = [
    url(r'^$', views.htmdata, name='htmdata'),
]
#!/usr/bin/env python
# encoding: utf-8
import redis
import json
import tushare as ts
import pandas as pd
class DataHolder ():
    """Holds a list of stock codes; fetches the full basics table when none given."""

    def __init__(self, stocks=None):
        # BUGFIX: the default was a mutable list literal, shared across all
        # instances created without an argument; use the None-sentinel idiom.
        self.stocks = stocks if stocks is not None else []
        if len(self.stocks) == 0:
            # NOTE(review): the fetched table is converted to JSON but the
            # result is discarded -- presumably it was meant to be stored or
            # pushed to redis; confirm intent.
            df = ts.get_stock_basics()
            df.to_json()

    def get_now_trans(self):
        """Placeholder: iterates the held stock codes without doing anything yet."""
        for stock in self.stocks:
            pass
if __name__ == "__main__":
d = DataHolder()
print d.get_now_trans()
|
def file_to_list(filename='ex_4.text'):
    """Read *filename* and return (rows, filename).

    Each row is the whitespace-split tokens of one line, trailing newline
    stripped. The filename is passed through so callers can derive the
    output name from it.
    """
    with open(filename, 'r', encoding='utf-8') as fh:
        rows = [line.rstrip().split() for line in fh]
    return rows, filename
# вот с этой функцией косяк. Если передавать напрямую возвращаемый первой функцией кортеж
# то она принимает его первым параметром и ищет второй. Как сделать иначе не придумал, поэтому сделал костыль
def create_mod_file(args):
    """Rewrite the first word of the first four rows and save as '<name>_new.<ext>'.

    Args:
        args: the (rows, filename) tuple produced by file_to_list(). Kept as a
            single parameter so existing callers that pass the tuple directly
            keep working; tuple unpacking replaces the old index fiddling.
    """
    list_file, filename = args
    # Insert '_new' before the file extension.
    new_name = filename.split('.')
    new_name[0] += '_new'
    new_filename = '.'.join(new_name)
    # Replace the first word of the first four lines and write the result.
    things_to_change = ['первый', 'второй', 'третий', 'четвертый']
    with open(new_filename, 'w', encoding='utf-8') as new_file:
        for i in range(4):
            list_file[i][0] = things_to_change[i]
            new_file.write(' '.join(list_file[i]) + '\n')
def do():
    """Run the read-transform-write pipeline with user-friendly error reporting."""
    try:
        create_mod_file(file_to_list())
    except FileNotFoundError:
        print('Запрашиваемый файл не найден')
    except FileExistsError:
        print('Не удалось записать модифицированый файл на диск')
    except (ValueError, TypeError, IndexError):
        print('Исходный файл содержит некорректные данные')
    except Exception:
        # BUGFIX: a bare "except:" also swallowed SystemExit/KeyboardInterrupt;
        # Exception keeps those deliverable while still catching everything else.
        print('Сломалось что-то ещё или недостаточно прав для работы в каталоге')

if __name__ == '__main__':
    do()
|
from threading import Timer
import telegramfunctions as t
import time
from datetime import datetime
import answerfinder as a
import sensibledata as s
import os
import traceback
active = True  # global on/off switch flipped by the sleep/resume commands
# Pin the process timezone so strftime timestamps are Rome local time.
os.environ['TZ'] = 'Europe/Rome'
time.tzset()
def toggleBot():
    """Invert the module-wide ``active`` flag (used as a Timer callback)."""
    global active
    active = not active
def manageBot(input, chatid):
    """Handle the sleep/resume commands.

    If the bot is asleep, any call wakes it and announces the fact. Otherwise
    the bot goes to sleep for the number of minutes given in the message and a
    Timer re-enables it via toggleBot().
    """
    a = 0  # minutes to sleep; the LAST parseable number in the message wins
    global active
    if not active:
        active = True
        t.send_message("BOT IS BACK BITCHES", chatid)
        return
    for word in input.split():
        try:
            a = (float(word))
        except ValueError:
            pass
    # NOTE(review): if the message contains no number, a stays 0 and the Timer
    # fires immediately -- the bot barely sleeps at all. Confirm if intended.
    active = False
    timersleep = Timer(a*60, toggleBot)
    t.send_message("SLEEPING FOR " + str(a) + "m", chatid)
    timersleep.start()
def log(message):
    """Append *message* to chatlog.txt and echo it to stdout."""
    # BUGFIX: the handle was opened/closed manually and leaked if write()
    # raised; "with" guarantees the file is closed on every path.
    with open("chatlog.txt", "a") as logfile:
        logfile.write(message + "\n")
    print(message)
def getName(id):
    """Map a numeric chat/user id to a known display name ('' when unknown)."""
    return s.knownIds.get(str(id), "")
def sleepFor(timeToSleep):
    """Block the current thread for the given number of minutes."""
    time.sleep(60 * timeToSleep)
last_update_id = None  # offset for Telegram long-polling; None fetches everything
t.send_message("BOT BOOTED", s.MYID)
# Poll-forever main loop: fetch updates, react to commands, answer messages.
while True:
    try:
        now = datetime.now()
        date_time = now.strftime("%d/%m/%Y %H:%M:%S")
        updates = t.get_updates(last_update_id)
        if "result" in updates and len(updates["result"]) > 0:
            # Advance the offset so already-seen updates are not re-fetched.
            last_update_id = t.get_last_update_id(updates) + 1
            text, id, person_id = t.get_last_chat_id_and_text(updates)
            if len(text) > 0:
                if(a.found(text,"BOT SLEEP")):
                    manageBot(text,id)
                if(a.found(text,"BOT RESUME NOW")):
                    manageBot(text,id)
                if active:
                    answer = a.find_answer(text, id)
                    # t.send_message(answer, id)
                    # log("\n"+date_time + "\n" + str(person_id) + "(" + getName(person_id) + "): \"" + text +"\" ")
                    if len(answer) > 0:
                        log("BOT REPLIED: \"" + answer + "\"")
    except:
        # NOTE(review): bare except keeps the loop alive on any error, but it
        # also traps KeyboardInterrupt -- Ctrl-C only prints a traceback.
        traceback.print_exc()
    time.sleep(0.5)
|
#!/usr/bin/python
# vim: set expandtab ts=4
import Tensor
from sympy import pi,sin,cos,trigsimp,symbols,Abs,simplify
import numpy as np
class Spherical(Tensor.Coords):
    """Spherical coordinate chart (physics convention) built on Tensor.Coords.

    Component order is r, phi, theta throughout (see the convention block in
    __init__). NOTE(review): several methods call names that are not imported
    in this file (diff, Matrix, Zlm, Ylm) -- presumably provided by ``Tensor``
    or expected from a star import; confirm before relying on them.
    """
    def __init__(self):
        x,y,z=symbols("x,y,z")
        X=x,y,z
        # spherical part
        # Unfortunately there is a lot of confusion about the symbols used, most notably by
        # the opposite role which phi and theta play in mathematics and physics.
        # It is therefore necessary to derive all expressions from a single definition.
        # Here we stick to the convention in Chandrasekhars book, which uses the system most common
        # in physics.
        # We define:
        # phi to be the azimuthal angle in the xy-plane from the x-axis with 0<=phi<2pi ,
        # theta to be the polar (zenithal) angle from the positive z-axis with 0<=theta<=pi, and
        # r to be distance (radius) from a point to the origin.
        #
        # This is >> not << the convention commonly used in mathematics,where theta and phi are reversed.
        r=symbols("r",positive=True,real=True)
        phi,theta=symbols("phi,theta",real=True)
        # Order:
        # We always use the conventions
        # A_x=A[0] A_y=A[1] A_z=A[2]
        # A_r=A[0] A_phi=A[1] A_theta=A[2]
        # regardless if A_r is a cellar, roof or physical component.
        # Transformation:
        # Let X and U be ordered tupels
        # |x| | r|
        # X=|y| U=|phi|
        # |z| |theta|
        # then X(U) is given by:
        x = r*cos(phi)*sin(theta)
        y = r*sin(phi)*sin(theta)
        z = r*cos(theta)
        XofU=[x,y,z]
        U=[r,phi,theta]
        XofU=[x,y,z]  # NOTE(review): duplicate of the assignment two lines up; harmless
        sc=super(Spherical,self)
        inst=sc.__init__(X,U,XofU)
    def scalarSimp(self,exp):
        """Simplify a scalar sympy expression using spherical trig identities."""
        r,phi,theta=self.U
        res=trigsimp(exp)
        # Collapse a specific combination trigsimp misses down to 1.
        res=res.subs(sin(phi)**2*sin(theta)**2-cos(phi)**2*cos(theta)**2+cos(phi)**2+cos(theta)**2,1)
        res=simplify(res.subs((sin(phi))**2,1-(cos(phi))**2))
        return(res)
    def __repr__(self):
        return("Spherical()")
    def tor_gen(self,fang,T):
        """Toroidal vector field from angular function *fang* and radial profile *T*."""
        r,phi,theta=self.U
        Tr=0 #r component
        Tphi=-1/r*T*diff(fang,theta)#phi
        Ttheta=1/(r*sin(theta))*T*diff(fang,phi) #theta component
        A=Matrix(3,1,[Tr,Tphi,Ttheta])
        return(A)
    def pol_gen(self,l,fang,S):
        """Poloidal vector field of degree *l* from angular *fang* and radial *S*."""
        r,phi,theta=self.U
        Sr=l*(l+1)/r**2*S*fang
        Sphi=1/(r*sin(theta))*diff(S,r)*diff(fang,phi)
        Stheta=1/r*diff(S,r)*diff(fang,theta)
        A=Matrix(3,1,[Sr,Sphi,Stheta])
        return(A)
    def tor_real(self,l,m,T):
        """Toroidal field for the real spherical harmonic Zlm(l, m)."""
        r,phi,theta=self.U
        fang=Zlm(l,m, theta, phi)
        A=self.tor_gen(fang,T)
        return(A)
    def pol_real(self,l,m,S):
        """Poloidal field for the real spherical harmonic Zlm(l, m)."""
        r,phi,theta=self.U
        fang=Zlm(l,m, theta, phi)
        A=self.pol_gen(l,fang,S)
        return(A)
    def tor_comp(self,l,m,T):
        """Toroidal field for the complex spherical harmonic Ylm(l, m)."""
        r,phi,theta=self.U
        fang=Ylm(l,m, theta, phi)
        A=self.tor_gen(fang,T)
        return(A)
    def pol_comp(self,l,m,S):
        """Poloidal field for the complex spherical harmonic Ylm(l, m)."""
        r,phi,theta=self.U
        fang=Ylm(l,m, theta, phi)
        A=self.pol_gen(l,fang,S)
        return(A)
    def testnumsphere(self,expr,prec):
        """Sample *expr* on an angular grid; raise NumTestError when |value| >= prec."""
        r,phi,theta=self.U
        n=8
        increment=pi/n
        for i in range(1,n):
            for j in range(1,n):
                t=float(i*increment)
                p=float(j*increment)
                test=(expr.subs(theta,t)).subs(phi,p)
                if not(Abs(test)<prec):
                    raise NumTestError(test)
    def testnumsphere_r(self,expr,r_vals,prec):
        """Run testnumsphere at each radius in r_vals.

        NOTE(review): under Python 3 ``map`` is lazy, so the final map() below
        is never consumed and no check actually runs -- wrap in list() to force.
        """
        r,phi,theta=self.U
        def f(x):
            return(expr.subs(r,x))
        expressions= map(f,r_vals)
        n=len(r_vals)
        precs=np.ones(n)*prec
        map(self.testnumsphere,expressions,precs)
    def testnumshell(self,expr,r_min,r_max,prec):
        """Run testnumsphere at 10 radii spanning [r_min, r_max].

        NOTE(review): same lazy-map caveat as testnumsphere_r under Python 3.
        """
        r,phi,theta = self.U
        def f(x):
            return(expr.subs(r,x))
        n=10
        expressions=map(f,np.linspace(r_min,r_max,n))
        precs=np.ones(n)*prec
        map(self.testnumsphere,expressions,precs)
    def testnumrad(self,expr1,r_min,r_max,prec):
        """Sample a radial expression; raise NumTestError when max |value| >= prec."""
        r,phi,theta = self.U
        def g(x):
            return(expr1.subs(r,x))
        args=np.linspace(r_min,r_max,10)
        vals=map(g,args)
        maxd=max(map(abs,vals))
        if maxd >=prec:
            raise NumTestError(maxd)
    # def laplaceBeltrami(self,fundef):
    # r,theta, phi = symbols("r theta phi")
    # f=fundef(theta,phi)
    # theta_part=1/(sin(theta))*diff(sin(theta)*diff(f,theta),theta)
    # phi_part=1/((sin(theta))**2)*diff(diff(f,phi),phi)
    # lb=Lambda((theta,phi),theta_part+phi_part)
    # return(lb)
    # def div(self,A):
    # # this function computes the divergence of a vector given in physical
    # # components.
    # r,theta, phi = symbols("r theta phi",real=True)
    # Ar=A[0]
    # Aphi=A[1]
    # Atheta=A[2]
    # div=1/r**2*diff(r**2*Ar,r) +1/(r*sin(theta))*diff(sin(theta)*Atheta,theta) +1/(r*sin(theta))*diff(Aphi,phi)
    # return(div)
    # def vectorLaplace(self,A):
    # r,theta, phi = symbols("r theta phi",real=True)
    # Ar=A[0]
    # Aphi=A[1]
    # Atheta=A[2]
    # res=self.grad(self.div(A))-self.rot(self.rot(A))
    # return(res)
    # def rot(self,A):
    # r,theta, phi = symbols("r theta phi",real=True)
    # Ar=A[0]
    # Aphi=A[1]
    # Atheta=A[2]
    # rr=1/(r*sin(theta))*(diff(Aphi*sin(theta),theta)-diff(Atheta,phi))
    # rphi=1/r*(diff((r*Atheta),r)-diff(Ar,theta))
    # rtheta=1/r*(1/sin(theta)*diff(Ar,phi)-diff(r*Aphi,r))
    # def res(r_val,phi_val,theta_val):
    # subsdict={r:r_val,phi:phi_val,theta:theta_val}
    # mat=Matrix(3,1,[rr.subs(subsdict),rphi.subs(subsdict),rtheta.subs(subsdict)])
    # return(mat)
    # return(res)
class NumTestError(Exception):
    """Raised when a numerical check exceeds its tolerance."""

    def __init__(self, d):
        self.d = d  # the offending difference / sampled value

    def __str__(self):
        return "The difference was: " + str(self.d)
def testnumrad_eq(expr1,expr2,sym,r_min,r_max,prec):
#print("expr1",expr1)
#print("expr2",expr2)
def f1(x):
return((expr1.subs(sym,x)).evalf())
def f2(x):
return((expr2.subs(sym,x)).evalf())
args=np.linspace(r_min,r_max,50)
vals1=list2numpy(map(f1,args))
vals2=list2numpy(map(f2,args))
test=(vals1-vals2)/(vals1+vals2)
maxd=max(map(abs,test))
if maxd >=prec:
raise NumTestError(maxd)
|
from pyfsm.machine import FSM, State, as_state
from pyfsm.exceprions import UnreleasedTransition
|
import os
from conans import ConanFile, tools
required_conan_version = ">=1.43.0"
class FastDoubleParserConan(ConanFile):
    """Conan recipe packaging the header-only fast_double_parser library."""
    name = "fast_double_parser"
    description = "Fast function to parse strings into double (binary64) floating-point values, enforces the RFC 7159 (JSON standard) grammar: 4x faster than strtod"
    topics = ("numerical", "header-only")
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/lemire/fast_double_parser"
    license = ("Apache-2.0", "BSL-1.0")
    settings = "os", "compiler", "build_type", "arch"
    no_copy_source = True
    @property
    def _source_subfolder(self):
        """Relative folder the upstream sources are unpacked into."""
        return "source_subfolder"
    def validate(self):
        """Require at least C++11 when the consumer pins a cppstd."""
        if self.settings.compiler.get_safe("cppstd"):
            tools.check_min_cppstd(self, 11)
    def package_id(self):
        # Header-only: one package id regardless of settings.
        self.info.header_only()
    def source(self):
        """Fetch and unpack the pinned upstream release."""
        tools.get(**self.conan_data["sources"][self.version],
                  destination=self._source_subfolder, strip_root=True)
    def package(self):
        """Copy headers and license files into the package layout."""
        include_folder = os.path.join(self._source_subfolder, "include")
        self.copy("*.h", dst="include", src=include_folder)
        self.copy("LICENSE*", dst="licenses", src=self._source_subfolder)
|
import os
import sys
import re
import logging
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import japanize_matplotlib
import category_encoders as ce
from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
sys.path.append(".")
from utils import update_tracking, log_evaluation, preprocess_df
####################
## Changes
####################
# MODEL_ID = "TEINEI_14"
MODEL_ID = "TEINEI_15"
# cartegorical feature 指定
# feature_frac 0.7 -> 0.9, スパース特徴に対処(?)
# outlier handlingを修正(20926: rentではなくareaを修正)
# area_per_room = area / num_room
# change nan handling(as is -> -999)
# add 緯度経度 feature
# データの中にめちゃくちゃ重複が多いことに気づいた
# 区と丁目と築年数とbuilding_floorとroom_floorとareaが一緒なら同じ部屋と判断し一方削除
# 区と丁目と築年数とbuilding_floorが一緒なら同じ建物と判断し、groupkfoldで必ず同じfoldに入るようにする
# と思ったけど丁目の情報に欠損が多く、その処理をどうするか。とりあえず丁目を除いた。これでも意外と良さそうだがどうだろう?
# post processing, 同じ部屋のやつの平均取る
logger = logging.getLogger("main")
logger.setLevel(logging.DEBUG)
sc = logging.StreamHandler()
logger.addHandler(sc)
fh = logging.FileHandler(f"./logs/model_logs/{MODEL_ID}.log")
logger.addHandler(fh)
logger.debug(f"./logs/model_logs/{MODEL_ID}.log")
####################
## Parameters
####################
N_ROUNDS = 30000
LR = 0.01
BOOSTING = "gbdt"
BAG_FREQ = 1
BAG_FRAC = 0.7
MIN_DATA_IN_LEAF = 50
SEED = 42
METRIC = "rmse"
L1 = 1e-2
L2 = 1e-2
MAX_DEPTH = 5
FEAT_FRAC = 0.9
update_tracking(MODEL_ID, "n_rounds", N_ROUNDS)
update_tracking(MODEL_ID, "lr", LR)
update_tracking(MODEL_ID, "boosting", BOOSTING)
update_tracking(MODEL_ID, "bag_freq", BAG_FREQ)
update_tracking(MODEL_ID, "bag_frac", BAG_FRAC)
update_tracking(MODEL_ID, "min_data_in_leaf", MIN_DATA_IN_LEAF)
update_tracking(MODEL_ID, "seed", SEED)
update_tracking(MODEL_ID, "metric", METRIC)
update_tracking(MODEL_ID, "lambda_l1", L1)
update_tracking(MODEL_ID, "lambda_l2", L2)
update_tracking(MODEL_ID, "max_depth", MAX_DEPTH)
update_tracking(MODEL_ID, "feature_fraction", FEAT_FRAC)
params = {"learning_rate": LR,
"boosting": BOOSTING,
"bagging_freq": BAG_FREQ,
"bagging_fraction": BAG_FRAC,
"min_data_in_leaf": MIN_DATA_IN_LEAF,
"bagging_seed": SEED,
"metric": METRIC,
"random_state": SEED,
"lambda_l1": L1,
"lambda_l2": L2,
"max_depth": MAX_DEPTH,
"feature_fraction": FEAT_FRAC}
####################
## Load data
####################
# 変数名の英訳
train_cols_eng = ["id", "rent", "location", "access", "layout", "age", "direction", "area", "floor",
"bath_toilet", "kitchen", "broad_com", "facility", "parking", "environment", "structure",
"contract_period"]
test_cols_eng = ["id", "location", "access", "layout", "age", "direction", "area", "floor",
"bath_toilet", "kitchen", "broad_com", "facility", "parking", "environment", "structure",
"contract_period"]
train = pd.read_csv("./data/train.csv", names=train_cols_eng, header=0)
test = pd.read_csv("./data/test.csv", names=test_cols_eng, header=0)
use_cols = []
####################
## Preprocess data
####################
train_processed = preprocess_df(train)
test_processed = preprocess_df(test)
# handle outliers
train_processed.drop(20427, axis=0, inplace=True) # 築1019年、どう修正するべきか不明なので
train_processed.loc[20231, "age_year"] = 52
train_processed.loc[20231, "age_in_months"] = 52 * 12 + 5 # 築520年、おそらく52年のタイポと仮定
train_processed.loc[5775, "rent"] = 120350 # 条件からしてありえない高値。おそらくゼロの個数違い
train_processed.loc[20926, "area"] = 43.01 # 条件からしてありえなく広い。おそらくゼロの個数違い
# drop and handle duplicates for train!!!
# 緯度経度
geo_csvs = os.listdir("./data/geo/")
geo_csvs = [csv for csv in geo_csvs if "csv" in csv]
loc_dic = {}
for csv in geo_csvs:
df = pd.read_csv("./data/geo/"+csv, encoding="shift-jis")
df["loc"] = df["緯度"].astype(str) + "," + df["経度"].astype(str)
dic = dict(zip(df["大字町丁目名"].values, df["loc"].values))
loc_dic[df["市区町村名"].unique()[0]] = dic
train_processed["ku"] = train_processed["location"].apply(lambda x: re.search("(?<=都).*?区", x).group())
test_processed["ku"] = test_processed["location"].apply(lambda x: re.search("(?<=都).*?区", x).group())
train_processed["tyou"] = train_processed["location"].apply(lambda x: re.search("(?<=区).*?丁目", x).group() \
if re.search("(?<=区).*?丁目", x) else np.nan)
test_processed["tyou"] = test_processed["location"].apply(lambda x: re.search("(?<=区).*?丁目", x).group() \
if re.search("(?<=区).*?丁目", x) else np.nan)
# Rows identical in ward, floors, age and area are the same room listed twice; keep one.
train_processed.drop_duplicates(keep="first", subset=["ku", "building_floor",
                                                      "age_in_months", "room_floor", "area"], inplace=True)
# Building-level key (ward + floors + age) so one building never spans CV folds.
# BUGFIX: the original had a stray second "+" after the line continuation,
# which applied unary plus to the string Series instead of concatenating "ku".
train_group = train_processed["ku"] \
    + train_processed["building_floor"].astype(str) + train_processed["age_in_months"].astype(str)
# Room-level key (adds area): used after prediction to average duplicate rooms.
test_group = test_processed["ku"] + test_processed["building_floor"].astype(str) \
    + test_processed["age_in_months"].astype(str) + test_processed["area"].astype(str)
train_processed.reset_index(drop=True, inplace=True)
target = train_processed["rent"]
target_log = np.log1p(target)
train_processed.drop(["id", "rent"], axis=1, inplace=True)
test_processed.drop("id", axis=1, inplace=True)
####################
## get feature
####################
# モデル学習用データフレーム(category encoderの都合で分ける)
train_use = pd.DataFrame()
test_use = pd.DataFrame()
### location ###
ce_ordinal = ce.OrdinalEncoder(cols=["district"], handle_missing="value")
train_use["district"] = train_processed["district"]
test_use["district"] = test_processed["district"]
train_use = ce_ordinal.fit_transform(train_use)
test_use = ce_ordinal.transform(test_use)
# 緯度経度
num_map = {"1":"一", "2":"二", "3":"三", "4":"四", "5":"五", "6":"六", "7":"七", "8":"八", "9":"九"}
def convert_number(tyou):
if pd.isnull(tyou):
return np.nan
for num in num_map.keys():
if num in tyou:
return tyou.replace(num, num_map[num])
train_processed["tyou"] = train_processed["tyou"].apply(convert_number)
test_processed["tyou"] = test_processed["tyou"].apply(convert_number)
train_processed["loc_processed"] = train_processed["ku"] + "," + train_processed["tyou"]
test_processed["loc_processed"] = test_processed["ku"] + "," + test_processed["tyou"]
def get_long_lati(loc_processed):
    """Look up a "lat,long" string for a "ward,chome" key in the module-level
    loc_dic; NaN when the key is missing or the input itself is null."""
    if pd.isnull(loc_processed):
        return np.nan
    ku, chou = loc_processed.split(",")
    if chou in loc_dic[ku]:
        return loc_dic[ku][chou]
    else:
        return np.nan
# 丁目の情報がないのがほとんどnanの原因でいくつかはとってきたcsvにその丁目の情報なし
train_processed["lati_long"] = train_processed["loc_processed"].apply(get_long_lati)
test_processed["lati_long"] = test_processed["loc_processed"].apply(get_long_lati)
train_use["lati"] = train_processed["lati_long"].apply(lambda x: float(x.split(",")[0]) if not pd.isnull(x) else np.nan)
train_use["long"] = train_processed["lati_long"].apply(lambda x: float(x.split(",")[1]) if not pd.isnull(x) else np.nan)
test_use["lati"] = test_processed["lati_long"].apply(lambda x: float(x.split(",")[0]) if not pd.isnull(x) else np.nan)
test_use["long"] = test_processed["lati_long"].apply(lambda x: float(x.split(",")[1]) if not pd.isnull(x) else np.nan)
### access ###
train_use["min_to_nearest_sta"] = train_processed["access_min"].apply(lambda x: min(x) if x else np.nan)
test_use["min_to_nearest_sta"] = test_processed["access_min"].apply(lambda x: min(x) if x else np.nan)
train_use["num_sta"] = train_processed["access_sta"].apply(lambda x: len(x))
test_use["num_sta"] = test_processed["access_sta"].apply(lambda x: len(x))
# 路線
line_cols = [col for col in train_processed.columns.values if "線" in col or "ライン" in col
or "ライナー" in col or "エクスプレス" in col]
line_cols = [col for col in line_cols if train_processed[col].dropna().sum() > 300]
train_use[line_cols] = train_processed[line_cols]
test_use[line_cols] = test_processed[line_cols]
# 駅
sta_cols = [col for col in train_processed.columns.values if "駅" in col]
sta_cols = [col for col in sta_cols if train_processed[col].dropna().sum() > 300]
train_use[sta_cols] = train_processed[sta_cols]
test_use[sta_cols] = test_processed[sta_cols]
### layout ###
ce_ordinal = ce.OrdinalEncoder(cols=["layout"], handle_missing="value")
train_use["layout"] = train_processed["layout"]
test_use["layout"] = test_processed["layout"]
train_use = ce_ordinal.fit_transform(train_use)
test_use = ce_ordinal.transform(test_use)
layout_cols = ["is_K", "is_R", "is_L", "is_D", "is_S", "num_room"]
train_use[layout_cols] = train_processed[layout_cols]
test_use[layout_cols] = test_processed[layout_cols]
### age ###
age_cols = ["age_year", "age_month", "age_in_months"]
train_use[age_cols] = train_processed[age_cols]
test_use[age_cols] = test_processed[age_cols]
### direction ###
ce_ordinal = ce.OrdinalEncoder(cols=["direction"], handle_missing="value")
train_use["direction"] = train_processed["direction"]
test_use["direction"] = test_processed["direction"]
train_use = ce_ordinal.fit_transform(train_use)
test_use = ce_ordinal.transform(test_use)
direction_cols = ["has_N", "has_S", "has_E", "has_W"]
train_use[direction_cols] = train_processed[direction_cols]
test_use[direction_cols] = test_processed[direction_cols]
### area ###
train_use["area"] = train_processed["area"]
test_use["area"] = test_processed["area"]
train_use["area_per_room"] = train_use["area"] / train_use["num_room"]
test_use["area_per_room"] = test_use["area"] / test_use["num_room"]
### floor ###
train_processed["floor_ratio"] = train_processed["room_floor"] / train_processed["building_floor"]
test_processed["floor_ratio"] = test_processed["room_floor"] / test_processed["building_floor"]
floor_cols = ["has_underground", "room_floor", "building_floor", "floor_ratio"]
train_use[floor_cols] = train_processed[floor_cols]
test_use[floor_cols] = test_processed[floor_cols]
### bath_toilet ###
bath_toilet_cols = ["シャワー", "バスなし", "バス・トイレ別", "共同トイレ", "共同バス",
"専用トイレ", "専用バス", "洗面台独立", "浴室乾燥機", "温水洗浄便座", "脱衣所", "追焚機能"]
train_use[bath_toilet_cols] = train_processed[bath_toilet_cols]
test_use[bath_toilet_cols] = test_processed[bath_toilet_cols]
### kitchen ###
kitchen_cols = ["IHコンロ", "L字キッチン", "カウンターキッチン", "ガスコンロ", "コンロ1口", "コンロ2口", "コンロ3口",
"コンロ4口以上", "コンロ設置可(コンロ1口)", "コンロ設置可(コンロ2口)", "コンロ設置可(コンロ3口)",
"コンロ設置可(コンロ4口以上)", "コンロ設置可(口数不明)", "システムキッチン", "冷蔵庫あり", "独立キッチン",
"給湯", "電気コンロ"]
train_use[kitchen_cols] = train_processed[kitchen_cols]
test_use[kitchen_cols] = test_processed[kitchen_cols]
### broad_com ###
broad_com_cols = ["BSアンテナ", "CATV", "CSアンテナ", "インターネット使用料無料",
"インターネット対応", "光ファイバー", "有線放送", "高速インターネット"]
train_use[broad_com_cols] = train_processed[broad_com_cols]
test_use[broad_com_cols] = test_processed[broad_com_cols]
### facility ###
facility_cols = ["24時間換気システム", "2面採光",
"3面採光", "ウォークインクローゼット", "エアコン付", "エレベーター", "オール電化", "ガスその他",
"ガス暖房", "クッションフロア", "シューズボックス", "タイル張り", "トランクルーム", "バリアフリー",
"バルコニー", "フローリング", "プロパンガス", "ペアガラス", "ルーフバルコニー", "ロフト付き", "下水",
"二世帯住宅", "二重サッシ", "井戸", "公営水道", "冷房", "出窓", "地下室", "室内洗濯機置場",
"室外洗濯機置場", "専用庭", "床下収納", "床暖房", "排水その他", "敷地内ごみ置き場", "水道その他",
"汲み取り", "洗濯機置場なし", "浄化槽", "石油暖房", "都市ガス", "防音室"]
train_use[facility_cols] = train_processed[facility_cols]
test_use[facility_cols] = test_processed[facility_cols]
### parking ###
parking_cols = ["bicycle_parking", "car_parking", "bike_parking"]
train_use[parking_cols] = train_processed[parking_cols]
test_use[parking_cols] = test_processed[parking_cols]
### environment ###
env_cols = ["デパート", "公園",
"郵便局", "コインパーキング", "学校", "図書館", "飲食店", "月極駐車場", "銀行", "小学校",
"ドラッグストア", "レンタルビデオ", "病院", "総合病院", "コンビニ", "大学", "幼稚園・保育園",
"スーパー", "クリーニング"]
train_use[env_cols] = train_processed[env_cols]
test_use[env_cols] = test_processed[env_cols]
### structure ###
ce_ordinal = ce.OrdinalEncoder(cols=["structure"], handle_missing="value")
train_use["structure"] = train_processed["structure"]
test_use["structure"] = test_processed["structure"]
train_use = ce_ordinal.fit_transform(train_use)
test_use = ce_ordinal.transform(test_use)
### contract_period ###
period_cols = ["fixed_term", "contract_period_year", "contract_period_month", "contract_period_in_months"]
train_use[period_cols] = train_processed[period_cols]
test_use[period_cols] = test_processed[period_cols]
# nan handling
for col in train_use.columns.values:
train_use[col].fillna(-999, inplace=True)
test_use[col].fillna(-999, inplace=True)
n_train = len(train_use)
n_test = len(test_use)
logger.debug(f"Number of rows in train: {n_train}")
logger.debug(f"Number of rows in test: {n_test}")
logger.debug(f"Using features:{train_use.columns.values}")
categorical_cols = ["district", "layout", "direction", "structure"]
####################
## Train model
####################
# GroupKFold on the building-level key so one building never leaks across folds.
folds = GroupKFold(n_splits=5)
oof = np.zeros(len(train_use))
predictions = np.zeros(len(test_use))
feature_importance_df = pd.DataFrame()
for fold, (train_idx, val_idx) in enumerate(folds.split(train_use, groups=train_group)):
    print(f"Fold {fold+1}")
    train_data = lgb.Dataset(train_use.iloc[train_idx], label=target_log[train_idx], categorical_feature=categorical_cols)
    val_data = lgb.Dataset(train_use.iloc[val_idx], label=target_log[val_idx], categorical_feature=categorical_cols)
    callbacks = [log_evaluation(logger, period=100)]
    clf = lgb.train(params, train_data, N_ROUNDS, valid_sets=[train_data, val_data], verbose_eval=False, early_stopping_rounds=100, callbacks=callbacks)
    oof[val_idx] = clf.predict(train_use.values[val_idx], num_iteration=clf.best_iteration)
    # Collect per-fold gain importances; aggregation happens AFTER the loop.
    fold_importance_df = pd.DataFrame()
    fold_importance_df["feature"] = train_use.columns.values
    fold_importance_df["importance"] = clf.feature_importance(importance_type="gain")
    fold_importance_df["fold"] = fold + 1
    feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
    predictions += clf.predict(test_use, num_iteration=clf.best_iteration) / folds.n_splits
# BUGFIX: the aggregation below used to run *inside* the fold loop, overwriting
# feature_importance_df with the grouped head(50) and corrupting the
# accumulator for every subsequent fold's concat. Aggregate and log it once.
mean_importance = feature_importance_df[["feature", "importance"]].groupby("feature").mean().sort_values(by="importance", ascending=False).head(50)
logger.debug("##### feature importance #####")
logger.debug(mean_importance)
# inverse log transformation (the model was fit on log1p(rent))
oof = np.expm1(oof)
predictions = np.expm1(predictions)
# post processing: average predictions over rows that describe the same room
post_process = pd.DataFrame()
post_process["pred"] = predictions
post_process["group"] = test_group
predictions = post_process.groupby("group")["pred"].transform("mean")
cv_score = np.sqrt(mean_squared_error(oof, target))
logger.debug(f"5fold CV score: {cv_score}")
update_tracking(MODEL_ID, "cv_rmse", cv_score)
####################
## Submit
####################
# Sample submission has no header: column 0 = id, column 1 = predicted rent.
spsbm = pd.read_csv("./data/sample_submit.csv", header=None)
spsbm.iloc[:, 1] = predictions
spsbm.to_csv(f"./submissions/{MODEL_ID}.csv", header=None, index=None)
class CreateVehicleDto:
    """Field schema for creating a vehicle record."""
    # NOTE(review): these are bare annotations (no dataclass/pydantic
    # decoration visible here), so instances get no auto-generated __init__;
    # the annotations only populate __annotations__.
    id: int
    name: str
    registration_number: int
    capacity: int
    status: str
class EditVehicleDto:
    """Field schema for editing an existing vehicle.

    registration_number is absent -- presumably not editable; confirm
    against the service layer.
    """
    id: int
    name: str
    capacity: int
    status: str
class ListVehiclesDto:
    """Field schema for one row of a vehicle list view."""
    id: int
    name: str
    capacity: int
    status: str
class GetVehicleDto:
    """Field schema for fetching a single vehicle's full details."""
    id: int
    name: str
    registration_number: int
    capacity: int
    status: str
|
def test_yield():
    """Toy generator: print a numbered marker before each yield, and one
    final marker after the last value has been consumed."""
    for value in (1, 2, 3):
        print(f"----{value}----")
        yield value
    print("----4----")
def fibona_create(max_num):
    """Generator yielding the first ``max_num`` Fibonacci numbers (from 0).

    Because the body contains ``yield``, calling this returns a generator
    object instead of running the body; the prints trace where execution
    is suspended and resumed.
    """
    print("-----first----")
    current, following = 0, 1
    for _ in range(max_num):
        print("-----second----")
        # Execution pauses here until the consumer asks for the next value;
        # yielding (rather than storing) avoids holding the whole sequence.
        yield current
        print("-----third----")
        current, following = following, current + following
    print("-----forth----")
if __name__ == "__main__":
    # Demonstrate generators: a def whose body contains ``yield`` does not
    # run when called -- it returns a generator object.
    obj = fibona_create(10)
    print("start loop")
    # Equivalent, simpler consumption:
    # for num in obj:
    #     print(num)
    try:
        while True:
            ret = next(obj)
            print(ret)
    except StopIteration:
        # Generator exhausted -- done.  (Was ``except Exception: pass``,
        # which would also have hidden real errors such as NameError.)
        pass
    print("\n\n" + "=="*20)
    y = test_yield()
    for test in y:
        print("test_yield: ", test)
|
"""empty message
Revision ID: 5c1a09488932
Revises: 6dbb8fb1c3ab
Create Date: 2018-10-01 22:10:34.569220
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5c1a09488932'
down_revision = '6dbb8fb1c3ab'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add the ``lg_td`` column.

    The column is nullable, so existing rows need no backfill.
    ``lg_td`` presumably flags whether the longest rush was a touchdown --
    confirm against the application model.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('nfl_boxscore_rushing', sa.Column('lg_td', sa.Boolean(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``lg_td`` column (data is lost)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('nfl_boxscore_rushing', 'lg_td')
    # ### end Alembic commands ###
|
#!/usr/bin/env python3
'''
Determines the tile proportions as a percentage for a given board.
'''
import sys,os,re
def main(args):
    """Report tile frequencies (as percentages) and the longest run of
    identical tiles for each board file named on the command line.

    args -- argv-style list: program name followed by board file paths.
    Each board file has a header (skipped) followed by lines whose
    integers are tile values.  Output is printed per file.
    """
    if len(args) < 2:
        print("Usage: %s board boards..." % args[0])
        return
    for fn in args[1:]:
        with open(fn) as fp:
            # Skip the board-file header before parsing tile values.
            # NOTE(review): the original comment said "first 8 lines" but
            # [7:] drops 7 -- confirm the header size of the file format.
            lines = fp.readlines()[7:]
        num = []
        for line in lines:
            # Every maximal digit run on the line is one tile value.
            num += [int(x) for x in re.split("[^0-9]", line) if x]
        last = -1        # tile value of the run currently being scanned
        cts = 0          # (length - 1) of the current run of equal tiles
        maxCts = 0       # (length - 1) of the longest run seen so far
        maxTile = 0      # tile value of that longest run
        maxCount = 0     # 1-based position just past the longest run
        count = 0
        props = {}       # tile value -> occurrence count
        for n in num:
            count += 1
            if n == last:
                cts += 1
            else:
                # Run ended: compare it against the longest so far.
                if cts > maxCts:
                    maxCts = cts
                    maxTile = last
                    maxCount = count
                cts = 0
                last = n
            if n not in props:
                props[n] = 1
            else:
                props[n] += 1
        # BUG FIX: flush the final run.  The original only compared a run
        # when a *different* tile followed it, so a longest run that ended
        # the board was silently ignored.  ``count + 1`` matches the
        # convention above (position just past the run).
        if cts > maxCts:
            maxCts = cts
            maxTile = last
            maxCount = count + 1
        print("Tile count: %d" % len(num))
        for k in sorted(props):
            print("%d, %.1f" % (k, (100 * props[k]) / len(num)))
        print("Max sequence: %d (Tile %d at %d)" % \
            (maxCts + 1, maxTile, maxCount - maxCts - 1))
        #print(num[maxCount - maxCts - 2 : maxCount + maxCts + 10])
if __name__ == "__main__":
    # Guard the CLI entry point so importing this module has no side effects.
    main(sys.argv)
|
#!/usr/bin/env python
'''
Lists
'''
from random import *

# One dimensional list: fill with 5 random ints, then sort ascending.
list1 = []
for i in range(5):
    var = randint(1, 100)
    list1.append(var)
list1.sort()
print('List-1: ' + str(list1))

# Two dimensional list: 5 rows, one random int appended to each row.
n = 5
list2 = [[] for x in range(n)]
for i in range(5):
    var = randint(1, 100)
    list2[i].append(var)
print('List-2: ' + str(list2))

# Numpy array
import numpy as np
a = np.array([[1,2,3],[4,5,6]])
print('Numpy Array: ')
print(a)
# NOTE: np.append without an axis flattens the array, so after the first
# append `a` is 1-D; each call also copies the whole array.
for i in range(5):
    var = randint(1, 100)
    a = np.append(a, var)
print(a)
|
# Predicting Heavy and Extreme Losses in Real-Time for Portfolio Holders
# (c) 2015 QuantAtRisk.com, by Pawel Lachowicz
#
# heavy2.py
#
# Estimates conditional probabilities of rare (heavy-loss) daily returns
# for CAAS stock given NASDAQ index behaviour, from cached price series.
import pandas_datareader.data as web
import matplotlib.pyplot as plt
import numpy as np
from pyvar import cpr

# ---1. Data Processing
# fetch and download daily adjusted-close price series for CAAS stock
# and NASDAQ index using Yahoo! Finance public data provider
# (disabled: the one-time download below cached the arrays in data2.dat)
'''
caas = web.DataReader("CAAS", data_source='yahoo',
start='2005-05-13', end='2015-05-13')['Adj Close']
nasdaq = web.DataReader("^IXIC", data_source='yahoo',
start='2005-05-13', end='2015-05-13')['Adj Close']
CAAScp = np.array(caas.values)
NASDAQcp = np.array(nasdaq.values)
f = open("data2.dat","wb")
np.save(f,CAAScp)
np.save(f,NASDAQcp)
f.close()
'''
# Load the two cached close-price arrays (saved back-to-back above).
f = open("data2.dat","rb")
CAAScp = np.load(f)
NASDAQcp = np.load(f)
f.close()
# compute the return-series (simple daily returns)
retCAAS = CAAScp[1:]/CAAScp[:-1]-1
retNASDAQ = NASDAQcp[1:]/NASDAQcp[:-1]-1

# plotting (figure #1)
# return-series for CAAS and NASDAQ index
#
plt.figure(num=1, figsize=(10, 6))
plt.subplot(211)
plt.grid(True)
plt.plot(retCAAS, '-r', label="CAAS")
plt.axis("tight")
plt.ylim([-0.25,0.5])
plt.legend(loc="upper right")
plt.ylabel("CAAS daily returns")
plt.subplot(212)
plt.grid(True)
plt.plot(retNASDAQ, '-', label="NASDAQ")
plt.legend(loc="upper right")
plt.axis("tight")
plt.ylim([-0.10,0.15])
plt.ylabel("NASDAQ daily returns")
plt.xlabel("Trading days 13/05/2005-13/05/2015")
#plt.show()

# ---2. Computations of Conditional Probabilities for Rare Events
# isolate return-series displaying negative returns solely;
# set 1 for time stamps corresponding to positive returns
# (1 acts as a "not an event" sentinel throughout this section)
nretCAAS = np.where(retCAAS < 0, retCAAS, 1)
nretNASDAQ = np.where(retNASDAQ < 0, retNASDAQ, 1)
# set threshold for rare events (daily loss of 6.5% or worse)
thr = -0.065
# compute the sets of events:
# A = moderate losses (negative but above the threshold)
A = np.where(nretCAAS < 0, nretCAAS, 1)
A = np.where(A >= thr, A, 1)
# B = heavy losses (at or below the threshold); R = heavy NASDAQ losses
B = np.where(nretCAAS < thr, retCAAS, 1)
R = np.where(nretNASDAQ < thr, retNASDAQ, 1)
nA = float(len(A[A != 1]))
nB = float(len(B[B != 1]))
n = float(len(nretCAAS[nretCAAS != 1])) # n must equal to nA + nB
# (optional)
print(nA, nB, n == (nA + nB)) # check, if True then proceed further
print(len(A), len(B), len(R))
# NOTE(review): bare ``print`` is a Python-2 leftover -- in Python 3 this
# statement is a no-op (it does not print a blank line).
print
# compute the probabilities
pA = nA/n
pB = nB/n
# compute the conditional probabilities
# (R+A < 0 marks days where both events fired, since sentinels are 1)
pRA = np.sum(np.where(R+A < 0, 1, 0))/n
pRB = np.sum(np.where(R+B < 0, 1, 0))/n
# total probability of R via the law of total probability
pR = pRA*pA + pRB*pB
# display results
print("Pr(A)\t = %5.5f%%" % (pA*100.))
print("Pr(B)\t = %5.5f%%" % (pB*100.))
print("Pr(R|A)\t = %5.5f%%" % (pRA*100.))
print("Pr(R|B)\t = %5.5f%%" % (pRB*100.))
print("Pr(R)\t = %5.5f%%" % (pR*100.))
# Bayes' rule for Pr(B|R); guarded against division by zero
if(pR>0):
    pBR = pRB*pB/pR
    print("\nPr(B|R)\t = %5.5f%%" % (pBR*100.))
else:
    print("\nPr(B|R) impossible to be determined. Pr(R)=0.")

# NOTE(review): duplicate import -- cpr is already imported at the top.
from pyvar import cpr
# Recompute Pr(B|R) on an expanding window of trading days.
prob = []
for t in range(2,len(retCAAS)-1):
    ret1 = retCAAS[0:t]
    ret2 = retNASDAQ[0:t]
    pBR, _ = cpr(ret1, ret2, thr=thr)
    prob.append(pBR)

# plotting (figure #2)
# conditional probability for rare events given threshold
#
plt.figure(num=2, figsize=(10, 6))
plt.plot(np.array(prob)*100., 'r', label="CAAS")
plt.ylabel("Pr(B|R) [%]")
plt.axis('tight')
plt.title("L$_{thr}$ = %.2f%%" % (thr*100.))
plt.legend(loc="upper left")
plt.xlabel("Trading days 13/05/2005-13/05/2015")
plt.show()

# extras-----------------------
# Load precomputed Pr(L < thr) estimates (produced elsewhere as data3.dat).
f = open("data3.dat","rb")
alphas = np.load(f)
f.close()
# Expanding-window series for both Pr(B|R) and Pr(R) at the fixed threshold.
prob1 = []
prob2 = []
for t in range(2,len(retCAAS)-1):
    ret1 = retCAAS[0:t]
    ret2 = retNASDAQ[0:t]
    pBR, pR = cpr(ret1, ret2, thr=-0.065)
    prob1.append(pBR)
    prob2.append(pR)

# plotting (figure #3)
# conditional probability for rare events given threshold
#
fig, ax1 = plt.subplots(num=2, figsize=(10, 6))
plt.plot(np.array(prob1)*100., 'r', label="Pr(B|R) at L=-6.5%")
plt.plot(np.array(prob2)*100., 'b', label="Pr(R) at L=-6.5%")
plt.plot(np.array(alphas*100.), 'k', label="Pr(L < -6.5%)")
plt.axis('tight')
plt.ylim([-0.5, 16])
plt.legend(loc="upper left")
plt.xlabel("Trading days 13/05/2005-13/05/2015")
plt.show()
|
#!/usr/bin/env python3
# Read integers interactively, one per line; a blank line ends input,
# then count, total, and mean are reported.
print("Type integers, each followed by Enter; or just Enter to finish")
total=0
count=0
while True:
    line=input("integer: ")
    if line:
        try:
            number=int(line)
        except ValueError as err:
            # Non-integer input: report it and keep prompting.
            print(err)
            continue
        total+=number
        count+=1
    else:
        # Blank line terminates the input loop.
        break
# Only report when at least one integer was entered (avoids division by zero).
if count:
    print("count =",count,"total =",total,"mean =",total/count)
|
###################
#  GLOBAL CONFIG  #
###################
class ConfigGlobal:
    """Global configuration values shared across the project."""

    # Width (in characters) of title bars.
    barraComprimento = 70

    def checaProjeto(self):
        """Return a check string built from the title-bar width.

        BUG FIX: the original defined this method without ``self`` and
        referenced ``barraComprimento`` as a bare name, so every call
        raised TypeError or NameError; it now reads the class attribute
        through ``self``.  (The original no-op ``__init__`` returning
        None was removed -- the default constructor is identical.)
        """
        return 'checa' + str(self.barraComprimento)
# ------------------------------
# End of 'ConfigGlobal'
# This file is kept only for backwards compatibility. Edit the one in ../mapgen
class Lookup:
    """One map-rendering lookup: a rule set plus drawing instructions.

    Supports dict-style attribute access (``lookup['id']``), ``+`` to
    concatenate with other lookups/collections, and ``@`` to combine
    rule-compatible lookups via LookupCollection.
    """

    def __init__(self, id='', table=None, display=None, comment='',
                 instruction=None, rules=None):
        if rules is None:
            # Imported lazily so this module loads even when only callers
            # that always supply rules are in use.
            from filters import MSAnd
            rules = MSAnd()
        self.id = id
        self.table = table
        self.display = display
        self.comment = comment
        # Fresh list per instance -- never share a mutable default.
        self.instruction = [] if instruction is None else instruction
        self.rules = rules

    def add_instruction(self, instruction):
        """Append one drawing instruction to this lookup."""
        self.instruction.append(instruction)

    def __getitem__(self, key):
        """Dict-style access to attributes; unknown keys raise KeyError."""
        try:
            return getattr(self, key)
        except AttributeError as err:
            raise KeyError(err)

    def __add__(self, other):
        """Concatenate by wrapping self in a collection and delegating."""
        return LookupCollection([self]) + other

    def __matmul__(self, other):
        """Combine by wrapping self in a collection and delegating."""
        return LookupCollection([self]) @ other

    def __iter__(self):
        return iter(LookupCollection([self]))
class LookupCollection(list):
    """A list of Lookup objects with combination operators (``+``, ``@``)."""
    __slots__ = ()

    def __init__(self, seq, *, id=''):
        """Build from an iterable of Lookups and/or dicts of Lookup kwargs.

        Dict items are converted to Lookup instances (all sharing the
        given ``id``); every other item must already be a Lookup, else
        TypeError is raised.
        """
        super().__init__(Lookup(id=id, **lookup)
                         if isinstance(lookup, dict)
                         else lookup
                         for lookup in seq)
        if not all(isinstance(item, Lookup) for item in self):
            raise TypeError('LookupCollection can only contain Lookups')

    def add_instruction(self, instruction):
        """Append the instruction to every contained lookup."""
        for lookup in self:
            lookup.add_instruction(instruction)

    def __matmul__(self, other):
        """Cross-combine two collections.

        For every pair whose table and display are compatible (equal, or
        unset on either side), emit a merged Lookup: ids/comments/
        instructions concatenated, table/display taken from whichever
        side has one, and rule sets combined with ``&``.
        """
        return self.__class__(
            Lookup(l.id + r.id,
                   l.table or r.table,
                   l.display or r.display,
                   l.comment + r.comment,
                   l.instruction + r.instruction,
                   l.rules & r.rules)
            for l in self
            for r in other
            if (r.table is None or l.table is None or r.table == l.table)
            and (r.display is None or l.display is None
                 or r.display == l.display)
        )

    def __add__(self, other):
        """Concatenate two collections into a new LookupCollection."""
        return self.__class__(list.__add__(self, other))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.