text
stringlengths
8
6.05M
# -*- coding: utf-8 -*- log_enabled = False # 是否输出日志 reorder_counter = 2 # 限制使用重列的次数,防止重列次数用完后,陷入无限循环。一局游戏中,必须使用2次重列的概率非常非常低
# # import functools # #name = input() # # # # class ChessBoard(): # def __init__(self): # self.board = [] # for i in range(8): # self.board += [["*" for i in range(8)]] # self.board[0] = ["R", "N", "B", "Q", "K", "B", "N", "R"] # self.board[7] = [i.lower() for i in self.board[0]] # self.board[1] = ["P" for _ in range(8)] # self.board[6] = ["p" for _ in range(8)] # # def movePiece(self, move, turn): # if move == "0-0": # #TODO: # x = 1 # elif move == "0-0-0": # #TODO: # x = 1 # # elif move[0].islower(): # if "=" in move: # #TODO: pawn promotion # x = 1 # #TODO: pawn move # x = 1 # # # # def possibleKnightSquare(self, sqr, color): # row, col = sqr # allPos = [] # knight = "N" if color == "w" else "n"; # # allPos.append((row + 1, col + 2)) # allPos.append((row - 1, col + 2)) # allPos.append((row + 1, col - 2)) # allPos.append((row - 1, col - 2)) # # allPos.append((row + 2, col + 1)) # allPos.append((row - 2, col - 1)) # allPos.append((row + 2, col - 1)) # allPos.append((row - 2, col + 1)) # allPos = [i for i in filter(lambda i: 8 > i[0] >= 0 and 8 > i[1] >= 0 and self.board[i][j] == knight, allPos)] # return allPos # # def travel_until_piece(self, inc1, inc2, end1, end2, allPos, piece, row, col): # for i, j in zip(range(row + inc1, inc1, end1), range(col + inc2, inc2, end2)): # if self.board[i][j] == piece: # allPos.append((i, j)) # break # elif self.board[i][j] != "*": # break # # def correctBishop(self, sqr, color): # row, col = sqr; # allPos = [] # bishop = "B" if color == "w" else "b"; # # self.travel_until_piece(-1, -1, -1, -1, allPos, bishop, row, col) # self.travel_until_piece(-1, 1, -1, 8, allPos, bishop, row, col) # self.travel_until_piece(1, 1, 8, 8, allPos, bishop, row, col) # self.travel_until_piece(1, -1, 8, -1, allPos, bishop, row, col) # # return allPos # # def correct_rook(self, sqr, color): # row, col = sqr; # rook = "R" if color == "w" else "r" # allPos = [] # self.travel_until_piece(-1, 0, -1, -1, allPos, rook, row, col) # # # def 
possible_Piece(self, piece, square): # """ return the square(s) of the piece that can reach this one""" # squares = [] # # if piece == "R": # x = 1 # elif piece == "N": # x = 1 # # elif piece == "B": # # # elif piece == "Q": # x = 1 # elif piece == "K": # x = 1 # elif piece == "P": # x = 1 # #obj = ChessBoard() #print(obj.board)
from gi.repository import GObject


class Chains(GObject.Object):
    """A tracked chain together with its activation/success counters."""

    def __init__(self, chain):
        self.chain = chain
        # Counters start at zero; callers update them as the chain is used.
        self.activations = 0
        self.successes = 0

    def equals(self, chain2):
        """Return True when the other wrapper holds an equal chain."""
        return chain2.chain == self.chain
# Enter your code here. Read input from STDIN. Print output to STDOUT
import numpy as np
from sklearn.linear_model import LinearRegression


def read_data():
    """Read the regression problem from STDIN.

    Input format: "F N" on the first line, then N training rows of F features
    followed by the target, then T, then T test rows of F features.

    Returns
    -------
    (X_train, y_train, X_test) as float64 numpy arrays.
    """
    F, N = map(int, input().split(' '))
    # Fixed: the original used list comprehensions purely for their
    # `.append(...)` side effect; build the row lists directly instead.
    train_rows = [input().split(' ') for _ in range(0, N)]
    T = int(input())
    test_rows = [input().split(" ") for _ in range(0, T)]

    train_data = np.array(train_rows, dtype=np.float64)
    test_data = np.array(test_rows, dtype=np.float64)

    X_train = train_data[:, 0:F]
    y_train = train_data[:, -1]
    X_test = test_data
    return X_train, y_train, X_test


X_train, y_train, x_test = read_data()

lin_reg = LinearRegression()
lin_reg.fit(X_train, y_train)
prediction_lin = lin_reg.predict(x_test)
print('\n'.join(list(map(str, prediction_lin))))
'''
Utility that performs the dependency searches.
'''
import re
import subprocess


def find_deps(query_package, current_package=None, package_list=None):
    '''
    Recursively finds all dependencies of a package.

    Parameters
    ----------
    query_package: string
        Name of the query package.
    current_package: string
        Current package to recursively search.
    package_list: list of strings
        List that will be populated with dependencies of the query package.

    Returns
    ----------
    package_list: list of strings
        Complete list of unique dependencies.
    '''
    # BUG FIX: the original default was a mutable `[]`, which Python shares
    # across *all* top-level calls — a second query would see the first
    # query's accumulated results.  Use the None sentinel instead.
    if package_list is None:
        package_list = []
    if current_package is None:
        current_package = query_package

    # Ask pip for the package metadata and pull out the "Requires:" line.
    reqs = None
    pip_text = subprocess.run(['pip', 'show', current_package],
                              stdout=subprocess.PIPE,
                              text=True).stdout.split('\n')
    for line in pip_text:
        if 'Requires' in line:
            reqs = line

    if reqs is not None:
        reqs = re.sub('Requires:| ', '', reqs).split(',')
        if reqs != ['']:
            for dep in reqs:
                if dep not in package_list:
                    package_list = find_deps(query_package, dep, package_list)

    # Record every dependency except the original query package itself.
    if (current_package not in package_list
            and current_package != query_package):
        package_list.append(current_package)
    return package_list


def generate_requirements(dependencies):
    '''
    Generates the dependencies in a format suitable for a
    "requirements.txt" file.

    Parameters
    ----------
    dependencies: list of strings
        list of dependencies

    Returns
    ---------
    results: list of strings
        Dependencies with their version numbers
    '''
    results = []
    dependencies.sort()
    for package in dependencies:
        # Equivalent of `pip freeze | grep -i <package>==` to find the
        # pinned version line for this package.
        pip_freeze = subprocess.Popen(['pip', 'freeze'],
                                      stdout=subprocess.PIPE)
        grep_text = subprocess.run(['grep', '-i', package + '=='],
                                   stdin=pip_freeze.stdout,
                                   stdout=subprocess.PIPE,
                                   text=True).stdout
        pip_freeze.stdout.close()
        pip_freeze.wait()
        req = re.sub('\n', '', grep_text)
        if req != '':
            results.append(req)
    return results
#convertor: https://wordhtml.com/ html_br_content_success = """ <html> <body> <p><span style="font-weight: 400;"> 亲爱的 @X,你好 </span></p> <p><span style="font-weight: 400;"> 我们已成功为您匹配到倾听者 @X。 </span></p> <p> </p> <p><strong> 您的解聆时间为:@X </strong></p> <p> </p> <p><span style="color: #800000;"> <strong> 距离您解聆时间开始前10分钟, 倾听者@X会通过邮箱 @X </strong> 向您发送微信群聊二维码。(请务必在摇铃开始前检查您的邮箱!) </strong> </span></p> <p> </p> <p><span style="text-decoration: underline;"> <span style="font-weight: 400;"> 若需要取消解聆,请务必第一时间通过邮箱联系倾听者,谢谢。 </span> </span></p> <p><span style="text-decoration: underline;"> <span style="font-weight: 400;"> 若摇铃时间开始后25分钟您仍未登陆群聊,此次线上解聆将被取消。 </span> </span></p> <p> </p> <p><span style="font-weight: 400;"> 倾听者们皆为无偿倾听,他们志愿付出自己的时间和一份努力为同龄人提供一个开放,包容的倾诉/倾听空间。相互尊重是良好志愿解聆关系的基础,请谨遵摇铃守则并保证准时。 </span></p> <p><span style="font-weight: 400;"> 若摇铃人出现三次超时登陆或于约定解聆时间无法与其取得联系的情况,将被加入摇铃黑名单,谢谢理解。 </span></p> <p> </p> <p><strong> 以下是解聆服务须知,请您仔细阅读,谢谢: </strong></p> <ol> <li> <em><em><span>摇铃人需尊重倾听者个人隐私,不得在解聆结束后向任何第三方转述、透露、或公开发布倾听者在解聆过程中提及的个人信息、隐私、及亲身经历。</span></em></em></li> <li><em><span style="font-weight: 400;">请摇铃人在解聆过程中请文明用语,尊重倾听者。若两次警告未果,倾听者有权利及时结束解聆。</span> </em></li> <li><em><span style="font-weight: 400;">平台不对倾听者的行为、言论、及解聆过程中的任何相关延伸领域承担责任。 </span> </em></li> <li><em> <span style="font-weight: 400;"> 解聆结束后,摇铃人与倾听者间解聆关系解除,平台不对倾听者和摇铃人私人间或单方面的协议承担任何形式的责任或义务。</span></em></li> </ol> <p> </p> <p> </p> <p><span style="font-weight: 400;"> 祝您度过美好的一天! 
</span></p> <p><span style="font-weight: 400;"> 解聆人公益 </span></p> </body> </html> """ html_br_content_fail = """ <html> <body> <p><span style="font-weight: 400;">亲爱的 @X,你好。</span></p> <p><strong>很遗憾的通知您,我们未能在您提供的时间段内为您匹配到合适的倾听者。</strong></p> <p><span style="font-weight: 400;">若您想继续申请,请重新填写一份新的报名表。我们建议您填写三个以上的意向倾听时间,多谢理解。</span></p> <p><br /><br /></p> <p><span style="font-weight: 400;">祝您度过美好的一天!</span></p> <p><span style="font-weight: 400;">解聆人公益</span></p> </body> </html> """ html_l_content = """ <html> <body> <p><span style="font-weight: 400;">亲爱的 @X, 我们已成功为您匹配到摇铃人 @X</span></p> <p> </p> <p><strong>您的解聆时间为:@X</strong></p> <p><span style="color: #800000;"><strong>请您在摇铃时间开始前10分钟向摇铃人通过邮箱发送您的群聊二维码,谢谢</strong></span></p> <p> </p> <p><span style="font-weight: 400;">若有意外情况发生需要取消摇铃,请自行联系替补倾听者,并向小助手汇报,谢谢。</span></p> <p><span style="font-weight: 400;">若摇铃时间开始后25分钟您的摇铃人仍未登陆群聊,此次线上解聆将被取消。若有此情况发生,请将摇铃人的信息发送给小助手,谢谢。</span></p> <p> </p> <p><strong>以下为摇铃人@X的基本信息:</strong></p> <p> </p> <ul> <li><strong><strong>姓名: @X</strong></strong></li> </ul> <ul> <li><strong>邮箱: @X</strong></li> </ul> <ul> <li><strong>解聆话题: @X</strong></li> </ul> <ul> <li><strong>解聆需求: @X</strong></li> </ul> <ul> <li><strong>精神状态: @X</strong></li> </ul> <ul> <li><strong>其他信息: @X</strong></li> </ul> <p> </p> <p><br /><br /></p> <p><span style="font-weight: 400;">祝您度过美好的一天!</span></p> <p><span style="font-weight: 400;">解聆人公益</span></p> </body> </html> """ html_developer_content = """ <p>线上解铃匹配到新的配对:</p> <ul> <li>摇铃人: <ul> <li>姓名: @X</li> <li>提交表格时间:@X</li> </ul> </li> </ul> <ul> <li>倾听者: <ul> <li>姓名:@X</li> </ul> </li> <li>匹配时间:@X</li> </ul> <p>&nbsp;</p> """ html_developer_content_fail = """ <p>摇铃人未匹配到倾听者:</p> <ul> <li>姓名: @X</li> <li>提交表格时间:@X</li> </ul> """
import csv
import urllib.parse

# Build result.csv mapping every index term in ai_index.csv (term in the
# first column) to its Japanese-Wikipedia article URL.
urls = []
# FIX: the input file handle was opened without ever being closed; use
# context managers for both files so they are released deterministically.
with open('ai_index.csv') as csv_file:
    ai_index = csv.reader(csv_file)
    for ind in ai_index:
        # Percent-encode the term so non-ASCII titles form a valid URL path.
        ind_url = urllib.parse.quote(ind[0])
        urls.append([ind[0], 'https://ja.wikipedia.org/wiki/' + ind_url])

with open('result.csv', 'w') as f:
    writer = csv.writer(f, lineterminator='\n')
    writer.writerows(urls)
# -*- coding: utf-8 -*-
from commands.base import bot
import os
import asyncio


async def main_task(token):
    """Log the bot in with *token*, then keep the gateway connection open."""
    print("Logging in...")
    await bot._bot.login(token)
    print("Logged in, Connecting...")
    await bot._bot.connect()


def main():
    """Entry point: pull TOKEN from the environment and run the bot."""
    loop = asyncio.get_event_loop()
    token = os.environ.get("TOKEN")
    # Treat both an unset and an empty TOKEN as fatal configuration errors.
    if token in [None, ""]:
        raise RuntimeError("No discord token set in environment")
    try:
        loop.run_until_complete(main_task(token))
    finally:
        # Give pending callbacks a moment to finish before stopping the loop.
        loop.run_until_complete(asyncio.sleep(1))
        loop.stop()


if __name__ == "__main__":
    main()
import cv2

# Default output resolution.
DEFAULT_HEIGHT = 720
DEFAULT_WIDTH = 1280

"""
Change image resolution
Defaults to 1280 * 720
"""


class Image():
    """Loads an image file from disk and rescales it to a target size."""

    def __init__(self, image, height=DEFAULT_HEIGHT, width=DEFAULT_WIDTH):
        # `image` is a filesystem path, not pixel data.
        self.image = image
        self.height = height
        self.width = width

    def read(self):
        """Load the file as-is (IMREAD_UNCHANGED keeps any alpha channel)."""
        return cv2.imread(self.image, cv2.IMREAD_UNCHANGED)

    def resize(self):
        """Return the loaded image resized to (width, height).

        INTER_AREA interpolation is well suited to shrinking images.
        """
        target = (self.width, self.height)
        return cv2.resize(self.read(), target, interpolation=cv2.INTER_AREA)
from _typeshed import Incomplete

# Type stubs (bodies are intentionally `...`).  The signatures appear to
# match networkx.algorithms.approximation.maxcut — TODO confirm against the
# installed package version.

# Random cut: each node is assigned to one side with probability p.
def randomized_partitioning(
    G, seed: Incomplete | None = None, p: float = 0.5, weight: Incomplete | None = None
): ...

# Local-search cut improvement: moves single nodes across the cut while the
# cut value improves, starting from initial_cut if given.
def one_exchange(
    G,
    initial_cut: Incomplete | None = None,
    seed: Incomplete | None = None,
    weight: Incomplete | None = None,
): ...
# MQTT Library Import import paho.mqtt.client as mqtt import paho.mqtt.publish as publish import socket mqttc = mqtt.Client() mqttc.connect("octokong", 1883) mqttc.publish("system/heartbeat", socket.gethostname(), qos=1, retain=True) mqttc.loop(2)
import cutter import subprocess import re import os import tensorflow as tf from tensorflow import keras from tensorflow.keras.layers.experimental.preprocessing import TextVectorization from tensorflow.keras.layers import Activation, Dense, Embedding, GlobalAveragePooling1D from PySide2.QtCore import QObject, SIGNAL, QProcess, QThread, Signal, Slot from PySide2.QtWidgets import QAction, QLabel, QPlainTextEdit, QWidget, QVBoxLayout, QProgressBar ##from PyQt5.QtCore import QThread, QObject, pyqtSignal, pyqtSlot import time #sys.path.append('./') import disassembly_lib import pickle_lib class InferenceClass(QThread): resultReady = Signal(str) summaryReady = Signal(str) updateProgressBar = Signal(int) def __init__(self, parent): super().__init__(parent) def set_new_radare2_e(self): ##store values we modify self.asm_syntax = cutter.cmd("e asm.syntax") self.asm_arch = cutter.cmd("e asm.arch") self.asm_xrefs = cutter.cmd("e asm.xrefs") self.asm_bytes = cutter.cmd("e asm.bytes") self.asm_demangle = cutter.cmd("e asm.demangle") self.asm_var_sub = cutter.cmd("e asm.var.sub") self.asm_var = cutter.cmd("e asm.var") self.asm_sub_rel = cutter.cmd("e asm.sub.rel") self.asm_calls = cutter.cmd("e asm.calls") self.asm_comments = cutter.cmd("e asm.comments") self.asm_reloff = cutter.cmd("e asm.reloff") self.scr_color = cutter.cmd("e scr.color") self.asm_noisy = cutter.cmd("e asm.noisy") self.asm_functions = cutter.cmd("e asm.functions") self.asm_sub_section = cutter.cmd("e asm.sub.section") self.asm_filter = cutter.cmd("e asm.filter") ## replace numeric with sym. 
self.asm_lines = cutter.cmd("e asm.lines") self.asm_meta = cutter.cmd("e asm.meta") ### setup stuff to get gdb-style disassembly cutter.cmd("e asm.syntax=att") cutter.cmd("e asm.arch=x86") cutter.cmd("e asm.bytes=false") cutter.cmd("e asm.demangle=false") cutter.cmd("e asm.var.sub=false") cutter.cmd("e asm.var=false") ##vars in head-part cutter.cmd("e asm.sub.rel=false") cutter.cmd("e asm.calls=false") cutter.cmd("e asm.comments=false") cutter.cmd("e asm.reloff=true") cutter.cmd("e scr.color=3") cutter.cmd("e asm.noisy=false") cutter.cmd("e asm.xrefs=false") ##part in head-part cutter.cmd("e asm.functions=false") ##part in head-part cutter.cmd("e asm.sub.section=false") cutter.cmd("e asm.filter=false") ## replace numeric with sym. cutter.cmd("e asm.lines=false") cutter.cmd("e asm.meta=false") #cutter.cmd("e asm.tabs=false") and other tabs def set_stored_radare2_e(self): cutter.cmd("e asm.syntax=" + self.asm_syntax) cutter.cmd("e asm.arch=" + self.asm_arch) cutter.cmd("e asm.bytes=" + self.asm_bytes) cutter.cmd("e asm.demangle=" + self.asm_demangle) cutter.cmd("e asm.var.sub=" + self.asm_var_sub) cutter.cmd("e asm.var=" + self.asm_var) ##vars in head-part cutter.cmd("e asm.sub.rel=" + self.asm_sub_rel) cutter.cmd("e asm.calls=" + self.asm_calls) cutter.cmd("e asm.comments=" + self.asm_comments) cutter.cmd("e asm.reloff=" + self.asm_reloff) cutter.cmd("e scr.color=" + self.scr_color) cutter.cmd("e asm.noisy=" + self.asm_noisy) cutter.cmd("e asm.xrefs=" + self.asm_xrefs) ##part in head-part cutter.cmd("e asm.functions=" + self.asm_functions) ##part in head-part cutter.cmd("e asm.sub.section=" + self.asm_sub_section) cutter.cmd("e asm.filter=" + self.asm_filter) ## replace numeric with sym. 
cutter.cmd("e asm.lines=" + self.asm_lines) cutter.cmd("e asm.meta=" + self.asm_meta) def modify_aflj_output(self, aflj_output): aflj_dict = dict() for elem in aflj_output: sign = elem['signature'] if '(' in sign: idx = sign.index('(') sign = sign[:idx] sign = sign.strip() else: print(f'Error modify') if ' ' in sign: idx = sign[::-1].index(' ') sign = sign[len(sign)-idx:] if sign[0] == '*': sign = sign[1:] if sign[0] == '*': sign = sign[1:] #print(f'sign >{sign}<') int_addr = int(elem['offset']) hex_addr = hex(int_addr) aflj_dict[sign] = hex_addr #print(f'modified aflj_dict >{aflj_dict}<') return aflj_dict def get_disassembly_of(self, address): #disasm_callee = cutter.cmdj("pdrj @ $F") if address == 0x0: return '' disassembly = cutter.cmdj("pdrj @ " + str(address)) disassembly_str = '' offset = '' fcn_addr = '' opcode = '' size = '' oldsize = 0 for dis_dict in disassembly: for key in dis_dict: if key == 'offset': offset = dis_dict['offset'] elif key == 'fcn_addr': fcn_addr = dis_dict['fcn_addr'] elif key == 'size': size = dis_dict['size'] elif key == 'opcode': opcode = dis_dict['opcode'] elif key == 'disasm': disasm = dis_dict['disasm'] ## 0x0000000000001394 <+18>: callq 0x1289 <my_function(int, char)> ## 0x00001394 call fcn.00001289 ##{"offset":5012,"esil":"4745,rip,8,rsp,-=,rsp,=[],rip,=","refptr":false,"fcn_addr":4994, ##"fcn_last":5019,"size":5,"opcode":"call 0x1289","disasm":"call fcn.00001289","bytes":"e8f0feffff", ##"family":"cpu","type":"call","reloc":false,"type_num":3,"type2_num":0,"jump":4745,"fail":5017, ##"refs":[{"addr":4745,"type":"CALL"}]} if offset and fcn_addr and opcode and size: #disassembly_str = disassembly_str + f"{offset:#0{18}x}" + ' <+' + str(oldsize) + '>: ' + opcode + '\n' disassembly_str = disassembly_str + opcode + '\n' oldsize += size offset = '' fcn_addr = '' opcode = '' size = '' return disassembly_str def predict(self, model_path, vocab_len, max_seq_len, disas): tf.keras.backend.clear_session() model = 
tf.keras.models.load_model(model_path) ##summary_str = str(model.to_json()) stringlist = [] model.summary(print_fn=lambda x: stringlist.append(x)) self.model_summary_str = "\n".join(stringlist) vectorize_layer = TextVectorization(standardize=None, max_tokens=vocab_len+2, output_mode='int', output_sequence_length=max_seq_len) export_model = tf.keras.Sequential([vectorize_layer, model, tf.keras.layers.Activation('softmax') ]) example = [disas] ret = export_model.predict(example) #print(f"Prediction: >{ret}<") #print() ##just a newline return ret def get_prediction_summary(self, ret_type_dict, ret): reverse_ret_type_dict = dict() counter = 0 for key in ret_type_dict: reverse_ret_type_dict[counter] = key counter += 1 arg_one_prediction_summary = [] arg_one_prediction_summary.append('\n') for item in ret: result = 0 biggest = 0 biggest_count = 0 counter = 0 for i in item: if i > biggest: biggest = i biggest_count = counter tmp_str = f'Type >{reverse_ret_type_dict[counter] : <{30}}< has a probability of >{i}<\n' #print(tmp_str) arg_one_prediction_summary.append(tmp_str) counter += 1 result += i for ret in ret_type_dict: if ret_type_dict[ret] == biggest_count: #print() #print(f'argument one is of type >{ret}<') arg_one_prediction_summary.append(f'\nBiggest Probability type >{ret}< with prob >{biggest}<\n\n') self.biggest_prob = biggest self.biggest_prob_type = ret arg_one_prediction_summary.append(f'Does last count together to 1 ? 
Result: >{result}<') arg_one_prediction_summary_str = ''.join(arg_one_prediction_summary) return arg_one_prediction_summary_str def get_prediction(self, model, disasm_caller_callee_str, func_sign_prob_git_path): ### predict now model_path = func_sign_prob_git_path + \ "ubuntu-20-04-scripts/trained_models/" + model + "/saved_model/" ###load vocabulary list vocab_file = func_sign_prob_git_path + \ "ubuntu-20-04-scripts/trained_models/" + model + "/" + \ 'vocabulary_list.pickle' vocabulary = pickle_lib.get_pickle_file_content(vocab_file) ###load max-sequence-length max_seq_len_file = func_sign_prob_git_path + \ "ubuntu-20-04-scripts/trained_models/" + model + "/" + \ 'max_seq_length.pickle' max_seq_length = pickle_lib.get_pickle_file_content(max_seq_len_file) ret = self.predict(model_path, len(vocabulary), max_seq_length, disasm_caller_callee_str) ## get strings for ints, with ret_type_dict ret_type_dict_file = func_sign_prob_git_path + \ "ubuntu-20-04-scripts/trained_models/" + model + "/" + \ 'return_type_dict.pickle' ret_type_dict = pickle_lib.get_pickle_file_content(ret_type_dict_file) ### get human-readable output prediction_summary_str = self.get_prediction_summary(ret_type_dict, ret) ## store for later # nr_of_args_model_summary_str = self.model_summary_str # self._disasTextEdit.setPlainText(f"tf model summary:\n{self.model_summary_str}\n \ # {nr_of_args_model_summary_str}") return prediction_summary_str #@Slot() def oldrun(self): print('runInference---test') curr_pos = cutter.cmd('s') self.resultReady.emit() ##@Slot() def run(self): self.updateProgressBar.emit(1) curr_pos = cutter.cmd('s') if curr_pos.strip() == '0x0': print('runInference not from addr 0x0') return self.set_new_radare2_e() ### get name of current function current_func_name = cutter.cmdj("afdj $F").get('name') #print(f'current_func_name >{current_func_name}<') ## find data/code references to this address with $F current_func_header = cutter.cmdj("axtj $F") ## get addr of callee caller_addr = 0 
for item_dicts in current_func_header: print(f'item_dicts >{item_dicts}<') for elem in item_dicts: if elem == 'from': caller_addr = item_dicts[elem] #print(f'address of caller >{item_dicts[elem]}<') ## get disassembly of current/callee function address = cutter.cmd('s').strip() #print(f'address >{address}<') if address == '0x0': self.updateProgressBar.emit(100) self.resultReady.emit('') self.summaryReady.emit('Current address is 0x0, choose other function') #print(f'address kicked >{address}<') return disasm_callee_str = self.get_disassembly_of(address) #print(f'disasm_callee_str >{disasm_callee_str}<') ### get disassembly of caller function disasm_caller_str = self.get_disassembly_of(caller_addr) #print(f'disasm_caller_str >{disasm_caller_str}<') ### split disas for the tf-model disasm_caller_str = disassembly_lib.split_disassembly(disasm_caller_str) disasm_callee_str = disassembly_lib.split_disassembly(disasm_callee_str) ##check if we got caller and callee disassembly if (len(disasm_caller_str) == 0) or (len(disasm_callee_str) == 0): self.updateProgressBar.emit(100) self.resultReady.emit('') self.summaryReady.emit('No caller disassembly found, choose other function') #print(f'Not found callee and caller disassembly.') return ### the path were we cloned git repo to self._userHomePath = os.path.expanduser('~') #print(f'userHomePath >{self._userHomePath}<') func_sign_prob_git_path = self._userHomePath + "/git/func_sign_prob/" self.updateProgressBar.emit(9) callee_addr_split = [char if char != '0' else 'null' for char in address] callee_addr = ' '.join(callee_addr_split) disasm_caller_callee_predict_str = disasm_caller_str + ' caller_callee_separator ' + callee_addr + ' ' + disasm_callee_str ### predict now ret-type ret_type_prediction_summary_str = self.get_prediction('return_type/words_100000', disasm_caller_callee_predict_str, func_sign_prob_git_path) ## store for later, will be overridden ret_type_model_summary_str = self.model_summary_str ret_type_biggest_prob = 
self.biggest_prob ret_type_biggest_prob_type = self.biggest_prob_type ret_type_biggest_prob_percent = 100 * ret_type_biggest_prob self.updateProgressBar.emit(10) ### predict now nr_of_args nr_of_args_prediction_summary_str = self.get_prediction('nr_of_args/words_100000', disasm_caller_callee_predict_str, func_sign_prob_git_path) ## store for later, will be overridden nr_of_args_model_summary_str = self.model_summary_str nr_of_args_biggest_prob = self.biggest_prob nr_of_args_biggest_prob_type = self.biggest_prob_type self.updateProgressBar.emit(20) ###predict now arg_one arg_one_prediction_summary_str = self.get_prediction('arg_one', disasm_caller_callee_predict_str, func_sign_prob_git_path) ## store for later, will be overridden arg_one_model_summary_str = self.model_summary_str arg_one_biggest_prob = self.biggest_prob arg_one_biggest_prob_type = self.biggest_prob_type arg_one_biggest_prob_percent = 100 * arg_one_biggest_prob #nr_of_args_biggest_prob_type = 1 if nr_of_args_biggest_prob_type == 1: self.set_stored_radare2_e() self.updateProgressBar.emit(100) self.summaryReady.emit(f"tf return type model summary:\n \ {ret_type_model_summary_str}\n \ {ret_type_prediction_summary_str}\n \ tf nr_of_args model summary:\n \ {nr_of_args_model_summary_str}\n \ {nr_of_args_prediction_summary_str}\n \ tf arg_one model summary:\n \ {self.model_summary_str}\n \ {arg_one_prediction_summary_str}") self.resultReady.emit(f'{ret_type_biggest_prob_type} \ <span style=\"background-color:red;\">({ret_type_biggest_prob_percent:3.1f}%)</span> \ {current_func_name} ( \ {arg_one_biggest_prob_type} \ <span style=\"background-color:red;\">({arg_one_biggest_prob_percent:3.1f}%)</span> \ )') return else: self.updateProgressBar.emit(30) ###if more one args ###predict now arg_two arg_two_prediction_summary_str = self.get_prediction('arg_two', disasm_caller_callee_predict_str, func_sign_prob_git_path) ## store for later, will be overridden arg_two_model_summary_str = self.model_summary_str 
arg_two_biggest_prob = self.biggest_prob arg_two_biggest_prob_type = self.biggest_prob_type arg_two_biggest_prob_percent = 100 * arg_two_biggest_prob #nr_of_args_biggest_prob_type = 2 if nr_of_args_biggest_prob_type == 2: self.updateProgressBar.emit(100) self.summaryReady.emit(f"tf return type model summary:\n \ {ret_type_model_summary_str}\n \ {ret_type_prediction_summary_str}\n \ tf nr_of_args model summary:\n \ {nr_of_args_model_summary_str}\n \ {nr_of_args_prediction_summary_str}\n \ tf arg_one model summary:\n \ {arg_one_model_summary_str}\n \ {arg_one_prediction_summary_str}\n \ tf arg_two model summary:\n \ {arg_two_model_summary_str}\n \ {arg_two_prediction_summary_str}") self.resultReady.emit(f'{ret_type_biggest_prob_type} \ <span style=\"background-color:red;\">({ret_type_biggest_prob_percent:3.1f}%)</span> \ {current_func_name} ( \ {arg_one_biggest_prob_type} \ <span style=\"background-color:red;\">({arg_one_biggest_prob_percent:3.1f}%)</span> , \ {arg_two_biggest_prob_type} \ <span style=\"background-color:red;\">({arg_two_biggest_prob_percent:3.1f}%)</span> \ )') self.set_stored_radare2_e() return else: self.updateProgressBar.emit(40) ###if more than two args ###predict now arg_three arg_three_prediction_summary_str = self.get_prediction('arg_three', disasm_caller_callee_predict_str, func_sign_prob_git_path) ## store for later, will be overridden arg_three_model_summary_str = self.model_summary_str arg_three_biggest_prob = self.biggest_prob arg_three_biggest_prob_type = self.biggest_prob_type arg_three_biggest_prob_percent = 100 * arg_three_biggest_prob #nr_of_args_biggest_prob_type = 3 if nr_of_args_biggest_prob_type >= 3: #hack, if more args, need more models self.updateProgressBar.emit(100) self.summaryReady.emit(f"tf return type model summary:\n \ {ret_type_model_summary_str}\n \ {ret_type_prediction_summary_str}\n \ tf nr_of_args model summary:\n \ {nr_of_args_model_summary_str}\n \ {nr_of_args_prediction_summary_str}\n \ tf arg_one model 
summary:\n \ {arg_one_model_summary_str}\n \ {arg_one_prediction_summary_str}\n \ tf arg_two model summary:\n \ {arg_two_model_summary_str}\n \ {arg_two_prediction_summary_str}\n \ tf arg_three model summary:\n \ {arg_three_model_summary_str}\n \ {arg_three_prediction_summary_str}") self.resultReady.emit(f'{ret_type_biggest_prob_type} \ <span style=\"background-color:red;\">({ret_type_biggest_prob_percent:3.1f}%)</span> \ {current_func_name} ( \ {arg_one_biggest_prob_type} \ <span style=\"background-color:red;\">({arg_one_biggest_prob_percent:3.1f}%)</span> , \ {arg_two_biggest_prob_type} \ <span style=\"background-color:red;\">({arg_two_biggest_prob_percent:3.1f}%)</span> , \ {arg_three_biggest_prob_type} \ <span style=\"background-color:red;\">({arg_three_biggest_prob_percent:3.1f}%)</span> \ )') self.set_stored_radare2_e() return else: self.updateProgressBar.emit(50) #for debug print('over') self.set_stored_radare2_e() # /* ... here is the expensive or blocking operation ... */ self.resultReady.emit('') class FuncSignProbDockWidget(cutter.CutterDockWidget): # inferenceClass = InferenceClass() # inferenceThread = QThread() startInferenceSignal = Signal() # counter = 0 def __init__(self, parent, action): super(FuncSignProbDockWidget, self).__init__(parent, action) self.setObjectName("func_sign_probDockWidget") self.setWindowTitle("func_sign_prob DockWidget") self._multiWidget = QWidget() self._layout = QVBoxLayout() self._funcSignLabel = QLabel(self) self._progressBar = QProgressBar(self) self._disasTextEdit = QPlainTextEdit(self) self._layout.addWidget(self._funcSignLabel) self._layout.addWidget(self._progressBar) self._layout.addWidget(self._disasTextEdit) self._multiWidget.setLayout(self._layout); self.setWidget(self._multiWidget); self._userHomePath = os.path.expanduser('~') #print(f'user-home-path >{self._userHomePath}<') cutter.core().seekChanged.connect(self.update_contents) self.inferenceClass = InferenceClass(parent) 
self.inferenceClass.resultReady.connect(self.showInferenceResult) self.inferenceClass.summaryReady.connect(self.showInferenceResultSummary) self.inferenceClass.updateProgressBar.connect(self.updateProgressBar) @Slot() def updateProgressBar(self, value): self._progressBar.setValue(int(value)) @Slot() def showInferenceResultSummary(self, result): self._disasTextEdit.setPlainText(result) @Slot() def showInferenceResult(self, result): self._funcSignLabel.setText(result) def update_contents(self): self._funcSignLabel.setText('Running Inference, Please wait ...could take some time') self._disasTextEdit.setPlainText('') self._progressBar.setValue(0) self.inferenceClass.start() class FuncSignProbCutterPlugin(cutter.CutterPlugin): name = "func_sign_prob plugin" description = "func_sign_prob plugin" version = "0.1" author = "flo" def setupPlugin(self): pass def setupInterface(self, main): action = QAction("func_sign_prob Plugin", main) action.setCheckable(True) widget = FuncSignProbDockWidget(main, action) main.addPluginDockWidget(widget, action) def terminate(self): pass # def create_cutter_plugin(): # return FuncSignProbCutterPlugin()
class DetectSquares:
    """Multiset of lattice points supporting axis-aligned square counting."""

    def __init__(self):
        # (x, y) -> number of times that point has been added.
        self.points = defaultdict(int)

    def add(self, point: List[int]) -> None:
        """Record one more occurrence of *point*."""
        self.points[(point[0], point[1])] += 1

    def count(self, point: List[int]) -> int:
        """Count axis-aligned squares having *point* as one corner."""
        qx, qy = point
        total = 0
        for (px, py), multiplicity in self.points.items():
            # Only a stored point strictly diagonal to the query (same
            # horizontal and vertical distance, neither zero) can be the
            # opposite corner of a square.
            if px == qx or py == qy or abs(px - qx) != abs(py - qy):
                continue
            # The two remaining corners; .get(..., 0) makes a missing corner
            # contribute nothing instead of raising.
            total += (self.points.get((px, qy), 0)
                      * self.points.get((qx, py), 0)
                      * multiplicity)
        return total


# Your DetectSquares object will be instantiated and called as such:
# obj = DetectSquares()
# obj.add(point)
# param_2 = obj.count(point)
THUMBNAILS = 'thumbnails'
SCALE_WIDTH = 'w'
SCALE_HEIGHT = 'h'

from django.template import Library
from django.conf import settings
from PIL import Image
import os

register = Library()


def scale(max_x, pair):
    """Scale pair=(x, y) so that x becomes max_x, preserving aspect ratio."""
    x, y = pair
    new_y = (float(max_x) / x) * y
    return (int(max_x), int(new_y))


# Thumbnail filter based on code from http://batiste.dosimple.ch/blog/2007-05-13-1/
@register.filter
def thumbnail(original_image_path, arg):
    """Template filter returning the URL of a cached, resized thumbnail.

    *arg* is "<size><w|h>[, <upload_path>]", e.g. "200w" or "100h, avatars".
    The thumbnail is created lazily next to the original under a
    "thumbnails" directory and regenerated when the original is newer.
    """
    if not original_image_path:
        return ''
    # BUG FIX: the original tested `if arg.find(',')`, which is truthy (-1)
    # when there is NO comma — crashing the unpack below — and falsy (0)
    # when the comma is the first character.  Use a membership test.
    if ',' in arg:
        size, upload_path = [a.strip() for a in arg.split(',')]
    else:
        size = arg
        upload_path = ''
    # A trailing 'h' constrains the height; anything else constrains width.
    if size.lower().endswith(SCALE_HEIGHT):
        mode = SCALE_HEIGHT
    else:
        mode = SCALE_WIDTH

    # defining the size
    size = size[:-1]
    max_size = int(size.strip())

    # defining the filename and the miniature filename
    basename, format = original_image_path.rsplit('.', 1)
    basename, name = basename.rsplit(os.path.sep, 1)
    miniature = name + '_' + str(max_size) + mode + '.' + format
    thumbnail_path = os.path.join(basename, THUMBNAILS)
    if not os.path.exists(thumbnail_path):
        os.mkdir(thumbnail_path)
    miniature_filename = os.path.join(thumbnail_path, miniature)
    # NOTE(review): os.path.join on URL components emits backslashes on
    # Windows — confirm this only ever runs on POSIX hosts.
    miniature_url = os.path.join(settings.MEDIA_URL, upload_path,
                                 THUMBNAILS, miniature)

    # if the image wasn't already resized, resize it
    if not os.path.exists(miniature_filename) \
            or os.path.getmtime(original_image_path) > os.path.getmtime(miniature_filename):
        image = Image.open(original_image_path)
        image_x, image_y = image.size
        if mode == SCALE_HEIGHT:
            image_y, image_x = scale(max_size, (image_y, image_x))
        else:
            image_x, image_y = scale(max_size, (image_x, image_y))
        # Image.ANTIALIAS was an alias of LANCZOS and was removed in
        # Pillow 10; LANCZOS is behaviour-identical.
        image = image.convert("RGB").resize((image_x, image_y), Image.LANCZOS)
        image.save(miniature_filename, image.format)

    return miniature_url
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

#
# import psutil
import sys
import signal
from threading import Thread
from queue import Queue
from time import sleep

# Helper classes and functions
#
from sensor_util import SensorData
from ps_sensors import AllPsSensorClasses

# All running threads, so the SIGINT handler can ask them to stop.
workers = []


def signal_handler(signum, frame):
    """SIGINT handler: request every worker/consumer thread to shut down.

    FIX: the original parameter was named `signal`, shadowing the imported
    `signal` module inside the handler body.
    """
    global workers
    print("")
    print("You pressed Ctrl + C, force all running threads to exit")
    for w in workers:
        w.stop()
    print("All threads are shutdown gracefully...")


class SensorWorker(Thread):
    """Thread that samples one sensor at its preferred period onto a queue."""

    def __init__(self, sensor, output_queue):
        super().__init__()
        self._sensor = sensor
        self._output = output_queue
        self._continue = True

    def stop(self):
        """Ask the polling loop to exit at its next wake-up."""
        self._continue = False

    def perform(self):
        """Sample the sensor once, best-effort.

        Deliberately swallows errors: one failed reading must not kill the
        worker thread; the next period simply tries again.
        """
        try:
            self._output.put(self._sensor.data)
        except Exception:
            pass

    def run(self):
        t = self._sensor.preferred_period
        # Sleep in short gaps (1-3 s) rather than the whole period so that
        # stop() is noticed quickly even for slow sensors.
        x = int(t / 10)
        gap = 1 if x <= 0 else x
        gap = 3 if gap > 3 else gap
        x = t  # force an immediate first sample
        while self._continue:
            if x >= t:
                self.perform()
                x = 0
            else:
                sleep(gap)
                x = x + gap


class ConsoleConsumer(Thread):
    """Thread that drains the data queue and writes serialized records."""

    def __init__(self, std, data_queue, serializer):
        super().__init__()
        self._serializer = serializer
        self._data_queue = data_queue
        self._std = std
        self._continue = True
        # Sentinel pushed by stop() to unblock the final blocking get().
        self._end_obj = object()

    def stop(self):
        self._continue = False
        self._data_queue.put(self._end_obj)

    def run(self):
        while self._continue:
            data_list = self._data_queue.get()
            # BUG FIX: sentinel detection must use identity (`is`), not
            # equality — `==` would invoke arbitrary __eq__ on queued data.
            if data_list is self._end_obj:
                break
            for x in data_list:
                self._std.write(self._serializer(x))


def main():
    # Register SIG-INT and C-c events
    signal.signal(signal.SIGINT, signal_handler)

    def sensor_web_csv_serializer(sensor_data):
        return "DAT: %s\n" % (sensor_data.to_line('\t'))

    def debug_serializer(sensor_data):
        s = sensor_data
        return "%s/%s/%s/%s\t%s%s\n" % (
            s.board_type, s.board_id, s.sensor, s.data_type,
            s.value, "" if s.unit_length is None else s.unit_length
        )

    q = Queue()

    # Producers
    for c in AllPsSensorClasses:
        s = c()
        w = SensorWorker(s, q)
        w.start()
        workers.append(w)

    # Consumer
    w = ConsoleConsumer(sys.stdout, q, sensor_web_csv_serializer)
    # w = ConsoleConsumer(sys.stdout, q, debug_serializer)
    w.start()
    workers.append(w)

    for w in workers:
        w.join()
    sys.exit(0)


# Entry point
#
if __name__ == '__main__':
    main()
#Import library and create instance of REST client. from Adafruit_IO import * from Adafruit_IO import Client # importing AIO key and username aio = Client('Scientist1995', 'd199c5a8f669476c920ea5323a020e8c') # Add the value 98.6 to the feed 'Temperature'. soumil = aio.feeds('soumil') aio.send_data(soumil.key, 98.6) # Receiving Data from server data = aio.receive('soumil') print('Received value: {0}'.format(data.value)) # Get list of feeds. feeds = aio.feeds() # Print out the feed names: for f in feeds: print('Feed: {0}'.format(f.name)) """ # Create Feed object with name 'Foo'. feed = Feed(name='Foo') # Send the Feed to IO to create. # The returned object will contain all the details about the created feed. result = aio.create_feed(feed) # Delete the feed with name 'Test'. aio.delete_feed('Test') """
PINK = '\033[95m' OKBLUE = '\033[94m' OKCYAN = '\033[96m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' # underlined [a-z] u_lalphas = [UNDERLINE + chr(i) + ENDC for i in range(97, 97+26)] # underlined [A-Z] u_ualphas = [UNDERLINE + chr(i) + ENDC for i in range(65, 65+26)] def errmsg(s): return FAIL + s + ENDC def cyan(s): return OKCYAN + s + ENDC def blue(s): return OKBLUE + s + ENDC def pink(s): return PINK + s + ENDC def bold(s): return BOLD + s + ENDC def warning(s): return WARNING + s + ENDC def green(s): return OKGREEN + s + ENDC
#
# Copyright © 2021 Uncharted Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Unit tests for TimeSeriesFormatterPrimitive: verify shape, column names and
# column semantic-type metadata of the formatted time-series dataset.

import unittest
from os import path
import csv
import sys

from d3m import container
from d3m.metadata import base as metadata_base

from distil.primitives.time_series_formatter import TimeSeriesFormatterPrimitive
from distil.primitives import utils

import utils as test_utils


class TimeSeriesFormatterPrimitiveTestCase(unittest.TestCase):
    # Fixture datasets shipped next to this test file.
    _dataset_path = path.abspath(
        path.join(path.dirname(__file__), "timeseries_resource_dataset")
    )
    _dataset_2_path = path.abspath(
        path.join(path.dirname(__file__), "timeseries_resource_dataset_2")
    )
    # Resource id of the main table inside the fixture dataset.
    _resource_id = "learningData"

    def test_basic(self) -> None:
        """Format the fixture dataset with default hyperparams and verify the
        resulting table's shape, column names and semantic-type metadata."""
        dataset = test_utils.load_dataset(self._dataset_path)

        # create the time series dataset
        hyperparams_class = TimeSeriesFormatterPrimitive.metadata.query()[
            "primitive_code"
        ]["class_type_arguments"]["Hyperparams"]
        ts_formatter = TimeSeriesFormatterPrimitive(
            hyperparams=hyperparams_class.defaults()
        )
        timeseries_dataset = ts_formatter.produce(inputs=dataset).value
        timeseries_df = timeseries_dataset[self._resource_id]

        # verify that ID and digest is present - runtime will fail without when it tries to execute the sub-pipeline
        root_metadata = timeseries_dataset.metadata.query(())
        self.assertIn("id", root_metadata)
        self.assertIn("digest", root_metadata)

        # verify that we have the expected shape
        self.assertEqual(timeseries_df.shape[0], 664)
        self.assertEqual(timeseries_df.shape[1], 5)

        # check that learning metadata was copied
        name = timeseries_dataset.metadata.query_column_field(
            0, "name", at=(self._resource_id,)
        )
        self.assertEqual("d3mIndex", name)
        name = timeseries_dataset.metadata.query_column_field(
            1, "name", at=(self._resource_id,)
        )
        self.assertEqual("timeseries_file", name)
        name = timeseries_dataset.metadata.query_column_field(
            2, "name", at=(self._resource_id,)
        )
        self.assertEqual("label", name)
        name = timeseries_dataset.metadata.query_column_field(
            3, "name", at=(self._resource_id,)
        )
        self.assertEqual("time", name)
        name = timeseries_dataset.metadata.query_column_field(
            4, "name", at=(self._resource_id,)
        )
        self.assertEqual("value", name)

        # verify that the d3mIndex is now marked as a multi key
        self.assertIn(
            "https://metadata.datadrivendiscovery.org/types/PrimaryMultiKey",
            timeseries_dataset.metadata.query_column_field(
                0, "semantic_types", at=(self._resource_id,)
            ),
        )
        self.assertIn(
            "http://schema.org/Integer",
            timeseries_dataset.metadata.query_column_field(
                0, "semantic_types", at=(self._resource_id,)
            ),
        )

        # verify that the grouping key was added
        self.assertIn(
            "https://metadata.datadrivendiscovery.org/types/GroupingKey",
            timeseries_dataset.metadata.query_column_field(
                1, "semantic_types", at=(self._resource_id,)
            ),
        )
        self.assertIn(
            "https://metadata.datadrivendiscovery.org/types/Attribute",
            timeseries_dataset.metadata.query_column_field(
                1, "semantic_types", at=(self._resource_id,)
            ),
        )
        self.assertIn(
            "http://schema.org/Text",
            timeseries_dataset.metadata.query_column_field(
                1, "semantic_types", at=(self._resource_id,)
            ),
        )

        # verify that the label column is of type unknown
        self.assertIn(
            "https://metadata.datadrivendiscovery.org/types/UnknownType",
            timeseries_dataset.metadata.query_column_field(
                2, "semantic_types", at=(self._resource_id,)
            ),
        )

        # verify that data columns have correct semantic types
        self.assertIn(
            "https://metadata.datadrivendiscovery.org/types/Time",
            timeseries_dataset.metadata.query_column_field(
                3, "semantic_types", at=(self._resource_id,)
            ),
        )
        self.assertIn(
            "http://schema.org/Integer",
            timeseries_dataset.metadata.query_column_field(
                3, "semantic_types", at=(self._resource_id,)
            ),
        )

        # verify that data columns have correct semantic types
        self.assertIn(
            "http://schema.org/Float",
            timeseries_dataset.metadata.query_column_field(
                4, "semantic_types", at=(self._resource_id,)
            ),
        )
        self.assertIn(
            "https://metadata.datadrivendiscovery.org/types/Attribute",
            timeseries_dataset.metadata.query_column_field(
                4, "semantic_types", at=(self._resource_id,)
            ),
        )

    def test_hyperparams(self) -> None:
        """Same formatting run but with main_resource_id/file_col_index set
        explicitly; only the output shape is checked."""
        dataset = test_utils.load_dataset(self._dataset_path)

        # create the time series dataset
        hyperparams_class = TimeSeriesFormatterPrimitive.metadata.query()[
            "primitive_code"
        ]["class_type_arguments"]["Hyperparams"]
        ts_formatter = TimeSeriesFormatterPrimitive(
            hyperparams=hyperparams_class.defaults().replace(
                {"main_resource_id": "learningData", "file_col_index": 1}
            )
        )
        timeseries_df = ts_formatter.produce(inputs=dataset).value[self._resource_id]

        # verify that we have the expected shape
        self.assertEqual(timeseries_df.shape[0], 664)
        self.assertEqual(timeseries_df.shape[1], 5)


if __name__ == "__main__":
    unittest.main()
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

# Tests for the `python_requirements` target generator: one generated
# `python_requirement` per unique project name in requirements.txt or
# pyproject.toml, plus a sources-helper target for the parsed file.

from __future__ import annotations

from textwrap import dedent

import pytest

from pants.backend.python.goals import lockfile
from pants.backend.python.macros import python_requirements
from pants.backend.python.macros.python_requirements import PythonRequirementsTargetGenerator
from pants.backend.python.target_types import PythonRequirementTarget
from pants.core.target_types import TargetGeneratorSourcesHelperTarget
from pants.engine.addresses import Address
from pants.engine.internals.graph import _TargetParametrizations, _TargetParametrizationsRequest
from pants.engine.target import Target
from pants.testutil.rule_runner import QueryRule, RuleRunner, engine_error


@pytest.fixture
def rule_runner() -> RuleRunner:
    # Minimal engine with just the macro rules under test registered.
    return RuleRunner(
        rules=[
            *lockfile.rules(),
            *python_requirements.rules(),
            QueryRule(_TargetParametrizations, [_TargetParametrizationsRequest]),
        ],
        target_types=[PythonRequirementsTargetGenerator],
    )


def assert_python_requirements(
    rule_runner: RuleRunner,
    build_file_entry: str,
    requirements_txt: str,
    *,
    expected_targets: set[Target],
    requirements_txt_relpath: str = "requirements.txt",
) -> None:
    # Write the BUILD file and the requirements source, then expand the
    # `reqs` generator and compare the full generated-target set.
    rule_runner.write_files({"BUILD": build_file_entry, requirements_txt_relpath: requirements_txt})
    result = rule_runner.request(
        _TargetParametrizations,
        [
            _TargetParametrizationsRequest(
                Address("", target_name="reqs"), description_of_origin="tests"
            )
        ],
    )
    assert set(result.parametrizations.values()) == expected_targets


def test_requirements_txt(rule_runner: RuleRunner) -> None:
    """This tests that we correctly create a new python_requirement for each entry in a
    requirements.txt file, where each dependency is unique.

    Some edge cases:
    * We ignore comments and options (values that start with `--`).
    * module_mapping works regardless of capitalization.
    * Projects get normalized thanks to Requirement.parse().
    * Overrides works, including for dependencies.
    """
    file_addr = Address("", target_name="reqs", relative_file_path="requirements.txt")
    assert_python_requirements(
        rule_runner,
        dedent(
            """\
            python_requirements(
                name='reqs',
                module_mapping={'ansiCOLORS': ['colors']},
                type_stubs_module_mapping={'Django-types': ['django']},
                overrides={
                    "ansicolors": {"tags": ["overridden"]},
                    "Django": {"dependencies": ["#Django-types"]},
                },
            )
            """
        ),
        dedent(
            """\
            # Comment.
            --find-links=https://duckduckgo.com
            ansicolors>=1.18.0
            Django==3.2 ; python_version>'3'
            Django-types
            Un-Normalized-PROJECT  # Inline comment.
            pip@ git+https://github.com/pypa/pip.git
            """
        ),
        expected_targets={
            PythonRequirementTarget(
                {
                    "requirements": ["ansicolors>=1.18.0"],
                    "modules": ["colors"],
                    "dependencies": [file_addr.spec],
                    "tags": ["overridden"],
                },
                Address("", target_name="reqs", generated_name="ansicolors"),
            ),
            PythonRequirementTarget(
                {
                    "requirements": ["Django==3.2 ; python_version>'3'"],
                    "dependencies": ["#Django-types", file_addr.spec],
                },
                Address("", target_name="reqs", generated_name="Django"),
            ),
            PythonRequirementTarget(
                {
                    "requirements": ["Django-types"],
                    "type_stub_modules": ["django"],
                    "dependencies": [file_addr.spec],
                },
                Address("", target_name="reqs", generated_name="Django-types"),
            ),
            PythonRequirementTarget(
                {"requirements": ["Un_Normalized_PROJECT"], "dependencies": [file_addr.spec]},
                Address("", target_name="reqs", generated_name="Un-Normalized-PROJECT"),
            ),
            PythonRequirementTarget(
                {
                    "requirements": ["pip@ git+https://github.com/pypa/pip.git"],
                    "dependencies": [file_addr.spec],
                },
                Address("", target_name="reqs", generated_name="pip"),
            ),
            TargetGeneratorSourcesHelperTarget({"source": "requirements.txt"}, file_addr),
        },
    )


def test_multiple_versions(rule_runner: RuleRunner) -> None:
    """This tests that we correctly create a new python_requirement for each unique
    dependency name in a requirements.txt file, grouping duplicated dependency names
    to handle multiple requirement strings per PEP 508."""
    file_addr = Address("", target_name="reqs", relative_file_path="requirements.txt")
    assert_python_requirements(
        rule_runner,
        "python_requirements(name='reqs')",
        dedent(
            """\
            Django>=3.2
            Django==3.2.7
            confusedmonkey==86
            repletewateringcan>=7
            """
        ),
        expected_targets={
            PythonRequirementTarget(
                {
                    "requirements": ["Django>=3.2", "Django==3.2.7"],
                    "dependencies": [file_addr.spec],
                },
                Address("", target_name="reqs", generated_name="Django"),
            ),
            PythonRequirementTarget(
                {"requirements": ["confusedmonkey==86"], "dependencies": [file_addr.spec]},
                Address("", target_name="reqs", generated_name="confusedmonkey"),
            ),
            PythonRequirementTarget(
                {"requirements": ["repletewateringcan>=7"], "dependencies": [file_addr.spec]},
                Address("", target_name="reqs", generated_name="repletewateringcan"),
            ),
            TargetGeneratorSourcesHelperTarget({"source": "requirements.txt"}, file_addr),
        },
    )


def test_invalid_req(rule_runner: RuleRunner) -> None:
    """Test that we give a nice error message."""
    with engine_error(
        contains="Invalid requirement 'Not A Valid Req == 3.7' in requirements.txt at line 3"
    ):
        assert_python_requirements(
            rule_runner,
            "python_requirements(name='reqs')",
            "\n\nNot A Valid Req == 3.7",
            expected_targets=set(),
        )


def test_source_override(rule_runner: RuleRunner) -> None:
    # `source=` redirects parsing to a non-default requirements file path.
    file_addr = Address("", target_name="reqs", relative_file_path="subdir/requirements.txt")
    assert_python_requirements(
        rule_runner,
        "python_requirements(name='reqs', source='subdir/requirements.txt')",
        "ansicolors>=1.18.0",
        requirements_txt_relpath="subdir/requirements.txt",
        expected_targets={
            PythonRequirementTarget(
                {"requirements": ["ansicolors>=1.18.0"], "dependencies": [file_addr.spec]},
                Address("", target_name="reqs", generated_name="ansicolors"),
            ),
            TargetGeneratorSourcesHelperTarget({"source": "subdir/requirements.txt"}, file_addr),
        },
    )


def test_lockfile_dependency(rule_runner: RuleRunner) -> None:
    # With resolves enabled, each generated requirement also depends on the
    # synthetic lockfile target.
    rule_runner.set_options(["--python-enable-resolves"])
    reqs_addr = Address("", target_name="reqs", relative_file_path="requirements.txt")
    lock_addr = Address(
        "3rdparty/python", target_name="_python-default_lockfile", relative_file_path="default.lock"
    )
    assert_python_requirements(
        rule_runner,
        "python_requirements(name='reqs')",
        "ansicolors>=1.18.0",
        expected_targets={
            PythonRequirementTarget(
                {
                    "requirements": ["ansicolors>=1.18.0"],
                    "dependencies": [reqs_addr.spec, lock_addr.spec],
                },
                Address("", target_name="reqs", generated_name="ansicolors"),
            ),
            TargetGeneratorSourcesHelperTarget({"source": reqs_addr.filename}, reqs_addr),
        },
    )


def test_pyproject_toml(rule_runner: RuleRunner) -> None:
    """This tests that we correctly create a new python_requirement for each entry in a
    pyproject.toml file, where each dependency is unique.

    Some edge cases:
    * We ignore comments and options (values that start with `--`).
    * module_mapping works regardless of capitalization.
    * Projects get normalized thanks to Requirement.parse().
    * Overrides works, including for dependencies and optional-dependencies
    """
    file_addr = Address("", target_name="reqs", relative_file_path="pyproject.toml")
    assert_python_requirements(
        rule_runner,
        dedent(
            """\
            python_requirements(
                name='reqs',
                module_mapping={'ansiCOLORS': ['colors']},
                type_stubs_module_mapping={'Django-types': ['django']},
                overrides={
                    "ansicolors": {"tags": ["overridden"]},
                    "Django": {"dependencies": ["#Django-types"]},
                    "notebook": {"tags": ["another-tag"]},
                },
                source='pyproject.toml',
            )
            """
        ),
        dedent(
            """\
            [project]
            dependencies = [
                # Comment.
                "ansicolors>=1.18.0",
                "Django==3.2 ; python_version>'3'",
                "Django-types",
                "Un-Normalized-PROJECT",  # Inline comment.
                "pip@ git+https://github.com/pypa/pip.git",
            ]

            [project.optional-dependencies]
            test = [
                "pytest>=5.7.0",
            ]
            jupyter = [
                "notebook>=6.1.0",
            ]
            """
        ),
        expected_targets={
            PythonRequirementTarget(
                {
                    "requirements": ["ansicolors>=1.18.0"],
                    "modules": ["colors"],
                    "dependencies": [file_addr.spec],
                    "tags": ["overridden"],
                },
                Address("", target_name="reqs", generated_name="ansicolors"),
            ),
            PythonRequirementTarget(
                {
                    "requirements": ["Django==3.2 ; python_version>'3'"],
                    "dependencies": ["#Django-types", file_addr.spec],
                },
                Address("", target_name="reqs", generated_name="Django"),
            ),
            PythonRequirementTarget(
                {
                    "requirements": ["Django-types"],
                    "type_stub_modules": ["django"],
                    "dependencies": [file_addr.spec],
                },
                Address("", target_name="reqs", generated_name="Django-types"),
            ),
            PythonRequirementTarget(
                {"requirements": ["Un_Normalized_PROJECT"], "dependencies": [file_addr.spec]},
                Address("", target_name="reqs", generated_name="Un-Normalized-PROJECT"),
            ),
            PythonRequirementTarget(
                {
                    "requirements": ["pip@ git+https://github.com/pypa/pip.git"],
                    "dependencies": [file_addr.spec],
                },
                Address("", target_name="reqs", generated_name="pip"),
            ),
            PythonRequirementTarget(
                {
                    "requirements": ["pytest>=5.7.0"],
                    "dependencies": [file_addr.spec],
                },
                Address("", target_name="reqs", generated_name="pytest"),
            ),
            PythonRequirementTarget(
                {
                    "requirements": ["notebook>=6.1.0"],
                    "dependencies": [file_addr.spec],
                    "tags": ["another-tag"],
                },
                Address("", target_name="reqs", generated_name="notebook"),
            ),
            TargetGeneratorSourcesHelperTarget({"source": "pyproject.toml"}, file_addr),
        },
        requirements_txt_relpath="pyproject.toml",
    )


def test_multiple_versions_pyproject_toml(rule_runner: RuleRunner) -> None:
    """This tests that we correctly create a new python_requirement for each unique
    dependency name in a pyproject.toml file, grouping duplicated dependency names to
    handle multiple requirement strings per PEP 508."""
    file_addr = Address("", target_name="reqs", relative_file_path="pyproject.toml")
    assert_python_requirements(
        rule_runner,
        "python_requirements(name='reqs', source='pyproject.toml')",
        dedent(
            """\
            [project]
            dependencies = [
                "Django>=3.2",
                "Django==3.2.7",
                "confusedmonkey==86",
                "repletewateringcan>=7",
            ]
            """
        ),
        expected_targets={
            PythonRequirementTarget(
                {
                    "requirements": ["Django>=3.2", "Django==3.2.7"],
                    "dependencies": [file_addr.spec],
                },
                Address("", target_name="reqs", generated_name="Django"),
            ),
            PythonRequirementTarget(
                {
                    "requirements": ["confusedmonkey==86"],
                    "dependencies": [file_addr.spec],
                },
                Address("", target_name="reqs", generated_name="confusedmonkey"),
            ),
            PythonRequirementTarget(
                {
                    "requirements": ["repletewateringcan>=7"],
                    "dependencies": [file_addr.spec],
                },
                Address("", target_name="reqs", generated_name="repletewateringcan"),
            ),
            TargetGeneratorSourcesHelperTarget({"source": "pyproject.toml"}, file_addr),
        },
        requirements_txt_relpath="pyproject.toml",
    )


def test_invalid_req_pyproject_toml(rule_runner: RuleRunner) -> None:
    """Test that we give a nice error message."""
    with engine_error(contains="Invalid requirement 'Not A Valid Req == 3.7' in pyproject.toml"):
        assert_python_requirements(
            rule_runner,
            "python_requirements(name='reqs', source='pyproject.toml')",
            """[project]\ndependencies = ["Not A Valid Req == 3.7"]""",
            expected_targets=set(),
            requirements_txt_relpath="pyproject.toml",
        )


def test_source_override_pyproject_toml(rule_runner: RuleRunner) -> None:
    # `source=` also works for pyproject.toml files in a subdirectory.
    file_addr = Address("", target_name="reqs", relative_file_path="subdir/pyproject.toml")
    assert_python_requirements(
        rule_runner,
        "python_requirements(name='reqs', source='subdir/pyproject.toml')",
        "[project]\ndependencies = ['ansicolors>=1.18.0']",
        expected_targets={
            PythonRequirementTarget(
                {
                    "requirements": ["ansicolors>=1.18.0"],
                    "dependencies": [file_addr.spec],
                },
                Address("", target_name="reqs", generated_name="ansicolors"),
            ),
            TargetGeneratorSourcesHelperTarget({"source": "subdir/pyproject.toml"}, file_addr),
        },
        requirements_txt_relpath="subdir/pyproject.toml",
    )
# Generated by Django 2.0.2 on 2019-03-08 13:03 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Collection', fields=[ ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')), ('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')), ('is_delete', models.BooleanField(default=False, verbose_name='删除标记')), ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='收藏ID')), ('is_valid', models.CharField(choices=[('WX', '无效'), ('YX', '有效')], max_length=2, verbose_name='收藏状态')), ], options={ 'verbose_name': '收藏', 'verbose_name_plural': '收藏', }, ), ]
from django.shortcuts import render # Create your views here. def view1(request): myname = "Abhishek CM" favplayer = "ABD" favanimal = "lion" favsubject = "Python" d = {'name' : myname,'player' : favplayer,'animal' : favanimal,'subject' : favsubject} return render(request,'staticApp1/1.html',d)
#!/usr/bin/env python from main.page.desktop_v3.inbox_review.pe_inbox_review import * from main.page.desktop_v3.header import * class inboxReviewActivity(): def setObject(self, driver): self.inbox_review_page = InboxReviewPage(driver) def goto_inbox_review(self, site): self.inbox_review_page.open(site) #inbox_review_page.select_tab_all() # inbox_review_page.select_tab_my_product() # inbox_review_page.click_next_page() # inbox_review_page.select_filter_unread() def check_all_review(self): self.inbox_review_page.check_review_exists() def check_paging(self): self.inbox_review_page.check_paging_exists() def change_filter_all(self): self.inbox_review_page.select_filter_all() def change_filter_unread(self): self.inbox_review_page.select_filter_unread() def check_counter_review(self): self.inbox_review_page.check_counter_notification() def get_my_product(self): x = self.inbox_review_page.get_product_element() return x def get_my_shop(self): y = self.inbox_review_page.get_shop_element() return y def insert_review(self, product_name="", shop_name=""): target_review_product = self.inbox_review_page.get_selected_review(product_name, shop_name) self.inbox_review_page.input_review_for_last_transaction(target_review_product) def skip_review(self, product_name="", shop_name=""): target_review_product = self.inbox_review_page.get_selected_review(product_name,shop_name) flag_review = self.inbox_review_page.skip_review_for_latest_transaction(target_review_product) if flag_review == 1: self.inbox_review_page.close_skip_review_dialog() time.sleep(2) self.inbox_review_page.skip_review_for_latest_transaction(target_review_product) self.inbox_review_page.cancel_skip_review() time.sleep(2) self.inbox_review_page.skip_review_for_latest_transaction(target_review_product) self.inbox_review_page.confirm_skip_review() time.sleep(2) else: print ("Cannot Skip review because there is no Skip Review button! Continue...")
from django.core.exceptions import ObjectDoesNotExist from django.shortcuts import render import logging from stories.models import Story, Comment logger = logging.getLogger(__name__) def get_comment_object(request, comment_id): """ returns comment object :param request: :param story_id: :param comment_id: :return: """ try: comment = Comment.objects.get(id=comment_id) return comment except ObjectDoesNotExist: logger.error("Comment does not exist") return render(request, 'stories/error.html') def get_story_object(request, story_id): """ returns story object :param request: :param story_id: :return: """ try: story = Story.objects.get(id=story_id) return story except Story.DoesNotExist: logger.error("Story does not exist") return render(request, 'stories/error.html')
import enum
import json
import re
import subprocess
import sys

import dateutil.parser


class GermanParserInfo(dateutil.parser.parserinfo):
    """parserinfo with German month names so dateutil can parse dates like
    '1. Oktober 2021'."""

    MONTHS = [
        ("Jan", "Januar", "Jänner"),
        ("Feb", "Februar"),
        ("Mär", "Mrz", "März"),
        ("Apr", "April"),
        ("Mai",),
        ("Jun", "Juni"),
        ("Jul", "Juli"),
        ("Aug", "August"),
        ("Sep", "Sept", "September"),
        ("Okt", "Oktober"),
        ("Nov", "November"),
        ("Dez", "Dezember"),
    ]


class State(enum.Enum):
    """States of the line-oriented parser below.

    The numbering has gaps — presumably left over from removed states; the
    values themselves are never relied upon.
    """

    PREAMBLE = 1
    STUDIUM_TYPE = 2
    STUDIUM_NAME = 3
    STUDIUM_KENNZAHL = 4
    BESCHLUSS_DATUM = 5
    GUELTIG_DATUM = 6
    INHALTSVERZEICHNIS = 7
    PRUEFUNGSFACH_MODUL_LVA = 12
    PRUEFUNGSFAECHER = 13
    PRUEFUNGSFACH_NAME = 14
    PRUEFUNGSFACH_MODUL = 15
    KURZBESCHREIBUNG_MODULE = 16
    MODULBESCHREIBUNGEN = 17
    MODUL_NAME = 18
    MODUL_REGELARBEITSAUFWAND = 19
    MODUL_LERNERGEBNISSE = 20
    MODUL_LVAS = 21
    LEHRVERANSTALTUNGSTYPEN = 23
    SEMESTEREINTEILUNG = 24
    SEMESTEREINTEILUNG_SEMESTER = 25
    SEMESTEREINTEILUNG_LVA = 26
    SEMESTEREMPFEHLUNG_SCHIEFEINSTEIGEND = 27
    END = 99


def next_line(lines, skip_empty=True, strip=True):
    """Returns the next not empty line."""
    while True:
        line = next(lines)
        if strip:
            line = line.strip()
        if line or not skip_empty:
            return line


def parse_studienplan(text):
    """Parse the extracted text of a TU Wien Studienplan PDF into a dict.

    Implemented as a state machine over the text lines; each branch handles
    one State and decides whether to consume the current line. Exhausting the
    input (StopIteration) simply ends parsing with whatever was collected.
    """
    state = State.PREAMBLE
    lines = iter(text.splitlines())
    studienplan = {}
    try:
        line = next_line(lines)
        while True:
            if state == State.PREAMBLE:
                # Skip until the degree-programme heading.
                if line.startswith("Bachelorstudium") or line.startswith(
                    "Masterstudium"
                ):
                    state = State.STUDIUM_TYPE
                else:
                    line = next_line(lines)
            elif state == State.STUDIUM_TYPE:
                studienplan["studium_type"] = line
                state = State.STUDIUM_NAME
                line = next_line(lines)
            elif state == State.STUDIUM_NAME:
                studienplan["studium_name"] = line
                state = State.STUDIUM_KENNZAHL
                line = next_line(lines)
            elif state == State.STUDIUM_KENNZAHL:
                # e.g. "UE 033 526" -> "UE033526"
                studienplan["studienkennzahl"] = line.replace(" ", "")
                state = State.BESCHLUSS_DATUM
                line = next_line(lines)
            elif state == State.BESCHLUSS_DATUM:
                if line.startswith("mit Wirksamkeit"):
                    studienplan["beschluss_datum"] = dateutil.parser.parse(
                        line.replace("mit Wirksamkeit ", ""), GermanParserInfo()
                    ).date()
                    state = State.GUELTIG_DATUM
                line = next_line(lines)
            elif state == State.GUELTIG_DATUM:
                assert line.startswith("Gültig ab")
                studienplan["gueltig_datum"] = dateutil.parser.parse(
                    line.replace("Gültig ab ", ""), GermanParserInfo()
                ).date()
                state = State.INHALTSVERZEICHNIS
                line = next_line(lines)
            elif state == State.INHALTSVERZEICHNIS:
                # A lot of text inbetween is skipped.
                if line.startswith("A. Modulbeschreibungen"):
                    state = State.MODULBESCHREIBUNGEN
                line = next_line(lines)
            elif state == State.MODULBESCHREIBUNGEN:
                if line.endswith("ist in Anhang B im Detail erläutert."):
                    studienplan["modulbeschreibungen"] = []
                    modulbeschreibungen = studienplan["modulbeschreibungen"]
                    state = State.MODUL_NAME
                line = next_line(lines)
            elif state == State.MODUL_NAME:
                if line.startswith("B. Lehrveranstaltungstypen"):
                    state = State.LEHRVERANSTALTUNGSTYPEN
                else:
                    modul = {
                        "name": line.strip(),
                        "lvas": [],
                        "regelarbeitsaufwand": {"ects": None},
                        "lernergebnisse": [],
                    }
                    modulbeschreibungen.append(modul)
                    state = State.MODUL_REGELARBEITSAUFWAND
                line = next_line(lines)
            elif state == State.MODUL_REGELARBEITSAUFWAND:
                if line.startswith("Regelarbeitsaufwand:"):
                    modul["regelarbeitsaufwand"]["ects"] = line.replace(
                        "Regelarbeitsaufwand: ", ""
                    ).replace(" ECTS", "")
                line = next_line(lines)
                state = State.MODUL_LERNERGEBNISSE
            elif state == State.MODUL_LERNERGEBNISSE:
                if line.startswith("Lehrveranstaltungen des Moduls:"):
                    state = State.MODUL_LVAS
                    line = next_line(lines, strip=False)
                elif line.endswith("Individuell nach gewählten Modulen/LVAs."):
                    # Bachelor Technische Informatik has two Module that do not have a
                    # list of LVAs.
                    state = State.MODUL_NAME
                    line = next_line(lines)
                else:
                    modul["lernergebnisse"].append(line)
                    line = next_line(lines)
                    # Stay in the same state to potentially add another line to
                    # Lernergebnisse.
                    continue
                # Lernergebnisse is fully parsed.
                modul["lernergebnisse"] = (
                    "\n".join(modul["lernergebnisse"])
                    .replace("Lernergebnisse:", "")
                    .strip()
                )
            elif state == State.MODUL_LVAS:
                # Line is not stripped so we can distinguish between continuing
                # LVA name, new LVA name as well as new modules.
                if re.match(r"^((?:\*|\s)\s*\d|\d\d)[,.]\d", line):
                    # The Modul "Software Engineering und Projektmanagement" in
                    # Medizinische Informatik has a special rule.
                    lva = re.match(
                        r"(?:\*\s*)?(?P<ects>\d{1,2}[,.]\d)/(?P<sst>\d{1,2}[,.]\d)\s*"
                        + r"(?P<lva_typ>[A-Z]+)\s+(?P<name>.*)",
                        line.strip(),
                    ).groupdict()
                    # Normalize spaces in name.
                    lva["name"] = re.sub("\s+", " ", lva["name"])
                    modul["lvas"].append(lva)
                    line = next_line(lines, strip=False)
                elif line.startswith(" ") and line.strip():
                    # LVA name goes over two lines.
                    modul["lvas"][-1]["name"] += " " + line.strip()
                    line = next_line(lines, strip=False)
                elif "zentralen Wahlfachkatalog der TU Wien" in line:
                    # The Modul "Freie Wahlfächer und Transferable Skills" doesn't have
                    # a list of LVAs. Just skip the description.
                    line = next_line(lines)
                    state = State.MODUL_NAME
                elif len(modul["lvas"]) == 0 or line in ["Verpflichtend:", "Wahl:"]:
                    # There might be some text before/in the list of LVAs that we just
                    # skip.
                    line = next_line(lines, strip=False)
                else:
                    state = State.MODUL_NAME
            elif state == State.LEHRVERANSTALTUNGSTYPEN:
                # A lot of text inbetween is skipped.
                if "Semestereinteilung der Lehrveranstaltungen" in line:
                    # Can be appendix D or C.
                    state = State.SEMESTEREINTEILUNG
                    studienplan["semestereinteilung"] = {}
                    semestereinteilung = studienplan["semestereinteilung"]
                line = next_line(lines)
            elif state == State.SEMESTEREINTEILUNG:
                if line.endswith("Semester (WS)") or line.endswith("Semester (SS)"):
                    state = State.SEMESTEREINTEILUNG_SEMESTER
                else:
                    line = next_line(lines)
            elif state == State.SEMESTEREINTEILUNG_SEMESTER:
                # The heading line itself is the dict key, e.g. "1. Semester (WS)".
                semestereinteilung[line] = []
                semester = semestereinteilung[line]
                state = State.SEMESTEREINTEILUNG_LVA
                line = next_line(lines)
            elif state == State.SEMESTEREINTEILUNG_LVA:
                if line.endswith("Semester (WS)") or line.endswith("Semester (SS)"):
                    state = State.SEMESTEREINTEILUNG_SEMESTER
                elif line.startswith("E. Semesterempfehlung"):
                    # Bachelor
                    state = State.SEMESTEREMPFEHLUNG_SCHIEFEINSTEIGEND
                elif line.startswith("D. Prüfungsfächer mit den zugeordneten Modulen"):
                    # Master
                    state = State.PRUEFUNGSFAECHER
                else:
                    match = re.match(
                        r"(?P<not_steop_constrained>\*)?\s*(?P<ects>\d{1,2},\d)\s*"
                        + r"(?P<lva_typ>[A-Z]+)\s+(?P<name>.*)",
                        line,
                    )
                    if match:
                        lva = match.groupdict()
                        # NOTE(review): compares against "*", i.e. a leading star
                        # makes this False — confirm the intended polarity.
                        lva["not_steop_constrained"] = (
                            lva["not_steop_constrained"] != "*"
                        )
                        semester.append(lva)
                    line = next_line(lines)
            elif state == State.SEMESTEREMPFEHLUNG_SCHIEFEINSTEIGEND:
                # A lot of text inbetween is skipped.
                if "Prüfungsfächer mit den zugeordneten Modulen" in line:
                    # Can be appendix D or G, depending on Bachelor or Master.
                    state = State.PRUEFUNGSFAECHER
                line = next_line(lines)
            elif state == State.PRUEFUNGSFAECHER:
                if line.startswith("Prüfungsfach"):
                    studienplan["pruefungsfaecher"] = []
                    pruefungsfaecher = studienplan["pruefungsfaecher"]
                    state = State.PRUEFUNGSFACH_NAME
                else:
                    line = next_line(lines)
            elif state == State.PRUEFUNGSFACH_NAME:
                if line.startswith("Prüfungsfach"):
                    pruefungsfach = {"name": line, "module": []}
                    pruefungsfaecher.append(pruefungsfach)
                    line = next_line(lines)
                elif line.startswith("Modul") or line.startswith("*Modul"):
                    # Name is complete; strip the surrounding quote syntax.
                    pruefungsfach["name"] = re.match(
                        r'Prüfungsfach "([^"]+)"', pruefungsfach["name"]
                    ).group(1)
                    state = State.PRUEFUNGSFACH_MODUL
                elif line.startswith("H. Bachelor-Abschluss mit Honors"):
                    state = State.END
                elif pruefungsfach["name"] == 'Prüfungsfach "Diplomarbeit"':
                    # Special case for Diplomarbeit which doesn't have a Modul.
                    pruefungsfach["name"] = "Diplomarbeit"
                    state = State.END
                else:
                    # Continuing Prüfungsfach name
                    pruefungsfach["name"] += " " + line
                    line = next_line(lines)
            elif state == State.PRUEFUNGSFACH_MODUL:
                # The fixing of quotes ist not 100% perfect so we don't rely on the fact
                # that the name of the Modul is within quotes. We parse the name with
                # quotes.
                modul = re.match(
                    r"(?P<wahl>\*)?Modul "
                    + r"(?:(?P<name>.+)\s+\((?P<ects>.*) ECTS\)|(?P<name_no_ects>.+))",
                    line,
                ).groupdict()
                name_no_ects = modul.pop("name_no_ects")
                if name_no_ects:
                    modul["name"] = name_no_ects
                # And remove the quotes here.
                modul["name"] = modul["name"].replace('"', "")
                modul["wahl"] = modul["wahl"] == "*"
                pruefungsfach["module"].append(modul)
                state = State.PRUEFUNGSFACH_MODUL_LVA
                line = next_line(lines)
            elif state == State.PRUEFUNGSFACH_MODUL_LVA:
                if line.startswith("Modul") or line.startswith("*Modul"):
                    state = State.PRUEFUNGSFACH_MODUL
                elif line.startswith("Prüfungsfach"):
                    state = State.PRUEFUNGSFACH_NAME
                else:
                    # TODO Skip list of LVAs for now.
                    line = next_line(lines)
            elif state == State.END:
                break
    except StopIteration:
        pass
    return studienplan


def read_pdf(filename):
    """Extract the text layer of *filename* via pdftotext, cropping margins,
    headers and footers with the -x/-y/-W/-H window (values in points)."""
    result = subprocess.run(
        [
            "pdftotext",
            "-nopgbrk",
            "-layout",
            "-x",
            "72",
            "-y",
            "72",
            "-W",
            "460",
            "-H",
            "650",
            filename,
            "-",
        ],
        encoding="utf8",
        capture_output=True,
    )
    return result.stdout


def dehyphenate(text):
    """Re-join words that pdftotext hyphenated across line breaks."""
    while "-\n" in text:
        text = re.sub("-\n\\s*", "", text)
    return text


def fix_quotes(text):
    """Normalize typographic quotes to plain '"'.

    Closing quotes ("”") that pdftotext pushed onto the following line are
    re-attached to the correct word of the previous line.

    NOTE(review): the final input line is never appended to *fixed_text*
    (append happens before `prev_line = line`) — confirm this is intended.
    """
    text = text.replace("“", '"')
    fixed_text = []
    prev_line = None
    for line in text.splitlines():
        while "”" in line:
            i_quote = line.index("”")
            line = line.replace("”", " ", 1)
            assert prev_line is not None
            if len(prev_line) <= i_quote:
                i_quote = len(prev_line) - 1
            if prev_line[i_quote] == " ":
                i_word = i_quote + 1
            else:
                i_word = prev_line.rindex(" ", 0, i_quote) + 1
            # XXX what if quote is at the beginning of the line
            prev_line = prev_line[:i_word] + '"' + prev_line[i_word:]
        fixed_text.append(prev_line)
        prev_line = line
    # Drop the leading None placeholder.
    return "\n".join(fixed_text[1:])


def remove_footnotes(text):
    """Drop footnote marker lines (a lone digit) and the indented footnote
    body lines that follow them."""
    fixed_text = []
    in_footnote = False
    for line in text.splitlines():
        if re.match(r"^ \d$", line):
            in_footnote = True
            continue
        if in_footnote and line.startswith(" "):
            continue
        in_footnote = False
        fixed_text.append(line)
    return "\n".join(fixed_text)


def cleanup_text(text):
    """Apply all text-level repairs needed before parsing."""
    text = fix_quotes(text)
    text = dehyphenate(text)
    text = remove_footnotes(text)
    return text


def condense_studienplan(studienplan):
    """Fold Modulbeschreibungen and Semestereinteilung into the
    Prüfungsfächer structure, assert everything was consumed, drop the
    now-redundant top-level keys and sort the result."""

    def _get_modulbeschreibung(modul_name):
        # Pop the matching entry so leftover entries can be asserted empty below.
        for i, modulbeschreibung in enumerate(studienplan["modulbeschreibungen"]):
            if modulbeschreibung["name"] == modul_name:
                del studienplan["modulbeschreibungen"][i]
                return modulbeschreibung
        raise ValueError(f"Modulbeschreibung for {modul_name} not found!")

    def _get_semester_steop(lva):
        # Pop the matching Semestereinteilung entry; returns (semester, steop flag).
        for semester, lvas in studienplan["semestereinteilung"].items():
            for i, l in enumerate(lvas):
                if (
                    lva["name"] == l["name"]
                    and lva["lva_typ"] == l["lva_typ"]
                    and lva["ects"] == l["ects"]
                ):
                    del lvas[i]
                    return semester, l["not_steop_constrained"]
        return None, False

    for pruefungsfach in studienplan["pruefungsfaecher"]:
        for modul in pruefungsfach["module"]:
            try:
                modulbeschreibung = _get_modulbeschreibung(modul["name"])
            except ValueError as e:
                if modul["name"].startswith("Projekt aus "):
                    # The Modul "Projekt aus Software Engineering & Projektmanagement"
                    # is part of every Prüfungsfach. However, it's deleted from the
                    # Modulbeschreibung after beeing assigned to the first Prüfungsfach.
                    # That's OK.
                    continue
                raise e
            assert modulbeschreibung["regelarbeitsaufwand"]["ects"] == modul["ects"]
            modul["lernergebnisse"] = modulbeschreibung["lernergebnisse"]
            modul["lvas"] = modulbeschreibung["lvas"]
            for lva in modul["lvas"]:
                lva["semester"], lva["not_steop_constrained"] = _get_semester_steop(lva)

    # Delete redundant information and make sure that it has been used.
    assert studienplan["modulbeschreibungen"] == []
    del studienplan["modulbeschreibungen"]
    for semestereinteilung in studienplan["semestereinteilung"].values():
        assert semestereinteilung == []
    del studienplan["semestereinteilung"]

    # Sort.
    studienplan["pruefungsfaecher"] = sorted(
        studienplan["pruefungsfaecher"], key=lambda p: p["name"]
    )
    for pruefungsfach in studienplan["pruefungsfaecher"]:
        pruefungsfach["module"] = sorted(
            pruefungsfach["module"], key=lambda m: m["name"]
        )


def main():
    """CLI entry point: <plan>.pdf -> <plan>.json with the condensed plan."""
    text = cleanup_text(read_pdf(sys.argv[1]))
    studienplan = parse_studienplan(text)
    condense_studienplan(studienplan)
    with open(sys.argv[1].replace("pdf", "json"), "w") as f:
        json.dump(studienplan["pruefungsfaecher"], f, indent=4, sort_keys=True)


if __name__ == "__main__":
    main()
import html
from .main import socketio, session
from flask_socketio import emit, disconnect
import sys
from .chatlog import *
import markdown2  # this is not used yet


@socketio.on('join', namespace='/chat')
def chat_join(name):
    """Store the client's (escaped) display name and announce the join."""
    session['Name'] = html.escape(name)
    sendToChat(f"{session['Name']} Joined")


@socketio.on('chat', namespace='/chat')
def chat_message(message):
    """Escape, render as Markdown, log, and broadcast one chat message."""
    rendered = markdown2.markdown(html.escape(message))
    chatlog.AddChatLog(session['Name'], rendered)
    sendToChat(f"{session['Name']}:{rendered}")


@socketio.on('disconnect', namespace='/chat')
def test_disconnect():
    """Announce the departure and clear the stored name."""
    sendToChat(f"{session['Name']} Left")
    session['Name'] = None


def sendToChat(msg):
    """Echo *msg* to the server console and broadcast it on the chat channel."""
    print(f'Chat>>{msg}')
    emit('chat', msg, broadcast=True)
''' Python 使用被称为 异常 的特殊对象来管理程序执行期间发生的错误。 每当发生让 Python 不知所措的错误时,它都会创建一个异常对象。 如果你编写了处理该异常的代码,程序将继续运行; 如果你未对异常进行处理,程序将停止,并显示一个 traceback ,其中包含有关异常的报告。 ''' # test 1 ''' 处理异常:try-except结构 try用于捕获异常,except用于处理异常,else中包含try中代码成功执行的后续代码 ''' try: print(5/0) except ZeroDivisionError: pass #什么都不做 #print("you can't divide by zero.") else: print("continue running !")
#!/usr/bin/python # proximityAlert.py # This code is for use in the Proximity Alert PiWars 2015 challenge # http://piwars.org/2015-competition/challenges/proximity-alert/ # Import required libraries import robohat import time # Define servo movement function def doServos(): robohat.setServo(pan, pVal) robohat.setServo(tilt, tVal) # Set initial variables speed = 40 minRange = 10 pan = 0 tilt = 1 tVal = 0 # 0 degrees is horizontal centre pVal = 0 # 0 degrees is vertical centre # Initialise robohat controller robohat.init() # Set servos to point sonar to initial chosen direction doServos() # Measure current distance dist = robohat.getDistance() print "Distance to wall, ", int(dist) time.sleep(1) try: while True: if dist >= minRange: robohat.forward(speed) print "Stepping forward" print "Distance to wall, ", int(dist) time.sleep(0.5) dist = robohat.getDistance() else: robohat.stop() print "hello handsome" dist = robohat.getDistance() print "Distance to wall, ", int(dist) time.sleep(0.5) except KeyboardInterrupt: print "Exiting" finally: # cleanup is run even if ^c is typed robohat.cleanup()
#! /usr/bin/env python """ * File: template.py * Author: FILL NAME HERE """ import math import sys import argparse import numpy as np import scipy.stats as st import matplotlib.pyplot as plt def main(argc,argv): print 'Hello World' if __name__ == "__main__": main(len(sys.argv),sys.argv)
from django.shortcuts import render # views.py from django.views.generic import ListView from posts.models import Post class PostsList(ListView): model = Post context_object_name = 'posts' template_name='posts/post_list.html' ordering = ['-created_at'] paginate_by = 5 queryset = Post.objects.all() # Create your views here.
class Layer:
    """Abstract base class for neural-network layers.

    Subclasses are expected to implement forward() and backward();
    `input` and `output` cache the tensors from the most recent forward
    pass for use during backpropagation.
    """

    def __init__(self):
        self.input = None    # last input passed to forward()
        self.output = None   # last output produced by forward()

    def forward(self, input):
        # TODO: return output
        pass

    def backward(self, output_gradient):
        # TODO: update parameters and return input gradient
        pass
class Rectangle:
    """Axis-aligned rectangle with simple geometry helpers."""

    def __init__(self, width = 0, height = 0):
        self.height = height
        self.width = width

    def __str__(self):
        return f"Rectangle(width={self.width}, height={self.height})"

    def get_area(self):
        """Return width * height."""
        return self.width * self.height

    def get_perimeter(self):
        """Return the perimeter 2w + 2h."""
        return (2 * self.width + 2 * self.height)

    def get_diagonal(self):
        """Return the diagonal length sqrt(w**2 + h**2)."""
        return ((self.width ** 2 + self.height ** 2) ** .5)

    def get_picture(self):
        """Return an ASCII picture: `height` lines of `width` '*' characters.

        Returns "Too big for picture." when either side exceeds 50.
        Bug fix: the original tested `< 50` then `> 50`, so a side of
        exactly 50 fell through both branches and returned None.
        """
        if self.height > 50 or self.width > 50:
            return "Too big for picture."
        row = "*" * self.width
        picture = ""
        for _ in range(self.height):
            picture = picture + row + "\n"
        return picture

    def set_width(self, x):
        self.width = x

    def set_height(self, y):
        self.height = y

    def get_amount_inside(self, shape):
        """Return how many times `shape`'s AREA divides this rectangle's area.

        NOTE(review): this is area-based, not a geometric packing count
        (a 3x6 rectangle reports 4 for a 2x2 square although only 3 fit
        spatially) -- preserved as-is; confirm intended semantics.
        """
        areaGuest = shape.get_area()
        areaHome = self.get_area()
        if areaGuest <= 0:
            # Bug fix: a zero-area shape made the loop below spin forever.
            return 0
        count = 0
        while areaHome >= areaGuest:
            areaHome = areaHome - areaGuest
            count = count + 1
        return count


class Square(Rectangle):
    """Rectangle constrained to equal sides."""

    def __init__(self, side = 0):
        super().__init__(width=side, height=side)
        self.side = side  # kept for callers that read .side directly

    def __str__(self):
        return "Square(side={})".format(self.width)

    def set_side(self, z):
        self.width = z
        self.height = z
        self.side = z  # consistency fix: keep .side in step with width/height
from population import Population
from individual import Individual
from random import random, randint


class Algorithm():
    """Genetic-algorithm driver: tournament selection, uniform crossover,
    per-gene mutation, optional elitism."""

    # Constants
    Uniform_rate = 0.5      # chance a gene is inherited from the first parent
    Mutation_rate = 0.015   # per-gene mutation probability
    Tournament_size = 5     # candidates drawn for each tournament
    Elitism = True          # carry the current best over unchanged

    @staticmethod
    def evolve_population(population_passed):
        """Build and return the next generation of `population_passed`."""
        print("Evolving population...")
        next_gen = Population(population_passed.size(), False)

        if Algorithm.Elitism:
            # Slot 0 is reserved for the unchanged best individual.
            next_gen.individuals.append(population_passed.get_fittest())
            elitism_off_set = 1
        else:
            elitism_off_set = 0

        # Fill the remaining slots with children of tournament winners.
        for slot in range(elitism_off_set, population_passed.size()):
            parent_a = Algorithm.tournament_selection(population_passed)
            parent_b = Algorithm.tournament_selection(population_passed)
            next_gen.individuals.append(Algorithm.crossover(parent_a, parent_b))

        # Randomly mutate every non-elite child.
        for slot in range(elitism_off_set, population_passed.size()):
            Algorithm.mutate(next_gen.get_individual(slot))

        return next_gen

    @staticmethod
    def crossover(individual1_passed, individual2_passed):
        """Uniform crossover: each gene comes from parent 1 with probability
        Uniform_rate, otherwise from parent 2."""
        child = Individual()
        for locus in range(individual1_passed.size()):
            if random() <= Algorithm.Uniform_rate:
                child.set_gene(locus, individual1_passed.get_gene(locus))
            else:
                child.set_gene(locus, individual2_passed.get_gene(locus))
        return child

    @staticmethod
    def mutate(individual_passed):
        """Replace each gene with a random bit with probability Mutation_rate."""
        for locus in range(individual_passed.size()):
            if random() <= Algorithm.Mutation_rate:
                individual_passed.set_gene(locus, randint(0, 1))

    @staticmethod
    def tournament_selection(population_passed):
        """Tournament selection: draw Tournament_size individuals uniformly
        at random and return the fittest among them."""
        tournament = Population(Algorithm.Tournament_size, False)
        for _ in range(Algorithm.Tournament_size):
            random_id = int(random() * population_passed.size())
            tournament.individuals.append(population_passed.get_individual(random_id))
        return tournament.get_fittest()
#!/usr/local/bin/python2.7
# Python 2 ETL script: downloads Zillow research CSV archives, unzips them,
# and loads the rows into a MongoDB "real_estate" database (one collection
# per region type).
import urllib2 as url
import datetime
import pymongo
from pymongo.errors import BulkWriteError
import csv
import re
import sh
import argparse
import time
import sys
import pprint
import copy

pp = pprint.PrettyPrinter(indent=4)

class ZillowParser( object ):
    """Downloads, unzips and parses Zillow research data into MongoDB.

    NOTE(review): `start_file` and `dates_only` are never set in __init__;
    the __main__ driver assigns them before parse_files() runs -- an
    un-driven instance would raise AttributeError there.
    """

    def __init__( self, host='localhost', username='', password='', port=27017 ):
        self.client = pymongo.MongoClient(host=host, port=port)
        self.db = self.client['real_estate'] # will be created if it doesn't already exist
        self.db.authenticate(username, password)
        self.states = self.db['states'] # will be created if it doesn't already exist
        self.metros = self.db['metros'] # will be created if it doesn't already exist
        self.counties = self.db['counties'] # will be created if it doesn't already exist
        self.cities = self.db['cities'] # will be created if it doesn't already exist
        self.zips = self.db['zips'] # will be created if it doesn't already exist
        self.neighborhoods = self.db['neighborhoods'] # will be created if it doesn't already exist
        self.debug = False
        self.write_size = 50  # documents accumulated per bulk write
        self.data_types = [ 'State', 'Metro', 'County', 'City', 'Zip', 'Neighborhood' ]
        self.db_update_list = [ 'State', 'Metro', 'County', 'City', 'Zip', 'Neighborhood' ]
        self.collections = [ 'states', 'metros', 'counties', 'cities', 'zips', 'neighborhoods' ]
        # Maps a csv data type (e.g. 'State') to its collection name ('states').
        self.collection_dict = dict(zip( self.data_types, self.collections ) )
        self.directory = ''

    def download_zips( self ):
        """Fetch one zip archive per data type from files.zillowstatic.com."""
        for file in self.data_types:
            sh.wget('-P', self.directory, 'http://files.zillowstatic.com/research/public/%s.zip'%file)

    def unzip_files( self ):
        """Extract each downloaded archive under self.directory."""
        for file in self.data_types:
            sh.unzip('%s/%s.zip'%(self.directory, file), '-d', self.directory)

    def get_data_set( self, file, data ):
        """Derive a data-set name from csv filename `file` for region type `data`.

        Handles both '<Data>_<set>.csv' and '<set>_<Data>[_Public].csv'
        naming schemes; hyphens become underscores. Returns '' on no match.
        """
        file_re = re.search('%s_(\S+).csv'%data, file)
        if ( file_re ):
            return str(file_re.group(1)).replace('-', '_')
        file_re = re.search('(\S+)_%s(_Public)*.csv'%data, file)
        if ( file_re ):
            data_set = str(file_re.group(1)).replace('-', '_')
            if ( file_re.group(2) ):
                data_set += '_Public'
            return data_set
        return ''

    def parse_files( self ):
        """Parse every csv of every selected data type into MongoDB.

        Each csv row becomes one document. Columns whose header looks like
        YYYY-MM are treated as a time series and collected into one list
        keyed by the data-set name; the matching date headers are stored
        once per data type in a special 'dates_document'.
        """
        for data in self.data_types:
            if data not in self.db_update_list:
                continue
            documents = []
            dates_document = { 'dates_document' : 1 } # store all date arrays for a data type in a single document. There should be one for each time series csv
            path = '%s/%s'%(self.directory,data)
            self.collection = self.__dict__[ self.collection_dict[data] ]
            begin_parsing = False
            if ( self.start_file == '' ):
                begin_parsing = True
            for file in sh.ls(path):
                # dont begin parsing until we reach our start file
                # allows script to continue in middle of parse when interrupted
                file = file.replace('\n', '')
                if( file == self.start_file ):
                    begin_parsing = True
                if ( not begin_parsing ):
                    continue
                time_series = 'undetermined'
                if ( self.debug ):
                    print 'inspecting %s'%file
                # For csvs with a time series, data_set = time_series.
                # If the csv does not have a time series, it may have
                # ambiguous column names in which case I'll have to
                # append the column name to the data_set name to make
                # it unique.
                data_set = self.get_data_set( file, data )
                if ( data_set != '' ):
                    if ( self.debug ):
                        print 'parsing %s'%data_set
                    fh = open( '%s/%s'%(path,file), 'r' )
                    fh_dr = csv.DictReader( fh )
                    # determine whether or not csv contains a time series
                    # if it does initialize a list in the document.
                    initial_document = {}
                    time_series_begun = False
                    for field in fh_dr.fieldnames:
                        if re.search('\d{4}-\d{2}', field):
                            if ( not time_series_begun ):
                                time_series_begun = True
                                time_series = data_set
                                initial_document = { time_series : [] }
                                dates_document['%s_dates'%time_series] = []
                            dates_document['%s_dates'%time_series].append( field )
                    if ( self.dates_only ):
                        continue
                    # iterate over csv, populate time series as well as any other data
                    # each row in the csv is a document.
                    for line in fh_dr:
                        document = copy.deepcopy( initial_document )
                        # Note: the fieldnames list keeps the keys in order.
                        # line.keys() or line.iteritems() does not
                        for field in fh_dr.fieldnames:
                            if ( time_series != 'undetermined' and re.search('\d{4}-\d{2}', field) ):
                                document[time_series].append( self.format( line[field] ) )
                            elif ( field in [ 'RegionName', 'State', 'City', 'County', 'Metro' ] ):
                                document[field] = line[field].lower()
                        # without deepcopy we will keep overwriting the document reference
                        # and every item in documents will point to the last item appended
                        documents.append( copy.deepcopy( document ) )
                        if ( len(documents) >= self.write_size ):
                            try:
                                self.save_documents( documents, time_series )
                                documents = []
                            except BulkWriteError as bwe:
                                pp.pprint(bwe.details)
                                sys.exit(1)
                    # end of csv file
                    print 'closing file'
                    fh.close()
                    if ( documents != [] ):
                        try:
                            self.save_documents( documents, time_series )
                            documents = []
                        except BulkWriteError as bwe:
                            pp.pprint(bwe.details)
                            sys.exit(1)
            # end of data type
            if ( self.debug ):
                print 'inserting dates document'
            self.collection.update_one( { 'RegionName' : 'dates_document' }, { '$set' : dates_document }, upsert=True )

    def format( self, entry ):
        """Coerce a csv string to int or float when it looks numeric;
        otherwise return it unchanged."""
        if ( re.search( '^ *\d+ *$', entry ) ):
            return int( entry )
        elif ( re.search('^ *\d+\.\d+ *$', entry) ):
            return float( entry )
        return entry

    def save_documents( self, documents, time_series ):
        """Bulk-upsert `documents` into the current collection.

        The upsert filter is built from RegionName (+State when present).
        Documents carrying the time series only $set that field; others
        $set the whole document.
        """
        requests = []
        for document in documents:
            filter_dict = {}
            for field in document.keys():
                if ( field in [ 'RegionName', 'State' ] ):
                    filter_dict[field] = document[field]
            if( time_series in document.keys() ):
                # In most cases, if the document is already there, then we are only updating the time series
                if ( self.debug ):
                    print 'updating %s'%filter_dict['RegionName']
                requests.append( pymongo.UpdateOne( copy.deepcopy( filter_dict ), { '$set' : { time_series : document[time_series] } }, upsert=True ) )
            else:
                requests.append( pymongo.UpdateOne( copy.deepcopy( filter_dict ), { '$set': document }, upsert=True ) )
        if ( requests != [] ):
            #if ( self.debug ):
            #    print 'BULK WRITE!'
            self.collection.bulk_write( requests )

if ( __name__ == '__main__' ):
    # CLI driver: each stage (download, unzip, parse) is opt-in via a flag.
    parser = argparse.ArgumentParser()
    parser.add_argument('--username', type=str, default='', help='username for db access')
    parser.add_argument('--password', type=str, default='', help='password for db access')
    parser.add_argument('--host', type=str, default='localhost', help='host for db access, default is localhost')
    parser.add_argument('--port', type=str, default=27017, help='port for db access')
    parser.add_argument('--debug', action='store_true', help='print debug statements')
    parser.add_argument('--download_zips', action='store_true', help='download zip files from zillow to directory specified in --directory or pwd if no directory specified')
    parser.add_argument('--unzip_files', action='store_true', help='unzip zip files downloaded from zillow in directory specified in --directory or pwd if no directory specified')
    parser.add_argument('--parse_files', action='store_true', help='parse csv files in unzipped directories under --directory ( or pwd ), populate/update db')
    parser.add_argument('--dates_only', action='store_true', help='parse csv files in unzipped directories under --directory ( or pwd ), only update dates_document')
    parser.add_argument('--directory', type=str, default='.', help='directory to store/retrieve zip/csv files')
    parser.add_argument('--start_at', type=str, default='', help='begin populating db when you reach this file')
    parser.add_argument('dbs', nargs='*', help='update only these dbs. By default the script will update each one.')
    args = parser.parse_args()

    zp = ZillowParser(username=args.username, password=args.password, host=args.host, port=int(args.port))
    zp.debug = args.debug
    zp.directory = args.directory
    zp.start_file = args.start_at
    zp.dates_only = args.dates_only
    if ( len( args.dbs ) > 0 ):
        zp.db_update_list = args.dbs
    if ( zp.debug ):
        print 'Parser instantiated'
    if ( args.download_zips ):
        zp.download_zips()
    if ( args.unzip_files ):
        zp.unzip_files()
    if ( args.parse_files ):
        zp.parse_files()
from leetcode import test


def longest_valid_parentheses(s: str) -> int:
    """Return the length of the longest well-formed parentheses substring.

    Stack-of-indices technique: the bottom entry always marks the index
    just before the current valid run (-1 before anything is seen).
    """
    boundary = [-1]
    best = 0
    for idx, ch in enumerate(s):
        if ch == "(":
            boundary.append(idx)
            continue
        boundary.pop()
        if not boundary:
            # Unmatched ')': it becomes the new left boundary.
            boundary.append(idx)
        else:
            best = max(best, idx - boundary[-1])
    return best


test(
    longest_valid_parentheses,
    [
        ("(()", 2),
        (")()())", 4),
    ],
)
from socket import socket, AF_INET, SOCK_DGRAM
from datetime import datetime


class Server:
    """Minimal UDP chat server that relays datagrams between known clients."""

    def __init__(self):
        self.__num = 0
        self.__servidor_sk = socket(AF_INET, SOCK_DGRAM)
        self.__clientes = []   # addresses of connected clients
        self.__destinos = {}   # address -> display name
        host = ''
        porta = 33000
        self.__BUFSIZ = 1024
        self.__servidor_sk.bind((host, porta))

    def _run(self):
        """Main receive loop: dispatch joins, chat messages and quits."""
        print("*** Servidor de comunicação UDP iniciando. ***.")
        print("Protocolo de inicialização bem sucedido. \n")
        while True:
            codigo, cliente = self.__servidor_sk.recvfrom(self.__BUFSIZ)
            mensagem = codigo.decode()
            if cliente in self.__clientes:
                if mensagem == '{quit}':
                    self.__encerrando_conexao(cliente)
                else:
                    nome = self.__destinos[cliente]
                    mensagem = "{}: {}".format(nome, mensagem)
                    self.__enviando_msm_chat(mensagem)
            else:
                nome = self.__primeira_conexao(cliente)
                mensagem = "{} conectou-se ao chat".format(nome)
                self.__enviando_msm_chat(mensagem)

    def __enviando_msm_chat(self, mensagem):
        """Broadcast `mensagem`, prefixed with the current time, to all clients."""
        hora = datetime.now().strftime('%H:%M:%S')
        for cliente in self.__clientes:
            mensagem_2 = "{} {}".format(hora, mensagem)
            self.__servidor_sk.sendto(mensagem_2.encode(), cliente)

    def __encerrando_conexao(self, cliente):
        """Announce a quit and forget the client entirely.

        Bug fix: the original removed the client only from __destinos,
        leaving a stale address in __clientes, so broadcasts kept going to
        it and its next message raised KeyError on the name lookup.
        """
        nome = self.__destinos[cliente]
        mensagem = "{} Saiu do chat.".format(nome)
        # Broadcast before removal so the departing client sees the farewell.
        self.__enviando_msm_chat(mensagem)
        self.__destinos.pop(cliente)
        self.__clientes.remove(cliente)

    def __primeira_conexao(self, destino):
        """Greet a new client, read its name, and register it."""
        mensagem_1 = "Bem vindo ao protocolo de comunicação UDP."
        self.__servidor_sk.sendto(mensagem_1.encode(), destino)
        nome_c, cliente = self.__servidor_sk.recvfrom(self.__BUFSIZ)
        nome = nome_c.decode()
        self.__clientes.append(cliente)
        self.__destinos[cliente] = nome
        print(destino)
        return nome


if __name__ == '__main__':
    # Guard so importing this module no longer binds the port and blocks.
    servidor = Server()
    servidor._run()
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! import grpc import vtctldata_pb2 as vtctldata__pb2 class VtctlStub(object): """Service Vtctl allows you to call vt commands through gRPC. """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.ExecuteVtctlCommand = channel.unary_stream( '/vtctlservice.Vtctl/ExecuteVtctlCommand', request_serializer=vtctldata__pb2.ExecuteVtctlCommandRequest.SerializeToString, response_deserializer=vtctldata__pb2.ExecuteVtctlCommandResponse.FromString, ) class VtctlServicer(object): """Service Vtctl allows you to call vt commands through gRPC. """ def ExecuteVtctlCommand(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_VtctlServicer_to_server(servicer, server): rpc_method_handlers = { 'ExecuteVtctlCommand': grpc.unary_stream_rpc_method_handler( servicer.ExecuteVtctlCommand, request_deserializer=vtctldata__pb2.ExecuteVtctlCommandRequest.FromString, response_serializer=vtctldata__pb2.ExecuteVtctlCommandResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'vtctlservice.Vtctl', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,))
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
from .BaseModel import BaseModel
from ..utils import binary_metrics, binary_evaluation


class RF(BaseModel):
    """Random forest"""

    parametric = True
    # (Removed dead `bootlist = None` that was immediately overwritten.)
    bootlist = ["Y_pred", "model.eval_metrics_"]  # list of metrics to bootstrap

    def __init__(self, n_estimators=100, max_features="auto", max_depth=None, criterion="gini", min_samples_split=2, min_samples_leaf=1, max_leaf_nodes=None, n_jobs=None):
        self.model = RandomForestClassifier(n_estimators=n_estimators, max_features=max_features, max_depth=max_depth, criterion=criterion, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, max_leaf_nodes=max_leaf_nodes, n_jobs=n_jobs)
        self.k = n_estimators
        self.__name__ = 'cimcb.model.RF'
        self.__params__ = {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth, 'criterion': criterion, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'max_leaf_nodes': max_leaf_nodes, 'n_jobs': n_jobs}

    def set_params(self, params):
        """Re-initialise the model with a new parameter dict."""
        self.__init__(**params)

    def train(self, X, Y):
        """ Fit the RF model, save additional stats (as attributes) and return Y predicted values.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Predictor variables, where n_samples is the number of samples and n_features is the number of predictors.

        Y : array-like, shape = [n_samples, 1]
            Response variables, where n_samples is the number of samples.

        Returns
        -------
        y_pred_train : array-like, shape = [n_samples, 1]
            Predicted y score for samples.
        """
        # Ensure array and error check
        X, Y = self.input_check(X, Y)

        # Fit the model
        self.model.fit(X, Y)

        # Predict_proba was designed for multi-groups...
        # This makes it sure that y_pred is correct:
        # pick whichever probability column has AUC > 0.5 against Y.
        y_pred = self.model.predict_proba(X)
        pred_0 = roc_auc_score(Y, y_pred[:, 0])
        pred_1 = roc_auc_score(Y, y_pred[:, 1])
        if pred_0 > pred_1:
            self.pred_index = 0
        else:
            self.pred_index = 1

        # Calculate and return Y prediction value
        y_pred_train = np.array(self.model.predict_proba(X)[:, self.pred_index])
        # Placeholders so downstream plotting code expecting PLS-style
        # attributes does not fail on a forest model.
        self.model.y_loadings_ = np.array([0, 0, 0])
        self.model.x_scores_ = np.array([0, 0, 0])
        self.model.pctvar_ = np.array([0, 0, 0])

        # Storing X, Y, and Y_pred
        self.X = X
        self.Y = Y
        self.Y_pred = y_pred_train
        self.metrics_key = []
        self.model.eval_metrics_ = []
        bm = binary_evaluation(Y, y_pred_train)
        for key, value in bm.items():
            self.model.eval_metrics_.append(value)
            self.metrics_key.append(key)
        self.Y_train = Y
        self.Y_pred_train = y_pred_train
        self.model.eval_metrics_ = np.array(self.model.eval_metrics_)
        return y_pred_train

    def test(self, X, Y=None):
        """Calculate and return Y predicted value.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Test variables, where n_samples is the number of samples and n_features is the number of predictors.

        Returns
        -------
        y_pred_test : array-like, shape = [n_samples, 1]
            Predicted y score for samples.
        """
        # Convert X to a numpy array if it is a pandas DataFrame or Series.
        # Bug fix: the original wrote `isinstance(X, pd.DataFrame or pd.Series)`,
        # which evaluates `pd.DataFrame or pd.Series` first (-> pd.DataFrame),
        # so a Series was never converted.
        if isinstance(X, (pd.DataFrame, pd.Series)):
            X = np.array(X)

        # Calculate and return Y predicted value
        y_pred_test = np.array(self.model.predict_proba(X)[:, self.pred_index])
        if Y is not None:
            self.metrics_key = []
            self.model.eval_metrics_ = []
            bm = binary_evaluation(Y, y_pred_test)
            for key, value in bm.items():
                self.model.eval_metrics_.append(value)
                self.metrics_key.append(key)
            self.model.eval_metrics_ = np.array(self.model.eval_metrics_)
        self.Y_pred = y_pred_test
        return y_pred_test
# -*- coding: utf-8 -*- import os import sys try: from rov.database import db except: from database import db def prefsRootPath(): if sys.platform == "darwin": return os.path.expanduser("~/Library/Application Support/rov") elif sys.platform.startswith("win"): return os.path.join(os.environ['APPDATA'], "rov") else: return os.path.expanduser("~/.rov") class Prefs(): def __init__(self): # Check for ~/.rov if not os.path.isdir(prefsRootPath()): os.mkdir(prefsRootPath()) if not os.path.isdir(prefsRootPath() + "/photos"): os.mkdir(prefsRootPath() + "/photos") self.db = db.Db(os.path.join(prefsRootPath(), "prefs.db")) #self.configDb = db.Db(os.path.join(prefsRootPath(), "config.db")) #query = self.configDb.query("SELECT name FROM sqlite_master") #query = query.fetchall() #print query self.db.beginTransaction() self.db.checkTable("rov_server_settings", [ {"name": "name", "type": "text"}, {"name": "value", "type": "text"}]) self.db.checkTable("rov_misc_settings", [ {"name": "key", "type": "int"}, {"name": "value", "type": "text"}, {"name": "description", "type": "text"}, {"name": "type", "type": "text"}, {"name": "options", "type": "text"}]) self.db.checkTable("rov_plugin_settings", [ {"name": "key", "type": "text"}, {"name": "value", "type": "text"}, {"name": "description", "type": "text"}, {"name": "type", "type": "text"}, {"name": "options", "type": "text"}]) self.db.checkTable("rov", [ {"name": "name", "type": "text"}, {"name": "value", "type": "text"}]) self.db.checkTable("sensor", [ {"name": "name", "type": "text"}, {"name": "value", "type": "text"}]) self.db.checkTable("rovStreams", [ {"name": "name", "type": "text"}, {"name": "value", "type": "text"}]) self.db.commitTransaction() # Check rov server defaults self.checkDefaults("rov_server_settings", {"name": "timesRun", "value": "0"}) self.checkDefaults("rov_server_settings", {"name": "daemon", "value": "False"}) self.checkDefaults("rov_server_settings", {"name": "pidfile", "value": "False"}) 
self.checkDefaults("rov_server_settings", {"name": "pidFileName", "value": ""}) self.checkDefaults("rov_server_settings", {"name": "port", "value": 7000}) self.checkDefaults("rov_server_settings", {"name": "verbose", "value": "True"}) self.checkDefaults("rov_server_settings", {"name": "development", "value": "True"}) self.checkDefaults("rov_server_settings", {"name": "kiosk", "value": "False"}) self.checkDefaults("rov_server_settings", {"name": "noupdate", "value": "True"}) self.checkDefaults("rov_server_settings", {"name": "webroot", "value": ""}) # Check rov misc defaults self.checkDefaults("rov_misc_settings", data={'key': 'lights_capable', 'value': '0', 'description': 'Enable use of lights.', 'type': 'select', 'options': "{'1': 'Yes', '0': 'No'}"}) self.checkDefaults("rov_misc_settings", data={'key': 'calibration_lasers_capable', 'value': '0', 'description': 'Enable use of lasers.', 'type': 'select', 'options': "{'1': 'Yes', '0': 'No'}"}) self.checkDefaults("rov_misc_settings", data={'key': 'camera_mount_axis_capable', 'value': '0', 'description': 'Enable use of camera pan/tilt.', 'type': 'select', 'options': "{'1': 'Yes', '0': 'No'}"}) self.checkDefaults("rov_misc_settings", data={'key': 'compass_capable', 'value': '0', 'description': 'Enable use of compass.', 'type': 'select', 'options': "{'1': 'Yes', '0': 'No'}"}) self.checkDefaults("rov_misc_settings", data={'key': 'orientation_capable', 'value': '0', 'description': 'Enable use of accellorometer.', 'type': 'select', 'options': "{'1': 'Yes', '0': 'No'}"}) self.checkDefaults("rov_misc_settings", data={'key': 'depth_capable', 'value': '0', 'description': 'Enable use of pressure transducer.', 'type': 'select', 'options': "{'1': 'Yes', '0': 'No'}"}) # Check rov plugin defaults self.checkDefaults("rov_plugin_settings", data={'key': 'arduinofirmwareupload', 'value': '0', 'description': 'Enables remote arduino programming.', 'type': 'select', 'options': "{'1': 'Yes', '0': 'No'}"}) 
self.checkDefaults("rov_plugin_settings", data={'key': 'blackbox', 'value': '0', 'description': 'Enables datalogging.', 'type': 'select', 'options': "{'1': 'Yes', '0': 'No'}"}) self.checkDefaults("rov_plugin_settings", data={'key': 'capestatus', 'value': '0', 'description': 'Feedback for beaglebone cape.', 'type': 'select', 'options': "{'1': 'Yes', '0': 'No'}"}) self.checkDefaults("rov_plugin_settings", data={'key': 'compass', 'value': '0', 'description': 'Enables compass use.', 'type': 'select', 'options': "{'1': 'Yes', '0': 'No'}"}) self.checkDefaults("rov_plugin_settings", data={'key': 'diveprofile', 'value': '0', 'description': 'Enables depth tracking.', 'type': 'select', 'options': "{'1': 'Yes', '0': 'No'}"}) self.checkDefaults("rov_plugin_settings", data={'key': 'flybywire', 'value': '0', 'description': 'Enables fly-by-wire.', 'type': 'select', 'options': "{'1': 'Yes', '0': 'No'}"}) self.checkDefaults("rov_plugin_settings", data={'key': 'fpscounter', 'value': '0', 'description': 'Enables rate of travel tracking.', 'type': 'select', 'options': "{'1': 'Yes', '0': 'No'}"}) self.checkDefaults("rov_plugin_settings", data={'key': 'headsupmenu', 'value': '0', 'description': 'Enables heads-up menu.', 'type': 'select', 'options': "{'1': 'Yes', '0': 'No'}"}) self.checkDefaults("rov_plugin_settings", data={'key': 'horizon', 'value': '0', 'description': 'Enables artificial horizon.', 'type': 'select', 'options': "{'1': 'Yes', '0': 'No'}"}) self.checkDefaults("rov_plugin_settings", data={'key': 'motor_diags', 'value': '0', 'description': 'Enables motor diagnostics.', 'type': 'select', 'options': "{'1': 'Yes', '0': 'No'}"}) self.checkDefaults("rov_plugin_settings", data={'key': 'photocapture', 'value': '0', 'description': 'Enables capture of stills.', 'type': 'select', 'options': "{'1': 'Yes', '0': 'No'}"}) #Check rov defaults self.checkDefaults("rov", {"name": "deadzone_pos", "value": 50}) self.checkDefaults("rov", {"name": "deadzone_neg", "value": 50}) 
self.checkDefaults("rov", {"name": "smoothingIncriment", "value": 40}) self.checkDefaults("rov", {"name": "photoDirectory", "value": "~/.rov/photos"}) self.checkDefaults("rov", {"name": "water_type", "value": "Fresh Water"}) self.checkDefaults("rov", {"name": "thrust_modifier_port", "value": 1}) self.checkDefaults("rov", {"name": "thrust_modifier_starboard", "value": 1}) self.checkDefaults("rov", {"name": "thrust_modifier_vertical", "value": -1}) self.checkDefaults("rov", {"name": "thrust_modifier_nport", "value": 2}) self.checkDefaults("rov", {"name": "thrust_modifier_nstarboard", "value": 2}) self.checkDefaults("rov", {"name": "thrust_modifier_nvertical", "value": -2}) self.checkDefaults("rov", {"name": "debug", "value": "False"}) self.checkDefaults("rov", {"name": "debug_commands", "value": "False"}) self.checkDefaults("rov", {"name": "production", "value": "True"}) self.checkDefaults("rov", {"name": "dead_zone", "value": 10}) self.checkDefaults("rov", {"name": "video_device", "value": "/dev/video0"}) self.checkDefaults("rov", {"name": "serial_baud", "value": 115200}) self.checkDefaults("rov", {"name": "USE_MOCK", "value": "False"}) self.checkDefaults("rov", {"name": "title", "value": "BCC - ROV"}) self.checkDefaults("rov", {"name": "water_types", "value": "[\"Fresh Water\", \"Salt Water\"]"}) self.checkDefaults("rov", {"name": "joystick_name", "value": ""}) self.checkDefaults("rov", {"name": "CONTROL_UDP_IPSend", "value": "192.168.1.10"}) self.checkDefaults("rov", {"name": "CONTROL_UDP_IPReceive", "value": ""}) self.checkDefaults("rov", {"name": "CONTROL_UDP_PORT_SEND", "value": 5005}) self.checkDefaults("rov", {"name": "CONTROL_UDP_PORT_RECV", "value": 5006}) self.checkDefaults("rov", {"name": "ROV_UDP_IPSend", "value": "192.168.1.19"}) self.checkDefaults("rov", {"name": "ROV_UDP_IPReceive", "value": ""}) self.checkDefaults("rov", {"name": "ROV_UDP_PORT_SEND", "value": 5006}) self.checkDefaults("rov", {"name": "ROV_UDP_PORT_RECV", "value": 5005}) #Sensor 
Calibration Settings self.checkDefaults("sensor", {"name": "ACCEL_X_MIN", "value": 0.0}) self.checkDefaults("sensor", {"name": "ACCEL_X_MAX", "value": 0.0}) self.checkDefaults("sensor", {"name": "ACCEL_Y_MIN", "value": 0.0}) self.checkDefaults("sensor", {"name": "ACCEL_Y_MAX", "value": 0.0}) self.checkDefaults("sensor", {"name": "ACCEL_Z_MIN", "value": 0.0}) self.checkDefaults("sensor", {"name": "ACCEL_Z_MAX", "value": 0.0}) self.checkDefaults("sensor", {"name": "MAGN_X_MIN", "value": 0.0}) self.checkDefaults("sensor", {"name": "MAGN_X_MAX", "value": 0.0}) self.checkDefaults("sensor", {"name": "MAGN_Y_MIN", "value": 0.0}) self.checkDefaults("sensor", {"name": "MAGN_Y_MAX", "value": 0.0}) self.checkDefaults("sensor", {"name": "MAGN_Z_MIN", "value": 0.0}) self.checkDefaults("sensor", {"name": "MAGN_Z_MAX", "value": 0.0}) self.checkDefaults("sensor", {"name": "magn_ellipsoid_center", "value": "[0, 0, 0]"}) self.checkDefaults("sensor", {"name": "magn_ellipsoid_transform", "value": "[[0, 0, 0], [0, 0, 0], [0, 0, 0]]"}) self.checkDefaults("sensor", {"name": "GYRO_AVERAGE_OFFSET_X", "value": 0.0}) self.checkDefaults("sensor", {"name": "GYRO_AVERAGE_OFFSET_Y", "value": 0.0}) self.checkDefaults("sensor", {"name": "GYRO_AVERAGE_OFFSET_Z", "value": 0.0}) self.checkDefaults("sensor", {"name": "GYRO_GAIN", "value": 0.06957}) self.checkDefaults("sensor", {"name": "GRAVITY", "value": 256.0}) self.checkDefaults("sensor", {"name": "Kp_ROLLPITCH", "value": 0.02}) self.checkDefaults("sensor", {"name": "Ki_ROLLPITCH", "value": 0.00002}) self.checkDefaults("sensor", {"name": "Kp_YAW", "value": 1.2}) self.checkDefaults("sensor", {"name": "Ki_YAW", "value": 0.00002}) self.checkDefaults("sensor", {"name": "ACCEL_X_OFFSET", "value": 0}) self.checkDefaults("sensor", {"name": "ACCEL_Y_OFFSET", "value": 0}) self.checkDefaults("sensor", {"name": "ACCEL_Z_OFFSET", "value": 0}) self.checkDefaults("sensor", {"name": "ACCEL_X_SCALE", "value": 1}) self.checkDefaults("sensor", {"name": 
"ACCEL_Y_SCALE", "value": 1}) self.checkDefaults("sensor", {"name": "ACCEL_Z_SCALE", "value": 1}) self.checkDefaults("sensor", {"name": "MAGN_X_OFFSET", "value": 0}) self.checkDefaults("sensor", {"name": "MAGN_Y_OFFSET", "value": 0}) self.checkDefaults("sensor", {"name": "MAGN_Z_OFFSET", "value": 0}) self.checkDefaults("sensor", {"name": "MAGN_X_SCALE", "value": 1}) self.checkDefaults("sensor", {"name": "MAGN_Y_SCALE", "value": 1}) self.checkDefaults("sensor", {"name": "MAGN_Z_SCALE", "value": 1}) #Check video stream defaults self.checkDefaults("rovStreams", {"name": "videoURL", "value": "http://"}) self.checkDefaults("rovStreams", {"name": "video_port", "value": 8080}) def getDb(self): return self.db def checkDefaults(self, table, data): cursor = self.db.select(table, where=data) if not cursor.fetchone(): self.db.beginTransaction() self.db.insert(table, data) self.db.commitTransaction() def getPreference(self, table, name): cursor = self.db.select(table, where={"name": name}) row = cursor.fetchone() if not row: raise Exception("No preference " + name) return row["value"] def getrovSettingValue(self, key, default=None): try: data = self.db.select("rov_server_settings", where={"key": key}, what="value") value = data.fetchone() if value == '': return None return value except: return default def getRov(self, name): cursor = self.db.select("rov", where={"name": name}) row = cursor.fetchone() if not row: raise Exception("No rov property named: " + name) return row["value"] def getSensor(self, name): cursor = self.db.select("sensor", where={"name": name}) row = cursor.fetchone() if not row: raise Exception("No sensor property named: " + name) return row["value"] def getRovPlugin(self, key): cursor = self.db.select("rov_plugin_settings", where={"key": key}) row = cursor.fetchone() if not row: raise Exception("No rov plugin named: " + key) return row["value"] def getRovMisc(self, key): cursor = self.db.select("rov_misc_settings", where={"key": key}) row = cursor.fetchone() 
if not row: raise Exception("No rov option named: " + key) return row["value"] def getRovStreams(self): cursor = self.db.select("rovStreams") row = cursor.fetchall() return row def getRovPlugins(self): cursor = self.db.select("rov_plugin_settings") row = cursor.fetchall() return row def getAllRovMisc(self): cursor = self.db.select("rov_misc_settings") row = cursor.fetchall() return row def getTimesRun(self): return int(self.getPreference("rov_server_settings", "timesRun")) def getDaemon(self): return self.getPreference("rov_server_settings", "daemon") def getPidFile(self): return self.getPreference("rov_server_settings", "pidfile") def getPidFileName(self): return self.getPreference("rov_server_settings", "pidFileName") def getPort(self): return int(self.getPreference("rov_server_settings", "port")) def getVerbose(self): return self.getPreference("rov_server_settings", "verbose") def getDevelopment(self): return self.getPreference("rov_server_settings", "development") def getKiosk(self): return self.getPreference("rov_server_settings", "kiosk") def getNoUpdate(self): return self.getPreference("rov_server_settings", "noupdate") def getWebroot(self): return self.getPreference("rov_server_settings", "webroot") def incTimesRun(self): r = int(self.getTimesRun()) print((r)) self.db.beginTransaction() self.db.update("rov_server_settings", {"value": r + 1}, {"name": "timesRun"}) r = int(self.getTimesRun()) print((r)) self.db.commitTransaction() def setDaemon(self, value): self.db.beginTransaction() self.db.update("rov_server_settings", {"value": value}, {"name": "daemon"}) self.db.commitTransaction() def setPidFile(self, value): self.db.beginTransaction() self.db.insertOrUpdate("rov_server_settings", {"value": value}, {"name": "pidfile"}) self.db.commitTransaction() def setPidFileName(self, value): self.db.beginTransaction() self.db.insertOrUpdate("rov_server_settings", {"value": value}, {"name": "pidFileName"}) self.db.commitTransaction() def setPort(self, port): 
self.db.beginTransaction() self.db.insertOrUpdate("rov_server_settings", {"value": port}, {"name": "port"}) self.db.commitTransaction() def setVerbose(self, value): self.db.beginTransaction() self.db.insertOrUpdate("rov_server_settings", {"value": value}, {"name": "verbose"}) self.db.commitTransaction() def setDevelopment(self, value): self.db.beginTransaction() self.db.insertOrUpdate("rov_server_settings", {"value": value}, {"name": "development"}) self.db.commitTransaction() def setKiosk(self, value): self.db.beginTransaction() self.db.insertOrUpdate("rov_server_settings", {"value": value}, {"name": "kiosk"}) self.db.commitTransaction() def setNoUpdate(self, value): self.db.beginTransaction() self.db.insertOrUpdate("rov_server_settings", {"value": value}, {"name": "noupdate"}) self.db.commitTransaction() def setRov(self, name, value): self.db.beginTransaction() self.db.insertOrUpdate("rov", {"value": value}, {"name": name}) self.db.commitTransaction() def setSensor(self, name, value): self.db.beginTransaction() self.db.insertOrUpdate("sensor", {"value": value}, {"name": name}) self.db.commitTransaction() def addRovPlugin(self, data): cursor = self.db.select("rov_plugin_settings", where=data) if not cursor.fetchone(): self.db.beginTransaction() self.db.insert("rov_plugin_settings", data) self.db.commitTransaction() def addRovMiscSetting(self, data): cursor = self.db.select("rov_misc_settings", where=data) if not cursor.fetchone(): self.db.beginTransaction() self.db.insert("rov_misc_settings", data) self.db.commitTransaction() def addRovSetting(self, data): cursor = self.db.select("rov", where=data) if not cursor.fetchone(): self.db.beginTransaction() self.db.insert("rov", data) self.db.commitTransaction() def getRovServerSettings(self): timesRun = self.getTimesRun() daemon = self.getDaemon() pidFile = self.getPidFile() pidFilename = self.getPidFileName() port = self.getPort() verbose = self.getVerbose() dev = self.getDevelopment() kiosk = self.getKiosk() update = 
self.getNoUpdate() webroot = self.getWebroot() data = ({'timesRun': timesRun, 'daemon': daemon, 'pidFile': pidFile, 'pidFilename': pidFilename, 'port': port, 'verbose': verbose, 'dev': dev, 'kiosk': kiosk, 'update': update, 'webroot': webroot}) return {'success': True, 'data': data}
import os

import pygame

from HelpIcons import Icon

# Position of game screen
x_pos = 300
y_pos = 120
# (removed an unused `cmd = 'wmic ...'` leftover that was never executed)
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (x_pos, y_pos)
pygame.init()


# Create class to show Help Window
class Hint:
    """Help window listing control keys and food icons for the snake game."""

    windowWidth = 880
    windowHeight = 616

    # Init all required parameters
    def __init__(self):
        self._running = True
        self._display_surf = None
        self._background_surf = None
        self._W_surf = None
        self._A_surf = None
        self._S_surf = None
        self._D_surf = None
        self._P_surf = None
        self._Q_surf = None
        self._Esc_surf = None
        self._UP_surf = None
        self._LEFT_surf = None
        self._DOWN_surf = None
        self._RIGHT_surf = None
        self._SPACE_surf = None
        self._apple_surf = None
        self._orange_surf = None
        self._fish_surf = None
        # Grid positions of each icon on the help screen
        self.W = Icon(5, 4)
        self.A = Icon(5, 7)
        self.S = Icon(5, 10)
        self.D = Icon(5, 13)
        self.P = Icon(2, 16)
        self.Q = Icon(2, 19)
        self.Esc = Icon(5, 19)
        self.UP = Icon(2, 4)
        self.LEFT = Icon(2, 7)
        self.DOWN = Icon(2, 10)
        self.RIGHT = Icon(2, 13)
        self.SPACE = Icon(2, 22)
        self.apple = Icon(20, 4)
        self.orange = Icon(20, 10)
        self.fish = Icon(20, 16)

    # Load all photos to show
    def on_init(self):
        """Create the window and load every icon surface."""
        self._display_surf = pygame.display.set_mode((self.windowWidth, self.windowHeight))
        self._background_surf = pygame.image.load("start_font.jpg")
        self._running = True
        pygame.display.set_caption('Snake game')
        self._W_surf = pygame.image.load("W.png").convert()
        self._A_surf = pygame.image.load("A.png").convert()
        self._S_surf = pygame.image.load("S.png").convert()
        self._D_surf = pygame.image.load("D.png").convert()
        self._P_surf = pygame.image.load("P.png").convert()
        self._Q_surf = pygame.image.load("Q.png").convert()
        self._Esc_surf = pygame.image.load("Esc.png").convert()
        self._UP_surf = pygame.image.load("UP.png").convert()
        self._LEFT_surf = pygame.image.load("LEFT.png").convert()
        self._DOWN_surf = pygame.image.load("DOWN.png").convert()
        self._RIGHT_surf = pygame.image.load("RIGHT.png").convert()
        self._SPACE_surf = pygame.image.load("SPACE.png").convert()
        self._apple_surf = pygame.image.load("icon_apple.png").convert()
        self._orange_surf = pygame.image.load("icon_orange.jpg").convert()
        self._fish_surf = pygame.image.load("icon_fish.png").convert()

    # Define size and type of text
    @staticmethod
    def text_objects(text, colour, size="medium"):
        """Render *text* and return (surface, rect) for the requested size."""
        font_name = pygame.font.match_font('arial')
        small_font = pygame.font.SysFont(font_name, 35)
        med_font = pygame.font.SysFont(font_name, 45)
        large_font = pygame.font.SysFont(font_name, 95)
        # Local variable instead of the original module-level `global text_surface`.
        if size == "small":
            text_surface = small_font.render(text, True, colour)
        elif size == "large":
            text_surface = large_font.render(text, True, colour)
        else:  # "medium" (default)
            text_surface = med_font.render(text, True, colour)
        return text_surface, text_surface.get_rect()

    # Set position of text
    def message_to_screen(self, msg, colour, x_displace=0, y_displace=0, size="medium"):
        """Blit *msg* centred at (x_displace, window-centre + y_displace)."""
        text_surface, text_rectangle = self.text_objects(msg, colour, size)
        text_rectangle.center = (x_displace, int(self.windowHeight / 2) + y_displace)
        self._display_surf.blit(text_surface, text_rectangle)

    # Show all loaded photos
    def on_render(self):
        """Draw the background, every icon, and the explanatory text."""
        self._display_surf.blit(self._background_surf, (0, 0))
        self.W.draw(self._display_surf, self._W_surf)
        self.A.draw(self._display_surf, self._A_surf)
        self.S.draw(self._display_surf, self._S_surf)
        self.D.draw(self._display_surf, self._D_surf)
        self.P.draw(self._display_surf, self._P_surf)
        self.Q.draw(self._display_surf, self._Q_surf)
        self.Esc.draw(self._display_surf, self._Esc_surf)
        self.UP.draw(self._display_surf, self._UP_surf)
        self.LEFT.draw(self._display_surf, self._LEFT_surf)
        self.RIGHT.draw(self._display_surf, self._RIGHT_surf)
        self.DOWN.draw(self._display_surf, self._DOWN_surf)
        self.SPACE.draw(self._display_surf, self._SPACE_surf)
        self.orange.draw(self._display_surf, self._orange_surf)
        self.apple.draw(self._display_surf, self._apple_surf)
        self.fish.draw(self._display_surf, self._fish_surf)
        # Typo fix in the user-facing text: "lenght" -> "length".
        self.message_to_screen("Game Hint", (255, 255, 255), 440, -260, "large")
        self.message_to_screen(" - move UP - increase length by 1", (255, 255, 255), 470, -195)
        self.message_to_screen("- move LEFT", (255, 255, 255), 260, -130)
        self.message_to_screen(" - move DOWN - increase length by 5", (255, 255, 255), 470, -65)
        self.message_to_screen("- move RIGHT", (255, 255, 255), 275, 0)
        self.message_to_screen(" - Pause in game - will eat you", (255, 255, 255), 450, 70)
        self.message_to_screen("- Quit Game", (255, 255, 255), 270, 135)
        self.message_to_screen("- Move to main menu", (255, 255, 255), 385, 195)
        pygame.display.flip()

    # Main loop
    def on_execute(self):
        """Event loop: SPACE returns to the menu, Q/Esc (or closing) quits."""
        # NOTE(review): on_init() returns None, so this check can never fire;
        # kept for interface compatibility.
        if self.on_init() == False:
            self._running = False

        while self._running:
            pygame.event.pump()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    quit()
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_SPACE:
                        os.system("python start.py")
                        pygame.quit()
                        quit()
                    # BUG FIX: was `event.key == pygame.K_q or pygame.K_ESCAPE`,
                    # which is always truthy (the constant is non-zero), so ANY
                    # key press quit the program.
                    if event.key in (pygame.K_q, pygame.K_ESCAPE):
                        pygame.quit()
                        quit()
            self.on_render()

    @property
    def display_surf(self):
        return self._display_surf

    @property
    def background_surf(self):
        return self._background_surf


# Init Help Window
if __name__ == "__main__":
    Start = Hint()
    Start.on_execute()
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    # Initial schema for the DMARC aggregate-report app.
    # Model graph: Reporter -> Report -> Record -> Result, with Record also
    # pointing at admin.Domain via header_from.

    dependencies = [
        ('admin', '0002_migrate_from_modoboa_admin'),
    ]

    operations = [
        # One Record per source IP inside an aggregate report; stores the
        # policy disposition plus the evaluated DKIM/SPF results.
        migrations.CreateModel(
            name='Record',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('source_ip', models.GenericIPAddressField()),
                ('count', models.IntegerField()),
                ('disposition', models.CharField(max_length=10, choices=[(b'none', 'None'), (b'quarantine', 'Quarantine'), (b'reject', 'Reject')])),
                ('dkim_result', models.CharField(max_length=9, choices=[(b'none', 'None'), (b'neutral', 'Neutral'), (b'pass', 'Pass'), (b'fail', 'Fail'), (b'temperror', 'Temporary error'), (b'permerror', 'Permanent error'), (b'policy', 'Policy')])),
                ('spf_result', models.CharField(max_length=9, choices=[(b'none', 'None'), (b'neutral', 'Neutral'), (b'pass', 'Pass'), (b'fail', 'Fail'), (b'temperror', 'Temporary error'), (b'permerror', 'Permanent error'), (b'softfail', 'Soft failure')])),
                ('reason_type', models.CharField(max_length=15, blank=True)),
                ('reason_comment', models.CharField(max_length=100, blank=True)),
                ('header_from', models.ForeignKey(to='admin.Domain', on_delete=models.CASCADE)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # One Report per received aggregate report, carrying the published
        # DMARC policy parameters (adkim/aspf/p/sp/pct).
        migrations.CreateModel(
            name='Report',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('report_id', models.CharField(max_length=100)),
                ('start_date', models.DateTimeField()),
                ('end_date', models.DateTimeField()),
                ('policy_domain', models.CharField(max_length=100)),
                ('policy_adkim', models.CharField(max_length=1)),
                ('policy_aspf', models.CharField(max_length=1)),
                ('policy_p', models.CharField(max_length=10)),
                ('policy_sp', models.CharField(max_length=10)),
                ('policy_pct', models.SmallIntegerField()),
            ],
            options={
                'permissions': (('view_report', 'Can view report'),),
            },
            bases=(models.Model,),
        ),
        # The organisation that sent the report, unique per e-mail address.
        migrations.CreateModel(
            name='Reporter',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('org_name', models.CharField(max_length=100)),
                ('email', models.EmailField(unique=True, max_length=254)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Per-mechanism (DKIM or SPF) authentication result for a Record.
        migrations.CreateModel(
            name='Result',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('type', models.CharField(max_length=4, choices=[(b'dkim', b'DKIM'), (b'spf', b'SPF')])),
                ('domain', models.CharField(max_length=100)),
                ('result', models.CharField(max_length=9)),
                ('record', models.ForeignKey(to='dmarc.Record', on_delete=models.CASCADE)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='report',
            name='reporter',
            field=models.ForeignKey(to='dmarc.Reporter', on_delete=models.CASCADE),
            preserve_default=True,
        ),
        # A reporter may not submit two reports with the same report_id.
        migrations.AlterUniqueTogether(
            name='report',
            unique_together=set([('reporter', 'report_id')]),
        ),
        migrations.AddField(
            model_name='record',
            name='report',
            field=models.ForeignKey(to='dmarc.Report', on_delete=models.CASCADE),
            preserve_default=True,
        ),
    ]
def validate_word(word): w = word.lower() return len(word) == len([x for x in w if w.count(x) == w.count(w[0])]) ''' You are going to be given a word. Your job will be to make sure that each character in that word has the exact same number of occurrences. You will return true if it is valid, or false if it is not. For example: "abcabc" is a valid word because 'a' appears twice, 'b' appears twice, and'c' appears twice. "abcabcd" is NOT a valid word because 'a' appears twice, 'b' appears twice, 'c' appears twice, but 'd' only appears once! "123abc!" is a valid word because all of the characters only appear once in the word. For this kata, capitals are considered the same as lowercase letters. Therefore: 'A' == 'a' . #Input A string (no spaces) containing [a-z],[A-Z],[0-9] and common symbols. The length will be 0 < string < 100. #Output true if the word is a valid word, or false if the word is not valid. '''
# Copyright 2021-2022 NVIDIA Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from itertools import permutations import numpy as np import pytest import cunumeric as cn def _sum(shape, axis, lib, dtype=None): return lib.ones(shape).sum(axis=axis, dtype=dtype) # Try various non-square shapes, to nudge the core towards trying many # different partitionings. @pytest.mark.parametrize("axis", range(3), ids=str) @pytest.mark.parametrize("shape", permutations((3, 4, 5)), ids=str) def test_3d(shape, axis): assert np.array_equal(_sum(shape, axis, np), _sum(shape, axis, cn)) assert np.array_equal( _sum(shape, axis, np, dtype="D"), _sum(shape, axis, cn, dtype="D") ) if __name__ == "__main__": import sys sys.exit(pytest.main(sys.argv))
""" A professor asked Lisha to list up all the marks in front of her name but she doesn't know how to do it. Help her to use a container that solves this problem. Input Format First line of input contains name Next three lines contains marks of a student in three subjects. Constraints type(marks) = int Output Format Print the container which contains the name and all the marks and can be accessed easily NOTE: Use the variable name to store name and varibale marks to store the marks Sample Input 0 Lisha 98 97 99 Sample Output 0 {'name': 'Lisha', 'marks': [98, 97, 99]} """ # Solution name=input() marks=[] marks.append(int(input())) marks.append(int(input())) marks.append(int(input())) d={} d["name"]=name d["marks"]=marks print(d)
from django.contrib import admin from genericadmin.admin import TabularInlineWithGeneric, GenericAdminModelAdmin, GenericTabularInline from .models import Page, Menu, MenuItem @admin.register(Page) class PageAdmin(admin.ModelAdmin): list_display = ('title', 'published', 'created', 'updated') class MenuItemInline(TabularInlineWithGeneric): model = MenuItem extra = 1 @admin.register(Menu) class MenuAdmin(GenericAdminModelAdmin): list_display = ('name', 'position') inlines = [MenuItemInline, ] content_type_whitelist = ('azov_main/page',) # @admin.register(MenuItem) # class MenuItemAdmin(GenericAdminModelAdmin): # raw_id_fields = ('menu',)
# 文件读写 # StringIO和BytesIO # 操作文件和目录 # 序列化 ''' 读取文件内容,需要以读文件的模式打开一个文件对象,用Python的内置函数open(),传入文件名和标识符 读取文件分三步, 1、用open()函数打开,如若传入的文件地址不存在,会抛出IOError 2、调用read()方法一次读取文件的所有内容,Python会用一个str对象表示 3、读取完毕,用close()方法关闭文件(必须关闭) f=open('notfound.txt','r') f.read() f.close() ''' #引入with()语句,自动调用close()方法 #优点:不用每次打开一个文件,就要写一个close() with open('path/tp/file','r') as f : print(f.read()) ''' 因为read()每次是读取全部内容,如果文件较大,内存会爆满,可以使用read(size)方法, size表示每次最多读取的字节数。 调用readline(),可以每次对一行内容, readlines() 一次读取所有内容,饼按行返回list. 如果文件很小,read()一次性读取最方便, 如果不能确定文件大小,反复调用read(size)比较保险, 如果是配置文件,调用readlines()最方便 ''' for line in f.readlines(): print(line.strip()) #末尾的‘\n’删掉 #读取二进制未见,比如图片、视频等,用‘rb’模式打开文件即可: #f = open('/Users/michael/test.jpg', 'rb') #要读取非UTF-8编码的文本文件,需要给open()函数传入encoding参数 f = open('/Users/michael/gbk.txt', 'r', encoding='gbk',errors='ignore') #直接忽略遇到的编码问题 #写入文件,即在open()打开一个文件对象时,以写入文件的模式打开,传入标识符'w'或者'wb'表示写文本文件或写二进制文件: f = open('/Users/kiki/test.txt','w') f.write('Hello,World!') f.close() #当我们写文件时,操作系统不会马上把数据写入磁盘,而只是在内存缓存起来,空闲时才写入。 #只有在调用close()方法时,操作系统才会保证把没有写入的数据全部写入磁盘。如果忘记调用close) #数据可能只写了一部分,剩下没写入的就丢失了。所以还是采用with语句稳妥 with open('/Users/kiki/test.txt','w') as f: f.write('Hello,World!') #写入特定编码的文本文件,要给open()函数传入一个encoding参数,将字符自动转换成指定编码 #在内存读写时,使用StringIO和BytesIO #读写str时使用StringIO,读写二进制数据时,使用BytesIO #写入时,需要创建一个StringIO或BytesIO,然后像文件一样写入即可: from io import StringIO f = StringIO() f.write('hello') f.write(' ') f.write('world!') print(f.getvalue()) f = StringIO('Hello!\nHi\nGoodbye!') while True: s = f.readline() if s==' ': break print(s.strip()) #输出结果>>>hello world! #用getvalue() 获得写入后的str,也可以用readline()方法进行读取 from io import BytesIO f = BytesIO() # 写入的不是str,而是经过UTF-8编码的bytes print(f.write(('中文'.encode('utf-8')))) print(f.getvalue()) f = BytesIO(b'\xe4\xb8\xad\xe6\x96\x87') print(f.read()) # 操作文件和目录 import os # 如果输出是posix,说明系统是Linux、Unix或Mac OS X,如果是nt,就是Windows系统。 print(os.name) # 此处获取系统信息 ,但不再支持windows. 
# print(os.uname()) # 查看操作系统中定义的环境变量 print(os.environ) # 获取某个环境变量的值 # print(os.environ.get('PATH')) # 查看当前目录的绝对路径 # print(os.path.abspath('.')) # 在某个目录下创建一个新的目录,首先把新目录的完整路径表示出来 # print(os.path.join('d:/', 'testdir')) # 创建一个目录 # os.mkdir('d:/testdir') # 删除一个目录 # os.rmdir('d:/testdir') # 把两个路径合成一个时,不要直接拼字符串,而要通过os.path.join()函数 # print(os.path.split('d:/testdir/file.txt')) # os.path.splitext()可以直接得到文件扩展名 # print(os.path.splitext('d:/testdir/file.txt')) # 对文件重命名: # os.rename('test.txt', 'test.py') # 删掉文件: # os.remove('test.py') # 要列出当前目录下的所有目录 # print([x for x in os.listdir('.') if os.path.isdir(x)]) # 列出所有的.py文件 # print([x for x in os.listdir('.') if os.path.isfile(x) and os.path.splitext(x)[1] == '.py']) # 序列化 ''' pickle.dumps()方法把任意对象序列化成一个bytes,然后,就可以把这个bytes写入文件。 或者用另一个方法pickle.dump()直接把对象序列化后写入一个file-like Object: ''' import pickle #d = dict(name='Bob',age=20,score=88) # print(pickle.dumps(d)) # f = open('dump.txt', 'wb') # pickle.dump(d, f) # f.close() # f = open('dump.txt', 'rb') # d = pickle.load(f) # f.close() # print(d) # python对象转为json import json d = dict(name='Bob',age=20,score=88) # dumps()方法返回一个str,内容就是标准的JSON print(json.dump(d)) # Json反序列化为Python对象,用loads()或者对应的load()方法 json_str = '{"age": 20, "score": 88, "name": "Bob"}' print(json.loads(json_str)) class Student(object): def __init__(self, name, age, score): self.name = name self.age = age self.score = score def student2dict(std): return { 'name': std.name, 'age': std.age, 'score': std.score } s = Student('Bob', 20, 88) print(json.dumps(s, default=student2dict)) # 把任意class的实例变为dict print(json.dumps(s, default=lambda obj: obj.__dict__))
from flask import Flask, request, json, jsonify
from flask_cors import CORS, cross_origin
from jsonschema import validate
from collections import defaultdict
from brickschema.namespaces import RDF, RDFS, BRICK
import reasonable
import resolver
import logging
import rdflib
import re
import time
from contextlib import contextmanager
import sqlite3


def preprocess(column):
    """Normalise a raw column label: collapse whitespace/dashes, strip quotes,
    lowercase; returns None for an effectively empty string."""
    column = re.sub(' +', ' ', column)
    column = re.sub('\n', ' ', column)
    column = re.sub('-', ' ', column)
    column = column.strip().strip('"').strip("'").lower().strip()
    if not column :
        column = None
    return column


def fix_term(term):
    """Map a raw string to an rdflib term: URIRef when it looks like a URL,
    Literal otherwise (spaces and the sentinel 'unknown' force a Literal)."""
    if ' ' in term or term == 'unknown':
        return rdflib.Literal(term)
    if 'http' in term:
        return rdflib.URIRef(term)
    return rdflib.Literal(term)


def fix_triple(t):
    # Apply fix_term to each position of an (s, p, o) triple.
    return (fix_term(t[0]), fix_term(t[1]), fix_term(t[2]))


def update_graph(triples):
    # Add triples (after term fixing) to the module-level `graph`.
    for t in triples:
        t = tuple(map(fix_term, t))
        graph.add(t)


def rewrite_labels(triples):
    """Yield every triple; for rdfs:label triples additionally yield a
    brick:sourcelabel copy so the original source label is preserved."""
    for t in triples:
        if t[1] == RDFS.label:
            yield (t[0], BRICK.sourcelabel, t[2])
        yield t


class Triplestore:
    """Versioned triple storage over SQLite: each (source, timestamp) batch is
    kept, and the latest_* views expose only the newest batch per source."""

    def __init__(self, path):
        # check_same_thread=False: the Flask dev server may touch this
        # connection from more than one thread.
        self.conn = sqlite3.connect(path, check_same_thread=False)
        self.conn.execute("""CREATE TABLE IF NOT EXISTS triples (
            s TEXT NOT NULL,
            p TEXT NOT NULL,
            o TEXT NOT NULL,
            sourcename TEXT NOT NULL,
            timestamp TEXT NOT NULL
        );""")
        self.conn.execute("""CREATE VIEW IF NOT EXISTS latest_versions AS
            SELECT sourcename, max(timestamp) as timestamp FROM triples GROUP BY sourcename""")
        self.conn.execute("""CREATE VIEW IF NOT EXISTS latest_triples AS
            SELECT s, p, o, lv.sourcename as src, lv.timestamp as timestamp FROM triples
            INNER JOIN latest_versions AS lv ON lv.sourcename = triples.sourcename
            AND lv.timestamp = triples.timestamp""")

    @contextmanager
    def cursor(self):
        # Commit on success; always close the cursor.
        cur = self.conn.cursor()
        try:
            yield cur
            self.conn.commit()
        finally:
            cur.close()

    def add_triples(self, src, ts, triples):
        # list of 3-tuples
        with self.cursor() as cur:
            values = ((s, p, o, src, ts) for (s, p, o) in triples)
            cur.executemany("INSERT INTO triples(s, p, o, sourcename, timestamp) VALUES (?, ?, ?, ?, ?)", values)

    def to_records(self):
        # Latest triples grouped by source name, terms already fixed.
        records = defaultdict(list)
        with self.cursor() as cur:
            cur.execute("SELECT src, s, p, o FROM latest_triples")
            for row in cur:
                records[row[0]].append((fix_term(row[1]), fix_term(row[2]), fix_term(row[3])))
        return records

    def latest_version(self, srcname):
        # Newest timestamp recorded for *srcname* (a 1-tuple row, or None).
        with self.cursor() as cur:
            cur.execute("SELECT distinct timestamp FROM latest_triples WHERE src = ?", (srcname,))
            return cur.fetchone()

    def dump(self):
        # Debug helper: print every latest triple.
        with self.cursor() as cur:
            cur.execute("SELECT s, p, o FROM latest_triples")
            for row in cur:
                print(">", row)


triplestore = Triplestore("triples.db")

app = Flask(__name__, static_url_path='')
app.logger.setLevel(logging.DEBUG)
cors = CORS(app, send_wildcard=True)
_add_record_schema = json.load(open('./schemas/record.schema.json'))

# Base Brick ontology, fed into the OWL reasoner once at startup.
graph = rdflib.Graph()
graph.parse("ttl/Brick.ttl", format="ttl")
r = reasonable.PyReasoner()
r.from_graph(graph)

# Cached resolved graph; None means "needs recomputation".
resolved = None


@app.route('/add_record', methods=['POST'])
def add_triples():
    """Accept a JSON list of records, persist their triples, re-run the
    reasoner and refresh output.ttl; invalidates the resolve cache."""
    global resolved
    try:
        msg = request.get_json(force=True)
        validate(msg, schema=_add_record_schema)
    except Exception as e:
        print(e)
        return json.jsonify({'error': str(e)}), 500
    num_added = 0
    for rec in msg:
        if len(rec['triples']) == 0:
            continue
        # triples = map(tuple, rec['triples'])
        triples = map(fix_triple, rec['triples'])
        triples = rewrite_labels(triples)
        triples = list(triples)
        # for t in triples:
        #     print(t)
        r.load_triples(triples)
        triplestore.add_triples(rec['source'], rec['timestamp'], triples)
        num_added += len(triples)
    print(f"Updating graph with {num_added} triple")
    t0 = time.time()
    triples = r.reason()
    update_graph(triples)
    t1 = time.time()
    print(f"Graph now contains {len(graph)} triples (updated in {t1-t0:.2f} sec)")
    graph.serialize('output.ttl', format='ttl')
    # clear cache
    resolved = None
    # NOTE(review): `rec` here is the last record of the loop — presumably
    # clients post one record at a time; confirm.
    return jsonify({'latest': triplestore.latest_version(rec['source'])})


@app.route('/graph', methods=['GET'])
@cross_origin()
def get_graph():
    """Return the resolved graph as JSON-LD, recomputing it if stale."""
    global resolved
    if resolved is None:
        make_resolve_graph()
    context = {"@vocab": "https://brickschema.org/schema/1.1/Brick#", "@language": "en"}
    return resolved.serialize(format='json-ld', context=context)


@app.route('/resolve', methods=['GET'])
def resolve():
    """Force recomputation of the resolved graph; report its size."""
    global resolved
    make_resolve_graph()
    return jsonify({'size': len(resolved)})


def make_resolve_graph():
    """Run entity resolution over all stored records, keep only
    Equipment/System/Point/Location entities, and prune non-most-specific
    rdf:type assertions; result cached in `resolved` and resolved.ttl."""
    global resolved
    t0 = time.time()
    records = triplestore.to_records()
    graph, _ = resolver.resolve(records)
    t1 = time.time()
    print(f"Resolve took {t1-t0:.2f} seconds, had {len(graph)} triples")
    res = list(graph.query("SELECT ?s ?p ?o WHERE { \
        ?s rdf:type ?type .\
        { ?type rdfs:subClassOf+ brick:Equipment } \
        UNION \
        { ?type rdfs:subClassOf+ brick:System } \
        UNION \
        { ?type rdfs:subClassOf+ brick:Point } \
        UNION \
        { ?type rdfs:subClassOf+ brick:Location } \
        ?s ?p ?o . \
        FILTER (!isBlank(?o))}"))
    resolved = rdflib.Graph()
    # add everything to the graph
    for row in res:
        resolved.add(row)
    # loop through and remove
    entities = set((r[0] for r in res))
    for ent in entities:
        eclasses = list(resolved.objects(predicate=rdflib.RDF.type, subject=ent))
        print("\n\nlook at", ent)
        if len(eclasses) == 1:
            print("only", eclasses)
            continue
        for eclass in eclasses:
            # if eclass is not the most specific class, remove this triple
            print(f"SELECT ?subc WHERE {{ ?subc rdfs:subClassOf+ <{eclass}> }}")
            subclasses = [r[0] for r in graph.query(f"SELECT ?subc WHERE {{ ?subc rdfs:subClassOf+ <{eclass}> }}")]
            if len(subclasses) < 10:
                print(f"----{eclass}:\nsubclasses {subclasses}\nbase {eclasses}")
            else:
                print(f"----{eclass}:\nsubclasses {len(subclasses)}\nbase {eclasses}")
            if len(subclasses) == 0:
                print(f"class {eclass} is specific (is leaf class)")
                continue
            overlap = len(set(subclasses).intersection(set(eclasses)))
            if overlap > 2 or (overlap <= 2 and eclass not in subclasses):
                print(f"class {eclass} is not specific enough (overlap) {overlap}")
                resolved.remove((ent, rdflib.RDF.type, eclass))
            else:
                print(f"class {eclass} is specific")
    resolved.serialize('resolved.ttl', format='ttl')


if __name__ == '__main__':
    app.run(host='localhost', port='6483')
import numpy as np from mnist import load_mnist from PIL import Image def img_show(img): pil_img = Image.fromarray(np.uint8(img)) pil_img.show() (x_train, t_train), (x_test, t_test) = load_mnist(flatten=True,normalize=False) img = x_train[0] label = t_train[0] print(label) # 5 print(img.shape) # (784,) img = img.reshape(28, 28) # 형상을 원래 이미지의 크기로 변형 print(img.shape) # (28, 28) img_show(img)
# Generated by Django 3.2 on 2021-08-22 14:50 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('authors', '0002_rename_authors_author'), ] operations = [ migrations.AddField( model_name='author', name='middlename', field=models.CharField(blank=True, max_length=50, null=True), ), ]
from services import promise_service promise_service.PromiseService()
#-*- coding:utf8 -*- # Copyright (c) 2020 barriery # Python release: 3.7.0 # Create time: 2020-04-04 from setuptools import setup, find_packages from grpc_tools import protoc # run bdware proto codegen protoc.main(( '', '-I.', '--python_out=.', '--grpc_python_out=.', './scheduletool/bdware/proto/schedule_service.proto', )) protoc.main(( '', '-I.', '--python_out=.', '--grpc_python_out=.', './scheduletool/buaacps/proto/entity.proto', )) protoc.main(( '', '-I.', '--python_out=.', '--grpc_python_out=.', './scheduletool/buaacps/proto/result.proto', )) setup( name='scheduling-tool', packages=find_packages(where='.'), version='0.0.0', description='the scheduling tool for CloudCPS.', keywords='scheduling CloudCPS', author='barriery', url='https://github.com/barrierye/SchedulingTool', install_requires=[ 'protobuf>=3.12.2', 'PyMySQL==0.9.3', 'sshtunnel==0.1.5', ], )
#! /usr/bin/python
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

plt.rc('axes', titlesize=16)    # fontsize of the axes title
plt.rc('axes', labelsize=16)    # fontsize of the x and y labels
plt.rc('xtick', labelsize=12)   # fontsize of the tick labels
plt.rc('ytick', labelsize=12)   # fontsize of the tick labels
plt.rc('legend', fontsize=12)   # legend fontsize
#plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title

#hash_size=[16, 32, 64, 128, 256, 512]
#freq =[200, 190, ]
raw_data = {'graph': ['Youtube', 'LiveJournal', 'Pokec', 'RMAT-19-32', 'RMAT-21-32'],
            'Hash-16K': [37.856, 32.94, 15.897, 46.927, 29.432],
            'Hash-32K': [43.638, 36.454, 20.594, 59.586, 40.099],
            'Hash-2x16K': [43.794, 36.669, 20.644, 62.129, 41.238],
            'Hash-64K': [50.023, 40.722, 27.455, 72.192, 52.049],
            'Hash-4x16K': [50.239, 41.302, 27.557, 76.571, 54.652],
            #'Hash-4x16K-visited-1x16K': [53.349, 41.916, 29.166, 79.286, 56.035],
            'Hash-128K': [56.734, 46.386, 37.499, 82.962, 64.302],
            #'Hash-4x16K-visited-4x16K': [56.146, 44.254, 34.104, 85.135, 66.008],
            'Hash-8x16K': [56.802, 46.922, 37.686, 87.944, 68.526],
            'Hash-256K': [63.3, 52.975, 51.363, 91.06, 75.758],
            'Hash-16x16K': [62.457, 53.638, 51.729, 93.737, 81.091],
            'Hash-512K': [67.604, 61.359, 68.47, 96.326, 85.312],
            'Hash-32x16K': [66.269, 61.919, 68.336, 95.395, 90.359]}

label = ('Hash-16K', 'Hash-32K', 'Hash-2x16K', 'Hash-64K', 'Hash-4x16K',
         'Hash-128K', 'Hash-8x16K', 'Hash-256K', 'Hash-16x16K', 'Hash-512K',
         'Hash-32x16K')
df = pd.DataFrame(raw_data, columns=['graph'] + list(label))

# Setting the positions and width for the bars
pos = list(range(len(df['Hash-16K'])))
width = 0.07
ecolor = 'k'
lw = 0.5
# BUG FIX: `print pos` was Python 2 statement syntax (a SyntaxError under
# Python 3); the function form works on both.
print(pos)

#cmap = plt.get_cmap('jet')
#colors = cmap(np.linspace(0, 1.0, len(label)))

# Plotting the bars
fig, ax = plt.subplots(figsize=(10, 5))

# (color, hatch) per series, in the same order as `label`.  This replaces
# eleven copy-pasted plt.bar() calls with one data-driven loop.
bar_styles = [('chocolate', 4 * '/'),
              ('r', 4 * '.'),
              ('r', 2 * 'o'),
              ('b', 4 * 'x'),
              ('b', 4 * 'O'),
              ('y', 4 * '-'),
              ('y', 4 * '+'),
              ('g', 4 * '\\'),
              ('g', 2 * '*'),
              ('c', 2 * '-'),
              ('c', 2 * '/')]

for i, (col_name, (bar_color, bar_hatch)) in enumerate(zip(label, bar_styles)):
    # Each series is shifted right by one bar width per series index.
    plt.bar([p + i * width for p in pos],
            df[col_name],
            width,
            linewidth=lw,
            edgecolor=ecolor,
            alpha=0.5,
            hatch=bar_hatch,
            color=bar_color,
            label=col_name)

# Set the y axis label
ax.set_ylabel('Normalized Performance')

# Set the chart's title
#ax.set_title('Test Subject Scores')

# Set the position of the x ticks (centred under the group of bars)
ax.set_xticks([p + 5 * width for p in pos])

# Set the labels for the x ticks
ax.set_xticklabels(df['graph'])

vals = ax.get_yticks()
ax.set_yticklabels(['{:3.2f}%'.format(x) for x in vals])
ax.grid(linewidth=0.5)
ax.xaxis.grid(False)

# Setting the x-axis and y-axis limits
#plt.xlim(min(pos)-width, max(pos)+width*4)
#plt.ylim([0.6, 1.5])

# Adding the legend and showing the plot
ret = plt.legend(list(label), loc='upper left', ncol=3)
ret.get_frame().set_alpha(0.4)
#bbox_to_anchor=(0.1, 0.3))
#plt.grid()
#plt.show()
plt.savefig("../hash-redundancy.pdf", bbox_inches='tight')
#!/usr/bin/env python3 # -*- coding: utf-8 -*- __version__ = '1.0.1' create_hw_action_element_query = """ INSERT INTO public.hw_action AS hwa (name, proto_field, meta_information, min_value, max_value, active, deleted, created_on, updated_on) VALUES ($1, $2, $3, $4, $5, $6, FALSE, now(), now()) RETURNING *; """
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponse
from django.utils import timezone
from .models import *


# Main page
def index(request):
    """Landing page: latest posts, slider, idioms, student life, gallery, etc.

    NOTE(review): the ``qs.all()[qs.count() - 1]`` pattern used below raises
    IndexError whenever a table is empty — consider ``qs.last()``.
    """
    post = Post.objects.order_by('-id')[:4]
    main_post = Post.objects.all()[Post.objects.count() - 1]
    slider = Slider.objects.order_by('-id')[:4]
    idiom = Idiom.objects.order_by('-id')[:5]
    studlife = Studlife.objects.order_by('-id')[:5]
    gallery = PhotoGallery.objects.order_by('-id')[:5]
    abiturient = Abiturient.objects.all()[Abiturient.objects.count() - 1]
    structure = Structure.objects.only('str_category')
    studlife_main = Studlife.objects.all()[Studlife.objects.count() - 1]
    ads_main = Ads.objects.all()[Ads.objects.count() - 1]
    # BUG FIX: the last competition was indexed with Post.objects.count(),
    # which crashes (or picks the wrong row) whenever the two tables differ.
    main_competition = Competition.objects.all()[Competition.objects.count() - 1]
    parms = {
        "posts": post,
        "slider": slider,
        "main_post": main_post,
        "idiom": idiom,
        "studlife": studlife,
        "studlife_main": studlife_main,
        "ads_main": ads_main,
        "main_competition": main_competition,
        "gallery": gallery,
        "abiturient": abiturient,
        "structure": structure,
    }
    return render(request, 'index.html', parms)


def error_404(request):
    """Custom 404 handler: send the user back to the main page."""
    # BUG FIX: redirect() only builds the response object — it must be
    # returned, otherwise the view returns None and Django raises ValueError.
    return redirect('/')


def structure_detail(request):
    """FAQ page: all structure entries plus the category-1 subset."""
    structure = Structure.objects.all()
    # BUG FIX: the context referenced an undefined name (str_detail) while the
    # queryset was bound to str_education, causing a NameError on every request.
    str_detail = Structure.objects.filter(str_category=1)
    parms = {
        "structure": structure,
        "str_detail": str_detail,
    }
    return render(request, 'faqs.html', parms)


# Structure 1
def structure_education(request):
    str_education = Structure.objects.filter(str_category=1)
    parms = {
        "str_education": str_education,
    }
    return render(request, 'structure_education.html', parms)


# Structure 2
def structure_edc_plan(request):
    str_edc_plan = Structure.objects.filter(str_category=2)
    parms = {
        "str_edc_plan": str_edc_plan,
    }
    return render(request, 'structure_education_plan.html', parms)


# Structure 3: person-centred teaching
def structure_development(request):
    str_development = Structure.objects.filter(str_category=3)
    parms = {
        "str_development": str_development,
    }
    return render(request, 'structure_development.html', parms)


# Structure 4: admission and education
def structure_reception(request):
    str_reception = Structure.objects.filter(str_category=4)
    parms = {
        "str_reception": str_reception,
    }
    return render(request, 'structure_reception.html', parms)


# Structure 5: teaching and teaching-support staff
def structure_komokchu_kuram(request):
    str_komokchu_kuram = Structure.objects.filter(str_category=5)
    parms = {
        "str_komokchu_kuram": str_komokchu_kuram,
    }
    return render(request, 'structure_komokchu_kuram.html', parms)


# Structure 6: material/technical base and information resources
def structure_tech_base(request):
    str_tech_base = Structure.objects.filter(str_category=6)
    parms = {
        "str_tech_base": str_tech_base,
    }
    return render(request, 'structure_tech_baza.html', parms)


# Structure 7: information management and public communication
def structure_info_leading(request):
    str_info_leading = Structure.objects.filter(str_category=7)
    parms = {
        "str_info_leading": str_info_leading,
    }
    return render(request, 'structure_leading.html', parms)


# Slider detail
def slider_detail(request, key):
    slider = get_object_or_404(Slider, pk=key)
    parms = {
        "slider": slider,
    }
    return render(request, 'slider_detail.html', parms)


# News detail
def post_detail(request, key):
    post = get_object_or_404(Post, pk=key)
    parms = {
        "post": post
    }
    return render(request, 'post-item-details.html', parms)


# Student council news detail
def studlife_detail(request, key):
    studlife_detail = get_object_or_404(Studlife, pk=key)
    parms = {
        "studlife_detail": studlife_detail,
    }
    return render(request, 'studlife_detail.html', parms)


# Announcements detail
def ads_detail(request, key):
    ads_detail = get_object_or_404(Ads, pk=key)
    parms = {
        "ads_detail": ads_detail,
    }
    return render(request, 'ads_detail.html', parms)


# Competition detail
def competition_detail(request, key):
    comp_detail = get_object_or_404(Competition, pk=key)
    parms = {
        "comp_detail": comp_detail,
    }
    return render(request, 'competition.html', parms)


# Applicant (abiturient) detail
def abiturient_detail(request):
    abiturient_detail = get_object_or_404(Abiturient)
    parms = {
        "abiturient_detail": abiturient_detail,
    }
    # BUG FIX: the context was built but an empty dict was passed to render(),
    # so the template never saw abiturient_detail.
    return render(request, 'abiturient.html', parms)


# View all posts
def post_list(request):
    all_post = Post.objects.all().order_by('-id')
    paginator = Paginator(all_post, 6)
    page_number = request.GET.get('page', 1)
    try:
        posts = paginator.page(page_number)
    except PageNotAnInteger:
        # If page is not an integer, deliver the first page.
        posts = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver the last page.
        posts = paginator.page(paginator.num_pages)
    parms = {
        "posts": posts,
    }
    return render(request, 'all_posts.html', parms)


# Studlife list
def studlife_list(request):
    all_post = Studlife.objects.all().order_by('-id')
    paginator = Paginator(all_post, 6)
    page_number = request.GET.get('page', 1)
    try:
        studlife_posts = paginator.page(page_number)
    except PageNotAnInteger:
        studlife_posts = paginator.page(1)
    except EmptyPage:
        studlife_posts = paginator.page(paginator.num_pages)
    parms = {
        "posts": studlife_posts,
    }
    return render(request, 'studlife_list.html', parms)


# Photo gallery, all posts
def photo_gallery(request):
    all_photo = PhotoGallery.objects.all().order_by('-id')
    paginator = Paginator(all_photo, 10)
    page_number = request.GET.get('page', 1)
    try:
        photos = paginator.page(page_number)
    except PageNotAnInteger:
        # If page is not an integer, deliver the first page.
        photos = paginator.page(1)
    except EmptyPage:
        # If page is out of range, deliver the last page.
        photos = paginator.page(paginator.num_pages)
    parms = {
        "pag_photo": photos,
    }
    return render(request, 'gallery.html', parms)


def structure(request, key):
    # TODO(review): this view never returns an HttpResponse — Django will
    # raise ValueError if it is ever routed to. Finish or remove it.
    all_structure = Structure.objects.all()
import pytest

from task_19.app.application import Application


@pytest.fixture
def app(request):
    """Provide a fresh Application per test; quit it during teardown."""
    app = Application()
    # The finalizer runs after the test, even on failure, so the app always closes.
    request.addfinalizer(app.quit)
    return app


def test_can_add_product_to_cart(app):
    # Add a known product, then remove it again to leave the cart clean.
    # NOTE(review): the method is named remove_product_to_cart — presumably
    # "remove_product_from_cart" was intended; confirm against Application's API.
    app.add_product_to_cart(product_name='Green Duck')
    app.remove_product_to_cart()
# coding=utf-8
"""
Problem: There is an m-row, n-column grid. A robot starts at cell (0, 0) and
moves one cell left/right/up/down at a time, but may not enter any cell whose
row and column digit sums together exceed k. E.g. with k = 18 the robot may
enter (35, 37) because 3+5+3+7 = 18, but not (35, 38) because 3+5+3+8 = 19.
How many cells can the robot reach?
"""


def moving_count_core(rows, columns, row, column, visited_matrix, k):
    """DFS helper: count reachable, not-yet-visited cells from (row, column)."""

    def get_digit_sum(number):
        # Sum of decimal digits. BUG FIX: the original used `/=`, which is
        # float division on Python 3 and corrupts the digit sum; `//=` is
        # correct on both Python 2 and 3.
        total = 0
        while number > 0:
            total += number % 10
            number //= 10
        return total

    def check_position(row, column, k):
        # A cell is allowed when the combined digit sum is within the threshold.
        return get_digit_sum(row) + get_digit_sum(column) <= k

    moving_count = 0
    if (
        0 <= row < rows
        and 0 <= column < columns
        and not visited_matrix[row][column]
        and check_position(row, column, k)
    ):
        moving_count += 1
        visited_matrix[row][column] = True
        # Explore the four neighbours; visited_matrix prevents revisiting.
        moving_count += moving_count_core(rows, columns, row + 1, column, visited_matrix, k) + \
            moving_count_core(rows, columns, row - 1, column, visited_matrix, k) + \
            moving_count_core(rows, columns, row, column + 1, visited_matrix, k) + \
            moving_count_core(rows, columns, row, column - 1, visited_matrix, k)
    return moving_count


def moving_count(rows, columns, k):
    """Count the cells reachable from (0, 0) in a rows x columns grid.

    Returns 0 for an empty grid or a negative threshold k.
    """
    # BUG FIX: the guard used `and`, so e.g. rows=0 with a valid k slipped
    # past it and built a malformed visited matrix; any bad input must bail.
    if rows < 1 or columns < 1 or k < 0:
        return 0
    visited_matrix = [[False for _ in range(columns)] for _ in range(rows)]
    return moving_count_core(rows, columns, 0, 0, visited_matrix, k)


if __name__ == '__main__':
    # Single-argument print() works on both Python 2 and 3.
    print(moving_count(3, 3, 3))
with open(r"C:\Users\admin\OneDrive\デスクトップ\python1\07\06-extract-in_SJIS.csv") as file: count = 0 for line in file: data = line.split(",") if count <= 2: count = count + 1 continue print(f"{data[0]}:{data[1]}") # ここは無くても良い with open(r"C:\Users\admin\OneDrive\デスクトップ\python1\07\20k1026-06-extract.txt",mode="a") as file: file.write(f"{data[0]},{data[1]}\n") #
# test_arithmetic.py

# import code to be tested
from sum import sum


def test_smoke():
    """Sanity check that the test runner itself works."""
    assert True


def test_sum():
    """sum(a, b) returns the arithmetic sum for ints, floats, and negatives."""
    cases = [
        ((3, 4), 7),
        ((5.5, 12), 17.5),
        ((-3, 35), 32),
        ((1234, 5678), 6912),
    ]
    for (a, b), expected in cases:
        assert sum(a, b) == expected
from jsonschema import validate
import psutil
import os

# For every running process, report: executable basename, thread count, and
# total CPU time (user + system) summed across all of its threads.
for pid in psutil.pids():
    try:
        proc = psutil.Process(pid)
        # Computed once and reused (the original recomputed it for the print).
        exeFile = os.path.basename(proc.exe())
        # BUG FIX: use the public, cross-platform Process.threads() API instead
        # of the private, Windows-only psutil._psutil_windows.proc_threads().
        # Each entry is a named tuple (id, user_time, system_time).
        threads = proc.threads()
        times = sum(t.user_time + t.system_time for t in threads)
        print('=' * 40)
        print('Exe file:', exeFile)
        print('Number of threads:', len(threads))
        print('Time used:', times)
    except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess, OSError):
        # Processes can exit or deny access mid-iteration; skip only those
        # instead of the original bare `except: pass`, which hid real bugs.
        continue
# Task: Implement cut command
import argparse
import glob
import os
import fileinput
import sys


class Cut(object):
    """Minimal cut(1) clone: split stdin lines on -d, select fields with -f.

    -f accepts a single 1-based field ("2") or an inclusive range ("1-3"),
    matching the numbering convention of the real cut command.
    """

    def __init__(self):
        parser = argparse.ArgumentParser()
        parser.add_argument("-d", help="Delimeter", type=str)
        parser.add_argument("-f", help="Fields", type=str)
        self._top_level_args = parser.parse_args()
        print('action=__init__ top_level_args=%s', self._top_level_args)

    def cut(self):
        """Process every line from stdin."""
        for line in sys.stdin.readlines():
            self._process(line)

    def _process(self, line):
        """Apply the -d split and -f selection (when given), then print."""
        if self._top_level_args.d:
            line = self._split(line)
        if self._top_level_args.f:
            line = self._get_by_field(line)
        print(line)

    def _split(self, line):
        """Split the raw line on the -d delimiter."""
        return line.split(self._top_level_args.d)

    def _get_by_field(self, arr):
        """Return the selected fields (1-based) from the already-split line.

        BUG FIX: the original called .find() on the argparse Namespace
        (AttributeError), compared with `is not -1` (identity, not equality),
        indexed the list with strings, and never returned a value.
        """
        spec = self._top_level_args.f
        if spec.find('-') != -1:
            start, end = spec.split('-')
            # cut ranges are 1-based and inclusive on both ends.
            return arr[int(start) - 1:int(end)]
        return [arr[int(spec) - 1]]


if __name__ == '__main__':
    Cut().cut()
import curses
import screen_setup as ss
import sys
import os
import time
import subprocess
import distro_obj as dis
from downloader_class import Downloader
import downloader_class as dc
from subprocess import DEVNULL, STDOUT, run, Popen

# Module-level curses bootstrap: create the screen and colour pairs once.
screen, red, yellow_background, blue, default, yellow_text, cyan_dots = ss.curses_setup()
window_height, window_width = ss.window_res(screen)


def _play_sound(wav_name):
    """Fire-and-forget playback of a .wav from the distro file directory.

    Extracted helper: the same aplay/devnull snippet was duplicated four times.
    """
    with open(os.devnull, 'wb') as devnull:
        subprocess.Popen(['aplay', dis.file_directory + wav_name],
                         stdout=devnull, stderr=subprocess.STDOUT)


def odds_screen():
    """Show the per-player download odds until the user presses space."""
    # Audio file for the welcome screen ("smash"-themed sound effect).
    _play_sound('smash.wav')
    # Printing player info
    while True:
        try:
            middle_height, middle_width = ss.window_res(screen)
            screen.addstr(5, middle_width - 10, "*press space to start*")
            screen.addstr(3, middle_width - 8, "Odds Display Page", curses.A_UNDERLINE)
            screen.border()
            # y_offset places each player's panel; x indexes the per-player arrays.
            y_offset = 5
            x = 0
            for p in dis.players_array:
                # OS being downloaded for this player
                screen.addstr(y_offset + 4, 13, "Player" + str(x + 1) + " |::| OS:", curses.A_BOLD)
                screen.addstr(y_offset + 4, 26, str(dis.url_array_random_os[x]), curses.color_pair(cyan_dots))
                screen.addstr(y_offset + 10, 10, "-" * (window_width * 2 - 20))
                # File name
                screen.addstr(y_offset + 4, 45, "| File: ", curses.A_BOLD)
                screen.addstr(y_offset + 4, 53, str(dis.five_files[x]), curses.color_pair(default))
                # URL
                screen.addstr(y_offset + 8, 24, '| URL:')
                screen.addstr(y_offset + 8, 30, str(dis.url_array[x]))
                # Latency
                screen.addch(y_offset + 6, 24, '|', curses.A_BOLD)
                screen.addstr(y_offset + 6, 26, 'Latency: ', curses.A_BOLD)
                screen.addstr(y_offset + 6, 35, p.latency + ' ms', curses.A_STANDOUT)
                # File size
                screen.addstr(y_offset + 6, 45, '| File Size: ', curses.A_BOLD)
                screen.addstr(y_offset + 6, 58, '~ ' + dc.file_size_run[x] + ' Mbs', curses.color_pair(cyan_dots))
                # Offset the next racer in the y direction; advance player index.
                y_offset += 8
                x += 1
            # Space to continue to the next screen.
            key = screen.getkey()
            if key == ' ':
                _play_sound('smash-effect.wav')
                time.sleep(1.5)
                break
            screen.clear()
            screen.refresh()
            time.sleep(.1)
        except KeyboardInterrupt:
            curses.endwin()
            sys.exit("Keyboard, Interrupt Quitting...")


# Print the pre-stats welcome banner.
def print_pre_intro():
    """Draw the ASCII welcome banner and play the intro sound."""
    try:
        middle_height, middle_width = ss.window_res(screen)
        screen.border()
        screen.addstr(middle_height-8, middle_width-35, " __ __ _ _____ _____ _ ", curses.A_BOLD)
        screen.addstr(middle_height-7, middle_width-35, "/ / /\ \ \___| | ___ ___ _ __ ___ ___ /__ \___ /__ \ |__ ___ ", curses.A_BOLD)
        screen.addstr(middle_height-6, middle_width-35, "\ \/ \/ / _ \ |/ __/ _ \| '_ ` _ \ / _ \ / /\/ _ \ / /\/ '_ \ / _ \\", curses.A_BOLD)
        screen.addstr(middle_height-5, middle_width-35, " \ /\ / __/ | (_| (_) | | | | | | __/ / / | (_) | / / | | | | __/ ", curses.A_BOLD)
        screen.addstr(middle_height-4, middle_width-35, " \/ \/ \___|_|\___\___/|_| |_| |_|\___| \/ \___/ \/ |_| |_|\___| ", curses.A_BOLD)
        screen.addstr(middle_height-3, middle_width-45, " _ ___ ___ _ _ ", curses.A_BOLD)
        screen.addstr(middle_height-2, middle_width-45, " /\ /\___ | |_ / \___ __ _ / \_____ ___ __ | | ___ __ _ __| | ___ _ __ ", curses.A_BOLD)
        screen.addstr(middle_height-1, middle_width-45, " / /_/ / _ \| __| / /\ / _ \ / _` | / /\ / _ \ \ /\ / / '_ \| |/ _ \ / _` |/ _` |/ _ \ '__| ", curses.A_BOLD)
        screen.addstr(middle_height, middle_width-45, "/ __ / (_) | |_ / /_// (_) | (_| | / /_// (_) \ V V /| | | | | (_) | (_| | (_| | __/ | ", curses.A_BOLD)
        screen.addstr(middle_height+1, middle_width-45, "\/ /_/ \___/ \__| /___,' \___/ \__, | /___,' \___/ \_/\_/ |_| |_|_|\___/ \__,_|\__,_|\___|_| ", curses.A_BOLD)
        screen.addstr(middle_height+2, middle_width-45, " |___/ ", curses.A_BOLD)
        # Audio file for the welcome screen.
        _play_sound('welcome.wav')
        screen.refresh()
    except KeyboardInterrupt:
        curses.endwin()
        sys.exit("Keyboard, Interrupt Quitting...")


screen.clear()
try:
    # Determining the file sizes for the stats page.
    temp = Downloader()
    for i in range(0, 5, 1):
        temp.get_size_runtime(dis.url_array[i])
    # Determining the latency to each host.
    k = 0
    for p in dis.players_array:
        p.server, directories = p.parse_server_info(str(dis.url_array[k]))
        p.determine_latancy()
        k += 1
except KeyboardInterrupt:
    curses.endwin()
    sys.exit("Keyboard Interupt Quitting...")


def _draw_hotdog_art(middle_height, middle_width):
    """Paint the ASCII hot dog (red sausage, yellow bun), centred on screen.

    Extracted helper: this art was fully duplicated in both countdown loops.
    """
    base = middle_width - 40
    screen.addstr(middle_height, base + 19, ',,,,,,,,,,,,', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 1, base + 15, '..,,,,,,,,,,,,,,,......', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 2, base + 13, '.........,,,,,,,*******,,,,,,,,...', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 2, base + 11, '.,,,,,,...', curses.color_pair(red))
    screen.addstr(middle_height + 3, base + 32, '.....,,,************,,,,,...', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 3, base + 9, '.*/(((((((((//*,,..... ', curses.color_pair(red))
    screen.addstr(middle_height + 4, base + 37, '.......,,,**********,,,,...', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 4, base + 8, '.,**//((((((((((((((((//**,,.', curses.color_pair(red))
    screen.addstr(middle_height + 5, base + 8, '.....,,**///((((((((((((((###(((/*,,.', curses.color_pair(red))
    screen.addstr(middle_height + 5, base + 45, '.....,,*********,,,...', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 6, base + 53, '..,,**///**,,,..', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 6, base + 8, '.,,,,.......,,***///(((((((((((((#(((((//*,,.', curses.color_pair(red))
    screen.addstr(middle_height + 7, base + 12, '****,,,.........,,**//((((((((((((((#####(//*,,,*', curses.color_pair(red))
    screen.addstr(middle_height + 7, base + 10, '.,', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 7, base + 61, '*//**,,..', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 8, base + 63, '***,,,.', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 8, base + 15, '*********,,,,......,,***///((((((((##########(/*', curses.color_pair(red))
    screen.addstr(middle_height + 8, base + 11, '..,,', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 9, base + 19, '***/////******,,,,,....,,**//((((###########(/*,..', curses.color_pair(red))
    screen.addstr(middle_height + 9, base + 66, ',..', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 9, base + 13, '...,,*', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 10, base + 66, '.', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 10, base + 26, '*****//////****,,,,,,,,***//((######(((*', curses.color_pair(red))
    screen.addstr(middle_height + 10, base + 16, '.....,,***', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 11, base + 34, '****///////*****,,***/(((#####(/.', curses.color_pair(red))
    screen.addstr(middle_height + 11, base + 21, '....,,,,,,****.', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 12, base + 39, '***///////////////(((####(*.', curses.color_pair(red))
    screen.addstr(middle_height + 12, base + 26, '.....,,,,,***', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 13, base + 45, '****///(((//(((###(*.', curses.color_pair(red))
    screen.addstr(middle_height + 13, base + 66, '.', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 13, base + 32, '....,,,,*****', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 14, base + 36, '........,,,***,***//', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 14, base + 54, '///*****,', curses.color_pair(red))
    screen.addstr(middle_height + 14, base + 63, ',.', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 15, base + 39, '.....,,,,,,,****/***,,,,,.', curses.color_pair(yellow_text))
    screen.addstr(middle_height + 16, base + 44, '....,,,,,********,.', curses.color_pair(yellow_text))


def _draw_countdown_frame(draw_counter):
    """Draw one animation frame: border, hype banner, counter, hot dog."""
    middle_height, middle_width = ss.window_res(screen)
    screen.clear()
    screen.border()
    screen.addstr(middle_height - 4, middle_width - 22, '(>0.0)> -_-_COUNTDOWN HYPE_-_- <(0.0<)', curses.color_pair(yellow_background))
    draw_counter(middle_height, middle_width)
    _draw_hotdog_art(middle_height, middle_width)
    screen.refresh()
    time.sleep(.1)


# Print Hot dog: literally just prints the ASCII hot dog, dawggg.
def print_hotdog():
    """Run the 3..2..1..GO! countdown animation with the countdown sound."""
    _play_sound('countdown.wav')
    try:
        # Twelve ~0.1 s frames per digit, then twelve frames of "GO!".
        for k in range(3, 0, -1):
            for _ in range(12):
                _draw_countdown_frame(
                    lambda mh, mw: screen.addch(mh - 2, mw - 1, str(k), curses.color_pair(yellow_background)))
        for _ in range(12):
            _draw_countdown_frame(
                lambda mh, mw: screen.addstr(mh - 2, mw - 2, 'GO!', curses.color_pair(yellow_background)))
        screen.clear()
    except KeyboardInterrupt:
        curses.endwin()
        sys.exit("Keyboard Interrupt, Quitting...")
import struct

from Alphabet import Alphabet


class StreamAlphabetic:
    """Autokey (stream) cipher: encrypt/decrypt plus frequency-analysis cracking."""

    def __init__(self, cipher_path):
        """Load the ciphertext from cipher_path."""
        self.cipher_path = cipher_path
        # BUG FIX: use a context manager so the handle is closed even if
        # read() raises (the original left the file open on error).
        with open(cipher_path, "r") as f:
            self.cipher = f.read()
        print(self.cipher)
        self.dict_replace = {}
        self.decrypted = ""

    def crack_cipher(self):
        """Try every alphabet character as the key.

        Returns (distance, (key, plaintext)) for the candidate whose letter
        frequencies best match English usage (smallest distance wins).
        """
        candidates = {}  # renamed: the original shadowed the builtin `dict`
        for key in Alphabet.CHARACTERS:
            plain_text = self.autokey_decrypt(key, self.cipher)
            # English plaintext should sit close to the reference
            # letter-frequency distribution, so the smallest distance wins.
            distance = self.get_proximity(plain_text)
            # BUG FIX: the original stored {key, plain_text} — an unordered
            # set that collapses to one element when key == plain_text; a
            # tuple keeps (key, plaintext) intact and ordered.
            candidates[distance] = (key, plain_text)
        # Equivalent to sorting the items by distance and taking the first.
        return min(candidates.items())

    def autokey_encrypt(self, key, plain_text):
        """Autokey encryption: Ez(x) = x + z (mod 26).

        The key stream is the key character followed by the plaintext itself.
        """
        k = key
        cipher = ""
        for plain_char in plain_text:
            pos_x = Alphabet.CHARACTERS.find(plain_char)
            pos_z = Alphabet.CHARACTERS.find(k)
            cipher += Alphabet.CHARACTERS[(pos_x + pos_z) % 26]
            k = plain_char
        return cipher

    def autokey_decrypt(self, key, cipher):
        """Autokey decryption: Dz(y) = y - z (mod 26).

        Each recovered plaintext character becomes the next key character.
        """
        k = key
        plain_text = ""
        for cipher_char in cipher:
            pos_y = Alphabet.CHARACTERS.find(cipher_char)
            pos_z = Alphabet.CHARACTERS.find(k)
            plain_char = Alphabet.CHARACTERS[(pos_y - pos_z) % 26]
            plain_text += plain_char
            k = plain_char
        return plain_text

    def get_proximity(self, ciphertext):
        """Sum of absolute differences between the text's letter frequencies
        and the reference English usage table (smaller = more English-like)."""
        freq = {}  # renamed: the original shadowed the builtin `dict`
        N = len(ciphertext)
        for letter in Alphabet.CHARACTERS:
            freq[letter] = ciphertext.count(letter) / N
        distance = 0
        for letter in Alphabet.WORD_USAGE:
            usage = float(Alphabet.WORD_USAGE[letter])
            usage_plain_text = float(freq[letter])
            distance += abs(usage_plain_text - usage)
        return distance

    def xor_float(self, f1, f2):
        """XOR the IEEE-754 bit patterns of two doubles; return the result as a double.

        BUG FIX: the original iterated struct.pack() output with ord(); on
        Python 3 bytes iterate as ints, so every call raised TypeError.
        """
        a = int.from_bytes(struct.pack('d', f1), 'big')
        b = int.from_bytes(struct.pack('d', f2), 'big')
        return struct.unpack('d', (a ^ b).to_bytes(8, 'big'))[0]
# File: gpa_calculator.py
# Author: Joel Okpara
# Date: 4/4/2016
# Section: 04
# E-mail: joelo1@umbc.edu
# Description: Reads semicolon-separated "first;last;g1;g2;g3" records from
#              grades.txt, converts the three letter grades to 4.0-scale
#              points, and writes each student's GPA to results.txt.

CLASSES = 3

# Letter grade -> 4.0-scale points; anything unrecognised scores 0.
_GRADE_POINTS = {"A": 4, "B": 3, "C": 2, "D": 1}


def convertLetter(letter):
    """Map a letter grade to its 4.0-scale point value (unknown letters -> 0)."""
    return _GRADE_POINTS.get(letter, 0)


def main():
    """Compute and report the GPA for every student listed in grades.txt."""
    # BUG FIX: the original never closed grades.txt; `with` closes both files
    # even if a line fails to parse.
    with open("grades.txt") as gradesFile, open("results.txt", "w") as gpaFile:
        for l in gradesFile:
            fName, lName, g1, g2, g3 = l.strip().split(";")
            gpa = (convertLetter(g1) + convertLetter(g2) + convertLetter(g3)) / CLASSES
            gpa = format(gpa, ".2f")
            print(fName, lName + "'s", "GPA is:", gpa)
            gpaFile.write(fName + " " + lName + "'s GPA is: " + str(gpa) + "\n")


# BUG FIX: guard the entry point so importing this module (e.g. from tests)
# does not immediately try to open grades.txt.
if __name__ == "__main__":
    main()
from django.conf.urls import url
from django.urls import path

from .views import (
    Login_View,
    AccountEmailActivateView,
    Register_View,
    Logout_view,
    Home,
    profile
)

# Namespace for URL reversing, e.g. reverse('accounts:login').
app_name = 'accounts'

urlpatterns = [
    # Email activation; <key> is the alphanumeric token from the confirmation mail.
    url(r'^email/confirm/(?P<key>[0-9A-Za-z]+)/$', AccountEmailActivateView.as_view(), name='email-activate'),
    # Resend the activation mail — same view, routed without a key.
    url(r'^email/resend-activation/$', AccountEmailActivateView.as_view(), name='resend-activation'),
    path('logout', Logout_view, name='logout'),
    path('login', Login_View.as_view(), name='login'),
    path('home', Home.as_view(), name='home'),
    path('profile', profile, name='profile'),
    path('register', Register_View.as_view(), name='register'),
]
import numpy as np
import matplotlib.cm as cm
from matplotlib.pyplot import figure, show, rc

# Wind-rose-style polar bar chart.
# force square figure and square axes looks better for polar, IMO
fig = figure(figsize=(8,8))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)

N = 6
#theta = np.arange(0.0, 2*np.pi, 2*np.pi/N)
#radii = 10*np.random.rand(N)
#width = np.pi/4*np.random.rand(N)

# Wind directions.
# NOTE(review): these look like compass degrees, but matplotlib polar axes
# take angles in radians — confirm whether np.deg2rad(theta) was intended.
theta = (40, 60, 80, 90, 180, 270)
# Wind speeds (determine the bar length / radius).
radii = (30, 30, 30, 30, 30, 30)
# Bar widths (without an explicit width the bars collapse to thin edges).
#width = np.pi/4*np.random.rand(N)
width = np.pi/4*np.random.rand(N)

# -- Draw the bars on the chart --
bars = ax.bar(theta, radii, width=width, bottom=0.0)

# Colour each bar in turn from the jet colormap, semi-transparent.
for r,bar in zip(radii, bars):
    bar.set_facecolor( cm.jet(r/10.))
    bar.set_alpha(0.5)

show()
from sense_hat import SenseHat
from time import sleep

# Fill the Sense HAT 8x8 LED matrix with solid blue, refreshing once a second.
sense = SenseHat()

# RGB colour tuples.
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)

while True:
    # clear(colour) floods the whole matrix with that colour.
    sense.clear(BLUE)
    sleep(1)
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout
import pandas as pd

#X = pd.read_csv('test_sample.csv')
#X.drop('Unnamed: 0', axis=1, inplace=True)


def predict_location(X, model='Model_01.h5'):
    """Run the saved Keras model on feature frame X and return its predictions.

    X: DataFrame with an 'ID' column plus feature columns named '0'..'2047'
       (assumed from the loop below — confirm against the caller).
    model: path to a saved Keras model file on disk.
    Returns the model's prediction array, or the string 'Error' if X ends up empty.

    NOTE(review): this mutates the caller's DataFrame in place (to_numeric
    assignment, dropna, drop) — confirm that is intended.
    """
    # clean dataset: coerce the 2048 feature columns to numeric (bad values -> NaN)
    for i in range(0,2048):
        X[str(i)] = pd.to_numeric(X[str(i)], errors='coerce')
    # NOTE(review): axis=1 drops COLUMNS containing NaN, but the guard below
    # checks len(X), the ROW count — one of the two looks wrong; verify.
    X.dropna(axis=1, inplace=True)
    if len(X) < 1:
        print('Error')
        return 'Error'
    # The ID column is not a model feature.
    X.drop('ID', axis=1, inplace=True)

    # load the model from disk
    loaded_model = load_model(model)
    #loaded_model.load_weights('NN_Models/test.hdf5')

    # Generate predictions
    predictions = loaded_model.predict(X)
    print('Predictions: ', predictions, sep='\n', end='\n')
    return predictions

#predictions = predict_location(X)
from rest_framework import serializers
from . import models
from rest_framework.exceptions import ValidationError
from django.conf import settings


class NewsSerializer(serializers.ModelSerializer):
    """List-level representation of a news item."""

    class Meta:
        model = models.news
        fields = ['id', 'title', 'news_img', 'checked', 'catagory_name', 'catagorys', 'recommended']
        extra_kwargs = {
            'id': {'read_only': True}
        }


class NewsDetailSerializer(serializers.ModelSerializer):
    """Full representation of a single news article."""

    class Meta:
        model = models.news_detail
        fields = ['id', 'title', 'author', 'article', 'date', 'news_detail_img', 'thumbup']
        extra_kwargs = {
            'id': {'read_only': True}
        }


class CommentSerializer(serializers.ModelSerializer):
    """Representation of a user comment on a news article."""
    # user
    class Meta:
        model = models.comment
        fields = ['username', 'content', 'cmt_time', ]
        extra_kwargs = {
            'id': {'read_only': True}
        }

    def _get_usericon(self, attrs):
        # NOTE(review): the looked-up value is never returned or used —
        # this helper looks unfinished; confirm whether it can be removed.
        usericon = attrs.get('usericon')

    def _filter_bad_words(self, attrs):
        """Reject comment content that contains a banned word.

        Raises ValidationError when any banned word appears; otherwise
        returns the content unchanged.

        Fix: the original called ``content.replace(badword, '**')`` and
        discarded the result (str.replace returns a new string, it does not
        mutate), then raised anyway — the dead no-op call is removed.
        NOTE(review): a leading underscore means DRF does not invoke this
        automatically; wire it into validate_content() if censoring/rejection
        is actually intended.
        """
        content = attrs.get('content')
        badword_list = ['sb', '傻逼', '弱智']
        for badword in badword_list:
            if badword in content:
                raise ValidationError('包含敏感词')
        return content

    # def create(self, validated_data):
    #     comment = models.comment.save(**validated_data)
    #     return comment
# Prompt for unit price and weight, then print the total cost.
price_str = input("the price of apples:")
weight_str = input("the weight of apples:")

# Two strings cannot be multiplied together directly — convert the types first.
# money = price_str * weight_str
price = float(price_str)
weight = float(weight_str)
money = price * weight
print(money)
# Generated by Django 2.0.3 on 2019-07-24 22:21 from django.db import migrations class Migration(migrations.Migration): atomic = False dependencies = [ ('orders', '0002_auto_20190723_2048'), ] operations = [ migrations.RenameModel( old_name='Sizes', new_name='Size', ), migrations.RenameModel( old_name='Styles', new_name='Style', ), migrations.RenameModel( old_name='Toppings', new_name='Topping', ), ]
""" While I'm installing UKDP, I will use this mail to keep track of my actions. UKDP is located on spikings.com, a Ubuntu server. First I needed to install mercurial: > sudo apt-get install mercurial Then I went to download the dpjudge package. Following the instructions on the googlecode download page: > cd ~/site-packages > hg clone https://woelpad@code.google.com/p/dpjudge/ This however creates a dpjudge folder inside site-packages, which in turn contains the DPjudge root folder. That's one level to deep, so we erase the clone, go back to the home directory and tell clone to rename dpjudge to site-packages: > cd ~ > hg clone https://woelpad@code.google.com/p/dpjudge/ site-packages This would also be a good time to identify ourselves to Mercurial. Next time we commit something it will know who the author is. To do this, I added the following to site-packages/.hg/hgrc: --- [ui] username = ukdp <ukdp@uk.diplom.org> --- There, source downloaded and ready to edit. Now let's set up the judge proper. Let's start with the structure of the judge folder (that what is not part of the project). BTW, to distinguish the source and the judge, we use two environment variables: PKG pointing to the DPjudge source folder (e.g. ~/site-packages/DPjudge, meaning you unpacked the project in a folder called site-packages), and JDG pointing to the judge root folder (e.g. ~/usdp). After creating the judge root folder ($JDG), you put in there the following subfolders: bin games log web and the host.py file, an example of which I'm attaching here. Note that both dppd and smtpmail are set to None, and that tester is set to '@' to generate no mail. You will probably want to change that to None and set smtpmail correctly. In bin you can put a few simple scripts that call the python files in $PKG/bin. I'm attaching the most useful one, inspect, for reference. The rest all follow the same pattern. You will need to make these files executable. The games folder simply can start empty. 
It will be populated by game folders and a status file when you create games. That status file, which should be distinguished from the status file inside a particular game folder, lists every available game, its variant, its status (forming, active, completed, etc.) and optionally whether it's a private game. A few lines in the status file for DPFG (our test judge on floc.net): pawn standard active ae090802 payola completed private This status file is managed by Status.py. A typical game folder contains the following files: access press results status and a list of saved status files, one per processed phase that have the phase attached at the end (e.g. status.S1901M). These are used to rollback the game, should the necessity arise. Very useful for testing, as you can now rollback and roll forward any game, diffing the results to see if any code changes resulted in unexpected differences in the results file. I think the other file names are pretty self-explanatory: access stores all logins to the game page, press keeps a copy of all press between the players, results contains the judge results, and status has the current game status. It's the status file that you will be most concerned about. The log folder also starts out empty and will contain various error logs, which may be of some help, though I mostly ignore them. A useful one is smtperror.log in case you use smtpmail and are having mail problems. The web folder contains the following folders: images maps and the file index.cgi, attached here. You will need to replace the path given in there with the parent folder of $PKG. Note that images can simply be a soft link to $PKG/web/images. The maps folder will contain all your game maps (gif, ps, pdf). If all went well, you should now be able to run inspect. Run cd $JDG/bin inspect Now you are in a Python shell with the DPjudge preloaded. 
>>>> self = Status().createGame('john@doe.edu', 'testgame', 'test') should create a game called testgame with the given email address as master and test as password. [Continued] After following my own instructions spelled out above, I copied the inspect script in $JDG/bin to check, mail and reopen and altered the last word in each ("inspect") to the respective script names. I changed the path name in $JDG/web/index.cgi and then edited $JDG/host.py, replacing path names, urls and mail addresses as I saw fit. A .vimrc that ensures that tab stops are 4 spaces was added. Script files in $JDG/bin need to be executable, hence "chmod a+x *". inspect turned out to have dos line endings, messing up with the python shebang. Load in vi, type ":set ff=unix", then save and quit and repeat that for each script file. I ran inspect, and created the test game with the Status.createGame() call. So far, so good. Let's not forget to make $JDG/web/index.cgi executable, otherwise we may run into deep troubles. I added "umask 002" and PKG and JDG environment variables to .bashrc. I should have done that earlier, as now I had to go back and do a "chmod g+w" on all files and folders in $JDG (except $JDG/bin, I guess). This is so that apache, logging in in the same group as ukdp, will be able to run DPjudge code and modify games. I like to run multiple terminals in screen, so that I can look in source code, inspect games and visit game folders at the same time, simply by switching screens. For that I have made the following alias, and put it in .bash_profile: ~ alias attach='screen -D -RR $(screen -ls | grep \(..tached\) | awk "NR == 1 {print \$1}")' Every time I log in, I just have to enter "attach" and it will reattach to my screen session. 
Now, to install apache, do the same as for mercurial: > sudo apt-get install apache2 We'll also need mod-python, otherwise apache2 doesn't know how to execute python scripts: > sudo apt-get install libapache2-mod-python Next, we add apache_configuration.conf. I don't know how Alain made sure that this file got loaded. I simply wrote in httpd.conf to load the file. > sudo vi /etc/apache2/httpd.conf ~ Include /home/ukdp/apache_configuration.conf We now add the apache user to group ukdp: > sudo usermod -a -G ukdp www-data The "sudo apachectl graceful" given in the comments there must have applied to an older apache installation. Nowadays you restart apache2 as follows: > sudo /etc/init.d/apache2 restart To check that ukdp is now running on apache, we start python: > python >>> import urllib >>> response = urllib.urlopen('http://localhost/ukdp') >>> response.read() '\n\n\t\t\t<H3>DPjudge Error</H3><p class=bodycopy>\n\t\t\tPlease <a href=mailto:woelpad@gmail.com>e-mail the judgekeeper</a>\n\t\t\tand report how you got this error. Thank you.\n\t\t\t<!--\n\t\t\t\n File "/home/ukdp/site-packages/DPjudge/web/index.py", line 5, in handle\n try: DPjudge.Page(form)\n File "/home/ukdp/site-packages/DPjudge/Page.py", line 51, in __init__\n if self.include(): raise SystemExit\n File "/home/ukdp/site-packages/DPjudge/Page.py", line 90, in include\n .replace(\'<DPPD>\',\thost.dppdURL)\nTraceback (most recent call last):\n File "/home/ukdp/site-packages/DPjudge/web/index.py", line 5, in handle\n try: DPjudge.Page(form)\n File "/home/ukdp/site-packages/DPjudge/Page.py", line 51, in __init__\n if self.include(): raise SystemExit\n File "/home/ukdp/site-packages/DPjudge/Page.py", line 90, in include\n .replace(\'<DPPD>\',\thost.dppdURL)\nTypeError: expected a character buffer object\n-->\n' >>> Right, it doesn't like it that we didn't assign a dppd in host.py. 
Let's change that: > vi $JDG/host.py ~ dppd = 'dppd@diplom.org' ~ dppdURL = 'http://www.floc.net/dpjudge?variant=dppd' After this little upheaval, we repeat the urllib sequence and, behold, we get the DPjudge home page in the response! Let's see if the outside world can already see something. We enter the site's address in a browser (http://mediacentre.spikings.com/ukdp), but alas we get a "Page not found" error. Time to contact the site owner. [Continued] The site owner, Peter Spikings, wants the ukdp/web folder as the DocumentRoot, so that you simply have to enter the host address to get the DPjudge home page, e.g. "http://ukdp.spikings.com/" or "http://uk.diplom.org" which was the host name 10 years ago. To accomplish that, we need to add a new site to apache, and can as well disable the default one, which is staring at /var/www. We start by copying default to ukdp: > sudo cp /etc/apache2/sites-available/default /etc/apache2/sites-available/ukdp Then we open that file in vi and start messing around, integrating all what was in apache_configuration.conf, so that we can throw the latter out afterwards. Incidentally we let ErrorLog and CustomLog point to $JDG/log/apache2 and create that folder. We then disable default and enable ukdp: > sudo a2dissite default & a2ensite ukdp > sudo /etc/init.d/apache2 reload > sudo /etc/init.d/apache2 restart Checking with python whether we can read localhost: >>> response = urllib.urlopen('http://localhost/') >>> response.read() '\n<html>\n<head>\n<title>UKDP: The DPjudge</title>\n\n<style type="text/css">...' That works. Now we can go and delete apache_configuration.conf and make /etc/apache2/httpd.conf empty again. While we're waiting for Peter and Millis (site owner of diplom.org) to create the new host name, let's check and see if we can set up and run a test game proper. So far we only created the game testing (see previous mail), but it's still in its forming state, with no players signed up yet. 
We don't need players though, we can use dummies. The fastest way is to simply edit the status file: > vi $JDG/games/testing/status --- MORPH DUMMY AUSTRIA ENGLAND FRANCE GERMANY ITALY RUSSIA TURKEY RULE HIDE_DUMMIES --- The rule added will ensure that we can log in to any dummy's game page. Note: If you want to start with less players (at least 2), simply omit the rest on the DUMMY line. Now let's start the game: > cd $JDG/bin > inspect testing >>> self.begin() sh: /home/ukdp/site-packages/DPjudge/tools/ps2pdf: not found >>> self.phase u'SPRING 1901 MOVEMENT' It's complaining that ps2pdf is not installed, but it did move to S1901M. Indeed, we did not put any links in $PKG/tools for all the external resources needed to run the judge. Now, there's a __init__.py file there that is just a Readme file telling you what should be installed here. Since it didn't list the actual packages, I'm listing them here and adding that to the __init__ file. These are: ... gs -> /usr/bin/gs ... ps2pdf -> /usr/bin/ps2pdf to convert from ps to pdf (package: ghostscript). ... psselect -> /usr/bin/psselect to extract a page from a ps file (package: psutils). ... pnmcrop -> /usr/bin/pnmcrop ... pnmcut -> /usr/bin/pnmcut ... pnmflip -> /usr/bin/pnmflip ... ppmtogif -> /usr/bin/ppmtogif to convert from ps to gif (package: netpbm). ... sendmail -> /usr/sbin/sendmail to send mail, obviously (package: sendmail). Only used if you want to use the smtp service. Note that Ubuntu users are quite vocal in saying you are better off installing the more secure Postfix frontend. ... zone.tab -> /usr/share/zoneinfo/zone.tab to convert time zones (on system installation). You can also specify this file directly into your host.py file, making this link superfluous. Afer installing all 4 packages ("sudo apt-get install <package>"), try to remake the maps: > inspect testing 'self.view.makeMaps()' No error gets reported and checking $JDG/web/maps, we see a ps, pdf and gif file. All well then. 
Note that by providing a second parameter, embraced in quotes to not mess with the shell substitutions, inspect executes that command and exits. The self here points to the Game instance. More info in $PKG/bin/inspect.py. Let us now set up the smtp service. > sudo sendmailconfig Perhaps not really a necessity, since I'm just accepting the defaults. Anyway, let's test sendmail by sending a simple test mail to myself. > echo "sendmail test" | sendmail woelpad@gmail.com Success, I'm getting mail. The only problem is, it's coming from ukdp@ukdp.diplom.org, not the intended ukdp@uk.diplom.org. By repeating the same command as above, but adding the -v option (verbose), I come to know that the EHLO is ukdp.diplom.org. I ask Peter to change that, but even then the from-address is still the same. Turns out it's written like that in the /etc/hosts file. > cat /etc/hosts --- 127.0.0.1 localhost 192.168.10.2 ukdp.diplom.org ukdp --- A quick edit to change the first ukdp to uk, a new test mail, and yes, we're ok now. Big news from Millis: The site is up. I can access the site, but any mail I send gets swallowed whole. Oh right, forgot to install procmail. A "dpkg -s procmail" tells me procmail is already installed (comes with sendmail, I guess). Just need to copy .procmailrc to the home directory, and replace all instances of dpjudge and usdp with ukdp, as unlike dpjudge/dpforge ukdp is both the user name and the judge name. Well, that doesn't do anything yet. Let's try to dry run procmail. First I create a message with the following contents: --- From: me@spikings.com To: ukdp@uk.diplom.org (self test) signon mtesting test broadcast Broadcasting... 
endpress signoff --- Then I run that through .procmailrc > procmail VERBOSE=on DEFAULT=/dev/null .procmailrc < test.msg procmail: [19776] Sun Feb 19 21:18:38 2012 procmail: Assigning "DEFAULT=/dev/null" procmail: Rcfile: "/home/ukdp/.procmailrc" procmail: Assigning "MAILDIR=/home/ukdp" procmail: Assigning "SHELL=/bin/sh" procmail: Assigning "PATH=.:/home/ukdp/bin:/bin:/usr/bin:/usr/local/bin:/usr/sbin:/usr/lib" procmail: Assigning "LOGFILE=/home/ukdp/ukdp/log/procmail.log" procmail: Opening "/home/ukdp/ukdp/log/procmail.log" And then nothing. After waiting for a while I interrupt the process and inspect the log-file given above. > tail /home/ukdp/ukdp/log/procmail.log procmail: Locking "ukdp" procmail: [19776] Sun Feb 19 21:19:26 2012 procmail: Locking "ukdp" procmail: [19776] Sun Feb 19 21:19:34 2012 procmail: Locking "ukdp" procmail: Terminating prematurely whilst waiting for lockfile "" Folder: **Bounced** 0 procmail: Unlocking "/home/ukdp/ukdp/log/.procmail.lock" After 8 seconds it seems to respawn the process, but gets stuck because the previous lock was not released yet. What's the command it's executing like? Looking in .procmailrc, I see: --- :0 H: ukdp * ^To:.*ukdp@.* |(cd /home/ukdp/ukdp; /user/usr/bin/timelimit -t 300 bin/mail > output.email 2>&1) ---- The first line indicates that this is the ukdp lock, the second line the matching condition (any message destined for user ukdp), the third line what to execute. Wait, ukdp, is that not the name of the lock file? That gets written where? In the home dir? But there's already a dir with that name! (In all fairness, Peter tipped me off on this one.) Let's add ".lock" to the lock file name and dump it in the /tmp folder for good measure: ":0 H: /tmp/ukdp.lock". One more surprise, "timelimit"?! Pretty evocative, but is it installed and what kind of a path is that: /user/usr/bin? Must be some floc peculiarity. 
A new apt-get to install timelimit and check that it's in /usr/bin, a small edit on .procmailrc, a new dry run, and bingo. Next I send a similar message from my gmail account, and check that I get a reply. Looks like procmail is working. Now, the dpforge .procmailrc file also had a dppd lock for any mails sent to dppd@..., with exactly the same timelimit command and its body. Being efficient, I combine that into one regexp: "* ^To:.*(ukdp|dppd)@", because I don't see the need for a second lock file. And as Peter proposes to add a judge alias as well, I expand that to "* ^To:.*(ukdp|judge|dppd)@", and add these aliases to /etc/aliases: > sudo vi /etc/aliases --- # Other aliases judge: ukdp dppd: ukdp --- > sudo newaliases A test message to judge@uk.diplom.org and dppd@diplom.org confirms that this works. DPPD? Now that you mention it, we're still using the USDP DPPD. Setting up our own database will be for some other time. Note: the .procmailrc in attachment is the modified one for ukdp. One more thing to do now: Start the cron job. > crontab -e --- UKDP=/home/ukdp/ukdp */20 * * * * $UKDP/bin/check > $UKDP/log/check.log 2> $UKDP/log/check.err --- Does it work? Sure it does, because a few minutes later (note that it runs every 20 minutes) we receive a late notice for game testing! That's enough for now. Let's go out and announce the resurrection of UKDP to the rest of the world. [Continued] Problems that came up later. Some mail programs insist on being able to do reverse DNS lookup. To accomplish that, you need to add PTR records. Check the net on how to do that. Basically you have to request your IP-provider to set it up for you. Also, IPv4 and IPv6 use different IP addresses (obviously), so if your site has both, you need Reverse DNS for both. If not, you might end up with "DNS: Service unavailable (dns=5.0.0) in your /var/log/mail.log file. 
In such cases your mail gets bounced, which gets logged in the daemon.log file we configured previously (that's $JDG/log/daemon.log if you configured it as written here). Check that too for better diagnostics. After a few players joined the first games, I started receiving "Suspicious activity" emails. This was not because they were on the same network, but because they, and even I, the master, were routed through home.spikings.com when accessing the web page at uk.diplom.org. I decided to create a publicDomains host parameter, move the problematic .aol.com domain names in there, and add .spikings.com to that list. Not entirely satisfactory, so let's look for a real solution. Game.logAccess() gets its host url from an environment variable called REMOTE_ADDR that Apache fills in. But with a reverse proxy the host gets in the way. To resolve that we need to install a package called mod_rpaf. > sudo apt-get install libapache2-mod-rpaf > sudo /etc/init.d/apache2 restart Log in to a game as GM and check the access file (link at the bottom of the page, above the Submit button). Still home.spikings.com. Hmm. Right, we need to add the host ip to rpaf's config parameters. To find the ip, we comment out the socket.gethostbyaddr() lines in Game.logAccess() and log in to the web page again. The top of the access log (or bottom of the access file) now shows: --- Wed Jun 20 01:09:14 2012 2a01:348:1f1:10::1 MASTER !-MASTER-! --- Let's add that ip address (never mind that it's in IPv6 format) to the proxy list of rpaf: > sudo vi /etc/apache2/mods-enabled/rpaf.conf --- <IfModule mod_rpaf.c> RPAFenable On RPAFsethostname On RPAFproxy_ips 127.0.0.1 2a01:348:1f1:10::1 </IfModule> --- Restart apache, log in to a game page, check the access log, and presto: The correct host name appears. New feature: A judgekeeper password to rule all the others. 
In host.py we now have the option to fill in judgePassword, so that we don't have to look up the Master password to log in to a game on the web or by email, or even to log in to the DPPD to change someone's email address. To limit the damage if ever this password gets leaked, we can use a little script called scramble to periodically overwrite this password in the host file. Just add this to your crontab: --- 0 0 1 * * $UKDP/bin/scramble -o > $UKDP/log/scramble.log --- The log file will contain a copy of the password. You can check the date to confirm that the script is running correctly. It should run on midnight on the first day of the month, but you can adapt the parameters to update the password more or less often. Note that you don't have to assign a judgekeeper password. Omitting the judgePassword line or leaving the value blank in the host file will disable this feature. Just make sure that the scramble script is removed from crontab as well. Speaking of crontab, here's another improvement I doctored out. The check script checks deadlines every 20 minutes, but once in a while, every day at midnight actually, sends reminders to the GMs of any waiting and forming games. I split up this functionality, so that I can more easily decide how often to send out these reminders. Let's say I want them to be sent out once a week, on Sunday night. I direct crontab to run 2 check jobs, one for the active games (indicated by -a), and one for reminders (-r). > crontab -e --- UKDP=/home/ukdp/ukdp 0 0 * * 0 $UKDP/bin/check -t -r > $UKDP/log/remind.log 2> $UKDP/log/remind.err */20 * * * * $UKDP/bin/check -t -a > $UKDP/log/check.log 2> $UKDP/log/check.err --- Notice that I also chose different log files. If no parameters are fed to the check script, it would operate as before. The observant reader may have noticed a second flag added to these commands, -t. This flag guards against processing games immediately after a server outage when the server comes back online. 
This is to prevent that games that went past their deadline during the outage would process before players got the chance to enter orders. It is the judgekeeper's task to extend deadlines first, and then run the check script once without the -t flag to restart the cron jobs. Remote backup. After floc.net, the server hosting USDP and DPFG, crashed and stayed down for a couple of days, I decided to back up the game data and even the program data on this server. And vice versa to back up UKDP on floc.net. I chose rsync as the backup application and wrote a script, rbackup, to perform the task. Inside rbackup, which is stored in ~/bin, you need to set USR to the current user (ukdp), JDG to the name of the judge (ukdp), choose the target directory in TRG (start with a local directory first), select whether to do a dry-run with DRY (recommended for the first few runs) and enter the correct port number in the -p option inside the --rsh option in the actual rsync command. Two other files are important, both in the same bin directory. The first is rbackup.files, which lists the directories to include, relative to the user home dir, and rbackup.exclude, which lists the file patterns to exclude. I chose to exclude vim swap files (of course) and game maps, because of their size and because they're relatively easy to regenerate by invoking inspect: > inspect "<game>.makeMaps()" If during dry-run you spot other files that are too big and don't need to be backed up, like tar files and older log files, consider to exclude them as well, especially if your remote server file space is limited. This is the case for the ukdp server, and for that reason I have opted to back up only running games from USDP, not completed or terminated games, using a replicate script that I won't explain here. To connect to a remote server, rsync, or actually ssh, will ask for a password. To avoid that we need to make a public-private key pair and send the public key to the remote server. 
The following commands will do the trick: > ssh-keygen Generates the key pair. Press Enter on every question. > ssh-copy-id -i ~/.ssh/id_rsa.pub "mario@floc.net -p 4" Copies the public key to my home dir on floc.net through port 4. It will ask for your password one last time, after that you won't need to enter it anymore. Now you're set. Do a couple of dry-runs, which will mimic the operation but not actually copy any file or directory, and when satisfied execute the first backup. Are we finished now? No. The last step is to add it to crontab. To reduce the risk of losing a processed turn, the best timing is to back up right after the check script finishes. This can be done by appending the rbackup call to the check entry, separated by a semicolon. > crontab -e --- ... BIN=/home/ukdp/bin ... */20 * * * * $UKDP/bin/check -t -a > $UKDP/log/check.log 2> $UKDP/log/check.err; $BIN/rbackup >> $UKDP/log/rbackup.log 2>> $UKDP/log/rbackup.err ... --- Displaying an icon in the browser tab title. Ever noticed that USDP has a little image of a cannon displayed in front of the browser tab title? It's called a favicon. There are two ways of adding it to your web pages, either by adding a link to the header of every web page (flexible, but too cumbersome), or by putting an image file called favicon.ioo in the document root folder of your web site. For reference the link in the header would look like this: <link rel="shortcut icon" href="http://uk.diplom.org/favicon.ico"> Add something like "?v=2" if ever you made a change to it and want to force your browser to reload it. We go with the second method. Where's our document root? That's determined by how you configured Apache. The default Apache root folder is /var/www, which is the one used on USDP. But you need to be root to put a file there. (On UKDP we become root by entering "sudo -s" with the same password as for the ukdp user.) On UKDP however we set it up better. 
If we browse to /etc/apache2/sites-available, there's a ukdp configuration file there that sets DocumentRoot to /home/ukdp/ukdp/web. So we just need to copy favicon.ico, conveniently located in $PKG/web, to that location. Does this immediately reflect in our browser? Unfortunately no, and clearing the browser's cache also doesn't seem to work. What we need to do is to visit the icon itself and do a refresh. In your browser enter "http://uk.diplom.org/favicon.ico", load the image and then press Ctrl+F5. It immediately changes the icons in the title bar for all your UKDP tabs. --- After upgrading to Python 2.7 I could no longer run hg. An error message appeared telling that mercurial was not in the install path or PYTHONPATH. I did a find to confirm that mercurial was only located under python2.6. To fix this I purged and reinstalled mercurial. > sudo apt-get purge --remove mercurial > sudo apt-get autoremove > sudo apt-get install mercurial This did not place a mercurial folder or link in python2.7, but somehow the problem was solved. > find / -name mercurial -print ... /usr/lib/pyshared/python2.6/mercurial /usr/lib/pymodules/python2.6/mercurial ... --- Again a chilling surprise today. On visiting http://uk.diplom.org/maps I could see a directory list of all game maps. Imagine playing a blind game and knowing that you can simply download the maps for every player, including the master. That's simply not acceptable! After searching on the Internet and comparing with USDP, I browsed through this setup file and noticed the extra configuration files I had created, in particular /etc/apache2/sites-available/ukdp. I knew that Options Indexes was the likely culprit, and finally I found a file that had it set. I put a "-" in front of Indexes and restarted the apache2 service. Now this server too displays a "No permission" error page. --- Installing MySQL It couldn't last forever. We'll crack the nut that's called MySQL. 
I want to set up a database locally so that I can test the DPPD on this machine instead of having to jump to USDP. First order of the day: Installing MySQL itself. > sudo apt-get install mysql-server libapache2-mod-auth-mysql I chose my login password for the root password. I removed obsolete packages afterwards with: > sudo apt-get autoremove Install and set up: > sudo mysql_install_db > sudo /usr/bin/mysql_secure_installation Answering y on all questions, as this is kind of a production system. Next comes installing MySQLdb for python. A lot is mentioned about pip or easy_istall, but let's see if simply doing an apt-get works. > sudo apt-get install python-mysqldb This installed it alright, but not in python2.7, but 2.6. After lots of frustrations I managed to install it in the right place using easy_install-2.7 > wget --no-check-certificate https://bootstrap.pypa.io/ez_setup.py > sudo python ez_setup.py --insecure > sudo easy_install-2.7 MySQL-python I then launch python and import the library, but that generates an annoying warning. >>> import MySQLdb /usr/local/lib/python2.7/site-packages/setuptools-14.0-py2.7.egg/pkg_resources/__init__.py:1224: UserWarning: /home/ukdp/.python-eggs is writable by group/others and vulnerable to attack when used with get_resource_filename. Consider a more secure location (set with .set_extraction_path or the PYTHON_EGG_CACHE environment variable). Turns out to be simply a file permission problem. > chmod go-wx ~/.python-eggs gets rid of it. We need to create a new user. We could go for ukdp, but to mimic USDP and DPFG where the users are dpjudge and dpforge resp., let's go for dplodge. This will also be an e-mail alias for ukdp. To make an e-mail alias, first we edit /etc/aliases, adding: > sudo vi /etc/aliases ... dplodge: ukdp ... To make this active this should be written to /etc/mail/aliases.db, which can be achieved by: > sudo newaliases Let's not forget to add the alias to .procmailrc. > vi ~/.procmailrc ... 
* ^To:.*(ukdp|dplodge|judge|dppd)@ ... And when all that is done, we can send a message to dplodge@uk.diplom.org (not dplodge@spikings.com) with "List" in the body and get a list of all openings on UKDP. Another great feat. Let's create this dplodge user in MySQL. First we need a password, and for that we can use our scramble tool. > $JDG/bin/scramble -n The result: ********. Because MySQL passwords are case sensitive, we randomly uppercase a few letters, then use that as our password. > mysql -u root -p mysql> create user 'dplodge'@'localhost' identified by '********'; Good, but what privileges do we grant this user? Let's check on USDP. > mysql -u dpjudge -p mysql> show grants for 'dpjudge'@'localhost'; +------------------------------------------------------------------------------+ | Grants for dpjudge@localhost | +------------------------------------------------------------------------------+ | GRANT USAGE ON *.* TO 'dpjudge'@'localhost' IDENTIFIED BY PASSWORD '*<hash>' | | GRANT ALL PRIVILEGES ON `dpjudge`.* TO 'dpjudge'@'localhost' | +------------------------------------------------------------------------------+ Well then, the first grant comes automatically when creating the user. It's then those same privileges on the not yet existing dplodge database that we grant to our new user dplodge. Let's now create the database. Or rather, as with dpforge, we clone the latest backup file of dpjudge, which conveniently is already on this server thanks to the offices of rsynch. Mimicking the backup script from dpforge, we execute: > mysql -udplodge -p******** dplodge < ~/rbackup/dpforge/backup/dpjudge.db.bak But this results in an error "Unknown database dplodge". Ok, we need to create it. > mysql -u dplodge -p mysql> create database dplodge; We quit and run the former command again, and presto, our database is filled. 
If you don't believe me, try the following mysql commands: mysql> show tables in dplodge; mysql> select count(*) from dplodge.Game; We want to sync this database often, just like we did for dpforge. For that reason we adapt the backup script and add it to crontab. Let's say we do this daily at 5 past midnight: > crontab -e 5 0 * * * $BIN/backup Of course this would mean that every night whatever updates were made to the dplodge database for games running on UKDP would go lost. To prevent that from happening we extend the host.dppdURL parameter with a second address pointing to the USDP dppd, so that both databases get updated at the same time. """
#!/usr/bin/python3

def ChangeInt(a):
    """Rebind the local name ``a``; the caller's variable is not affected.

    Demonstrates that assigning to a parameter only rebinds the function's
    local name — it cannot change what the caller's name refers to.
    """
    a = 10  # rebinds the local name only; returns None implicitly

b = 2
ChangeInt(b)
print(b)  # prints 2 — the caller's binding is untouched
#plot_ts_hydrographs.py
#python script to loop time series files in a folder and plot hydrographs
#Author: Cody Moser, PhD
#cody.lee.moser@noaa.gov
#NOAA/NWS/MARFC
print("Start Script")

import os
import matplotlib
import matplotlib.pyplot as plt
import numpy
import pylab
from pylab import *
from matplotlib.ticker import AutoMinorLocator

# Read the water-year runoff table: one 'WY' column plus one column per station.
csv_read = open(r'P:\NWS\Python\Plot_Runoff\SERFC\SERFC_Runoff.csv', 'r')
data = numpy.genfromtxt(csv_read, delimiter=',', skip_header=1, skip_footer=0,
                        names=['WY', 'COCF1', 'FGOG1', 'FLRV2', 'PLMF1', 'SRDV2', 'WORF1'])
csv_read.close()  # close as soon as the data is in memory, not at script end

x = data['WY']

#eq = r'$\mathdefault{N=0.8A^{0.3}}$'
fig = plt.figure()
ax1 = plt.subplot(111)

# One (station, color) pair per series; replaces six copy-pasted plot calls.
stations = [('COCF1', 'black'), ('FGOG1', 'red'), ('FLRV2', 'blue'),
            ('PLMF1', 'green'), ('SRDV2', 'purple'), ('WORF1', 'yellow')]
for station, color in stations:
    ax1.plot(x, data[station], color=color, markersize=4, linestyle='-',
             linewidth=2, label=station, zorder=5)

ax1.minorticks_on()
ax1.grid(which='major', axis='both', color='black', linestyle='-', zorder=3)
ax1.grid(which='minor', axis='both', color='grey', linestyle='-', zorder=3)
ax1.set_xlabel('Water Year', fontsize='8')
ax1.set_ylabel('Runoff (inches)', fontsize='8')
ax1.tick_params(labelsize=8)
ax1.set_xlim([1960, 2015])
ax1.set_ylim([0, 28])
ax1.legend(loc='upper right', prop={'size': 8})
ax1.xaxis.set_minor_locator(AutoMinorLocator(5))
plt.title('SERFC Water Year Runoff', fontsize='8')
plt.ioff()

# BUGFIX: the original used r'P:\\NWS\\...' -- inside a raw string the doubled
# backslashes survive literally; use the same single-backslash form as the CSV path.
figname = r'P:\NWS\Python\Plot_Runoff\SERFC\SERFC_Runoff.png'
plt.savefig(figname, dpi=300)
#plt.show()

print("End Script")
raw_input("Press Enter to continue...")
from pymongo import MongoClient


class MongoClientConnect(object):
    """Placeholder wrapper for establishing a MongoDB connection."""

    def __init__(self, target="", name=""):
        """
        Return Mongo connect status for *target*/*name* (not implemented yet).
        """
        pass
import tensorrt as trt

onnx_file_name = 'bert.onnx'
tensorrt_file_name = 'bert.plan'
fp16_mode = True
# int8_mode = True

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)

builder = trt.Builder(TRT_LOGGER)
network = builder.create_network(EXPLICIT_BATCH)
parser = trt.OnnxParser(network, TRT_LOGGER)

builder.max_workspace_size = (1 << 30)  # 1 GiB scratch space for tactic selection
builder.fp16_mode = fp16_mode
# builder.int8_mode = int8_mode

# Parse the ONNX model. BUGFIX: abort with the parser diagnostics instead of
# handing an incomplete network to the builder as the original did.
with open(onnx_file_name, 'rb') as model:
    if not parser.parse(model.read()):
        for error in range(parser.num_errors):
            print(parser.get_error(error))
        raise RuntimeError('failed to parse ' + onnx_file_name)

# for int8 mode
# print(network.num_layers, network.num_inputs , network.num_outputs)
# for layer_index in range(network.num_layers):
#     layer = network[layer_index]
#     print(layer.name)
#     tensor = layer.get_output(0)
#     print(tensor.name)
#     tensor.dynamic_range = (0, 255)
#     input_tensor = layer.get_input(0)
#     print(input_tensor)
#     input_tensor.dynamic_range = (0, 255)

engine = builder.build_cuda_engine(network)
# build_cuda_engine returns None on failure; the original would then crash with
# an unhelpful AttributeError on engine.serialize().
if engine is None:
    raise RuntimeError('TensorRT engine build failed')
buf = engine.serialize()
with open(tensorrt_file_name, 'wb') as f:
    f.write(buf)
print('done, trt model')
#python imports
import sys
import os
import subprocess
from termcolor import colored

#programmer generated imports
from logger import logger
from fileio import fileio

'''
***BEGIN DESCRIPTION***
Type: Office - Description: Uses oledump to extract any ole object from a Microsoft Office file.
***END DESCRIPTION***
'''
def POE(POE):
    """Run oledump against POE.target and save its output to <logdir>/oledump.txt.

    Logs progress via the project logger when POE.logging is set; echoes the
    raw oledump lines when POE.debug is set. Returns 0.
    """
    if (POE.logging == True):
        LOG = logger()
        newlogentry = ''

    oledump_output_data = ''
    output = POE.logdir + 'oledump.txt'

    if (POE.logging == True):
        newlogentry = 'Module: oledump'
        LOG.WriteStrongLog(POE.logdir, POE.targetfilename, newlogentry)

    FI = fileio()

    print (colored('\r\n[*] Running oledump against: ' + POE.target, 'white', attrs=['bold']))

    # SECURITY FIX: the original interpolated POE.target into a shell=True
    # command line; a filename with shell metacharacters could execute
    # arbitrary commands. Use an argument list and write the output file
    # ourselves. (The shell '>' redirect also meant subproc.stdout captured
    # nothing, so oledump_output_data was always empty.)
    with open(output, 'w') as outfile:
        subproc = subprocess.Popen(['python3', '/opt/oledump/oledump.py', POE.target],
                                   stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        for oledump_data in subproc.stdout:
            oledump_output_data += str(oledump_data)
            outfile.write(oledump_data.decode('utf-8', errors='replace'))
            if (POE.debug == True):
                print (oledump_data)
        subproc.wait()

    print (colored('[*] oledump data has been generated to file here: ', 'green') + colored(output, 'blue', attrs=['bold']))

    if ((POE.logging == True) and (POE.nolinksummary == False)):
        newlogentry = 'oledump file has been generated to file here: <a href=\"' + output + '\"> oledump Output </a>'
        LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)

    return 0
import os
import fileinput
import nltk


class TextProcesser:
    """Load episode transcripts from a fixed directory tree and annotate/POS-tag them."""

    def __init__(self):
        # Eagerly resolve the transcript paths so the other methods can run.
        # NOTE(review): this raises if the hard-coded episodes path is absent.
        self.get_docs_from_bd()

    def get_docs_from_bd(self):
        """Collect the path of every episode file under the fixed six-season tree."""
        self.documents_paths = []
        path = "/home/mateus/Documents/TESIIII/episodes"
        for i in range(1, 7):
            season_path = path + "/season_" + str(i)
            for filename in os.listdir(season_path):
                self.documents_paths.append(season_path + "/" + filename)

    def tag_document(self, doc_path, list_terms):
        """Rewrite *doc_path* in place, wrapping occurrences of each term in Person tags."""
        with fileinput.FileInput(doc_path, inplace=True, backup='.bak') as file:
            for line in file:
                line_to_print = line
                for t in list_terms:
                    t_com_virg = t + ", "  # term followed by a comma
                    # BUGFIX: test the comma form first. "t, " in line implies
                    # "t" in line, so the original elif branch was unreachable.
                    if (t_com_virg in line):
                        line_to_print = line_to_print.replace(t_com_virg, "<Person>" + t_com_virg + "</>")
                    elif (t in line):
                        if ("<Person>" + t + "</Person>" not in line):
                            # NOTE(review): "<Person = " looks like a malformed
                            # opening tag vs the "<Person>" form above -- confirm.
                            line_to_print = line_to_print.replace(t, "<Person = " + t + "</>")
                print(line_to_print, end='')

    def pos_tag(self, text):
        """Return the nltk POS tags for *text*."""
        tokens = nltk.word_tokenize(text)
        pos_tags = nltk.pos_tag(tokens)
        return pos_tags

    def pos_tag_docs(self):
        """POS-tag every known document, keyed by its path."""
        self.documents_pos_tagged = {}
        for d in self.documents_paths:
            d_text = open(d).read()
            d_text_pos_tagged = self.pos_tag(d_text)
            self.documents_pos_tagged[d] = d_text_pos_tagged

    def format_pos_tagged_en(self, en, en_type):
        """Strip parentheses, the type label and per-word POS tags from an entity string."""
        s = en.replace("(", "").replace(")", "")  # remove the parentheses
        s = s.replace(en_type, "")[1:]  # remove the type label and leading space
        s = s.split()  # split on whitespace
        final_s = ""
        for e in s:
            final_s += nltk.tag.str2tuple(e)[0] + " "  # keep the word, drop its POS tag
        return final_s[:len(final_s) - 1]  # trim the trailing space

    def format_pos_tagged_result(self, text_tags, en_type):
        """Return the formatted entities of *en_type* found in *text_tags*."""
        ens_formatted = []
        for en in text_tags:
            if (hasattr(en, 'label') and (en.label() == en_type)):
                ens_formatted.append(self.format_pos_tagged_en(str(en), en_type))
        return ens_formatted

    def setup(self):
        """(Re)load the document list and POS-tag every document."""
        self.get_docs_from_bd()
        self.pos_tag_docs()
from flask import session
import pandas as pd
import os


class Dataframe:
    """Wraps an uploaded CSV as two pandas frames.

    ``__dataframe`` holds every value as ``object`` with NaNs replaced by "0";
    ``__dataframe_original`` keeps the inferred dtypes with NaNs replaced by 0.
    """

    # Upload directory, resolved relative to the process working directory.
    __path = os.getcwd() + '/static/uploads/'

    def __init__(self) -> None:
        self.fileread = False
        self.filename = ''
        self.__dataframe = pd.core.frame.DataFrame()
        self.__dataframe_original = pd.core.frame.DataFrame()

    def set_filename(self, filename) -> None:
        self.filename = filename

    def get_filename(self) -> str:
        return self.filename

    def set_dataframe(self) -> None:
        """Load the CSV named by ``filename`` into both internal frames."""
        path_to_write = self.__path + self.get_filename()
        try:
            self.__dataframe = pd.read_csv(
                path_to_write, dtype=object, encoding='utf-8')
            self.__dataframe_original = pd.read_csv(
                path_to_write, encoding='utf-8')
            self.__dataframe.fillna("0", inplace=True)
            self.__dataframe_original.fillna(0, inplace=True)
            self.fileread = True
        except Exception as e:
            # Best effort: a bad upload leaves the empty frames in place.
            print(e)

    def get_dataframe(self, flag) -> 'pd.DataFrame':
        """Return the full object-typed frame, or its first 5 rows when *flag* is falsy."""
        if flag:
            return self.__dataframe
        else:
            return self.__dataframe.head(5)

    def check_data_frame(self) -> None:
        """Reload the CSV from the Flask session filename if nothing is loaded yet."""
        if self.get_dataframe(True).empty:
            self.set_filename(session['filename'])
            self.set_dataframe()

    def get_titles(self) -> list:
        self.check_data_frame()
        titles = list(self.__dataframe.columns)
        return titles

    def get_unique_col_val(self) -> list:
        """Return, per column, the list of unique values."""
        self.check_data_frame()
        col_val = [list(self.__dataframe[col].unique())
                   for col in self.__dataframe.columns]
        return col_val

    def get_data_type(self) -> dict:
        # BUGFIX (annotation): this returns a column-name -> dtype mapping,
        # not a list as the original annotation claimed.
        self.check_data_frame()
        return dict(self.__dataframe_original.dtypes)

    def get_original_dataframe(self) -> 'pd.DataFrame':
        self.check_data_frame()
        return self.__dataframe_original
import os
import shutil


def ensure_dir(f):
    """Create directory *f* (and any missing parents) if it does not exist."""
    # exist_ok avoids the check-then-create race of the original
    # "if not exists: makedirs" pattern.
    os.makedirs(f, exist_ok=True)


def copyfile(srcfile, dstname, dstroot):
    """Copy *srcfile* into the directory *dstroot*/*dstname* (which must exist)."""
    dstdir = os.path.join(dstroot, dstname)
    #os.makedirs(dstdir)  # create all directories, raise an error if it already exists
    shutil.copy(srcfile, dstdir)
#!/usr/bin/python3
# coding=utf-8
'''
Created on 2012-1-5

@author: hbprotoss
'''
import platform, os
import urllib
import http.client
import xml.etree.ElementTree as ET


def GetExplanations(directory, word, conn):
    """Query youdao's API for *word*, cache the XML reply on disk, return its root."""
    word = urllib.parse.quote(word.encode("utf8"))
    szURL = "/openapi.do?keyfrom=digimon&key=1660474757&type=data&doctype=xml&version=1.1&q=" + word
    conn.request("GET", szURL)
    explanationFile = word + ".xml"
    file = open(explanationFile, "wb")
    file.write(conn.getresponse().read())
    file.close()
    conn.close()  # http.client transparently reconnects on the next request
    return ET.parse(explanationFile).getroot()


def PrintPhonetic(basic):
    """Print the phonetic transcription, if present."""
    phonetic = basic.find("phonetic")
    if(phonetic != None):
        try:
            print("/" + phonetic.text + "/")
        except UnicodeEncodeError:
            # Not supported by Microsoft Windows default console
            return
        except Exception:
            print("Cannot print phonetic. Unknown error occurred!")


def PrintBasic(explains):
    """Print every basic explanation line."""
    try:
        exs = explains.findall("ex")
        for ex in exs:
            print(ex.text)
    except Exception:
        return


def PrintWeb(web):
    """Print the web explanations, or a notice when there are none."""
    if(web == None):
        print("No translation found! \nPlease retry :(")
        return
    explains = web.findall("explain")
    try:
        for explain in explains:
            print("> " + explain.find("key").text)
            exs = explain.find("value").findall("ex")
            for ex in exs:
                print(" " + ex.text)
    except Exception:
        return


def PrintSeparator():
    print("----------------------------------------------------")


def Clear():
    """Clear the screen and delete the cached explanation files."""
    os.system(cmd_clear_screen)
    os.system(cmd_del)


def Usage():
    print("pythonDict v1.2(Powered by youdao.com)")
    print("Author: hbprotoss(hbprotoss@qq.com)")
    print("Supported translation:")
    print("English -> Chinese")
    print("Chinese -> English")
    print("enjoy ;-)", "\n")


def MakeDirectory():
    """Create (or empty) the temporary directory and chdir into it."""
    if(not os.path.exists(directory)):
        os.system(cmd_mkdir)  # If directory does not exist, create it
        os.chdir(directory)
    else:
        os.chdir(directory)
        os.system(cmd_del)  # If directory exists, clear all files in it


def InitGlobal():
    """Configure the platform-specific globals (paths and shell commands)."""
    global system
    global directory
    global cmd_mkdir
    global cmd_del
    global cmd_clear_screen
    system = platform.system()
    if(system == "Linux" or system == "Darwin"):
        directory = "/tmp/pythonDict"
        cmd_mkdir = "mkdir %s" % directory
        cmd_del = "rm ./* -f"
        cmd_clear_screen = "clear"
    elif(system == "Windows"):
        directory = os.environ.get("TEMP") + "\\pythonDict"
        cmd_mkdir = "mkdir %s" % directory
        cmd_del = "del * /q"
        cmd_clear_screen = "cls"
    else:
        # NOTE(review): on unsupported platforms ``directory``/``cmd_mkdir``/
        # ``cmd_del`` stay empty, so MakeDirectory()/Clear() will misbehave.
        cmd_clear_screen = "echo " + "Not supported operating system. \nPlease clear the screen manually if you wish :("


###################################################################
# Entry point

# Global variant for cross-platform
system = str()            # System name
directory = str()         # Directory to store temporary explanation files
cmd_mkdir = str()         # Command for making the directory
cmd_del = str()           # Command for clearing the directory
cmd_clear_screen = str()  # Command for clearing the screen
# End global variant

InitGlobal()
Usage()
# BUGFIX: MakeDirectory() returns None; the original assigned its result to
# ``directory``, wiping out the path that InitGlobal() had just configured.
MakeDirectory()

conn = http.client.HTTPConnection("fanyi.youdao.com")
Continue = 'y'
while(Continue != 'N' and Continue != 'n'):
    word = input("Please input the word you want to look up:")
    root = GetExplanations(directory, word, conn)
    more = ''
    basic = root.find("basic")
    if(basic != None):
        # Basic explanations
        PrintSeparator()
        PrintPhonetic(basic)
        PrintBasic(basic.find("explains"))
        PrintSeparator()
        more = input("Need more web explanations?(y/N)")
        print("")
        # More web explanations
        if(more == 'y' or more == 'Y'):
            PrintWeb(root.find("web"))
            PrintSeparator()
    else:
        print("We are sorry, but only web explanations available:")
        PrintSeparator()
        PrintWeb(root.find("web"))
        PrintSeparator()
    if(basic != None and more != 'y' and more != 'Y'):
        Continue = 'y'
    else:
        Continue = input("Continue looking up another word?(Y/n)")
    # Clear the screen and temporary explanation file
    Clear()
else:
    print("Thank you for using pythonDict.")
    print("Contact me at hbprotoss@qq.com if you have any problems or bugs to report :)")
    tmp = input("\nPress any key to continue...")
#import cmath as np
import matplotlib.pyplot as plt
#from cmath import sqrt
import numpy as np

# Ratio constants for the curve below.
m = (1500.0 / 1023)
n = (1539.0 / 1650)

# Magnitude of (m*cos(t) - r) / (m*cos(t) + r) over a full turn of angles,
# where r is the complex square root term.
# NOTE(review): this resembles a Fresnel reflection coefficient -- confirm.
x = np.linspace(0, 360, 100000)
theta = np.pi * x / 180.0
cos_term = m * np.cos(theta)
# The +0j forces a complex sqrt so the expression stays finite where the
# radicand goes negative.
root = np.sqrt((n ** 2 - (np.sin(theta)) ** 2) + 0j)
y = np.absolute((cos_term - root) / (cos_term + root))

plt.plot(x, y)
plt.show()
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Blast.Applications import NcbiblastpCommandline
from Bio.Blast import NCBIXML
import UseFilter

# Input/output configuration.
in_protein_name = r"C:\test.fa"
out_aligments_file = "result.fa"
filter_taxid = '9606'
filter_chain = 'all'
filter_Igtype = 'all'
xml_results_name = "temp_results.xml"
nr_db_name = "nr"

# Build the GI list restricting the search, then run blastp against nr.
gilist_name = UseFilter.Filter(filter_taxid, filter_chain, filter_Igtype)
in_protein = list(SeqIO.parse(open(in_protein_name), format="fasta"))
blastx_cline = NcbiblastpCommandline(query=in_protein_name, db=nr_db_name,
                                     gilist=gilist_name, evalue=0.001,
                                     outfmt=5, out=xml_results_name)
blastx_cline()

# Keep every HSP below the e-value cutoff as a FASTA record; count the rest.
bad_aligments = 0
resultlist = []
for blast_record in NCBIXML.parse(open(xml_results_name)):
    for alignment in blast_record.alignments:
        for hsp in alignment.hsps:
            if hsp.expect < 0.001:
                similarity = hsp.identities * 1000 // hsp.align_length / 10
                resultlist.append(SeqRecord(
                    Seq(hsp.sbjct),
                    id=alignment.title,
                    description=';length:' + str(alignment.length) +
                                ';e-value:' + str(hsp.expect) +
                                ';similarity:' + str(similarity) + '%'))
            else:
                bad_aligments += 1
#print('bad_aligments = ',bad_aligments)

SeqIO.write(resultlist, out_aligments_file, "fasta")
import requests
from bs4 import BeautifulSoup
import json
import time


def extract_url(sub):
    """Extract the artwork URL embedded in *sub* and normalize it to 640x640."""
    ret = sub[sub.find('https://'):]
    ret = ret[:ret.find(' ')]
    # Drop the trailing size suffix (last 11 chars) and pin a fixed size.
    ret = ret[:len(ret) - 11] + "640x640cc.jpg"
    return ret


def get_artists_tracks(url):
    """Crawl an Apple Music playlist page.

    Returns a dict with keys 'Name', 'Description', 'Tracks' (formatted
    track/artist/url strings) and 'Images' (artwork links).
    """
    returndict = {'Tracks': [], 'Description': "", 'Images': []}
    page = requests.get(url)  # request page
    html = BeautifulSoup(page.text, 'html.parser')  # parse page

    # Artwork links.
    image = html.find_all(class_='we-artwork--less-round we-artwork ember-view')
    for item in image:
        item = str(item)
        sub = extract_url(item)
        returndict['Images'].append(sub)

    # The page embeds its data store as JSON in a hidden element.
    tracks = html.find(id='shoebox-ember-data-store').get_text()
    longdict = json.loads(tracks)
    returndict['Name'] = longdict['data']['attributes']['name']  # playlist name
    if 'description' in longdict['data']['attributes'].keys():  # playlist description
        returndict['Description'] = longdict['data']['attributes']['description']['standard']
    for thing in longdict['included']:  # all songs in the playlist
        if 'song' in thing['type']:
            returndict['Tracks'].append("|Track|: " + str(thing['attributes']['name']) +
                                        ' |Artist|: ' + str(thing['attributes']['artistName']) +
                                        ' |URL|: ' + str(thing['attributes']['url']))
    # Removed the original's dead locals (changeid, ind) -- never used.
    return returndict
""" Model objects for the Heat mimic. """ import attr import json from random import randrange from mimic.model.behaviors import BehaviorRegistryCollection, EventDescription @attr.s class Stack(object): """ A :obj:`Stack` is a representation of a Heat stack. """ collection = attr.ib() stack_name = attr.ib() stack_id = attr.ib(default=attr.Factory(lambda: Stack.generate_stack_id())) action = attr.ib(default=attr.Factory(lambda: Stack.CREATE)) status = attr.ib(default=attr.Factory(lambda: Stack.COMPLETE)) tags = attr.ib(default=attr.Factory(list)) ACTIONS = ( CREATE, DELETE, UPDATE, ROLLBACK, SUSPEND, RESUME, ADOPT, SNAPSHOT, CHECK, RESTORE ) = ( 'CREATE', 'DELETE', 'UPDATE', 'ROLLBACK', 'SUSPEND', 'RESUME', 'ADOPT', 'SNAPSHOT', 'CHECK', 'RESTORE' ) STATUSES = (IN_PROGRESS, FAILED, COMPLETE ) = ('IN_PROGRESS', 'FAILED', 'COMPLETE') def links_json(self, absolutize_url): """ Create a JSON-serializable data structure describing the links to this stack. :param callable absolutize_url: see :obj:`default_create_behavior`. """ tid = self.collection.tenant_id sid = self.stack_id sname = self.stack_name href = absolutize_url("v1/{0}/stacks/{1}/{2}".format(tid, sname, sid)) return [{"href": href, "rel": "self"}] def json(self, absolutize_url): """ Returns the JSON representation of the stack. """ return { 'stack_name': self.stack_name, 'stack_status': self.action + '_' + self.status, 'id': self.stack_id, 'tags': ','.join(self.tags), 'links': self.links_json(absolutize_url), 'creation_time': 'Not implemented', 'updated_time': 'Not implemented', 'stack_status_reason': 'Not implemented', 'description': 'Not implemented', } def update_action(self, action): """ Updates the action of a stack. """ if action not in self.ACTIONS: raise ValueError("Action %s not in %s" % (action, self.ACTIONS)) self.action = action return self def update_status(self, status): """ Updates the status of a stack. 
""" if status not in self.STATUSES: raise ValueError("Status %s not in %s" % (status, self.STATUSES)) self.stack = status return self def update_action_and_status(self, action, status): """ Convenience method for updating action and status. """ return self.update_action(action).update_status(status) def is_deleted(self): """ Checks if stack is in a successfully deleted state. """ return self.action == self.DELETE and self.status == self.COMPLETE def has_tag(self, tag): """ Checks if stack has a tag. """ return tag in self.tags @classmethod def generate_stack_id(cls): """ Generates a stack ID. """ return 'test-stack{0}-id-{0}'.format(str(randrange(9999999999))) @classmethod def from_creation_request_json(cls, collection, creation_json): """ Creates a :obj:`Stack` and adds it to a collection. """ def get_tags(): tags = creation_json.get('tags', None) return tags.split(',') if tags else [] stack = cls( collection=collection, stack_name=creation_json['stack_name'], tags=get_tags() ) collection.stacks.append(stack) return stack def creation_response_json(self, absolutize_url): """ Returns the response associated with the stack's creation. """ return { "stack": { "id": self.stack_id, "links": self.links_json(absolutize_url) } } stack_creation = EventDescription() stack_check = EventDescription() stack_update = EventDescription() stack_deletion = EventDescription() @stack_creation.declare_default_behavior def default_create_behavior(collection, request, body, absolutize_url): """ Successfully create a stack. """ new_stack = Stack.from_creation_request_json(collection, body) response = new_stack.creation_response_json(absolutize_url) request.setResponseCode(201) return json.dumps(response) @stack_update.declare_default_behavior def default_update_behavior(collection, request, stack_name, stack_id): """ Successfully update a stack as long as it exists. Updates the stacks status. 
""" stack = collection.stack_by_id(stack_id) if stack is None: request.setResponseCode(404) return b'' stack.update_action_and_status(Stack.UPDATE, Stack.COMPLETE) request.setResponseCode(202) return b'' @stack_check.declare_default_behavior def default_check_behavior(collection, request, stack_name, stack_id): """ Successfully check a stack as long as it exists. Updates the stacks status. """ stack = collection.stack_by_id(stack_id) if stack is None: request.setResponseCode(404) return b'' stack.update_action_and_status(Stack.CHECK, Stack.COMPLETE) request.setResponseCode(201) return b'' @stack_deletion.declare_default_behavior def default_delete_behavior(collection, request, stack_name, stack_id): """ Successfully delete a stack as long as it exists. Updates the stacks status. """ stack = collection.stack_by_id(stack_id) if stack is None: request.setResponseCode(404) return b'' stack.update_action_and_status(Stack.DELETE, Stack.COMPLETE) request.setResponseCode(204) return b'' @attr.s class RegionalStackCollection(object): """ A collection of :obj:`Stack` objects for a region. """ tenant_id = attr.ib() region_name = attr.ib() stacks = attr.ib(default=attr.Factory(list)) behavior_registry_collection = attr.ib(default=attr.Factory( lambda: BehaviorRegistryCollection())) def stack_by_id(self, stack_id): """ Retrieves a stack by its ID """ for stack in self.stacks: if stack.stack_id == stack_id: return stack def request_list(self, absolutize_url, show_deleted=False, tags=[]): """ Tries a stack list operation. """ def should_show_stack(stack): """ Determines if a stack should be shown for the list response. """ if stack.is_deleted() and not show_deleted: return False for tag in tags: if not stack.has_tag(tag): return False return True result = { "stacks": [stack.json(absolutize_url) for stack in self.stacks if should_show_stack(stack)] } return json.dumps(result) def request_creation(self, request, body, absolutize_url): """ Tries a stack create operation. 
""" registry = self.behavior_registry_collection.registry_by_event( stack_creation) behavior = registry.behavior_for_attributes({ 'tenant_id': self.tenant_id, 'stack_name': body['stack_name'] }) return behavior(collection=self, request=request, body=body, absolutize_url=absolutize_url) def request_check(self, request, stack_name, stack_id): """ Tries a stack check operation. """ registry = self.behavior_registry_collection.registry_by_event( stack_check) behavior = registry.behavior_for_attributes({ 'tenant_id': self.tenant_id, 'stack_name': stack_name, 'stack_id': stack_id }) return behavior(collection=self, request=request, stack_name=stack_name, stack_id=stack_id) def request_update(self, request, stack_name, stack_id): """ Tries a stack update operation. """ registry = self.behavior_registry_collection.registry_by_event( stack_update) behavior = registry.behavior_for_attributes({ 'tenant_id': self.tenant_id, 'stack_name': stack_name, 'stack_id': stack_id }) return behavior(collection=self, request=request, stack_name=stack_name, stack_id=stack_id) def request_deletion(self, request, stack_name, stack_id): """ Tries a stack delete operation. """ registry = self.behavior_registry_collection.registry_by_event( stack_deletion) behavior = registry.behavior_for_attributes({ 'tenant_id': self.tenant_id, 'stack_name': stack_name, 'stack_id': stack_id }) return behavior(collection=self, request=request, stack_name=stack_name, stack_id=stack_id) @attr.s class GlobalStackCollections(object): """ A set of :obj:`RegionalStackCollection` objects owned by a tenant. """ tenant_id = attr.ib() regional_collections = attr.ib(default=attr.Factory(dict)) def collection_for_region(self, region_name): """ Retrieves a :obj:`RegionalStackCollection` for a region. """ if region_name not in self.regional_collections: self.regional_collections[region_name] = ( RegionalStackCollection(tenant_id=self.tenant_id, region_name=region_name) ) return self.regional_collections[region_name]
def collectionSort():
    """Append 5 to a sample list, reverse it, and print the result."""
    collection = [1, 2, 3, 4]
    collection.append(5)
    collection.reverse()
    print(collection)


def loopW():
    """Demonstrate a while loop and iteration over a list of languages."""
    numberP = 1
    while numberP < 6:
        print(numberP)
        numberP = numberP + 1

    languages = ['R', 'Python', 'Scala', 'Java', 'Julia']
    # Direct iteration is the idiomatic form of the original
    # ``for index in range(len(languages))`` loop; output is identical.
    for language in languages:
        print('Current language is: ', language)


collectionSort()
loopW()
""" converter_functions.py Conversion Functions for common layers. Add new functions here with a decorator. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from uff.converters.tensorflow.converter import TensorFlowToUFFConverter as tf2uff from uff.model.utils import convert_to_str from uff.model.exceptions import * import numpy as np @tf2uff.register(["Placeholder"]) def convert_placeholder(name, tf_node, inputs, uff_graph, **kwargs): dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr['dtype'].type) shape = tf2uff.get_tf_shape_as_int_list(tf_node.attr['shape']) uff_graph.input(shape, dtype, name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Identity"]) def convert_placeholder(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.identity(inputs[0], name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Const"]) def convert_const(name, tf_node, inputs, uff_graph, **kwargs): array = tf2uff.convert_tf2numpy_const_node(tf_node) uff_node = uff_graph.const(array, name) uff_node.array = array return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Add"]) def convert_add(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.binary(inputs[0], inputs[1], 'add', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Sub"]) def convert_sub(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.binary(inputs[0], inputs[1], 'sub', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Mul"]) def convert_mul(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.binary(inputs[0], inputs[1], 'mul', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Div", "RealDiv"]) def convert_div(name, tf_node, inputs, uff_graph, **kwargs): 
uff_graph.binary(inputs[0], inputs[1], 'div', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Relu"]) def convert_relu(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.activation(inputs[0], 'relu', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Relu6"]) def convert_relu6(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.activation(inputs[0], 'relu6', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Tanh"]) def convert_tanh(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.activation(inputs[0], 'tanh', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Sigmoid"]) def convert_sigmoid(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.activation(inputs[0], 'sigmoid', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Elu"]) def convert_elu(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.activation(inputs[0], 'elu', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Selu"]) def convert_selu(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.activation(inputs[0], 'selu', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Softsign"]) def convert_softsign(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.activation(inputs[0], 'softsign', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Softplus"]) def convert_softplus(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.activation(inputs[0], 'softplus', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Negative", "Neg"]) def convert_negative(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'neg', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] 
@tf2uff.register(["Abs"]) def convert_abs(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'abs', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Sin"]) def convert_sin(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'sin', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Cos"]) def convert_cos(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'cos', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Tan"]) def convert_tan(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'tan', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Sinh"]) def convert_sinh(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'sinh', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Cosh"]) def convert_cosh(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'cosh', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Asin"]) def convert_asin(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'asin', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Acos"]) def convert_acos(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'acos', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Atan"]) def convert_atan(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'atan', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Asinh"]) def convert_asinh(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'asinh', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] 
@tf2uff.register(["Acosh"]) def convert_acosh(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'acosh', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Atanh"]) def convert_atanh(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'atanh', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Ceil"]) def convert_ceil(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'ceil', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Floor"]) def convert_floor(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'floor', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Sqrt"]) def convert_sqrt(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'sqrt', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Rsqrt"]) def convert_rsqrt(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'rsqrt', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Square"]) def convert_square(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'square', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Pow"]) def convert_pow(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'pow', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Exp"]) def convert_exp(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'exp', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] @tf2uff.register(["Log"]) def convert_log(name, tf_node, inputs, uff_graph, **kwargs): uff_graph.unary(inputs[0], 'log', name) return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs] 
@tf2uff.register(["Softmax"])
def convert_softmax(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF Softmax; an optional trailing Const input supplies the axis."""
    # Some Softmax ops don't have an axis node.
    if len(inputs) > 1:
        # The last input is the axis node; consume it and drop it from inputs.
        tf_axis_node = kwargs["tf_nodes"][inputs[-1]]
        axis = int(tf2uff.convert_tf2numpy_const_node(tf_axis_node))
        inputs = inputs[:-1]
    else:
        axis = 0
    fmt = convert_to_str(tf_node.attr['data_format'].s)
    fmt = fmt if fmt else "NHWC"  # TF defaults to NHWC when unset
    data_fmt = tf2uff.convert_tf2uff_data_format(fmt)
    uff_graph.softmax(inputs[0], axis, data_fmt, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["Minimum"])
def convert_minimum(name, tf_node, inputs, uff_graph, **kwargs):
    """Map TF Minimum to the UFF binary 'min' op."""
    uff_graph.binary(inputs[0], inputs[1], 'min', name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["Maximum"])
def convert_maximum(name, tf_node, inputs, uff_graph, **kwargs):
    """Map TF Maximum to the UFF binary 'max' op."""
    uff_graph.binary(inputs[0], inputs[1], 'max', name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["Shape"])
def convert_shape(name, tf_node, inputs, uff_graph, **kwargs):
    """Map TF Shape to the UFF shape op."""
    uff_graph.shape(inputs[0], name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["ExpandDims"])
def convert_expand_dims(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF ExpandDims; the axis must be a Const input node."""
    # Retrieve and remove the axis node.
    tf_axis_node = kwargs["tf_nodes"][inputs[-1]]
    if tf_axis_node.op != "Const":
        raise UffException("ExpandDims Axis node has op " + str(tf_axis_node.op) + ", expected Const. The axis must be specified as a Const node.")
    axis = int(tf2uff.convert_tf2numpy_const_node(tf_axis_node))
    inputs.pop(-1)  # the axis is baked into the UFF node, not kept as an edge
    # Add the op.
    uff_graph.expand_dims(inputs[0], axis, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["ArgMax"])
def convert_argmax(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF ArgMax; the axis must be a Const input node."""
    # Retrieve and remove the axis node.
    tf_axis_input_node = kwargs["tf_nodes"][inputs[-1]]
    if tf_axis_input_node.op != "Const":
        raise UffException("ArgMax Axis node has op " + str(tf_axis_input_node.op) + ", expected Const. The axis must be specified as a Const node.")
    axis = int(tf2uff.convert_tf2numpy_const_node(tf_axis_input_node))
    inputs.pop(-1)
    # Add the op.
    uff_graph.argmax(inputs[0], axis, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["ArgMin"])
def convert_argmin(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF ArgMin; the axis must be a Const input node."""
    # Retrieve and remove the axis node.
    tf_axis_input_node = kwargs["tf_nodes"][inputs[-1]]
    if tf_axis_input_node.op != "Const":
        raise UffException("ArgMin Axis node has op " + str(tf_axis_input_node.op) + ", expected Const. The axis must be specified as a Const node.")
    axis = int(tf2uff.convert_tf2numpy_const_node(tf_axis_input_node))
    inputs.pop(-1)
    # Add the op.
    uff_graph.argmin(inputs[0], axis, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["Reshape"])
def convert_reshape(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF Reshape; reshapes emitted by a '.../flatten/...' scope
    become UFF flatten nodes instead (see the note below)."""
    str_name = tf_node.name.split('/')
    # Heuristic: a Reshape living under a scope named '*flatten*' is treated
    # as a flatten op so TensorRT can parse it.
    if len(str_name) > 1 and tf_node.name.split('/')[-2].lower().find('flatten') != -1:
        print('DEBUG: convert reshape to flatten node')
        uff_graph.flatten(inputs[0], name=name)  # flatten axis is ignored here
        return [tf2uff.split_node_name_and_output(inputs[0])[0]]  # second input of shape is dropped
    else:
        uff_graph.reshape(inputs[0], inputs[1], name)
        return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


# tensorflow does not have flatten op.
# tensorflow.contrib.slim has a flatten function that combines slice/shape to
# implement flatten. 'We' decided to hack it through chopping the reshape and
# slice, to add a flatten op. So it's easier to patch it with uff/TensorRT.
#
# @tf2uff.register(["Flatten"])
# def _flatten_helper(name, tf_node, inputs, uff_graph, **kwargs):
#     axis = tf2uff.get_tf_int_list(tf_node.attr['axis'])
#     uff_graph.flatten(inputs[0], name=name, axis=axis)
#     return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["Transpose"])
def convert_transpose(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF Transpose; only a constant permutation is supported."""
    tf_permutation_node = kwargs["tf_nodes"][inputs[1]]
    if tf_permutation_node.op != "Const":
        raise UffException("Transpose permutation has op " + str(tf_permutation_node.op) + ", expected Const. Only constant permuations are supported in UFF.")
    permutation = tf2uff.convert_tf2numpy_const_node(
        tf_permutation_node).tolist()
    inputs = inputs[:1]  # the permutation input is consumed, not kept as an edge
    uff_graph.transpose(inputs[0], permutation, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["Pack"])
def convert_pack(name, tf_node, inputs, uff_graph, **kwargs):
    """Map TF Pack (stack along an axis) to the UFF stack op."""
    axis = tf_node.attr['axis'].i
    inputs = inputs  # NOTE(review): no-op assignment, kept as-is
    uff_graph.stack(inputs, axis, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["ConcatV2"])
def convert_concatv2(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF ConcatV2; the axis may be an attribute or a Const input."""
    if "axis" in tf_node.attr:
        # Handle cases where the axis is not a node, but an attribute instead.
        axis = tf_node.attr["axis"].i
    else:
        tf_axis_node = kwargs["tf_nodes"][inputs[-1]]
        if tf_axis_node.op != "Const":
            raise UffException("Concat Axis node has op " + str(tf_axis_node.op) + ", expected Const. The axis for a Concat op must be specified as either an attribute, or a Const node.")
        axis = int(tf2uff.convert_tf2numpy_const_node(tf_axis_node))
        inputs = inputs[:-1]
    uff_graph.concat(inputs, axis, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["MaxPool"])
def convert_maxpool(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF MaxPool via the shared pooling helper."""
    return _pool_helper(name, tf_node, inputs, uff_graph, func='max', **kwargs)


@tf2uff.register(["AvgPool"])
def convert_avgpool(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF AvgPool via the shared pooling helper."""
    return _pool_helper(name, tf_node, inputs, uff_graph, func='avg', **kwargs)


def _pool_helper(name, tf_node, inputs, uff_graph, **kwargs):
    """Shared MaxPool/AvgPool conversion: extracts window/strides/padding,
    normalizes them to spatial (H, W) order for the given data format."""
    func = kwargs["func"]
    window_size = tf2uff.get_tf_int_list(tf_node.attr['ksize'])
    strides = tf2uff.get_tf_int_list(tf_node.attr['strides'])
    fmt = convert_to_str(tf_node.attr['data_format'].s)
    fmt = fmt if fmt else "NHWC"
    inputs, padding, fields = tf2uff.apply_fused_padding(
        tf_node, inputs, kwargs["tf_nodes"])
    data_format = tf2uff.convert_tf2uff_data_format(fmt)
    # Strip the batch/channel entries, keeping only the spatial dims.
    if fmt == 'NCHW':
        window_size = window_size[2:]
        strides = strides[2:]
        if padding is not None:
            padding = padding[2:]
    elif fmt == 'NHWC':
        window_size = [window_size[1], window_size[2]]
        strides = [strides[1], strides[2]]
        if padding is not None:
            padding = [padding[1], padding[2]]
    else:
        raise ValueError("Unsupported data format: " + fmt)
    uff_graph.pool(
        inputs[0], func, window_size, strides, padding,
        data_format=data_format, name=name, fields=fields)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["LRN"])
def convert_lrn(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF local response normalization (LRN)."""
    lhs = inputs[0]
    fmt = convert_to_str(tf_node.attr['data_format'].s)
    fmt = fmt if fmt else "NC+"
    window_size = tf_node.attr["depth_radius"].i
    alpha = tf_node.attr["alpha"].f
    beta = tf_node.attr["beta"].f
    bias = tf_node.attr["bias"].f
    uff_graph.lrn(lhs, window_size, alpha, beta, bias, fmt, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["MatMul"])
def convert_matmul(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF MatMul to a UFF fully-connected node; the transpose flags
    are encoded in the operand format strings."""
    lhs, rhs = inputs
    trans_a = tf_node.attr['transpose_a'].b
    trans_b = tf_node.attr['transpose_b'].b
    lhs_fmt = 'CN' if trans_a else 'NC'
    rhs_fmt = 'KC' if trans_b else 'CK'
    uff_graph.fully_connected(
        lhs, rhs, lhs_fmt, rhs_fmt, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["Conv2D"])
def convert_conv2d(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF Conv2D via the shared conv helper."""
    return _conv2d_helper(name, tf_node, inputs, uff_graph, func="conv2d", **kwargs)


@tf2uff.register(["DepthwiseConv2dNative"])
def convert_depthwise_conv2d_native(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF depthwise conv via the shared conv helper (grouped conv)."""
    return _conv2d_helper(name, tf_node, inputs, uff_graph, func="depthwise", **kwargs)


def _conv2d_helper(name, tf_node, inputs, uff_graph, **kwargs):
    """Shared Conv2D/depthwise conversion.

    Handles data-format normalization, fused padding, depthwise group count,
    and dilated convs expressed as SpaceToBatch/BatchToSpace triples
    (block_shape / paddings / crops Const inputs, which are consumed here).
    """
    func = kwargs["func"]
    fmt = convert_to_str(tf_node.attr['data_format'].s)
    fmt = fmt if fmt else "NHWC"
    strides = tf2uff.get_tf_int_list(tf_node.attr['strides'])
    inputs, padding, fields = tf2uff.apply_fused_padding(
        tf_node, inputs, kwargs["tf_nodes"])
    lhs_fmt = tf2uff.convert_tf2uff_data_format(fmt)
    rhs_fmt = '+CK'
    if fmt == 'NCHW':
        strides = strides[2:]
        if padding is not None:
            padding = padding[2:]
    elif fmt == 'NHWC':
        strides = [strides[1], strides[2]]
        if padding is not None:
            padding = [padding[1], padding[2]]
    else:
        raise ValueError("Unsupported data format: " + fmt)
    if func == "depthwise":
        # Group count = input-channel dim of the filter tensor.
        wt = kwargs["tf_nodes"][inputs[1]]
        number_groups = int(wt.attr['value'].tensor.tensor_shape.dim[2].size)
    else:
        number_groups = None
    # If this node represents a dilated conv, pull in the dilations.
    dilation = None
    if "dilations" in tf_node.attr:
        if fmt == "NCHW":
            dilation = tf2uff.get_tf_int_list(tf_node.attr['dilations'])[2:]
        else:
            dilation = tf2uff.get_tf_int_list(tf_node.attr['dilations'])[1:3]
    # FIXME: Need a better way to check for dilated convs. This just checks if the block_shape input is as expected.
    # Ideally we should have a 'get_input_by_name' function. Maybe we can leverage GS here.
    # Another possibility is that GS can add these as attributes to the node rather than maintaining them as
    # separate const nodes.
    tf_block_shape_node = kwargs["tf_nodes"][inputs[1]]
    if "block_shape" in tf_block_shape_node.name.split('/')[-1] and tf_block_shape_node.op == "Const":
        # Get the second input (block_shape) - of the form [1, dilation_value, dilation_value]
        dilation = np.frombuffer(tf_block_shape_node.attr["value"].tensor.tensor_content, dtype=np.int32).tolist()
        if len(dilation) > 2:
            dilation = [dilation[1], dilation[2]]
        inputs.pop(1)
    tf_paddings_node = kwargs["tf_nodes"][inputs[1]]
    if "paddings" in tf_paddings_node.name.split('/')[-1] and tf_paddings_node.op == "Const":
        # Get the second input (paddings, since block_shape is already removed)
        paddings_temp = np.frombuffer(tf_paddings_node.attr["value"].tensor.tensor_content, dtype=np.int32).tolist()
        inputs.pop(1)
        # Get cropping information, but only if paddings is also present.
        tf_crops_node = kwargs["tf_nodes"][inputs[1]]
        if "crops" in tf_crops_node.name.split('/')[-1] and tf_crops_node.op == "Const":
            # Get the second input (crops, since block_shape is already removed)
            crops = np.frombuffer(tf_crops_node.attr["value"].tensor.tensor_content, dtype=np.int32)
            inputs.pop(1)
            paddings_temp = (np.array(paddings_temp) - crops).tolist()
        # TF paddings are [[top,bottom], [left,right]], so we need to rearrange.
        perm = [0, 2, 1, 3]
        # HACK: Sometimes paddings has [0, 0] at the front.
        if len(paddings_temp) == 6:
            paddings_temp = paddings_temp[2:]
        paddings_temp = [paddings_temp[p] for p in perm]
        # Symmetric padding ("same")
        if paddings_temp[0] == paddings_temp[2] and paddings_temp[1] == paddings_temp[3]:
            paddings_temp = paddings_temp[0:2]
            padding = paddings_temp if not padding else [p + pt for p, pt in zip(padding, paddings_temp)]
        else:
            print("Asymmetric padding for dilated convolutions is currently unsupported in the UFF converter.")
    uff_graph.conv(
        inputs[0], inputs[-1], strides, padding,
        dilation=dilation, number_groups=number_groups,
        left_format=lhs_fmt, right_format=rhs_fmt,
        name=name, fields=fields)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["Conv2DBackpropInput"])
def convert_conv2d_backprop_input(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF Conv2DBackpropInput (transposed convolution)."""
    return _conv2d_transpose_helper(name, tf_node, inputs, uff_graph,
                                    func="conv2d_transpose", **kwargs)


def _conv2d_transpose_helper(name, tf_node, inputs, uff_graph, **kwargs):
    """Shared transposed-conv conversion. Inputs are (output_shape, filter,
    data) in TF order and are passed to UFF reversed."""
    kwargs.pop("func")  # FIXME support depthwise transpose
    fmt = convert_to_str(tf_node.attr['data_format'].s)
    fmt = fmt if fmt else "NHWC"
    strides = tf2uff.get_tf_int_list(tf_node.attr['strides'])
    fields = {}
    padding = None
    number_groups = None
    tf_padding = convert_to_str(tf_node.attr['padding'].s)
    if tf_padding == "SAME":
        fields['implicit_padding'] = "same"
    elif tf_padding != "VALID":
        raise ValueError("Padding mode %s not supported" % tf_padding)
    lhs_fmt = tf2uff.convert_tf2uff_data_format(fmt)
    rhs_fmt = '+KC'
    if fmt == 'NCHW':
        strides = strides[2:]
    elif fmt == 'NHWC':
        strides = [strides[1], strides[2]]
    else:
        raise ValueError("Unsupported data format: " + fmt)
    uff_graph.conv_transpose(
        inputs[2], inputs[1], inputs[0], strides, padding,
        dilation=None, number_groups=number_groups,
        left_format=lhs_fmt, right_format=rhs_fmt,
        name=name, fields=fields)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["BiasAdd"])
def convert_bias_add(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF BiasAdd to a const + binary 'add' pair; for NCHW the bias
    is reshaped so it broadcasts over the spatial dims."""
    fmt = convert_to_str(tf_node.attr['data_format'].s)
    fmt = fmt if fmt else "NHWC"
    biases_name = inputs[1]
    biases_array = tf2uff.convert_tf2numpy_const_node(
        kwargs["tf_nodes"][biases_name])
    inputs = inputs[:1]
    if fmt == 'NCHW':
        ndim = 4
        new_shape = [-1] + [1] * (ndim - 2)  # (C, 1, 1) broadcast shape
        biases_array = biases_array.reshape(new_shape)
    uff_graph.const(biases_array, biases_name)
    uff_graph.binary(inputs[0], biases_name, 'add', name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["FusedBatchNorm"])
def convert_fused_batch_norm(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF FusedBatchNorm (inference-style batchnorm) to UFF."""
    input_node, gamma, beta, mean, variance = inputs
    eps = tf_node.attr['epsilon'].f
    fmt = convert_to_str(tf_node.attr['data_format'].s)
    fmt = fmt if fmt else "NHWC"
    data_fmt = tf2uff.convert_tf2uff_data_format(fmt)
    uff_graph.batchnorm(input_node, gamma, beta, mean, variance, eps, data_fmt, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["StridedSlice"])
def convert_strided_slice(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF StridedSlice; ellipsis/new-axis masks are unsupported."""
    begin_mask = tf_node.attr['begin_mask'].i
    end_mask = tf_node.attr['end_mask'].i
    shrink_axis_mask = tf_node.attr['shrink_axis_mask'].i
    if tf_node.attr['ellipsis_mask'].i != 0:
        raise ValueError("ellipsis_mask not supported")
    if tf_node.attr['new_axis_mask'].i != 0:
        raise ValueError("new_axis_mask not supported")
    uff_graph.strided_slice(inputs[0], inputs[1], inputs[2], inputs[3],
                            begin_mask, end_mask, shrink_axis_mask, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


def _reduce_helper(name, tf_node, inputs, uff_graph, **kwargs):
    """Shared Sum/Prod/Min/Max/Mean conversion; the axes come from a Const
    input node, which is consumed here."""
    func = kwargs.pop("func")
    tf_axes_node = kwargs["tf_nodes"][inputs[1]]
    array = tf2uff.convert_tf2numpy_const_node(tf_axes_node)
    axes = array.tolist()
    inputs = inputs[:1]
    keepdims = tf_node.attr['keep_dims'].b
    print("Warning: keepdims is ignored by the UFF Parser and defaults to True")
    uff_graph.reduce(inputs[0], func, axes, keepdims, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["Sum"])
def convert_sum(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF Sum reduction."""
    return _reduce_helper(name, tf_node, inputs, uff_graph, func="sum", **kwargs)


@tf2uff.register(["Prod"])
def convert_prod(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF Prod reduction."""
    return _reduce_helper(name, tf_node, inputs, uff_graph, func="prod", **kwargs)


@tf2uff.register(["Min"])
def convert_min(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF Min reduction."""
    return _reduce_helper(name, tf_node, inputs, uff_graph, func="min", **kwargs)


@tf2uff.register(["Max"])
def convert_max(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF Max reduction."""
    return _reduce_helper(name, tf_node, inputs, uff_graph, func="max", **kwargs)


@tf2uff.register(["Mean"])
def convert_mean(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF Mean reduction."""
    return _reduce_helper(name, tf_node, inputs, uff_graph, func="mean", **kwargs)


@tf2uff.register(["Squeeze"])
def convert_squeeze(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF Squeeze using its squeeze_dims attribute."""
    axis = tf2uff.get_tf_int_list(tf_node.attr['squeeze_dims'])
    uff_graph.squeeze(inputs[0], name=name, axis=axis)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


# TODO: add attributes of MODE / constant_values
@tf2uff.register(["Pad"])
def convert_pad(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF Pad; the paddings tensor is passed through as an input."""
    pad = inputs[1]
    uff_graph.pad(inputs[0], pad, name)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["Gather"])
def convert_gather(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert legacy TF Gather (implicit axis 0)."""
    indices_dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr['Tindices'].type)
    params_dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr['Tparams'].type)
    validate_indices = tf_node.attr['validate_indices'].b
    uff_graph.gather(inputs, name, indices_dtype, params_dtype, validate_indices)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["GatherV2"])
def convert_gather_v2(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF GatherV2; an optional third input supplies the axis."""
    if len(inputs) > 2:
        tf_axis_node = kwargs["tf_nodes"][inputs[-1]]
        axis = int(tf2uff.convert_tf2numpy_const_node(tf_axis_node))
        inputs = inputs[:-1]
    else:
        axis = 0
    indices_dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr['Tindices'].type)
    params_dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr['Tparams'].type)
    uff_graph.gather_v2(inputs, name, axis, indices_dtype, params_dtype)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]


@tf2uff.register(["ResourceGather"])
def convert_resource_gather(name, tf_node, inputs, uff_graph, **kwargs):
    """Convert TF ResourceGather; like GatherV2 but the params dtype comes
    from the 'dtype' attribute of the resource variable."""
    if len(inputs) > 2:
        tf_axis_node = kwargs["tf_nodes"][inputs[-1]]
        axis = int(tf2uff.convert_tf2numpy_const_node(tf_axis_node))
        inputs = inputs[:-1]
    else:
        axis = 0
    indices_dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr['Tindices'].type)
    params_dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr['dtype'].type)
    uff_graph.gather_v2(inputs, name, axis, indices_dtype, params_dtype)
    return [tf2uff.split_node_name_and_output(inp)[0] for inp in inputs]
from pytest import raises

from family_foto.models.file import File
from tests.base_test_case import BaseTestCase


class FileTestCase(BaseTestCase):
    """
    Tests the base file class.

    The three accessors under test are expected to raise NotImplementedError
    on the base File class (subclasses provide the real implementations).
    """

    def setUp(self):
        # Create a fresh bare File for every test.
        super().setUp()
        self.file = File(filename='test.txt')

    def test_path(self):
        """
        Test path is not implemented.
        """
        with raises(NotImplementedError):
            _ = self.file.path

    def test_meta(self):
        """
        Test meta is not implemented.
        """
        with raises(NotImplementedError):
            _ = self.file.meta

    def test_thumbnail(self):
        """
        Test thumbnail is not implemented.
        """
        with raises(NotImplementedError):
            _ = self.file.thumbnail(200, 200)
#coding: utf-8 DEBUG = True TEMPLATE_DEBUG = DEBUG import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME': 'BrokerTour.db', 'USER': '', 'PASSWORD': '', 'HOST': '', 'PORT': '', } } EMAIL_USE_TLS = True EMAIL_HOST = 'smtp.gmail.com' EMAIL_PORT = 587 EMAIL_HOST_USER = '' EMAIL_HOST_PASSWORD = ''
import numpy as np import torch import torch.utils.data #import matplotlib.pyplot as plt # Sub-functions def addRectangle(X, N): i = np.random.randint(0, N-3) j = np.random.randint(0, N-3) width = np.random.randint(2, 8) height = np.random.randint(2,8) row_min = np.max((0, i)) row_max = np.min((N - 1, i + width)) col_min = np.max((0, j)) col_max = np.min((N - 1, j + height)) X[row_min:row_max, col_min:col_max] = 1 def randExpand(i, j, rowNext, colNext, p, X, N): if ((i != 0) and (i != N-1) and (j != 0) and (j != N-1)): expand = np.random.binomial(1, p, (8)) if ((X[i-1, j-1] == 0) and (expand[0] == 1)): X[i-1, j-1] = 1 rowNext.append(i - 1) colNext.append(j - 1) if ((X[i-1, j] == 0) and (expand[1] == 1)): X[i-1, j] = 1 rowNext.append(i - 1) colNext.append(j) if ((X[i-1, j+1] == 0) and (expand[2] == 1)): X[i-1, j+1] = 1 rowNext.append(i - 1) colNext.append(j+1) if ((X[i, j+1] == 0) and (expand[3] == 1)): X[i, j+1] = 1 rowNext.append(i) colNext.append(j+1) if ((X[i+1, j+1] == 0) and (expand[4] == 1)): X[i+1, j+1] = 1 rowNext.append(i+1) colNext.append(j+1) if ((X[i+1, j] == 0) and (expand[5] == 1)): X[i+1, j] = 1 rowNext.append(i+1) colNext.append(j) if ((X[i+1, j-1] == 0) and (expand[6] == 1)): X[i+1, j-1] = 1 rowNext.append(i+1) colNext.append(j-1) if ((X[i, j-1] == 0) and (expand[6] == 1)): X[i, j-1] = 1 rowNext.append(i) colNext.append(j-1) def environment(N, p): # These are variable that will eventually be passed into the function #N = 20 # Start of function N2 = N**2 #p = 0.15 # Random seed a bunch of rectangles nrectangles = np.random.randint(2, 5) X = np.zeros((N, N)) for i in range(nrectangles): addRectangle(X, N) rowNonzero, colNonzero = np.nonzero(X) rowNext = [] colNext = [] for i in range(len(rowNonzero)): rowNext.append(rowNonzero[i]) colNext.append(colNonzero[i]) row = [] col = [] diff = 10 while((diff != 0)): del row[:] del col[:] row = rowNext[:] col = colNext[:] l = len(row) del rowNext[:] del colNext[:] for i in range(0, l): row_current = row[i] 
col_current = col[i] randExpand(row_current, col_current, rowNext, colNext, p, X, N) diff = len(rowNext) # Now we want to random place predator, prey and cave # Need a list of zeros row, col = np.nonzero(X) X_invert = np.ones((N, N)) X_invert[row, col] = 0 row_zero, col_zero = np.nonzero(X_invert) prey = np.zeros((N, N)) predator = np.zeros((N, N)) cave = np.zeros((N, N)) idx_prey = np.random.choice(len(row_zero), 1, replace = False) idx_predator = np.random.choice(len(row_zero), 3, replace = False) idx_cave = np.random.choice(len(row_zero), 1, replace = False) prey[row_zero[idx_prey], col_zero[idx_prey]] = 1 predator[row_zero[idx_predator], col_zero[idx_predator]] = 1 cave[row_zero[idx_cave], col_zero[idx_cave]] = 1 return X, prey, predator, cave # plt.imshow(X, cmap='RdBu', interpolation='none') # plt.show() # Two functions for generating the environment
from flask_pymongo import PyMongo # MongoDB client mongo = PyMongo()
import colorsys, random
import cv2
import colorsys


def random_colors(N):
    """
    Generate N random, reproducible RGB colors.

    random.seed(0) is reset on every call, so the same N always yields the
    same palette (one fixed color per class index). Fix: the old docstring
    claimed colors were generated in HSV space and converted; the
    implementation samples RGB components directly, which is kept.

    param: N : Number of classes in the Frame
    return: list of N (R, G, B) tuples with components in [0, 255]
    """
    random.seed(0)
    return [tuple(random.randint(0, 255) for _ in range(3)) for _ in range(N)]


def draw(frame, x, y, w, h, color):
    """Draw a 1-pixel rectangle outline on frame with four cv2.line calls."""
    frame = cv2.line(frame, (x, y), (x + w, y), color, 1)          # top
    frame = cv2.line(frame, (x, y), (x, y + h), color, 1)          # left
    frame = cv2.line(frame, (x, y + h), (x + w, y + h), color, 1)  # bottom
    frame = cv2.line(frame, (x + w, y), (x + w, y + h), color, 1)  # right
    return frame


def text(frame, string, color, x=0, y=0):
    """Render `string` above (x, y) on a filled rectangle of `color`."""
    font = cv2.FONT_HERSHEY_COMPLEX
    # Measure the text so the background box fits it exactly.
    s = cv2.getTextSize(string, cv2.FONT_HERSHEY_COMPLEX, 1, 1)[0]
    frame = cv2.rectangle(frame, (x, y - 2 * s[1]), (x + s[0], y), color, -1)
    frame = cv2.putText(frame, string, (x, y - int(s[1] / 2)), font, 1, (0, 0, 0), 1)
    return frame
from flask import Flask, request, flash, redirect
import pika
import os
import json
from werkzeug.utils import secure_filename
from tempfile import gettempdir
from files_ms_client import upload
from DAO.connection import Connection
# from files_ms_client.client import upload

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = gettempdir()


@app.route('/vad', methods=['POST'])
def vad():
    """Accept an .mp3/.wav upload and queue a voice-activity-detection job.

    Stores the file via the files microservice and the job row in the DB,
    then publishes a JSON message on the durable 'vad' queue.
    """
    if request.method == 'POST':
        file = request.files['file']
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and (file.filename.endswith('.mp3') or file.filename.endswith('.wav')):
            connection = pika.BlockingConnection(
                pika.ConnectionParameters(host=os.environ['QUEUE_SERVER']))
            channel = connection.channel()
            channel.queue_declare(queue='vad', durable=True)
            # Bug fix: read the upload exactly once. The second file.read()
            # on an exhausted stream returned b'', so the DB stored an
            # empty blob.
            data = file.read()
            file_url = upload(data, buffer=True, mime=file.mimetype)['name']
            # DB
            db_conn = Connection()
            oid = db_conn.insert_jobs(
                type='vad', status='new', file=data)
            message = {'type': 'vad',
                       'status': 'new',
                       'oid': oid,
                       'file': file_url}
            channel.basic_publish(
                exchange='',
                routing_key='vad',
                body=json.dumps(message))
            connection.close()
            return 'Done'
        else:
            return 'Invalid file'


@app.route('/asr', methods=['POST'])
def asr():
    """Accept an .mp3/.wav upload and queue a speech-recognition job."""
    if request.method == 'POST':
        file = request.files['file']
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and (file.filename.endswith('.mp3') or file.filename.endswith('.wav')):
            # NOTE(review): host is hard-coded here while /vad reads
            # QUEUE_SERVER from the environment -- confirm which is intended.
            connection = pika.BlockingConnection(
                pika.ConnectionParameters(host='rabbitmq'))
            channel = connection.channel()
            channel.queue_declare(queue='asr')
            # Bug fix: read the upload exactly once (see vad above).
            data = file.read()
            file_url = upload(data, buffer=True, mime=file.mimetype)['name']
            # DB
            db_conn = Connection()
            oid = db_conn.insert_jobs(
                type='asr', status='new', file=data)
            message = {'type': 'asr',
                       'status': 'new',
                       'oid': oid,
                       'file': file_url}
            # Bug fix: publish JSON like the other routes; str(message) was
            # a Python repr (single quotes) that JSON consumers cannot parse.
            channel.basic_publish(
                exchange='',
                routing_key='asr',
                body=json.dumps(message))
            connection.close()
            return 'Done'
        else:
            flash('Invalid file')
            return redirect(request.url)
@app.route('/segmentation', methods=['POST'])
def extract_audio():
    """Accept an .mp4/.avi upload and queue an audio-extraction job.

    Stores the raw video in Mongo, creates the job + project rows, and
    publishes a JSON message on the durable 'audio_extractor' queue.
    Returns {'project_id': ...} on success.
    """
    if request.method == 'POST':
        file = request.files['file']
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and (file.filename.endswith('.mp4') or file.filename.endswith('.avi')):
            connection = pika.BlockingConnection(
                pika.ConnectionParameters(host='rabbitmq'))
            channel = connection.channel()
            channel.queue_declare(queue='audio_extractor', durable=True)
            print("XAXA: ", os.getenv('FILES_URL', 'XORUMELOS'))  # debug trace
            # Bug fix: read the upload exactly once. The second file.read()
            # on the exhausted stream returned b'', so insert_doc_mongo
            # stored an empty document.
            data = file.read()
            file_url = upload(data, buffer=True, mime=file.mimetype)['name']
            # DB
            db_conn = Connection()
            file_oid = db_conn.insert_doc_mongo(data)
            oid, project_id = db_conn.insert_jobs(
                type='audio_extractor', status='new', file=file_oid,
                file_name=file.filename)
            message = {'type': 'audio_extractor',
                       'status': 'new',
                       'oid': file_oid,
                       'project_id': project_id,
                       'file': file_url}
            channel.basic_publish(
                exchange='',
                routing_key='audio_extractor',
                body=json.dumps(message))
            connection.close()
            return {'project_id': project_id}
        else:
            flash('Invalid file')
            return redirect(request.url)


if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0')
# Read four scores from the user and print their average.
scores = [int(input(f"enter score {i} : ")) for i in range(1, 5)]
avg = sum(scores) / 4
print("Average score is : ", avg)
from math import exp
from PyQt5 import QtWidgets, QtCore
from pyqtgraph import PlotWidget, plot
import pyqtgraph as pg
import sys  # We need sys so that we can pass argv to QApplication
import os
from random import randint
import numpy as np
import Methods.Bisection as bisection
from sympy import *
import os.path


class Ui_SecondWindow(object):
    """Plot window: graphs the user's function and animates the bisection
    interval read line-by-line from ../View/values.txt."""

    # NOTE(review): class-level defaults shared by all instances; __init__
    # overwrites them per instance, so they only act as fallbacks.
    function = ""
    maxIteration = 0
    epsilon = 0
    a = 0
    b = 0

    def __init__(self, function, maxIteration, epsilon, a, b):
        # function: expression string parsed later by sympify;
        # [a, b]: bisection bracket; epsilon/maxIteration: stop criteria.
        print(function)
        self.function = function
        self.maxIteration = maxIteration
        self.epsilon = epsilon
        self.a = a
        self.b = b

    def setupUi(self, MainWindow):
        """Run the bisection solver, plot the function curve, and start a
        timer that replays the solver's intervals from values.txt."""
        # self.function = input("Enter your function")
        # Run the solver first; it writes the per-iteration intervals that
        # values.txt is read from below.
        bisection.mainFunc(self.function, self.maxIteration, self.epsilon, self.a, self.b)
        # super(Ui_MainWindow, self).__init__(*args, **kwargs)
        MainWindow.graphWidget = pg.PlotWidget()
        MainWindow.setCentralWidget(MainWindow.graphWidget)
        # random data
        # Sample the function on [a-1, b+1] with a 0.001 step.
        start = self.a - 1
        stop = self.b + 1
        step = 0.001
        float_range_array = np.arange(start, stop, step)
        float_range_array = np.array(float_range_array, dtype=float)
        self.float_range_list = list(float_range_array)
        #print(self.float_range_list)
        # functions will be added here
        self.expr = sympify(self.function)
        x = var('x')
        scale = int((stop - start) // step) + 1
        self.data = np.zeros((scale,), dtype=float)
        print(len(self.float_range_list))
        # Evaluate the sympy expression at every sample point (slow but exact).
        i = 0
        while i < len(self.float_range_list):
            # print(self.float_range_list[i])
            result = self.expr.subs(x, self.float_range_list[i])
            self.data[i] = result
            #print(self.data[i])
            i = i + 1
        #self.data = [self.expr.subs(x, value) for value in self.float_range_list]
        MainWindow.graphWidget.showGrid(x=True, y=True)
        MainWindow.graphWidget.setBackground('w')
        pen = pg.mkPen(color=(255, 0, 0))
        # NOTE(review): the file is opened *before* the existence check, so
        # open() would already have raised if it were missing -- the check
        # below can never report "File not exist" usefully.
        self.file = open("../View/values.txt", "r")
        if os.path.isfile('../View/values.txt'):
            print("File exist")
        else:
            print("File not exist")
        # First line: four floats describing the initial interval endpoints.
        self.input = [float(x) for x in next(self.file).split()]
        print(self.input)
        MainWindow.graphWidget.plot(self.float_range_list, self.data, pen=pen)
        self.a_values = [self.input[0], self.input[2]]
        self.b_values = [self.input[1], self.input[3]]
        pen2 = pg.mkPen(color=(0, 0, 255), width=3)
        self.data_line = MainWindow.graphWidget.plot(self.a_values, self.b_values, pen=pen2)
        # self.graphWidget.plot(self.tuples, pen=pen)
        # Replay one solver iteration every 1.5 s.
        self.timer = QtCore.QTimer()
        self.timer.setInterval(1500)
        self.timer.timeout.connect(self.update_plot_data)
        self.timer.start()

    def update_plot_data(self):
        """Timer slot: read the next interval from values.txt and redraw.

        NOTE(review): next(self.file) raises StopIteration at EOF, so the
        `!= None` guard below is dead code -- once the file is exhausted
        every timer tick raises instead of hitting the else branch.
        """
        next_line = next(self.file)
        if(next_line != None):
            self.input = [float(x) for x in next_line.split()]
            self.a_values = self.a_values[2:]  # Remove the first y element.
            self.a_values.append(self.input[0])
            self.a_values.append(self.input[2])  # Add a new value 1 higher than the last.
            self.b_values = self.b_values[2:]  # Remove the first
            self.b_values.append(self.input[1])
            self.b_values.append(self.input[3])  # Add a new random value.
            self.data_line.setData(self.a_values, self.b_values)  # Update the data.
        else:
            pass


if __name__ == '__main__':
    import sys
    app = QtWidgets.QApplication(sys.argv)
    graph = QtWidgets.QMainWindow()
    # NOTE(review): Ui_SecondWindow.__init__ requires five arguments, so this
    # bare call raises TypeError -- this __main__ path looks unmaintained.
    ui = Ui_SecondWindow()
    ui.setupUi(graph)
    graph.show()
    sys.exit(app.exec_())
# Write a function eqfn that will calculate f(x) = x^2 + 1/x for all elements in an array.
# Since division by 0 is not possible, the function will instead remove the elements equal to 0
# prior to calculating f(x).
import numpy as np


def eqfn(x):
    """Evaluate f(x) = x**2 + 1/x elementwise, dropping zero entries first.

    Generalization: accepts any array-like (lists included) by converting
    with np.asarray; the original required an ndarray. Zeros are always
    filtered out, which also covers the all-nonzero case unchanged.

    Returns a 1-D array of f evaluated at the nonzero elements of x.
    """
    x = np.asarray(x)
    x = x[x != 0]  # remove zeros so the division is always defined
    return x ** 2 + 1 / x
# TCP client for a word-jumble game: prints the server greeting, then loops
# receiving a jumbled word and sending back the user's answer until the user
# submits an empty line.
import sys
from socket import *                        # portable socket interface plus constants

serverHost = 'localhost'                    # server name, or: 'starship.python.net'
serverPort = 50007                          # non-reserved port used by the server

sockobj = socket(AF_INET, SOCK_STREAM)      # make a TCP/IP socket object
sockobj.connect((serverHost, serverPort))   # connect to server machine + port

time = sockobj.recv(1024)                   # receive time of connection from server
print(time.decode() + '\n')                 # decode and print

# NOTE(review): each recv(1024) assumes one whole protocol message arrives per
# call; TCP gives no message boundaries, so confirm the server sends small,
# single-segment messages.
while True:
    jumbled_word = sockobj.recv(1024)       # receive the jumbled word string from server
    print(jumbled_word.decode())            # decode and print it to client
    print('Type your answer')
    client_input = input()                  # take client input from command line
    if not client_input: break              # if client provides no input, break this loop
    sockobj.send(client_input.encode())     # encode and send input to server over socket
    server_output = sockobj.recv(1024)      # receive output from server
    print(server_output.decode() + '\n')    # decode and print it to client

sockobj.close()                             # close socket to send eof to server
import os

# Bootstrap script: installs Python 3, pip and the packages this project
# needs. NOTE(review): shells out with sudo and os.system (no error
# handling) -- run only where that is acceptable.
os.system("sudo apt-get install python3")
os.system("sudo apt-get install python3-pip")
# Fix: the PyPI package is 'requests'; the original installed 'request',
# a different, unrelated package.
os.system("pip3 install requests")
# Fix: removed "pip3 install sys" -- 'sys' is a standard-library module and
# cannot be pip-installed, so that command always failed.
os.system("pip3 install colorama")

from colorama import Fore

print("\033[1;33m\nAll DONE..!! Required packages installed\033[1;m")
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from astree.Tree import Tree # Skip statement class StatementSkip(Tree): def __init__(self, label=None): Tree.__init__(self, label=label) def eval(self, state, catch_vars=None, include_assign=False): pass def to_exp(self): return "skip"
import random

# Rock-paper-scissors (kámen/nůžky/papír): the computer picks a move at
# random, the player types theirs, and the outcome is printed in Czech.
random_word_list = ["kámen", "nůžky", "papír"]
tah_pocitace = random.choice(random_word_list)
tah_hrace = input("Zadej svůj tah: ")

# Outcome table keyed by (player move, computer move).
vysledky = {
    ("kámen", "kámen"): 'Remíza!',
    ("kámen", "nůžky"): 'Vyhrála jsi!',
    ("kámen", "papír"): 'Prohrála jsi!',
    ("nůžky", "kámen"): 'Prohrála jsi!',
    ("nůžky", "nůžky"): 'Remíza!',
    ("nůžky", "papír"): 'Vyhrála jsi!',
    ("papír", "kámen"): 'Vyhrála jsi!',
    ("papír", "nůžky"): 'Prohrála jsi!',
    ("papír", "papír"): 'Remíza!',
}

if tah_hrace in random_word_list:
    print(vysledky[(tah_hrace, tah_pocitace)])
else:
    print("Promiň, ale znám pouze slova: kámen, nůžky, papír.")
import numpy as np
import segment as sg
import coordinate_transform as ct

'''
A leg is a collection of 2D segments.
Every position in 3D is viewed in cylindrical coordinates, which allows us to
use the first segment in horizontal plane, and the rest of the segments in the
plane specified by an angle from x-axis.
'''

class leg(object):
    def __init__ (self,num_segs,lens,base_location=[0,0,0],base_angle=0,positions=[[0,0,0]],forward_angle=0,leg_ID=0,beta1=0.8,beta2=2,step_offset=0,z_offset_height=0):
        # Build the kinematic chain and the gait trajectory for one leg.
        #
        # num_segs        -- number of segments in the chain
        # lens            -- list of leg segment lengths (one per segment)
        # base_location   -- 3D mounting point of the leg on the body
        # base_angle      -- rotation of the leg frame about z at the base
        # positions       -- ordered list of 3D foot waypoints (one gait cycle)
        # forward_angle   -- initial walking direction for the trajectory
        # leg_ID          -- identifier, stored as self.ID
        # beta1/beta2     -- damping-factor multipliers for the LM solver
        #                    (shrink on success / grow on failure)
        # step_offset     -- phase offset into the gait, as a fraction [0,1)
        # z_offset_height -- vertical shift applied to every waypoint
        #
        # NOTE(review): base_location and positions use mutable list
        # defaults; both are copied into fresh np.arrays below, so the
        # shared-default pitfall does not bite here.
        #
        # Chain segments laid out end-to-end along x, all at angle 0.
        self.segments = [sg.segment(0,0,lens[0],0)]
        for i in range(1,num_segs):
            self.segments.append(sg.segment(self.segments[i-1].get_base()[0]+lens[i-1],0,lens[i],0))
        self.base_location = np.array(base_location)
        self.base_angle = base_angle
        # Used for non-linear least-squares (Levenberg-Marquardt damping schedule)
        self.beta1 = beta1
        self.beta2 = beta2
        self.ID = leg_ID
        self.positions = np.array(positions)
        self.forward_angle = 0
        self.step_count = 0
        # Rotate the waypoint trajectory to face the requested direction.
        self.set_forward_angle(forward_angle)
        self.max_step = len(positions) - 1
        self.forward = 1
        # Convert fractional phase offset to a waypoint index and jump there.
        step_offset = int(step_offset*self.max_step)
        self.step(force_step=step_offset)
        #self.z_height_offset = z_height_offset
        # Shift the whole trajectory vertically (in place).
        ct.translate(self.positions,0,0,z_offset_height)

    def follow_lsq(self,target):
        """Point the leg tip at a 3D target via cylindrical decomposition.

        The base segment handles the azimuth; the remaining planar chain
        is solved with damped least squares in the (rho, z) plane.
        """
        #print(target)
        # target is a 3D numpy vector
        # convert cartesian to cylindrical
        x = target[0]
        y = target[1]
        z = target[2]
        #print(x,y,z)
        theta = np.arctan2(y,x)
        rho = np.linalg.norm([x,y])
        # set angle for base leg
        self.segments[0].set_angle(theta)
        # follow in the theta plane using the remaining segments
        tar_theta_plane = np.array([rho,z])
        #print("Plane target:",rho,z)
        # Compute new angle, using LM damped by angle change
        self.follow_in_theta_plane_lsq(tar_theta_plane)
        return None

    def follow_in_theta_plane_lsq(self,tar_theta_plane):
        """Solve planar IK for the non-base segments toward (rho, z).

        First tries an undamped LM solve; if any joint angle jumps by more
        than the tolerance, re-solves with an angle-change penalty (gamma)
        to smooth the motion.
        """
        # PARAMs to tune:
        # angle change tolerance
        # iterations of damped LM
        # damping factor
        ang_change_tol = np.pi/18
        damped_LM_iters = 3
        damping_factor = 0.3
        n = len(self.segments)
        # Joint-relative angles of the non-base segments (see get_angles).
        prev_angs = self.get_angles(mode='servo')[1:]
        # Undamped solve (gamma=0 disables the angle-change penalty).
        x = self.LM_algo(tar_theta_plane,prev_angs,gamma=0)
        # Compare results to previous angles, if change is large, use multi-objective LM
        for i in range(0,n-1):
            if np.abs(x[i]-prev_angs[i]) > ang_change_tol:
                large_change = True
                break
            else:
                large_change = False
        if large_change:
            # More iterations better results
            # gamma controls the relative importance of angle change
            for k in range(0,damped_LM_iters):
                prev_angs = self.get_angles(mode='servo')[1:]
                x = self.LM_algo(tar_theta_plane,prev_angs,gamma=damping_factor)
            # Set the segment angles using the non-linear least-squares solution
            # (x[i] are joint-relative; cumulative sum gives absolute angles).
            for i in range(1,n):
                self.segments[i].set_angle(np.sum(x[0:i]))
                if i < n-1:
                    self.segments[i+1].set_base(self.segments[i].get_tip())
        else:
            # Set the segment angles using the non-linear least-squares solution
            for i in range(1,n):
                self.segments[i].set_angle(np.sum(x[0:i]))
                if i < n-1:
                    self.segments[i+1].set_base(self.segments[i].get_tip())
        return None

    def LM_algo(self,tar_theta_plane,prev_angs,gamma=0):
        """Levenberg-Marquardt solve for the planar joint angles.

        Returns the (n-1) joint-relative angles minimizing compute_f.
        lamb is the LM damping parameter: shrunk by beta1 when a step
        reduces the residual, grown by beta2 when it does not.
        """
        n = len(self.segments)
        x = np.zeros(n-1)
        f = self.compute_f(x,tar_theta_plane,prev_angs,gamma)
        #print("f:",f)
        lamb = 0.1
        # More iterations better results
        for i in range(100):
            delta_x = self.compute_delta_x(x,lamb,tar_theta_plane,prev_angs,gamma)
            # Converged: update step is negligible.
            if np.linalg.norm(delta_x) < 1e-6:
                #print("i:",i)
                break
            x_hat = x - delta_x
            # Check if function value actually decreases
            if np.linalg.norm(self.compute_f(x_hat,tar_theta_plane,prev_angs,gamma)) < np.linalg.norm(self.compute_f(x,tar_theta_plane,prev_angs,gamma)):
                x = x_hat
                lamb = self.beta1*lamb
            else:
                x = x
                lamb = self.beta2*lamb
        return x

    # Computes the actual function value that we are minimizing at the current step
    def compute_f(self,x,des_pt,prev_angs,gamma=0):
        # Residual vector: rows 0-1 are the forward-kinematics position
        # error in the (rho, z) plane; the remaining rows penalize the
        # change from prev_angs, weighted by gamma.
        #
        # NOTE(review): f is sized 2*(n-1), but 2 position rows plus
        # (n-1) angle rows only add up to 2*(n-1) when n == 3 (i.e. two
        # non-base segments) -- confirm before using more segments.
        n = len(self.segments)
        f = np.zeros(2*(n-1))
        # Base segment contributes only along rho (it is flattened into the plane).
        f[0] = self.segments[0].len
        ang = 0
        for i in range(1,n):
            # Absolute angle of segment i = cumulative sum of joint angles.
            ang = np.sum(x[0:i])
            f[0] = f[0] + self.segments[i].len * np.cos(ang)
            f[1] = f[1] + self.segments[i].len * np.sin(ang)
        f[0] = f[0] - des_pt[0]
        f[1] = f[1] - des_pt[1]
        f[2:] = gamma*(x - prev_angs)
        return f

    # Computes update at current step
    def compute_delta_x(self,x,lamb,des_pt,prev_angs,gamma=0):
        # Gamma controls the relative weight of ensuring small angle changes
        #
        # Builds the Jacobian of compute_f and returns the damped
        # Gauss-Newton step: (J^T J + lamb*I)^-1 J^T f.
        #
        # NOTE(review): rows 2 and 3 of the Jacobian are written with
        # hard-coded indices, which matches the residual layout only for
        # n == 3 segments -- generalize before adding segments.
        n = len(self.segments)
        jab = np.zeros((2*(n-1),n-1))
        for i in range(1,n):
            # jab_i is the ith row of the Jacobian matrix
            jab_1 = 0
            jab_2 = 0
            ang = 0
            # Joint i affects every segment from i outward.
            for j in range(i,n):
                ang = np.sum(x[0:j])
                jab_1 = jab_1 + self.segments[j].len * -1 * np.sin(ang)
                jab_2 = jab_2 + self.segments[j].len * np.cos(ang)
            jab[0,i-1] = jab_1
            jab[1,i-1] = jab_2
        # Angle-change penalty rows: sqrt(gamma) on the diagonal, so that
        # J^T J contributes gamma per joint.
        jab[2,0] = np.sqrt(gamma)
        jab[2,1] = 0
        jab[3,0] = 0
        jab[3,1] = np.sqrt(gamma)
        a = np.matmul( np.transpose(jab) , jab)
        b = a + lamb*np.identity(n-1)
        c = np.linalg.inv(b)
        d = np.matmul( c, np.transpose(jab))
        delta_x = np.matmul( d , self.compute_f(x,des_pt,prev_angs,gamma))
        return delta_x

    # Returns n+1 points
    def get_3D_endpoints(self):
        """Return the 3D endpoints of all segments in world coordinates.

        Planar (rho, z) tips are lifted into 3D using the base segment's
        azimuth, then rotated by base_angle and shifted to base_location.
        """
        results = [np.array([0,0,0])]
        # Base segment lies in the horizontal plane (z = 0).
        results.append(np.array([self.segments[0].get_tip()[0],self.segments[0].get_tip()[1],0]))
        i = 2
        while i <= len(self.segments):
            # Planar tip x-coordinate is the radius rho; spread it into
            # world x/y via the base azimuth; planar y is world z.
            x = self.segments[i-1].get_tip()[0]*np.cos(self.segments[0].get_angle())
            y = self.segments[i-1].get_tip()[0]*np.sin(self.segments[0].get_angle())
            z = self.segments[i-1].get_tip()[1]
            results.append(np.array([x,y,z]))
            i = i+1
        final_results = []
        for r in results:
            # Rotate about z by base_angle, then translate to the mount point.
            r = (np.matmul(r,np.array([[np.cos(self.base_angle),-1*np.sin(self.base_angle),0],[np.sin(self.base_angle),np.cos(self.base_angle),0],[0,0,1]])))
            r = r+np.array(self.base_location)
            final_results.append(r)
        return final_results

    def get_angles(self,mode='servo'):
        """Return segment angles as a numpy array.

        mode='servo' -- angles relative to the previous joint (for servos)
        mode='sim'   -- angles relative to horizontal (for simulation)
        Any other mode prints an error and exits.
        """
        results = []
        for s in self.segments:
            results.append(s.get_angle())
        if mode == 'servo':
            # If values are sent to servo, the angles are with reference to each joint
            # NOTE(review): the in-place subtraction reads the already
            # modified results[i-1] for i >= 3; this is only a faithful
            # absolute->relative conversion for chains of 3 segments --
            # confirm before using longer chains.
            for i in range(2,len(results)):
                results[i] = results[i] - results[i-1]
        elif mode == 'sim':
            # If values are for simulation output, we can show angles with reference to horizontal
            pass
        else:
            print("Unknown angle mode")
            exit()
        return np.array(results)

    def get_angles_deg(self,mode='servo'):
        # Same as get_angles, converted from radians to degrees.
        ang = self.get_angles(mode)
        return ang/np.pi*180

    def set_forward_angle(self,new_ang):
        # Rotate the gait trajectory so the leg walks toward new_ang
        # (absolute); implemented as a relative turn from the current angle.
        ang_change = new_ang - self.forward_angle
        self.turn(ang_change)

    def turn(self,ang):
        # Rotate the waypoint trajectory by ang about the vertical axis
        # through the first waypoint's x position: translate that point to
        # the origin, rotate, translate back.  All edits are in place.
        ani_x = self.positions[0,0]
        ct.translate(self.positions,-1*ani_x,0,0)
        ct.rotate(self.positions,ang)
        ct.translate(self.positions,ani_x,0,0)
        self.forward_angle = self.forward_angle + ang

    def step(self,force_step=None):
        """Advance the leg to the next gait waypoint (or jump to one).

        With force_step=None the leg follows the current waypoint and the
        counter advances; otherwise it jumps to waypoint index force_step.
        The counter wraps back to 0 past max_step.
        """
        #print(self.ID)
        if (force_step == None):
            self.follow_lsq(self.positions[self.step_count,:])
            self.step_count = self.step_count + 1;
        else:
            if (force_step > self.max_step):
                print("Force step exceed maximum")
            else:
                self.follow_lsq(self.positions[force_step,:])
                self.step_count = force_step + 1
        # Wrap around to the start of the gait cycle.
        if (self.step_count > self.max_step):
            self.step_count = 0

    def reverse(self):
        # Flip the walking direction: rotate the trajectory by pi, negate
        # the direction flag, and resume from the waypoint closest to the
        # current foot position in the turned trajectory.
        current_point = np.array(self.positions[self.step_count,:])
        self.turn(self.forward*np.pi)
        self.forward = self.forward * -1
        self.step_count = self.find_ind(current_point)

    def find_ind(self,current_point):
        # Index of the trajectory waypoint nearest (Euclidean) to current_point.
        norms = []
        for k in range(0,self.max_step+1):
            dif = self.positions[k,:] - current_point
            norms.append(np.linalg.norm(dif))
        norms = np.array(norms)
        return np.argmin(norms)