#!/usr/bin/env python
import scipy as sp
from sys import argv
from config import *

if len(argv) != 2:
    print 'Usage: ' + argv[0] + ' [run file]'
    exit(-1)

fd = open(argv[1], 'r')

# TODO: cleaner header handling
N = int(fd.readline()[4:-1])
beta = float(fd.readline()[7:-1])
print 'N = {:d}'.format(N)
print 'beta = {:f}'.format(beta)

up, updown = sp.transpose(sp.loadtxt(fd))
energy = 2*(updown - N)

import pylab as plt

plt.figure()
plt.plot(up)
plt.ylabel(r'$N_+$')
plt.xlabel('Step')
plt.savefig(argv[1][:-len(output_suffix)] + 'up.png')

plt.figure()
plt.plot(energy)
plt.ylabel('Energy')
plt.xlabel('Step')
plt.savefig(argv[1][:-len(output_suffix)] + 'energy.png')

plt.show()
from socket import *
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QListView, QMessageBox
from PyQt5.QtCore import QStringListModel
from ftp_login import Ui_login
from ftp_main import Ui_main
from PyQt5.QtWidgets import QFileDialog, QInputDialog
import sys
import time
import os
import re
import operator

HOST = '127.0.0.1'
PORT = 6789  # the standard FTP control port is 21; our own server listens on 6789
BUFFSIZE = 8192
ADDR = (HOST, PORT)
# running index into the list widget
index = 0
# create the control socket
server_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)
# connect to the server
server_socket.connect(ADDR)
# data socket
client_socket = None
# listening socket used in PORT (active) mode
port_socket = None
connect_info = server_socket.recv(BUFFSIZE)


class login_window(QtWidgets.QWidget, Ui_login):
    def __init__(self):
        super(login_window, self).__init__()
        self.setupUi_login(self)
        self.connect_line.setText(connect_info.decode('utf-8'))
        # connect the OK button to confirm()
        self.login_btn.clicked.connect(self.confirm)

    def confirm(self):
        try:
            # read the user name and send "USER username" to the server
            user_name = self.user_line.text()
            command = 'USER ' + user_name + '\r\n'
            server_socket.send(command.encode())
            info_back = server_socket.recv(BUFFSIZE).decode('utf-8')
            self.connect_line.setText(info_back)
            # extract the numeric reply code with a regex
            match = re.search(r'\d+', info_back)
            if operator.eq(match[0], '331'):
                # hide the user-name input, show the password input, swap buttons
                self.user_label.setVisible(False)
                self.user_line.setVisible(False)
                self.pass_label.setVisible(True)
                self.pass_line.setVisible(True)
                self.login_btn.setVisible(False)
                self.login_btn_2.setVisible(True)
                # connect the finish button to finish()
                self.login_btn_2.clicked.connect(self.finish)
        except Exception as e:
            print(e)

    def finish(self):
        try:
            # read the password and send "PASS password" to the server
            pass_word = self.pass_line.text()
            command = 'PASS ' + pass_word + '\r\n'
            server_socket.send(command.encode())
            info_back = server_socket.recv(BUFFSIZE).decode('utf-8')
            self.connect_line.setText(info_back)
            # extract the numeric reply code with a regex
            match = re.search(r'\d+', info_back)
            if operator.eq(match[0], '230'):
                # show both inputs again and swap buttons
                self.user_label.setGeometry(QtCore.QRect(50, 137, 58, 18))
                self.user_line.setGeometry(QtCore.QRect(140, 130, 180, 32))
                self.pass_label.setGeometry(QtCore.QRect(50, 197, 58, 18))
                self.pass_line.setGeometry(QtCore.QRect(140, 190, 180, 32))
                self.user_label.setVisible(True)
                self.user_line.setVisible(True)
                self.login_btn_2.setVisible(False)
                self.login_btn_3.setVisible(True)
        except Exception as e:
            print(e)


class main_window(QtWidgets.QMainWindow, Ui_main):
    def __init__(self):
        super(main_window, self).__init__()
        self.setupUi_main(self)
        # set ContextMenuPolicy to Qt.CustomContextMenu, otherwise the
        # customContextMenuRequested signal cannot be used
        self.list.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        # show a welcome message
        info_back = 'Welcome to FTP Client by tuyc17 !'
        self.info_line.setText(info_back)
        # remember the current working directory of the program
        self.cwd = os.getcwd()
        # wire the buttons to their handlers
        self.source_btn.clicked.connect(self.update_list)
        self.scan_btn.clicked.connect(self.select_file)
        self.scan_btn_2.clicked.connect(self.select_dir)
        self.upload_btn.clicked.connect(self.upload_file)
        self.new_btn.clicked.connect(self.make_dir)
        self.previous_btn.clicked.connect(self.go_previous)
        self.restart_small_btn.clicked.connect(self.restart_small)
        # create the QMenu (context menu) actions
        self.list.customContextMenuRequested.connect(self.show_menu)
        self.list.contextMenu = QtWidgets.QMenu(self)
        self.list.ENTER = self.list.contextMenu.addAction('打开文件夹')
        self.list.DELETE = self.list.contextMenu.addAction('删除空文件夹')
        self.list.DOWNLOAD = self.list.contextMenu.addAction('下载')
        self.list.RENAME = self.list.contextMenu.addAction('重命名')
        # bind the context-menu actions
        self.list.ENTER.triggered.connect(self.enter)
        self.list.DELETE.triggered.connect(self.delete_file)
        self.list.DOWNLOAD.triggered.connect(self.download_file)
        self.list.RENAME.triggered.connect(self.rename_file)
        # bind the menu-bar action
        self.actionAbout.triggered.connect(self.show_info)

    # About dialog
    def show_info(self):
        QMessageBox.information(self, 'About...', 'By Tuyc17.\n 2019.10.30')

    # PASV / PORT mode selection
    def set_port(self):
        global client_socket
        global port_socket
        try:
            mode = self.combo_box.currentText()
            if client_socket is not None:
                client_socket.close()
                client_socket = None  # possibly problematic
            if operator.eq(mode, 'Passive'):
                client_socket = socket(AF_INET, SOCK_STREAM, 0)
                command = 'PASV\r\n'
                server_socket.send(command.encode())
                info_back = server_socket.recv(BUFFSIZE).decode('utf-8')
                # regex: pull out the six comma-separated numbers of the PASV reply
                match = re.search(r'\d+(?:,\d+){5}', info_back)
                nums = match[0].split(',')
                client_host = nums[0] + '.' + nums[1] + '.' + nums[2] + '.' + nums[3]
                client_port = int(nums[4]) * 256 + int(nums[5])
                client_addr = (client_host, client_port)
                client_socket.connect(client_addr)
                # self.info_line.setText(info_back)
            elif operator.eq(mode, 'Port'):
                port_socket = socket(AF_INET, SOCK_STREAM, 0)
                client_host = server_socket.getsockname()[0]
                client_port = 0
                client_addr = (client_host, client_port)
                port_socket.bind(client_addr)
                port_socket.listen(1)
                client_sockname = port_socket.getsockname()
                tmp_str_1 = str(client_sockname[1] // 256)
                tmp_str_2 = str(client_sockname[1] % 256)
                tmp_host = client_sockname[0]
                tmp_host = tmp_host.replace('.', ',')
                command = 'PORT ' + tmp_host + ',' + tmp_str_1 + ',' + tmp_str_2 + '\r\n'
                server_socket.send(command.encode())
                # do not accept() on port_socket yet; that happens after the data command is sent
                info_back = server_socket.recv(BUFFSIZE).decode('utf-8')
                info_back = info_back + tmp_host + ',' + tmp_str_1 + ',' + tmp_str_2
                # self.info_line.setText(info_back)
        except Exception as e:
            print(e)

    # refresh-list button; also issues SYST and TYPE I
    def update_list(self):
        global index
        global client_socket
        global port_socket
        # global server_socket
        try:
            # send SYST and TYPE I
            command = 'SYST\r\n'
            server_socket.send(command.encode())
            info_back = server_socket.recv(BUFFSIZE).decode('utf-8')
            command = 'TYPE I\r\n'
            server_socket.send(command.encode())
            info_back = server_socket.recv(BUFFSIZE).decode('utf-8')
            # send PWD to get the server-side working directory
            command = 'PWD\r\n'
            server_socket.send(command.encode())
            info_back = server_socket.recv(BUFFSIZE).decode('utf-8')
            # regex: grab the text between the double quotes
            match = re.search(r'".+"', info_back)
            info_back = match[0].replace('"', '')
            self.source_line.setText(info_back)
            # issue LIST, drain the control socket, then read file names from the data socket
            self.set_port()
            command = 'LIST\r\n'
            server_socket.send(command.encode())
            # after sending LIST, accept on port_socket (PORT mode) and reset it to None
            if port_socket is not None:
                client_socket = port_socket.accept()[0]
                port_socket.close()
                port_socket = None
            info_back = server_socket.recv(BUFFSIZE).decode('utf-8')
            time.sleep(0.1)
            # self.info_line.setText(info_back)
            # receive the file listing
            info_back = client_socket.recv(BUFFSIZE).decode('utf-8')
            # clear the list widget
            self.list.clear()
            # split out the file names
            lists = info_back.split('\n')
            for cheese in lists:
                infos = cheese.split(' ')
                item = QtWidgets.QListWidgetItem()
                self.list.addItem(item)
                index = index + 1
                item.setText(QtCore.QCoreApplication.translate("main_page", infos[len(infos) - 1]))
            # when connected to the local server, drop the first spurious entry
            if PORT == 6789:
                self.list.takeItem(0)
            # close the data socket
            client_socket.close()
            info_back = server_socket.recv(BUFFSIZE).decode('utf-8')
            self.info_line.setText(info_back)
        except Exception as e:
            print(e)

    # file-picker button (scan_btn)
    def select_file(self):
        file_choose, filetype = QFileDialog.getOpenFileName(self,
                                                            "选取文件",
                                                            self.cwd,  # starting path
                                                            "All Files (*);;Text Files (*.txt)")  # extension filters, separated by double semicolons
        if file_choose == "":
            return
        self.upload_line.setText(file_choose)
        self.update_list()

    # directory-picker button (scan_btn_2)
    def select_dir(self):
        dir_choose = QFileDialog.getExistingDirectory(self,
                                                      "选取文件夹",
                                                      self.cwd)  # starting path
        if dir_choose == "":
            return
        self.target_line.setText(dir_choose)
        self.update_list()

    # show the context menu on right-click (QMenu signal handler)
    def show_menu(self, pos):
        self.list.contextMenu.exec_(QtGui.QCursor.pos())

    # download button (RETR)
    def download_file(self):
        global client_socket
        global port_socket
        try:
            self.set_port()
            file_name = self.list.currentItem().text()
            command = 'RETR ' + file_name + '\r\n'
            # send the command
            server_socket.send(command.encode())
            # after sending the command, accept on port_socket (PORT mode) and reset it to None
            if port_socket is not None:
                client_socket = port_socket.accept()[0]
                port_socket.close()
                port_socket = None
            target_path = self.target_line.text()
            target = target_path + '/' + file_name
            down_file = open(target, 'w')
            while True:
                # read file data from the data stream
                str = client_socket.recv(BUFFSIZE).decode('utf-8')
                if len(str) == 0:
                    break
                down_file.write(str)
            down_file.close()
            client_socket.close()
            time.sleep(0.1)
            info_back = server_socket.recv(BUFFSIZE).decode('utf-8')
            self.info_line.setText(info_back)
            self.update_list()
        except Exception as e:
            print(e)

    # upload button (STOR)
    def upload_file(self):
        global client_socket
        global port_socket
        try:
            self.set_port()
            file_path = self.upload_line.text()
            file_names = file_path.split('/')
            file_name = file_names[len(file_names) - 1]
            command = 'STOR ' + file_name + '\r\n'
            # send the command
            server_socket.send(command.encode())
            # after sending the command, accept on port_socket (PORT mode) and reset it to None
            if port_socket is not None:
                client_socket = port_socket.accept()[0]
                port_socket.close()
                port_socket = None
            # stream the file into the data socket
            # open the local file at file_path
            up_file = open(file_path, 'rb')
            while True:
                str = up_file.read(BUFFSIZE)
                if len(str) == 0:
                    break
                # send to the data socket
                client_socket.send(str)
            up_file.close()
            client_socket.close()
            time.sleep(0.1)
            info_back = server_socket.recv(BUFFSIZE).decode('utf-8')
            self.info_line.setText(info_back)
            self.update_list()
        except Exception as e:
            print(e)

    # rename a file
    def rename_file(self):
        try:
            origin_name = self.list.currentItem().text()
            # tell the server which file to rename
            command = 'RNFR ' + origin_name + '\r\n'
            server_socket.send(command.encode())
            info_back = server_socket.recv(BUFFSIZE).decode('utf-8')
            self.info_line.setText(info_back)
            self.rename_2()
        except Exception as e:
            print(e)

    def rename_2(self):
        try:
            # prompt for the new file name
            new_name_str = QInputDialog.getText(self, '新文件名输入', '请输入新文件名:')
            new_name = new_name_str[0]
            command = 'RNTO ' + new_name + '\r\n'
            server_socket.send(command.encode())
            info_back = server_socket.recv(BUFFSIZE).decode('utf-8')
            self.info_line.setText(info_back)
            self.update_list()
        except Exception as e:
            print(e)

    # create a new directory
    def make_dir(self):
        try:
            # prompt for the new directory name
            new_dir_str = QInputDialog.getText(self, '新文件夹名输入', '请输入新文件夹的名称:')
            new_dir = new_dir_str[0]
            command = 'MKD ' + new_dir + '\r\n'
            server_socket.send(command.encode())
            info_back = server_socket.recv(BUFFSIZE).decode('utf-8')
            self.info_line.setText(info_back)
            self.update_list()
        except Exception as e:
            print(e)

    # remove an empty directory
    def delete_file(self):
        try:
            origin_name = self.list.currentItem().text()
            # tell the server which directory to remove
            command = 'RMD ' + origin_name + '\r\n'
            server_socket.send(command.encode())
            info_back = server_socket.recv(BUFFSIZE).decode('utf-8')
            self.info_line.setText(info_back)
            self.update_list()
        except Exception as e:
            print(e)

    # enter a directory
    def enter(self):
        try:
            # current server-side path
            current_path = self.source_line.text()
            # current_path = current_path[1:]
            # name of the currently selected directory
            current_folder = self.list.currentItem().text()
            # build the new path
            new_path = current_path + '/' + current_folder
            command = 'CWD ' + new_path + '\r\n'
            server_socket.send(command.encode())
            info_back = server_socket.recv(BUFFSIZE).decode('utf-8')
            self.info_line.setText(info_back)
            self.update_list()
        except Exception as e:
            print(e)

    # go to the parent directory
    def go_previous(self):
        try:
            # current server-side path
            current_path = self.source_line.text()
            current_parts = current_path.split('/')
            current_parts = current_parts[1:len(current_parts) - 1]
            previous_path = ''
            for index in range(len(current_parts)):
                previous_path = previous_path + '/' + current_parts[index]
            command = 'CWD ' + previous_path + '\r\n'
            server_socket.send(command.encode())
            info_back = server_socket.recv(BUFFSIZE).decode('utf-8')
            self.info_line.setText(info_back)
            self.update_list()
        except Exception as e:
            print(e)

    # resume a transfer (REST)
    def restart_small(self):
        try:
            command = 'REST 5\r\n'
            server_socket.send(command.encode())
            info_back = server_socket.recv(BUFFSIZE).decode('utf-8')
            self.download_file()
        except Exception as e:
            print(e)


if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    # create the windows
    login_page = login_window()
    main_page = main_window()
    # wire the final login button to showing the main window and closing the login window
    btn = login_page.login_btn_3
    btn.clicked.connect(main_page.show)
    btn.clicked.connect(login_page.close)
    # show the login window
    login_page.show()
    sys.exit(app.exec_())
#!/usr/bin/env python
# creator.py
# TODO: initialize default values from tcu_params object
# TODO: change range of frequency spin box depending on mode setting
# TODO: add content to the info section, perhaps have help files with html
# TODO: delete/ignore empty rows of table when export()
# TODO: check rounding of floats when extracting from spinbox
# TODO: verify pulse_params_reg format (is pri 1x32 or 2x16)

import argparse
import sys
import datetime

from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QTableWidget, QTableWidgetItem, QFileDialog, QMessageBox

from creator_gui import Ui_MainWindow
from parser import TCUParams

VERSION = '1.1.0'


class Creator(Ui_MainWindow):
    """docstring for TCUPulseParamsGUILogic."""

    def __init__(self, tcu_params, window):
        self.main_window = Ui_MainWindow.__init__(self)
        self.setupUi(window)
        window.setWindowTitle('TCU Parameter Editor v' + VERSION)
        self.tcu_params = tcu_params
        self.actionQuit.triggered.connect(sys.exit)
        self.actionOpen.triggered.connect(self.open)
        self.actionExport.triggered.connect(self.export_to)
        self.actionInstructions.triggered.connect(self.display_help)
        self.actionAbout.triggered.connect(self.display_about)
        self.button_export.clicked.connect(self.export)
        self.button_export_close.clicked.connect(self.export_close)
        self.button_add_pulse.clicked.connect(self.add_pulse)
        self.button_add_pulse.clicked.connect(self.update_metadata)
        self.button_edit_pulse.clicked.connect(self.edit_pulse)
        self.button_edit_pulse.clicked.connect(self.update_metadata)
        self.button_remove_pulse.clicked.connect(self.remove_pulse)
        # self.spin_num_pulses.valueChanged.connect(self.update_table)
        # self.spin_num_pulses.valueChanged.connect(self.update_metadata)
        self.spin_num_repeats.valueChanged.connect(self.update_metadata)
        self.spin_samples_per_pri.valueChanged.connect(self.update_metadata)
        self.combo_mode.activated[str].connect(self.update_frequency_band)
        self.table_pulse_params.itemSelectionChanged.connect(self.update_selection)
        # # def select_row(self):
        # #     items = self.table_pulse_params.selectedItems()

        # populate fields with existing headerfile data
        self.spin_clk_period.setProperty("value", self.tcu_params.clk_period_ns)
        # self.spin_num_pulses.setProperty("value", self.tcu_params.num_pulses)
        self.spin_num_repeats.setProperty("value", self.tcu_params.num_repeats)
        self.spin_pri_pulse_width.setProperty("value", self.tcu_params.pri_pulse_width)
        self.spin_prepulse.setProperty("value", self.tcu_params.pre_pulse)
        self.spin_x_amp_delay.setProperty("value", self.tcu_params.x_amp_delay)
        self.spin_l_amp_delay.setProperty("value", self.tcu_params.l_amp_delay)
        self.spin_rex_delay.setProperty("value", self.tcu_params.rex_delay)
        self.spin_dac_delay.setProperty("value", self.tcu_params.dac_delay)
        self.spin_adc_delay.setProperty("value", self.tcu_params.adc_delay)
        self.spin_samples_per_pri.setProperty("value", self.tcu_params.samples_per_pri)
        self.combo_waveform_index.setProperty("currentIndex", self.tcu_params.waveform_index - 1)
        self.update_table()

        # disabling the RF pulse width field in the pulses editor selection
        # this will be used for future NeXtRAD experiments capable of waveforms
        # with varying pulse widths. for now, this value is the same as the
        # WAVEFORM_INDEX value
        self.spin_rf_pulse_width.setProperty("enabled", False)
        self.spin_rf_pulse_width.setToolTip('Multiple pulse-widths not yet supported, please use \'Waveform Index\' parameter')
        self.combo_waveform_index.currentTextChanged.connect(self.pulse_width_update)

    def pulse_width_update(self):
        pulse_widths_list = [0.5, 1.0, 3.0, 5.0, 10.0, 15.0, 20.0,
                             0.5, 1.0, 3.0, 5.0, 10.0, 15.0, 20.0]
        pulse_width = pulse_widths_list[self.combo_waveform_index.currentIndex()]
        self.spin_rf_pulse_width.setProperty("value", pulse_width)
        for pulse in self.tcu_params.pulses:
            pulse['pulse_width'] = pulse_width
        self.update_table()

    def _get_filename_from_dialog(self):
        filename = QFileDialog.getOpenFileName(parent=self.main_window,
                                               caption='Open file',
                                               directory='./',
                                               filter='INI Files (*.ini);;All Files (*)')
        return filename[0]

    def open(self):
        filename = self._get_filename_from_dialog()

    def export_to(self):
        filename = self._get_filename_from_dialog()

    def display_help(self):
        self.instructions_window = QtWidgets.QMainWindow()
        widget = QtWidgets.QWidget(self.instructions_window)
        gridlayout = QtWidgets.QGridLayout()
        widget.setLayout(gridlayout)
        widget.setWindowTitle('Instructions')
        gridlayout.addWidget(QtWidgets.QLabel(widget, text='https://github.com/nextrad/tcu_software'))
        self.instructions_window.show()

    def display_about(self):
        QMessageBox.about(self.main_window, 'About', 'TCU Parameter Editor\nv1.1.0\nBrad Kahn')

    def export(self):
        # TODO: verify captured datatypes are ints / doubles
        # TODO: input validation and verification
        # retrieve general params
        self.tcu_params.clk_period_ns = self.spin_clk_period.value()
        # self.tcu_params.num_pulses = self.spin_num_pulses.value()
        self.tcu_params.num_repeats = self.spin_num_repeats.value()
        self.tcu_params.pri_pulse_width = self.spin_pri_pulse_width.value()
        self.tcu_params.pre_pulse = self.spin_prepulse.value()
        self.tcu_params.x_amp_delay = self.spin_x_amp_delay.value()
        self.tcu_params.l_amp_delay = self.spin_l_amp_delay.value()
        self.tcu_params.rex_delay = self.spin_rex_delay.value()
        self.tcu_params.dac_delay = self.spin_dac_delay.value()
        self.tcu_params.adc_delay = self.spin_adc_delay.value()
        self.tcu_params.samples_per_pri = self.spin_samples_per_pri.value()
        self.tcu_params.waveform_index = self.combo_waveform_index.currentIndex() + 1
        # retrieve pulse params from table
        # TODO ...
        for row in range(self.table_pulse_params.rowCount()):
            pulse_width = eval(self.table_pulse_params.item(row, 0).text())
            pri = eval(self.table_pulse_params.item(row, 1).text())
            pol_mode = eval(self.table_pulse_params.item(row, 2).text())
            frequency = eval(self.table_pulse_params.item(row, 3).text())
        self.tcu_params.export()
        print("exported")

    def export_close(self):
        self.export()
        window.close()

    # TODO: this needs to be fixed, what gets updated first? table or object?
    def add_pulse(self):
        # pulse = PulseParameters(pulse_width=self.spin_rf_pulse_width.value(),
        #                         pri=self.spin_pri.value(),
        #                         pol_mode=self.combo_mode.currentIndex(),
        #                         frequency=self.spin_frequency.value())
        pulse = {'pulse_width': self.spin_rf_pulse_width.value(),
                 'pri': self.spin_pri.value(),
                 'pol_mode': self.combo_mode.currentIndex(),
                 'frequency': self.spin_frequency.value()}
        self.tcu_params.pulses.append(pulse)
        self.update_table()

    def _get_selected_rows(self):
        index_list = []
        for model_index in self.table_pulse_params.selectionModel().selectedRows():
            index = QtCore.QPersistentModelIndex(model_index)
            index_list.append(index)
        return index_list

    def edit_pulse(self):
        index_list = self._get_selected_rows()
        for index in index_list:
            print('editing pulse #' + str(index.row() + 1))
            pulse = {'pulse_width': self.spin_rf_pulse_width.value(),
                     'pri': self.spin_pri.value(),
                     'pol_mode': self.combo_mode.currentIndex(),
                     'frequency': self.spin_frequency.value()}
            self.tcu_params.pulses[index.row()] = pulse
        self.update_table()

    def remove_pulse(self):
        index_list = self._get_selected_rows()
        for index in index_list:
            print('deleting pulse #' + str(index.row() + 1))
            del self.tcu_params.pulses[index.row()]
        self.update_table()

    def update_table(self):
        self.table_pulse_params.setRowCount(len(self.tcu_params.pulses))
        for index, pulse_param in enumerate(self.tcu_params.pulses):
            self.table_pulse_params.setItem(index, 0, QTableWidgetItem(str(pulse_param['pulse_width'])))
            self.table_pulse_params.setItem(index, 1, QTableWidgetItem(str(pulse_param['pri'])))
            self.table_pulse_params.setItem(index, 2, QTableWidgetItem(str(pulse_param['pol_mode'])))
            self.table_pulse_params.setItem(index, 3, QTableWidgetItem(str(pulse_param['frequency'])))
        if len(self.tcu_params.pulses) > 0:
            # self.table_pulse_params.selectRow(len(self.tcu_params.pulses) - 1)
            self.button_export.setEnabled(True)
            self.button_export_close.setEnabled(True)
        else:
            self.button_export.setEnabled(False)
            self.button_export_close.setEnabled(False)
        if len(self.tcu_params.pulses) < 32:
            self.button_add_pulse.setEnabled(True)
        else:
            self.button_add_pulse.setEnabled(False)
        index_list = self._get_selected_rows()
        if len(index_list) > 0:
            self.button_edit_pulse.setEnabled(True)
            self.button_remove_pulse.setEnabled(True)
            self.label_pulse_index.setText("Pulse " + str(index_list[0].row() + 1) + " of " + str(len(self.tcu_params.pulses)))
        else:
            self.button_edit_pulse.setEnabled(False)
            self.button_remove_pulse.setEnabled(False)
            self.label_pulse_index.setText("No pulse selected")

    def update_selection(self):
        index_list = self._get_selected_rows()
        if len(index_list) > 0:
            self.label_pulse_index.setText("Pulse " + str(index_list[0].row() + 1) + " of " + str(len(self.tcu_params.pulses)))
            selected_pulse = self.tcu_params.pulses[index_list[0].row()]
            self.spin_rf_pulse_width.setProperty("value", selected_pulse['pulse_width'])
            self.spin_pri.setProperty("value", selected_pulse['pri'])
            self.combo_mode.setCurrentIndex(selected_pulse['pol_mode'])
            self.update_frequency_band()
            self.spin_frequency.setProperty("value", selected_pulse['frequency'])
            self.button_edit_pulse.setEnabled(True)
            self.button_remove_pulse.setEnabled(True)
            self.label_pulse_index.setText("Pulse " + str(index_list[0].row() + 1) + " of " + str(len(self.tcu_params.pulses)))
        else:
            self.button_edit_pulse.setEnabled(False)
            self.button_remove_pulse.setEnabled(False)
            self.label_pulse_index.setText("No pulse selected")

    def update_frequency_band(self):
        # TODO: remove magic numbers
        if self.combo_mode.currentIndex() in range(4):
            self.spin_frequency.setRange(1235, 1365)
        else:
            self.spin_frequency.setRange(8500, 9200)

    def update_metadata(self):
        samples_per_pri = self.spin_samples_per_pri.value()
        num_pulses = len(self.tcu_params.pulses)
        num_repeats = self.spin_num_repeats.value()
        time_block_microseconds = 0
        for pulse in self.tcu_params.pulses:
            time_block_microseconds += pulse['pri']
        time_experiment_microseconds = time_block_microseconds * num_repeats
        time_experiment_seconds = time_experiment_microseconds / 1000000
        num_pris = num_pulses * num_repeats
        num_samples = num_pris * samples_per_pri
        experiment_size_bits = num_samples * 32
        experiment_size_bytes = experiment_size_bits // 8
        experiment_megabytes = experiment_size_bytes // (1024*1024)
        # print('samples_per_pri = ' + str(samples_per_pri))
        # print('num_pulses = ' + str(num_pulses))
        # print('num_repeats = ' + str(num_repeats))
        # print('time_block = ' + str(time_block_microseconds))
        # print('time_experiment_microseconds = ' + str(time_experiment_microseconds))
        # print('time_experiment_seconds = ' + str(time_experiment_seconds))
        # time_experiment_minutes = time_experiment_seconds/60
        # print('time_experiment_minutes = ' + str(time_experiment_minutes))
        # time_experiment_hours = time_experiment_minutes/60
        # print('time_experiment_hours = ' + str(time_experiment_hours))
        # TODO: clip values over 24 hrs
        if time_experiment_seconds < 86400:
            self.lcdNumber_time.display(str(datetime.timedelta(seconds=int(time_experiment_seconds))))
        else:
            self.lcdNumber_time.display('large')
        self.lcdNumber_size.display(str(experiment_megabytes))


if __name__ == '__main__':
    # -------------------------------------------------------------------------
    # PARSE COMMAND LINE ARGUMENTS
    # -------------------------------------------------------------------------
    clargparser = argparse.ArgumentParser(usage='creator.py [-f FILE]',
                                          description='Experiment creator for the '
                                                      'NeXtRAD Timing Control Unit')
    clargparser.add_argument('-f', '--file',
                             help='header file default [./NeXtRAD.ini]',
                             default='./NeXtRAD.ini')
    clargparser.add_argument('-o', '--outputfile',
                             help='output file for exported params [./PulseParameters.ini]',
                             default='./PulseParameters.ini')
    args = clargparser.parse_args()
    HEADER_FILE = args.file
    OUTPUT_FILE = args.outputfile

    tcu_params = TCUParams(HEADER_FILE, OUTPUT_FILE)
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    program = Creator(tcu_params, window)
    window.show()
    sys.exit(app.exec_())
def hasTwoThreeDigitFactors(number):
    for i in range(999, 100, -1):
        divisor = number // i
        if number % i == 0 and len(str(divisor)) == 3:
            return True
    return False


def isPalindrome(number):
    numString = str(number)
    length = len(numString)
    j = length - 1
    for i in range(0, length // 2):
        if numString[i] != numString[j]:
            return False
        j = j - 1
    return True


def findLargestProductPalindrome(first, second):
    while True:
        product = first * second
        print('first num %d ' % first)
        print('second num %d ' % second)
        print('Product %d ' % product)
        if isPalindrome(product):
            print('Largest palindrome: ' + str(product) + ' by multiplying ' + str(first) + ' and ' + str(second))
            return
        if first < second:
            second = second - 1
        if first == second:
            first = first - 1


def getFirstPalindrome(number):
    while True:
        if isPalindrome(number):
            return number
        number = number - 1


def getNextPalindrome(prevPalindrome):
    strPal = str(prevPalindrome)
    strFirstThreeDigi = strPal[0] + strPal[1] + strPal[2]
    firstThreeDigi = int(strFirstThreeDigi)
    firstThreeDigi = firstThreeDigi - 1
    strFirstThreeDigi = str(firstThreeDigi)
    strPal = strFirstThreeDigi + strFirstThreeDigi[2] + strFirstThreeDigi[1] + strFirstThreeDigi[0]
    return int(strPal)


def findLargestPalindrome(max):
    pal = getFirstPalindrome(max)
    while True:
        if hasTwoThreeDigitFactors(pal):
            return pal
        pal = getNextPalindrome(pal)


largest_num = 999 * 999
print("Largest Palindrome with two 3 digit factors is : " + str(findLargestPalindrome(largest_num)))
from Tkinter import *


class Graph:
    def __init__(self):
        self.nodes = {}  # Nodes are stored as a dict with their ids as keys
        self.endPoints = []
        self.startPoints = []
        self.discovered = []
        self.paths = []

    # Check if there's a two-way edge between two nodes
    def twoWay(self, x, y):
        return ((y in self.nodes[x]) and (x in self.nodes[y]))

    # Create a graph given a 2d matrix
    def from_matrix(self, matrix):
        self.matrix = matrix
        for rowI, row in enumerate(matrix):
            for colI, val in enumerate(row):
                # If cell is empty, do nothing
                if val == -1:
                    pass
                # Otherwise, check all 4 neighbours (if they exist)
                # and add the relevant edge to the graph
                else:
                    if val == 0:
                        self.startPoints.append((rowI, colI))
                    self.nodes[(rowI, colI)] = []
                    for (i, j) in [(0, 1), (1, 0), (0, -1), (-1, 0)]:
                        if (
                            # Column out of bounds
                            rowI + i >= 0 and rowI + i < len(matrix) and
                            # Row out of bounds
                            colI + j >= 0 and colI + j < len(matrix[0]) and
                            # Comparing node with itself
                            not (i == 0 and j == 0)
                        ):
                            if matrix[rowI + i][colI + j] >= val:
                                self.nodes[(rowI, colI)].append((rowI + i, colI + j))

    # Creates a Tk window and draws the matrix and graph representations
    # ToDo: Add an update function so that the window does not have to be destroyed to display changes in the graph
    def draw(self, width=1280, height=720):
        # Window width and height, and matrix box side length in pixels
        box = 100
        fontSize = 36

        # Set up a simple Tk canvas
        root = Tk()
        c = Canvas(root, width=width, height=height)
        c.pack(side="top", fill="both", expand=True)

        # Want to center the screen so we can animate results
        # Gets both half the screen width/height and window width/height
        positionRight = int(root.winfo_screenwidth() / 2 - width / 2)
        positionDown = int(root.winfo_screenheight() / 2 - height / 2)

        # Positions the window in the center of the page.
        root.geometry("+{}+{}".format(positionRight, positionDown))

        # Add headings and divider to our display
        c.create_text(width / 4., height * .1, text="Level Matrix", font=("TkDefaultFont", fontSize))
        c.create_text(3 * width / 4., height * .1, text="Level Graph", font=("TkDefaultFont", fontSize))
        c.create_line(width / 2., 0, width / 2., height)

        # Now we're going to display the input matrix and
        # the generated graph, side by side
        # We want these to sit nicely centered in their halves,
        # so first we're going to calculate how much space we need
        # for our representation
        diagramWidth = box * len(self.matrix[0])
        diagramHeight = box * len(self.matrix)
        padX = (width / 2. - diagramWidth) / 2.
        padY = (height - diagramHeight) / 2.

        # Make the matrix representation
        for rowI, row in enumerate(self.matrix):
            for colI, val in enumerate(row):
                # Calculate rectangle placement and draw it
                rectX = padX + colI * box
                rectY = padY + rowI * box
                c.create_rectangle(rectX, rectY, rectX + box, rectY + box, outline="black")
                # Apply the relevant label, if any
                if val != -1:
                    if val == 0:
                        c.create_text(rectX + box / 2., rectY + box / 2., text='S', font=("TkDefaultFont", fontSize))
                    else:
                        c.create_text(rectX + box / 2., rectY + box / 2., text=str(val), font=("TkDefaultFont", fontSize))

                # Make the graph representation
                # We need to shift across to the other section of the window
                r = box * .1  # Radius of the node dots
                rectX += width / 2. + box / 2. + r / 2.
                rectY += box / 2. + r / 2.
                sep = (box - 2 * r * 1.4)  # This is the length of the edge arrows

                # Draw a dot for each node
                if val != -1:
                    c.create_oval(rectX - r, rectY - r, rectX + r, rectY + r, fill='black')

                # For each edge, draw the relevant arrow
                if (rowI, colI) in self.nodes.keys():
                    for edge in self.nodes[(rowI, colI)]:
                        direction = (edge[0] - rowI, edge[1] - colI)
                        startX = rectX + direction[1] * r * 1.4
                        startY = rectY + direction[0] * r * 1.4
                        endX = startX + direction[1] * sep
                        endY = startY + direction[0] * sep
                        c.create_line(startX, startY, endX, endY, arrow="last", width=3, arrowshape=(16, 20, 6))

        def quit_window(event):
            root.destroy()

        root.bind('<Escape>', quit_window)
        root.mainloop()

    # Rule 1
    def check_for_end(self):
        endPoints = []
        for node in self.nodes:
            # Grab all the edges LEAVING the node in question
            vertices = self.nodes[node]
            # No exits at all
            if (vertices == [] or (
                    # Just one two-way node that therefore must be entrance
                    len(vertices) == 1 and self.twoWay(node, vertices[0])) and
                    self.nodes.values().count(node) == 1):
                endPoints.append(node)
        self.endPoints = endPoints
        return endPoints

    # Rule 2 - If a node with a two-way edge is an end point, the edge can be made into an entry edge.
    # i.e. end points shouldn't have any exit nodes
    def convert_endpoint_vertices(self):
        for node in self.endPoints:
            self.nodes[node] = []

    # Rules 3 - 6
    def check_vertices(self):
        for node in self.nodes:
            # List of nodes that have exits that lead to this node
            enteringNodes = [x for x in self.nodes.keys() if node in self.nodes[x]]

            # Rule 3 - If a node has only a two-way edge and exit edges,
            # the two-way edge can be converted to an entry edge
            if len(enteringNodes) == 1 and self.twoWay(node, enteringNodes[0]):
                self.nodes[node].remove(enteringNodes[0])

            # Rule 4 - If a node has only a two-way edge and entry edges,
            # the two-way edge can be converted to an exit edge
            if len(self.nodes[node]) == 1 and self.twoWay(node, self.nodes[node][0]):
                self.nodes[self.nodes[node][0]].remove(node)

            # Rule 5 - If a node is part of the only path to an end point,
            # that node can't have any other exits
            if self.endPoints != []:
                # First we need to build a list of the path to the end point
                path = []
                currentNode = self.endPoints[0]
                currentEnteringNodes = [x for x in self.nodes.keys() if currentNode in self.nodes[x]]
                while len(currentEnteringNodes) == 1:
                    path.append(currentNode)
                    currentNode = currentEnteringNodes[0]
                    currentEnteringNodes = [x for x in self.nodes.keys() if currentNode in self.nodes[x]]

                # Now we check whether any of this node's exits lead to the path
                onlyValidExit = None
                for exit in self.nodes[node]:
                    if exit in path:
                        onlyValidExit = exit

                # If that's the case, we then remove all the remaining exits.
                if onlyValidExit is not None:
                    self.nodes[node] = [onlyValidExit]

            # Rule 6 - If a node has only one exit, all other entrances
            # to its destination node can be removed.
            if len(self.nodes[node]) == 1:
                # Need to grab all the nodes that lead to the destination in question
                penultimateNodes = [x for x in self.nodes if self.nodes[node][0] in self.nodes[x]]
                # Now iterate over that list and remove the destination from the other nodes vertices
                for pNode in penultimateNodes:
                    if pNode != node:
                        self.nodes[pNode].remove(self.nodes[node][0])

            # Rule 6.5 - If a node has only a single entrance,
            # all other exits from the entering node can be destroyed
            if len(enteringNodes) == 1:
                self.nodes[enteringNodes[0]] = [node]

    def findAllPaths(self, start, dest, visited, path):
        # Mark the start node as visited and add it to the current path
        visited[start] = True
        path.append(start)

        # If there's nowhere to go, add a copy of the path to self.paths
        if start == dest:
            a = [x for x in path]
            self.paths.append(a)
        # Otherwise, recursively visit all available nodes
        else:
            for node in self.nodes[start]:
                if not visited[node]:
                    self.findAllPaths(node, dest, visited, path)

        # Backtrack: Remove the node from the path, and mark it as unvisited
        path.pop()
        visited[start] = False

    def brute_force(self):
        # Dictionary to keep track of which nodes have been visited
        visited = {}
        for node in self.nodes.keys():
            visited[node] = False

        # Find all paths between the start and end point of the graph
        self.findAllPaths(self.startPoints[0], self.endPoints[0], visited, path=[])

        # Select only the solutions that visit every node
        self.solutions = [x for x in self.paths if len(x) == len(self.nodes.keys())]

    # Continuously call check_vertices until the graph stops being modified
    def simplify(self):
        prev_complexity = len(self.nodes.values())
        self.check_vertices()
        new_complexity = len(self.nodes.values())
        while prev_complexity != new_complexity:
            prev_complexity = len(self.nodes.values())
            self.check_vertices()
            new_complexity = len(self.nodes.values())

    def reset(self):
        self.from_matrix(self.matrix)
        self.paths = []
        self.solutions = []
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Actor',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=200)),
                ('birth_date', models.DateField(verbose_name=b'Birth_date')),
                ('image', models.ImageField(upload_to=b'image')),
            ],
            options={
                'db_table': 'actor',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='Cinema',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=200)),
            ],
            options={
                'db_table': 'cinema',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='General',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
            ],
        ),
        migrations.CreateModel(
            name='Movie',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=200)),
                ('duration_mins', models.IntegerField()),
                ('actor', models.ManyToManyField(to='cine.Actor')),
                ('main_actor', models.ForeignKey(related_name='main_actor', to='cine.Actor')),
            ],
            options={
                'db_table': 'movie',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='MovieByRoom',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('start_datetime', models.DateTimeField(verbose_name=b'Start_datetime')),
                ('end_datetime', models.DateTimeField(verbose_name=b'')),
                ('movie', models.ForeignKey(to='cine.Movie')),
            ],
        ),
        migrations.CreateModel(
            name='Room',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('room_number', models.IntegerField()),
                ('room_manager', models.CharField(max_length=200)),
                ('cinema', models.ForeignKey(related_name='myCinema', to='cine.Cinema')),
                ('played', models.ManyToManyField(to='cine.Movie', through='cine.MovieByRoom')),
            ],
            options={
                'db_table': 'room',
                'managed': True,
            },
        ),
        migrations.AddField(
            model_name='moviebyroom',
            name='room',
            field=models.ForeignKey(to='cine.Room'),
        ),
    ]
""" Percolate Coding Challenge! Normalize entries from data/sample-Liz.in to JSON on data/result.out $ cd perc_test/app $ python formatter.py """ import time import json from functools import wraps def timefn(fn): """ profiling/logging helper measuring execution speed :param self: function :return: str: print statement """ @wraps(fn) def measure_time(*args, **kwargs): """ prints time elapsed per wrapped function in console """ beg = time.time() result = fn(*args, **kwargs) end = time.time() print "@timefn:" + fn.func_name + " took " + str(end - beg) + " seconds" return result return measure_time class Formatter(object): """ formatter :returns result.out """ def __init__(self): self.entries = [] self.errors = [] self.line_count = None self.entry_count = None @staticmethod def read_file(filename): """ reads file f = _fm.read_file(filename) :param filename: str :return: txt: str """ with open(filename, 'r') as opened_file: txt = opened_file.read() opened_file.close() return txt @timefn def get_entries_by_line(self, file_contents): """ grabs content of each line and appends to list container = _fm.get_entries_by_line(file_contents) :param file_contents: str :return: container: list """ _entries = file_contents.split("\n") container = [] for _entry in _entries: item = _entry.split(",") container.append(item) # stores line_count for test_line_count_equals_entry_count self.line_count = len(container) return container @timefn def validate_entries(self, container): """ validates each entry valid formats: 1. lastname, firstname, (###)-###-####, color, ##### 2. firstname lastname, color, #####, ### ### #### 3. firstname, lastname, #####, ### ### ####, color invalid entries are appended to self.errors output = _fm.validate_entries(container) :param container: list """ for entry in container: entry_index = container.index(entry) _entry = {} if len(entry) == 4: name = entry[0].split(" ") _entry["first_name"] = self.validate_str(name[0], entry_index) _entry["last_name"] = self.validate_str(name[1], entry_index) _entry["color"] = self.validate_str(entry[1], entry_index) _entry["zipcode"] = self.validate_zipcode(entry[2], entry_index) _entry["phonenumber"] = self.validate_phonenumber(entry[3], entry_index) self.validate_entry(_entry) elif len(entry) == 5: if "(" in entry[2]: _entry["first_name"] = self.validate_str(entry[1], entry_index) _entry["last_name"] = self.validate_str(entry[0], entry_index) _entry["color"] = self.validate_str(entry[3], entry_index) _entry["zipcode"] = self.validate_zipcode(entry[4], entry_index) _entry["phonenumber"] = self.validate_phonenumber(entry[2], entry_index) self.validate_entry(_entry) else: _entry["first_name"] = self.validate_str(entry[0], entry_index) _entry["last_name"] = self.validate_str(entry[1], entry_index) _entry["color"] = self.validate_str(entry[4], entry_index) _entry["zipcode"] = self.validate_zipcode(entry[2], entry_index) _entry["phonenumber"] = self.validate_phonenumber(entry[3], entry_index) self.validate_entry(_entry) else: self.errors.append(container.index(entry)) def validate_entry(self, _entry): """ checks if any attributes == None :param _entry: dict """ if None not in _entry.itervalues(): self.entries.append(_entry) def validate_phonenumber(self, phone, ind): """ normalizes phone output invalid phone are letters or lengths not equal to 10 phonenumber = self.validate_phonenumber(phone, ind) :param phone: str :param ind: int :return: phone: str or None """ phone = phone.replace("(", "") phone = phone.replace(")", "") phone = phone.replace("-", "") 
phone = phone.replace(" ", "") try: phone = str(int(phone)) if len(phone) == 10: return str(phone[0:3]) + "-" + str(phone[3:6]) + "-" + str(phone[6:]) else: self.errors.append(ind) return None except ValueError: self.errors.append(ind) def validate_zipcode(self, zipcode, ind): """ normalizes zipcode output invalid zipcode are letters or lengths not equal to 5 zipcode = self.validate_zipcode(zipcode, ind) :param zipcode: str :param ind: int :return: zipcode: str or None """ zipcode = zipcode.replace(" ", "") try: zipcode = str(int(zipcode)) if len(zipcode) == 5: return str(zipcode) else: self.errors.append(ind) return None except ValueError: self.errors.append(ind) def validate_str(self, label, ind): """ normalizes string output tests if string could be an integer invalid if type is not string string = self.validate_str(name[0], container.index(entry)) :param label: string :param ind: int :return: label: str or None """ try: label = int(label) self.errors.append(ind) return None except ValueError: if type(label) == str: return label.strip() else: self.errors.append(ind) return None def format_output(self): """ formats output to json :return: results: str """ # remove duplicate errors self.errors = list(set(self.errors)) # stores entry_count for test_line_count_equals_entry_count self.entry_count = len(self.entries) + len(self.errors) # sorting entries alphabetically ascending by last name sorted_entries = sorted(self.entries, key=lambda k: k["last_name"], reverse=False) self.entries = sorted_entries output = {"entries": self.entries, "errors": self.errors} # formatting to json per requirements results = json.dumps(output, sort_keys=True, indent=2) return results @staticmethod def output_results(op_filename, _json): """ writes and saves _json to op_filename :param op_filename: str :param _json: str :return: result.out: json obj """ with open(op_filename, 'w') as opened_file: opened_file.write(_json) opened_file.close() def process_file(in_put, out_put): """ reads input analyzes input saves output in json process_file("../data/sample-Liz.in", "../data/result.out") :param in_put: str :param out_put: str :return: result.out """ _formatter = Formatter() read_input = _formatter.read_file(in_put) entries = _formatter.get_entries_by_line(read_input) _formatter.validate_entries(entries) results = _formatter.format_output() _formatter.output_results(out_put, results) if __name__ == '__main__': process_file("../data/sample-Liz.in", "../data/result.out")
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 12 13:44:04 2019

This is code built for one purpose: well, a couple of purposes
1) load the landslide DEM
2) load the non-landslide DEM
3) call fft mean spec so I can begin debugging that code.

@author: matthew
"""
import time
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from osgeo import gdal

#%% load DEMs
os.chdir(r'U:\GHP\Projects\NSF - Morris Landslides\Code\Developmemnt')
matplotlib.rcParams['figure.figsize'] = (8, 5.5)

dem_path = os.path.join(os.getcwd(), 'ls_patch.tif')
demFLD = gdal.Open(dem_path)
demFLD = np.array(demFLD.GetRasterBand(1).ReadAsArray())

dem_path = os.path.join(os.getcwd(), 'no_ls_patch.tif')
demUNfld = gdal.Open(dem_path)
demUNfld = np.array(demUNfld.GetRasterBand(1).ReadAsArray())  # this removes

dem_path = os.path.join(os.getcwd(), 'snowBasinFull1.tif')
dem = gdal.Open(dem_path)
dem = np.array(dem.GetRasterBand(1).ReadAsArray())  # this removes
# geographic references
demUNfld[demUNfld == -32767] = np.nan

w = 47    # width of the window, must be odd
dx = 2.0  # cell size

##with rio.open(dem_path,'r+') as ds:
#    arr = ds.read()  # read all raster values
#    ds.write(arr)
#
##dem = rio.open(dem_path)
#dem.bounds
#fix, ax = plt.subplots(figsize=(8, 3))
#show(dem,
#     title="lidar",
#     ax=ax)
#ax.set_axis_off()

#%% call fft_mean_spec
from fft_mean_spec import fft_mean_spec

normalize = 0
plots = 1

start = time.time()
[Vdftave_fld, Vdftvec_fld, fvec, freqmat] = fft_mean_spec(demFLD, 47, 0.5, normalize, plots)
[Vdftave_unfld, Vdftvec_unfld, fvec, freqmat] = fft_mean_spec(demUNfld, 47, 0.5, normalize, plots)

from fft_normPower import fft_normPower
# calculate normalized Fourier power
[Vdft_norm, Vdftvec_norm, fvecN] = fft_normPower(Vdftave_fld, Vdftave_unfld, freqmat, plots)
end = time.time()
print(end - start)

#%% Try plotting a few things
#fig, (ax1, ax2) = plt.subplots(1, 2)
plt.loglog(fvec, Vdftvec_fld, 'r.', label='Landslide Terrane')
plt.loglog(fvec, Vdftvec_unfld, 'b.', label='Non-landslide')
plt.xlabel('Wavelength')
plt.ylabel('Spectral Power')
plt.title('Spectra for landslide and non-landslide terrane')
plt.legend()
plt.xlim(0.03, 1)
plt.ylim(10e-11, 10e-1)
plt.show()

plt.semilogx(fvecN, Vdftvec_norm, '.')
plt.xlabel('Wavelength')
plt.ylabel('Normalized Power')
plt.title('Normalized spectral power')

#%% now use wavelets
from conv2_mexh_var import conv2_mexh_var

dx = 0.5
scales = np.exp(np.linspace(0, 2.2, 20))
[Vcwt_fld, frq, wave] = conv2_mexh_var(demFLD, scales, dx)
[Vcwt_unfld, _, _] = conv2_mexh_var(demUNfld, scales, dx)
Vcwt_fld = np.transpose(Vcwt_fld)
Vcwt_unfld = np.transpose(Vcwt_unfld)
Vcwt_norm = Vcwt_fld / Vcwt_unfld

#%% plot results from the wavelet transform
fig1, ax1 = plt.subplots()
ax1.loglog(frq, Vcwt_fld, 's')
ax1.loglog(frq, Vcwt_unfld, 'v')

fig2, ax2 = plt.subplots()
ax2.semilogx(frq, Vcwt_norm)

#%% Compute the wavelet coefficients.
# This is the step where some work is involved in solving for the wavelet scale
# needed to filter out the wavelengths of interest
#   W = 2pi*dx*s/(sqrt(5/2))
# this equation can be rearranged to solve for s, which is the input in these next bits
from conv2_mexh2 import conv2_mexh2

[C2, _, _] = conv2_mexh2(dem, 1.5, dx)
[C3, _, _] = conv2_mexh2(dem, 2.0, dx)
[C4, _, _] = conv2_mexh2(dem, 2.5, dx)
[C5, _, _] = conv2_mexh2(dem, 3.0, dx)
[C6, _, _] = conv2_mexh2(dem, 3.5, dx)

#%% Square and sum wavelet coefficients in quadrature
Vcwtsum = C2**2 + C3**2 + C4**2 + C5**2 + C6**2

fig1, ax1 = plt.subplots()
plt.imshow(np.log(Vcwtsum))
ax1.set_title('CWT Spectral Power Summed')

radius = 25
from smooth2 import smooth2
[Vcwt_smooth, _] = smooth2(Vcwtsum, radius)

#fig2, ax2 = plt.imshow()
#ax2.set_title('CWT Power Sum (smoothed)')
#ax2.imshow(np.log(Vcwt_smooth))
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from typing import Iterable, Type

import pytest

from pants.backend.debian.target_types import DebianSources
from pants.build_graph.address import Address
from pants.engine.rules import QueryRule
from pants.engine.target import (
    HydratedSources,
    HydrateSourcesRequest,
    SourcesPaths,
    SourcesPathsRequest,
)
from pants.testutil.rule_runner import RuleRunner, engine_error


@pytest.fixture
def sources_rule_runner() -> RuleRunner:
    return RuleRunner(
        rules=[
            QueryRule(HydratedSources, [HydrateSourcesRequest]),
            QueryRule(SourcesPaths, [SourcesPathsRequest]),
        ]
    )


def test_sources_expected_num_files(sources_rule_runner: RuleRunner) -> None:
    sources_rule_runner.write_files(
        {
            f: ""
            for f in [
                "f1.txt",
                "f2.txt",
                "dirA/f3.txt",
                "dirB/f4.txt",
                "dirC/f5.txt",
                "dirC/f6.txt",
            ]
        }
    )

    def hydrate(sources_cls: Type[DebianSources], sources: Iterable[str]) -> HydratedSources:
        return sources_rule_runner.request(
            HydratedSources,
            [
                HydrateSourcesRequest(sources_cls(sources, Address("", target_name="example"))),
            ],
        )

    with engine_error(contains="must resolve to at least one file"):
        hydrate(DebianSources, [])
    with engine_error(contains="must resolve to at least one file"):
        hydrate(DebianSources, ["non-existing-dir/*"])
    with engine_error(contains="Individual files were found"):
        hydrate(DebianSources, ["f1.txt", "f2.txt"])
    with engine_error(contains="Multiple directories were found"):
        hydrate(DebianSources, ["dirA/f3.txt", "dirB/f4.txt"])

    # Also check that we support valid sources declarations.
    assert hydrate(DebianSources, ["dirC/f5.txt", "dirC/f6.txt"]).snapshot.files == (
        "dirC/f5.txt",
        "dirC/f6.txt",
    )
    assert hydrate(DebianSources, ["dirC/*"]).snapshot.files == ("dirC/f5.txt", "dirC/f6.txt")
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import annotations import atexit import errno import os import shutil import stat import tempfile import threading import uuid from collections import defaultdict from contextlib import contextmanager from pathlib import Path from typing import Any, Callable, DefaultDict, Iterable, Iterator, Sequence, overload from typing_extensions import Literal from pants.util.strutil import ensure_text def longest_dir_prefix(path: str, prefixes: Sequence[str]) -> str | None: """Given a list of prefixes, return the one that is the longest prefix to the given path. Returns None if there are no matches. """ longest_match, longest_prefix = 0, None for prefix in prefixes: if fast_relpath_optional(path, prefix) is not None and len(prefix) > longest_match: longest_match, longest_prefix = len(prefix), prefix return longest_prefix def fast_relpath(path: str, start: str) -> str: """A prefix-based relpath, with no normalization or support for returning `..`.""" relpath = fast_relpath_optional(path, start) if relpath is None: raise ValueError(f"{start} is not a directory containing {path}") return relpath def fast_relpath_optional(path: str, start: str) -> str | None: """A prefix-based relpath, with no normalization or support for returning `..`. Returns None if `start` is not a directory-aware prefix of `path`. """ if len(start) == 0: # Empty prefix. return path # Determine where the matchable prefix ends. pref_end = len(start) - 1 if start[-1] == "/" else len(start) if pref_end > len(path): # The prefix is too long to match. return None elif path[:pref_end] == start[:pref_end] and (len(path) == pref_end or path[pref_end] == "/"): # The prefix matches, and the entries are either identical, or the suffix indicates that # the prefix is a directory. return path[pref_end + 1 :] return None def safe_mkdir(directory: str | Path, clean: bool = False) -> None: """Ensure a directory is present. If it's not there, create it. If it is, no-op. If clean is True, ensure the dir is empty. :API: public """ if clean: safe_rmtree(directory) try: os.makedirs(directory) except OSError as e: if e.errno != errno.EEXIST: raise def safe_mkdir_for(path: str | Path, clean: bool = False) -> None: """Ensure that the parent directory for a file is present. If it's not there, create it. If it is, no-op. """ dirname = os.path.dirname(path) if dirname: safe_mkdir(dirname, clean=clean) def safe_file_dump( filename: str, payload: bytes | str = "", mode: str = "w", makedirs: bool = False ) -> None: """Write a string to a file. This method is "safe" to the extent that `safe_open` is "safe". See the explanation on the method doc there. When `payload` is an empty string (the default), this method can be used as a concise way to create an empty file along with its containing directory (or truncate it if it already exists). :param filename: The filename of the file to write to. :param payload: The string to write to the file. :param mode: A mode argument for the python `open` builtin which should be a write mode variant. Defaults to 'w'. :param makedirs: Whether to make all parent directories of this file before making it. """ if makedirs: os.makedirs(os.path.dirname(filename), exist_ok=True) with safe_open(filename, mode=mode) as f: f.write(payload) @overload def maybe_read_file(filename: str) -> str | None: ... @overload def maybe_read_file(filename: str, binary_mode: Literal[False]) -> str | None: ... 
@overload def maybe_read_file(filename: str, binary_mode: Literal[True]) -> bytes | None: ... @overload def maybe_read_file(filename: str, binary_mode: bool) -> bytes | str | None: ... def maybe_read_file(filename: str, binary_mode: bool = False) -> bytes | str | None: """Read and return the contents of a file in a single file.read(). :param filename: The filename of the file to read. :param binary_mode: Read from file as bytes or unicode. :returns: The contents of the file, or None if opening the file fails for any reason """ try: return read_file(filename, binary_mode=binary_mode) except OSError: return None @overload def read_file(filename: str) -> str: ... @overload def read_file(filename: str, binary_mode: Literal[False]) -> str: ... @overload def read_file(filename: str, binary_mode: Literal[True]) -> bytes: ... @overload def read_file(filename: str, binary_mode: bool) -> bytes | str: ... def read_file(filename: str, binary_mode: bool = False) -> bytes | str: """Read and return the contents of a file in a single file.read(). :param filename: The filename of the file to read. :param binary_mode: Read from file as bytes or unicode. :returns: The contents of the file. """ mode = "rb" if binary_mode else "r" with open(filename, mode) as f: content: bytes | str = f.read() return content def safe_walk(path: bytes | str, **kwargs: Any) -> Iterator[tuple[str, list[str], list[str]]]: """Just like os.walk, but ensures that the returned values are unicode objects. This isn't strictly safe, in that it is possible that some paths will not be decodeable, but that case is rare, and the only alternative is to somehow avoid all interaction between paths and unicode objects, which seems especially tough in the presence of unicode_literals. See e.g. https://mail.python.org/pipermail/python-dev/2008-December/083856.html :API: public """ # If os.walk is given a text argument, it yields text values; if it # is given a binary argument, it yields binary values. return os.walk(ensure_text(path), **kwargs) _MkdtempCleanerType = Callable[[], None] _MKDTEMP_CLEANER: _MkdtempCleanerType | None = None _MKDTEMP_DIRS: DefaultDict[int, set[str]] = defaultdict(set) _MKDTEMP_LOCK = threading.RLock() def _mkdtemp_atexit_cleaner() -> None: for td in _MKDTEMP_DIRS.pop(os.getpid(), []): safe_rmtree(td) def _mkdtemp_unregister_cleaner() -> None: global _MKDTEMP_CLEANER _MKDTEMP_CLEANER = None def _mkdtemp_register_cleaner(cleaner: _MkdtempCleanerType) -> None: global _MKDTEMP_CLEANER assert callable(cleaner) if _MKDTEMP_CLEANER is None: atexit.register(cleaner) _MKDTEMP_CLEANER = cleaner def safe_mkdtemp(cleaner: _MkdtempCleanerType = _mkdtemp_atexit_cleaner, **kw: Any) -> str: """Create a temporary directory that is cleaned up on process exit. Arguments are as to tempfile.mkdtemp. :API: public """ # Proper lock sanitation on fork [issue 6721] would be desirable here. with _MKDTEMP_LOCK: return register_rmtree(tempfile.mkdtemp(**kw), cleaner=cleaner) def register_rmtree(directory: str, cleaner: _MkdtempCleanerType = _mkdtemp_atexit_cleaner) -> str: """Register an existing directory to be cleaned up at process exit.""" with _MKDTEMP_LOCK: _mkdtemp_register_cleaner(cleaner) _MKDTEMP_DIRS[os.getpid()].add(directory) return directory def safe_rmtree(directory: str | Path) -> None: """Delete a directory if it's present. If it's not present, no-op. Note that if the directory argument is a symlink, only the symlink will be deleted. 
:API: public """ if os.path.islink(directory): safe_delete(directory) else: shutil.rmtree(directory, ignore_errors=True) def safe_open(filename, *args, **kwargs): """Open a file safely, ensuring that its directory exists. :API: public """ safe_mkdir_for(filename) return open(filename, *args, **kwargs) def safe_delete(filename: str | Path) -> None: """Delete a file safely. If it's not present, no-op. """ try: os.unlink(filename) except OSError as e: if e.errno != errno.ENOENT: raise def safe_concurrent_rename(src: str, dst: str) -> None: """Rename src to dst, ignoring errors due to dst already existing. Useful when concurrent processes may attempt to create dst, and it doesn't matter who wins. """ # Delete dst, in case it existed (with old content) even before any concurrent processes # attempted this write. This ensures that at least one process writes the new content. if os.path.isdir(src): # Note that dst may not exist, so we test for the type of src. safe_rmtree(dst) else: safe_delete(dst) try: shutil.move(src, dst) except OSError as e: if e.errno != errno.EEXIST: raise @contextmanager def safe_concurrent_creation(target_path: str) -> Iterator[str]: """A contextmanager that yields a temporary path and renames it to a final target path when the contextmanager exits. Useful when concurrent processes may attempt to create a file, and it doesn't matter who wins. :param target_path: The final target path to rename the temporary path to. :yields: A temporary path containing the original path with a unique (uuid4) suffix. """ safe_mkdir_for(target_path) tmp_path = f"{target_path}.tmp.{uuid.uuid4().hex}" try: yield tmp_path except Exception: rm_rf(tmp_path) raise else: if os.path.exists(tmp_path): safe_concurrent_rename(tmp_path, target_path) def chmod_plus_x(path: str) -> None: """Equivalent of unix `chmod a+x path`""" path_mode = os.stat(path).st_mode path_mode &= int("777", 8) if path_mode & stat.S_IRUSR: path_mode |= stat.S_IXUSR if path_mode & stat.S_IRGRP: path_mode |= stat.S_IXGRP if path_mode & stat.S_IROTH: path_mode |= stat.S_IXOTH os.chmod(path, path_mode) def absolute_symlink(source_path: str, target_path: str) -> None: """Create a symlink at target pointing to source using the absolute path. :param source_path: Absolute path to source file :param target_path: Absolute path to intended symlink :raises ValueError if source_path or link_path are not unique, absolute paths :raises OSError on failure UNLESS file already exists or no such file/directory """ if not os.path.isabs(source_path): raise ValueError(f"Path for source : {source_path} must be absolute") if not os.path.isabs(target_path): raise ValueError(f"Path for link : {target_path} must be absolute") if source_path == target_path: raise ValueError(f"Path for link is identical to source : {source_path}") try: if os.path.lexists(target_path): if os.path.islink(target_path) or os.path.isfile(target_path): os.unlink(target_path) else: shutil.rmtree(target_path) safe_mkdir_for(target_path) os.symlink(source_path, target_path) except OSError as e: # Another run may beat us to deletion or creation. if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT): raise def relative_symlink(source_path: str, link_path: str) -> None: """Create a symlink at link_path pointing to relative source. 
:param source_path: Absolute path to source file :param link_path: Absolute path to intended symlink :raises ValueError if source_path or link_path are not unique, absolute paths :raises OSError on failure UNLESS file already exists or no such file/directory """ if not os.path.isabs(source_path): raise ValueError(f"Path for source:{source_path} must be absolute") if not os.path.isabs(link_path): raise ValueError(f"Path for link:{link_path} must be absolute") if source_path == link_path: raise ValueError(f"Path for link is identical to source:{source_path}") # The failure state below had a long life as an uncaught error. No behavior was changed here, it just adds a catch. # Raising an exception does differ from absolute_symlink, which takes the liberty of deleting existing directories. if os.path.isdir(link_path) and not os.path.islink(link_path): raise ValueError(f"Path for link would overwrite an existing directory: {link_path}") try: if os.path.lexists(link_path): os.unlink(link_path) rel_path = os.path.relpath(source_path, os.path.dirname(link_path)) safe_mkdir_for(link_path) os.symlink(rel_path, link_path) except OSError as e: # Another run may beat us to deletion or creation. if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT): raise def touch(path: str, times: int | tuple[int, int] | None = None): """Equivalent of unix `touch path`. :API: public :path: The file to touch. :times Either a tuple of (atime, mtime) or else a single time to use for both. If not specified both atime and mtime are updated to the current time. """ if isinstance(times, tuple) and len(times) > 2: raise ValueError( "`times` must either be a tuple of (atime, mtime) or else a single time to use for both." ) if isinstance(times, int): times = (times, times) with safe_open(path, "a"): os.utime(path, times) def recursive_dirname(f: str) -> Iterator[str]: """Given a relative path like 'a/b/c/d', yield all ascending path components like: 'a/b/c/d' 'a/b/c' 'a/b' 'a' '' """ prev = None while f != prev: yield f prev = f f = os.path.dirname(f) yield "" def rm_rf(name: str) -> None: """Remove a file or a directory similarly to running `rm -rf <name>` in a UNIX shell. :param name: the name of the file or directory to remove. :raises: OSError on error. """ if not os.path.exists(name): return try: # Avoid using safe_rmtree so we can detect failures. shutil.rmtree(name) except OSError as e: if e.errno == errno.ENOTDIR: # 'Not a directory', but a file. Attempt to os.unlink the file, raising OSError on failure. safe_delete(name) elif e.errno != errno.ENOENT: # Pass on 'No such file or directory', otherwise re-raise OSError to surface perm issues etc. raise def group_by_dir(paths: Iterable[str]) -> dict[str, set[str]]: """For a list of file paths, returns a dict of directory path -> files in that dir.""" ret = defaultdict(set) for path in paths: dirname, filename = os.path.split(path) ret[dirname].add(filename) return ret def find_nearest_ancestor_file(files: set[str], dir: str, filename: str) -> str | None: """Given a filename return the nearest ancestor file of that name in the directory hierarchy.""" while True: candidate_config_file_path = os.path.join(dir, filename) if candidate_config_file_path in files: return candidate_config_file_path if dir == "": return None dir = os.path.dirname(dir)
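# A minimal usage sketch of the file helpers above, assuming they live in a
# module importable here as `fileutil` (that name, and the paths used below,
# are purely illustrative).
from fileutil import group_by_dir, maybe_read_file, safe_mkdtemp, touch

tmp_dir = safe_mkdtemp()                              # cleaned up at process exit
touch(f"{tmp_dir}/example.txt")                       # create an empty file, like `touch`
content = maybe_read_file(f"{tmp_dir}/example.txt")   # "" here, None if the file were missing
print(repr(content))
# group_by_dir returns a defaultdict mapping 'a/b' -> {'one.py', 'two.py'} and 'a' -> {'three.py'}
print(group_by_dir(["a/b/one.py", "a/b/two.py", "a/three.py"]))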
l = [1, 2, 3] l.reverse() print(l)
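# Side note (a small illustrative sketch): list.reverse() mutates in place and
# returns None; reversed() and slicing produce a new sequence instead.
l = [1, 2, 3]
print(list(reversed(l)))   # [3, 2, 1], l is unchanged
print(l[::-1])             # [3, 2, 1], l is still [1, 2, 3]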
""" 4. Valid Palindrome Question: Given a string, determine if it is a palindrome, considering only alphanumeric characters and ignoring cases. For example, "A man, a plan, a canal: Panama" is a palindrome. "race a car" is not a palindrome. Example Questions Candidate Might Ask: Q: What about an empty string? Is it a valid palindrome? A: For the purpose of this problem, we define empty string as valid palindrome. """ import re class Solution: def isPalindrome(self, s): """ :type s: str :rtype: bool """ i = 0 x = s.lower() x = re.sub('[^a-z0-9]+' , "", x) j = len(x) - 1 print (x) while i <= j: for k in range(len(x)): if x[i] == x[j]: i += 1 j -= 1 else: return False return True
import psycopg2
import os
import csv
import io
from pymongo import MongoClient


def check():
    print("\n\n\n\n\n\n...............................MONGO DB CONNECTIVITY ESTABLISHED!............................... ")


def initMongo():
    con = MongoClient()
    db = con.finalproject1
    movies = db.movies
    movies.drop()
    # tags.csv columns: UserID, MovieID, Tag, Timestamp
    # path = "D:\\Fall 2017\\Database Systems\\Project-Main\\tags.csv"
    with io.open("tags.csv", errors='ignore') as f:
        csv_f = csv.reader(f)
        for i, row in enumerate(csv_f):
            # Skip the CSV header row and any blank lines.
            if i > 0 and len(row) > 1:
                # insert() was removed in pymongo 3+; insert_one is the supported call.
                movies.insert_one({'RatingID': row[0], 'Tag': row[1], 'Timestamp': row[2]})
    return movies


def findTag(param, movies):
    my_list = []
    for doc in movies.find({"RatingID": param}):
        my_list.append(doc['Tag'])
    return my_list
access_dict = {'FastEthernet0/12': 10,
               'FastEthernet0/14': 11,
               'FastEthernet0/16': 17,
               'FastEthernet0/17': 150}


def generate_access_config(access, psecurity=False):
    access_config = {}
    access_template = ['switchport mode access',
                       'switchport access vlan',
                       'switchport nonegotiate',
                       'spanning-tree portfast',
                       'spanning-tree bpduguard enable']
    port_security = ['switchport port-security maximum 2',
                     'switchport port-security violation restrict',
                     'switchport port-security']
    # Build the configuration list for each interface
    for interface, vlan in access.items():
        interface = 'interface {}'.format(interface)
        access_config[interface] = []
        for line in access_template:
            if line.endswith('access vlan'):
                access_config[interface].append('{} {}'.format(line, vlan))
                continue
            access_config[interface].append(line)
        # If psecurity=True, append the port-security commands
        if psecurity:
            for security in port_security:
                access_config[interface].append(security)
    # Return the configuration dict keyed by interface
    return access_config


config = generate_access_config(access_dict, True)
print(config)
"""Precompute various Colorgorical model data for improved performance.""" import os import numpy as np import model def precomputeStartingColors(): """Precomputes the starting color sub-space. Creates a list of all colors in a subspace of the default 8,325 CIE Lab colors (http://dx.doi.org/10.1145/2207676.2208547). These colors are separated every 15 units along the L, a, and b axis. This function also precomputes all pairwise color scores between the colors. It precomputes CIEDE2000 (perceptual distance), name difference (see above DOI), and pair preference (http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3037488/). Returns: subspace (np.ndarray): the precomputed subspace subspaceScores (np.ndarray): color scores between all color pairs """ intervals = model.CIE_LAB_STARTING_SUBSPACE_INTERVALS # Load the 8,325 color space into memory filePrefix = os.path.dirname(os.path.realpath(__file__)) colorspacePath = os.path.join(public_root, '../data/allColors.csv') colorSpaces = np.loadtxt(open(colorPath, 'rb'), delimiter=',') isL = colorSpaces[np.any([colorSpaces[:,0] == l for l in intervals["L"]])] isa = colorSpaces[np.any([colorSpaces[:,1] == a for a in intervals["a"]])] isb = colorSpaces[np.any([colorSpaces[:,2] == b for b in intervals["b"]])] subspace = colorSpaces[np.all(isL, isa, isb),:]
from django.test import TestCase from elections.utils import ElectionBuilder, get_notice_directory from .base_tests import BaseElectionCreatorMixIn class TestCreateIds(BaseElectionCreatorMixIn, TestCase): def setUp(self): super().setUp() self.election = ElectionBuilder( "local", "2017-06-08" ).build_election_group() self.organisation = ( ElectionBuilder("local", "2017-06-08") .with_organisation(self.org1) .build_organisation_group(None) ) self.ballot1 = ( ElectionBuilder("local", "2017-06-08") .with_organisation(self.org1) .with_division(self.org_div_1) .build_ballot(None) ) self.ballot2 = ( ElectionBuilder("local", "2017-06-08") .with_organisation(self.org1) .with_division(self.org_div_2) .build_ballot(None) ) def test_one_ballot_with_org(self): folder = get_notice_directory( [self.election, self.organisation, self.ballot1] ) self.assertEqual(self.ballot1.election_id, folder) def test_one_ballot_no_org(self): folder = get_notice_directory([self.election, self.ballot1]) self.assertEqual(self.ballot1.election_id, folder) def test_two_ballots_with_org(self): folder = get_notice_directory( [self.election, self.organisation, self.ballot1, self.ballot2] ) self.assertEqual(self.organisation.election_id, folder) def test_two_ballots_no_org(self): folder = get_notice_directory( [self.election, self.ballot1, self.ballot2] ) self.assertEqual(self.election.election_id, folder) def test_group_only(self): folder = get_notice_directory([self.election, self.organisation]) self.assertEqual(self.organisation.election_id, folder) def test_invalid_empty(self): with self.assertRaises(ValueError): get_notice_directory([]) def test_invalid_two_ballots_no_groups(self): with self.assertRaises(ValueError): get_notice_directory([self.ballot1, self.ballot2])
import numpy as np from eelbrain import * # dimension objects from eelbrain._data_obj import UTS, Sensor """ Create simulated data with shape (2 conditions * 15 subjects, 5 sensors, len(T) time points) """ # create the time dimension time = UTS(-.2, .01, 100) # random data x = np.random.normal(0, 1, (30, 5, len(time))) # add an effect to the random data x[15:,:3,20:40] += np.hanning(20) * 2 # create the sensor dimension from 5 sensor locations in 3d space sensor = Sensor([[0,0,0],[1,0,0],[0,-1,0],[-1,0,0],[0,1,0]], sysname='testnet', proj2d=None) # combine all these into the NDVar. Plotting defaults are stored in the info # dict: info = {'vmax': 2.5, 'meas': 'B', 'cmap': 'xpolar', 'unit': 'pT'} Y = NDVar(x, dims=('case', sensor, time), name='Y', info=info) # To describe the cases ('case' dimension), create a condition and a subject Factor A = Factor(['a0', 'a1'], repeat=15, name='A') subject = Factor(xrange(15), tile=2, random=True, name='subject') # uncorrected related measures t-test res = testnd.ttest_rel(Y, A, match=subject) # plot topographically an uncorrected t-test plot.TopoArray(res) # and a butterfly plot plot.TopoButterfly(res)
#!/home/franck260/ENV/bin/python import application application.app.configure("production.cfg") application.app.run()
#!/usr/bin/env python3 #generate_references.py #* #* -------------------------------------------------------------------------- #* Licensed under MIT (https://git.biohpc.swmed.edu/gudmap_rbk/rna-seq/-/blob/14a1c222e53f59391d96a2a2e1fd4995474c0d15/LICENSE) #* -------------------------------------------------------------------------- #* import argparse import subprocess import shlex import logging EPILOG = ''' For more details: %(prog)s --help ''' logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) logger.propagate = False logger.setLevel(logging.INFO) def get_args(): '''Define arguments.''' parser = argparse.ArgumentParser( description=__doc__, epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('-r', '--reference', help="The reference file (markdown format).", required=True) parser.add_argument('-o', '--output', help="The out file name.", default='references') args = parser.parse_args() return args def main(): args = get_args() reference = args.reference output = args.output out_filename = output + '_mqc.yaml' # Header for HTML print( ''' id: 'software_references' section_name: 'Software References' description: 'This section describes references for the tools used.' plot_type: 'html' data: | ''' , file = open(out_filename, "w") ) # Turn Markdown into HTML references_html = 'bash -c "pandoc -p {} | sed \'s/^/ /\' >> {}"' references_html = references_html.format(reference, out_filename) subprocess.check_call(shlex.split(references_html)) if __name__ == '__main__': main()
N = int(input())


def distance(A, B):
    """Forward distance from letter A to letter B in a cyclic alphabet."""
    ans = ord(B) - ord(A)
    if ans < 0:
        ans += 26
    return ans


ans = []
for _ in range(N):
    A, B = input().split()
    t = [distance(A[i], B[i]) for i in range(len(A))]
    ans.append(t)

for k in ans:
    # The original built the string with a leading space and then called
    # strip() without using its return value (str.strip() is not in-place);
    # joining the distances gives the same "Distances: d1 d2 ..." output.
    print("Distances: %s" % ' '.join(str(j) for j in k))
# Done
import numpy as np import tensorflow as tf from dsn.util.tf_langevin import ( langevin_dyn, bounded_langevin_dyn, bounded_langevin_dyn_np, ) from tf_util.stat_util import approx_equal EPS = 1e-16 CONV_EPS = 1e-2 def langevin_dyn_np(f, x0, eps, num_its): dim = x0.shape[0] x = x0 for i in range(num_its): x = (1.0 - eps) * x + eps * f(x) return x n = 200 def test_langevin_dyn(): x0 = np.random.normal(0.0, 10.0, (n, 3)) eps = 0.2 num_iters = 100 def f(x): f1 = x[:, 1] - x[:, 2] f2 = x[:, 0] + 3 f3 = x[:, 1] - 2 return tf.stack([f1, f2, f3], axis=1) def f_np(x): f1 = x[1] - x[2] f2 = x[0] + 3 f3 = x[1] - 2 return np.array([f1, f2, f3]) x0 = tf.placeholder(dtype=tf.float64, shape=(n, 3)) _x0 = np.random.normal(0.0, 1.0, (n, 3)) xs = langevin_dyn(f, x0, eps, num_iters) xs_true = np.zeros((n, 3)) for i in range(n): xs_true[i, :] = langevin_dyn_np(f_np, _x0[i, :], eps, num_iters) with tf.Session() as sess: _xs = sess.run(xs, {x0: _x0}) x_100_true = np.tile(np.array([[2.0, 5.0, 3.0]]), [n, 1]) assert approx_equal(_xs, xs_true, EPS) assert approx_equal(x_100_true, xs_true, CONV_EPS) assert approx_equal(x_100_true, _xs, CONV_EPS) return None def test_bounded_langevin_dyn(): x0 = tf.placeholder(dtype=tf.float64, shape=(n, 2)) def f(x): f1 = x[:, 1] + 2 f2 = 0.0 * x[:, 0] + 0.1 return tf.stack([f1, f2], axis=1) def f_np(x): f1 = x[:, 1] + 2 f2 = 0.0 * x[:, 0] + 0.1 return np.stack([f1, f2], axis=1) eps = 0.8 num_its = 30 non_neg = [False, True] x_ss, x = bounded_langevin_dyn(f, x0, eps, num_its, non_neg, db=True) _x0 = np.random.normal(0.0, 10.0, (n, 2)) x_ss_np, x_np = bounded_langevin_dyn_np(f_np, _x0, eps, num_its, non_neg, db=True) with tf.Session() as sess: _x = sess.run(x, {x0: _x0}) assert approx_equal(_x, x_np, EPS) x_ss_true = np.array([2.1, 0.1]) for i in range(n): assert approx_equal(_x[i, :, -1], x_ss_true, EPS) assert _x[i, 1, 1] >= 0.0 # Test top bound def f(x): f1 = x[:, 1] + 10.0 f2 = x[:, 0] + 10.0 return tf.stack([f1, f2], axis=1) def f_np(x): f1 = x[:, 1] + 10.0 f2 = x[:, 0] + 10.0 return np.stack([f1, f2], axis=1) x_ss, x = bounded_langevin_dyn(f, x0, eps, num_its, non_neg, db=True) _x0 = np.random.normal(0.0, 10.0, (n, 2)) x_ss_np, x_np = bounded_langevin_dyn_np(f_np, _x0, eps, num_its, non_neg, db=True) with tf.Session() as sess: _x = sess.run(x, {x0: _x0}) assert approx_equal(_x, x_np, EPS) assert(approx_equal(_x[:,0,-1], 150.0*np.ones((n,)), EPS)) assert(approx_equal(_x[:,1,-1], 150.0*np.ones((n,)), EPS)) # Test bottom bound def f(x): f1 = x[:, 1] - 200.0 f2 = x[:, 0] - 200.0 return tf.stack([f1, f2], axis=1) def f_np(x): f1 = x[:, 1] - 200.0 f2 = x[:, 0] - 200.0 return np.stack([f1, f2], axis=1) x_ss, x = bounded_langevin_dyn(f, x0, eps, num_its, non_neg, db=True) _x0 = np.random.normal(0.0, 10.0, (n, 2)) x_ss_np, x_np = bounded_langevin_dyn_np(f_np, _x0, eps, num_its, non_neg, db=True) with tf.Session() as sess: _x = sess.run(x, {x0: _x0}) assert approx_equal(_x, x_np, EPS) assert(approx_equal(_x[:,0,-1], -150.0*np.ones((n,)), EPS)) assert(approx_equal(_x[:,1,-1], 0.0*np.ones((n,)), EPS)) return None if __name__ == "__main__": test_langevin_dyn() test_bounded_langevin_dyn()
import sys import subprocess import rlkit.launchers.config as config cmd = F"aws s3 sync --exact-timestamp --exclude '*' --include '12-02*' {config.AWS_S3_PATH}/ ../../s3_files/" process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) print(cmd) for line in iter(process.stdout.readline, b''): sys.stdout.buffer.write(line)
import string
import random


def get_random_str(size=8, chars=string.ascii_lowercase + string.digits):
    return ''.join(random.choice(chars) for _ in range(size))


def gen_playlists(repeat=10, start_id=1):
    playlists = []
    for i in range(repeat):
        playlists.append({"id_playlist": i + start_id,
                          "nome_playlist": get_random_str(),
                          "estilo_playlist": get_random_str(),
                          "obs_playlist": get_random_str(20)})
    return playlists


def gen_musics(repeat=10, start_id=1):
    musics = []
    for i in range(repeat):
        musics.append({"id_musica": i + start_id,
                       "nome_musica": get_random_str(),
                       "criador_musica": get_random_str(),
                       "estilo_musica": get_random_str(),
                       "id_playlist": 1})
    return musics


# Note: ids are compared with == rather than `is`; identity checks on ints only
# work by accident for small interned values.
def get_playlist_by_id(playlists, id):
    for p in playlists:
        if p['id_playlist'] == id:
            return p
    return None


def get_music_by_id(musics, id):
    for m in musics:
        if m['id_musica'] == id:
            return m
    return None


def add_playlist(playlists, new):
    playlists.append(new)
    return playlists


def add_music(musics, new):
    musics.append(new)
    return musics


def remove_playlist_by_id(playlists, id):
    for p in playlists:
        if p['id_playlist'] == id:
            playlists.remove(p)
    return playlists


def remove_music_by_id(musics, id):
    for m in musics:
        if m['id_musica'] == id:
            musics.remove(m)
    return musics


if __name__ == '__main__':
    playlists = gen_playlists()
    musics = gen_musics()
    print(len(playlists))
    print(len(musics))
    print(len(remove_playlist_by_id(playlists, 1)))
    print(len(remove_music_by_id(musics, 1)))
    print(len(add_playlist(playlists, gen_playlists(1, 11)[0])))
    print(len(add_music(musics, gen_musics(1, 11)[0])))
    print(get_playlist_by_id(playlists, 11) is not None)
    print(get_music_by_id(musics, 11) is not None)
train = []
while True:
    try:
        x = int(input())
    except (EOFError, ValueError):
        break
    if x == 0 and len(train) != 0:
        # Pop the last element; the original train.remove(train[counter])
        # deletes the first occurrence of that value, which is wrong when
        # values repeat, and the separate counter is unnecessary.
        print(train.pop())
    else:
        train.append(x)
from PyQt4.Qt import * class SortableTableItem(QTableWidgetItem): def __lt__(self, other): if isinstance(other, QTableWidgetItem): myValue, myOk = self.data(Qt.EditRole).toInt() otherValue, otherOk = other.data(Qt.EditRole).toInt() if myOk and otherOk: return myValue < otherValue return super(SortableTableItem, self).__lt__(other)
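# A minimal, hypothetical usage sketch for the item class above: cells filled
# with integers sort numerically instead of lexically once sorting is enabled.
# It assumes a running PyQt4 application and the QVariant v1 API implied by
# the .toInt() calls in __lt__.
app = QApplication([])
table = QTableWidget(3, 1)
for row, value in enumerate([10, 2, 33]):
    item = SortableTableItem()
    item.setData(Qt.EditRole, value)   # stored as a QVariant, read back via toInt()
    table.setItem(row, 0, item)
table.setSortingEnabled(True)
table.sortItems(0)                     # rows are now ordered 2, 10, 33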
from rest_framework.viewsets import ModelViewSet

from .models import Employee, EmployeeSerializer


class EmployeeViewSet(ModelViewSet):
    queryset = Employee.objects.all()
    serializer_class = EmployeeSerializer
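# A minimal sketch of wiring the viewset above into URLs, assuming a urls.py in
# the same app and that the viewset lives in views.py; the 'employees' prefix
# is an arbitrary example.
from rest_framework.routers import DefaultRouter

from .views import EmployeeViewSet

router = DefaultRouter()
router.register('employees', EmployeeViewSet)
urlpatterns = router.urls  # provides list/create/retrieve/update/destroy routes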
# Copyright 2010-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 __all__ = ( 'UseManager', ) from _emerge.Package import Package from portage import os from portage.dep import dep_getrepo, dep_getslot, ExtendedAtomDict, remove_slot, _get_useflag_re from portage.eapi import eapi_supports_stable_use_forcing_and_masking from portage.localization import _ from portage.util import grabfile, grabdict_package, read_corresponding_eapi_file, stack_lists, writemsg from portage.versions import _pkg_str from portage.package.ebuild._config.helper import ordered_by_atom_specificity class UseManager(object): def __init__(self, repositories, profiles, abs_user_config, user_config=True): # file variable #-------------------------------- # repositories #-------------------------------- # use.mask _repo_usemask_dict # use.stable.mask _repo_usestablemask_dict # use.force _repo_useforce_dict # use.stable.force _repo_usestableforce_dict # package.use.mask _repo_pusemask_dict # package.use.stable.mask _repo_pusestablemask_dict # package.use.force _repo_puseforce_dict # package.use.stable.force _repo_pusestableforce_dict #-------------------------------- # profiles #-------------------------------- # use.mask _usemask_list # use.stable.mask _usestablemask_list # use.force _useforce_list # use.stable.force _usestableforce_list # package.use.mask _pusemask_list # package.use.stable.mask _pusestablemask_list # package.use _pkgprofileuse # package.use.force _puseforce_list # package.use.stable.force _pusestableforce_list #-------------------------------- # user config #-------------------------------- # package.use _pusedict # Dynamic variables tracked by the config class #-------------------------------- # profiles #-------------------------------- # usemask # useforce #-------------------------------- # user config #-------------------------------- # puse self._repo_usemask_dict = self._parse_repository_files_to_dict_of_tuples("use.mask", repositories) self._repo_usestablemask_dict = \ self._parse_repository_files_to_dict_of_tuples("use.stable.mask", repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking) self._repo_useforce_dict = self._parse_repository_files_to_dict_of_tuples("use.force", repositories) self._repo_usestableforce_dict = \ self._parse_repository_files_to_dict_of_tuples("use.stable.force", repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking) self._repo_pusemask_dict = self._parse_repository_files_to_dict_of_dicts("package.use.mask", repositories) self._repo_pusestablemask_dict = \ self._parse_repository_files_to_dict_of_dicts("package.use.stable.mask", repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking) self._repo_puseforce_dict = self._parse_repository_files_to_dict_of_dicts("package.use.force", repositories) self._repo_pusestableforce_dict = \ self._parse_repository_files_to_dict_of_dicts("package.use.stable.force", repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking) self._repo_puse_dict = self._parse_repository_files_to_dict_of_dicts("package.use", repositories) self._usemask_list = self._parse_profile_files_to_tuple_of_tuples("use.mask", profiles) self._usestablemask_list = \ self._parse_profile_files_to_tuple_of_tuples("use.stable.mask", profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking) self._useforce_list = self._parse_profile_files_to_tuple_of_tuples("use.force", profiles) self._usestableforce_list = \ 
self._parse_profile_files_to_tuple_of_tuples("use.stable.force", profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking) self._pusemask_list = self._parse_profile_files_to_tuple_of_dicts("package.use.mask", profiles) self._pusestablemask_list = \ self._parse_profile_files_to_tuple_of_dicts("package.use.stable.mask", profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking) self._pkgprofileuse = self._parse_profile_files_to_tuple_of_dicts("package.use", profiles, juststrings=True) self._puseforce_list = self._parse_profile_files_to_tuple_of_dicts("package.use.force", profiles) self._pusestableforce_list = \ self._parse_profile_files_to_tuple_of_dicts("package.use.stable.force", profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking) self._pusedict = self._parse_user_files_to_extatomdict("package.use", abs_user_config, user_config) self.repositories = repositories def _parse_file_to_tuple(self, file_name, recursive=True, eapi_filter=None): ret = [] lines = grabfile(file_name, recursive=recursive) eapi = read_corresponding_eapi_file(file_name) if eapi_filter is not None and not eapi_filter(eapi): if lines: writemsg(_("--- EAPI '%s' does not support '%s': '%s'\n") % (eapi, os.path.basename(file_name), file_name), noiselevel=-1) return () useflag_re = _get_useflag_re(eapi) for prefixed_useflag in lines: if prefixed_useflag[:1] == "-": useflag = prefixed_useflag[1:] else: useflag = prefixed_useflag if useflag_re.match(useflag) is None: writemsg(_("--- Invalid USE flag in '%s': '%s'\n") % (file_name, prefixed_useflag), noiselevel=-1) else: ret.append(prefixed_useflag) return tuple(ret) def _parse_file_to_dict(self, file_name, juststrings=False, recursive=True, eapi_filter=None, user_config=False): ret = {} location_dict = {} eapi = read_corresponding_eapi_file(file_name, default=None) if eapi is None and not user_config: eapi = "0" if eapi is None: ret = ExtendedAtomDict(dict) else: ret = {} file_dict = grabdict_package(file_name, recursive=recursive, allow_wildcard=(eapi is None), allow_repo=(eapi is None), verify_eapi=(eapi is not None)) if eapi is not None and eapi_filter is not None and not eapi_filter(eapi): if file_dict: writemsg(_("--- EAPI '%s' does not support '%s': '%s'\n") % (eapi, os.path.basename(file_name), file_name), noiselevel=-1) return ret useflag_re = _get_useflag_re(eapi) for k, v in file_dict.items(): useflags = [] for prefixed_useflag in v: if prefixed_useflag[:1] == "-": useflag = prefixed_useflag[1:] else: useflag = prefixed_useflag if useflag_re.match(useflag) is None: writemsg(_("--- Invalid USE flag for '%s' in '%s': '%s'\n") % (k, file_name, prefixed_useflag), noiselevel=-1) else: useflags.append(prefixed_useflag) location_dict.setdefault(k, []).extend(useflags) for k, v in location_dict.items(): if juststrings: v = " ".join(v) else: v = tuple(v) ret.setdefault(k.cp, {})[k] = v return ret def _parse_user_files_to_extatomdict(self, file_name, location, user_config): ret = ExtendedAtomDict(dict) if user_config: pusedict = grabdict_package( os.path.join(location, file_name), recursive=1, allow_wildcard=True, allow_repo=True, verify_eapi=False) for k, v in pusedict.items(): ret.setdefault(k.cp, {})[k] = tuple(v) return ret def _parse_repository_files_to_dict_of_tuples(self, file_name, repositories, eapi_filter=None): ret = {} for repo in repositories.repos_with_profiles(): ret[repo.name] = self._parse_file_to_tuple(os.path.join(repo.location, "profiles", file_name), eapi_filter=eapi_filter) return ret def 
_parse_repository_files_to_dict_of_dicts(self, file_name, repositories, eapi_filter=None): ret = {} for repo in repositories.repos_with_profiles(): ret[repo.name] = self._parse_file_to_dict(os.path.join(repo.location, "profiles", file_name), eapi_filter=eapi_filter) return ret def _parse_profile_files_to_tuple_of_tuples(self, file_name, locations, eapi_filter=None): return tuple(self._parse_file_to_tuple( os.path.join(profile.location, file_name), recursive=profile.portage1_directories, eapi_filter=eapi_filter) for profile in locations) def _parse_profile_files_to_tuple_of_dicts(self, file_name, locations, juststrings=False, eapi_filter=None): return tuple(self._parse_file_to_dict( os.path.join(profile.location, file_name), juststrings, recursive=profile.portage1_directories, eapi_filter=eapi_filter, user_config=profile.user_config) for profile in locations) def getUseMask(self, pkg=None): if pkg is None: return frozenset(stack_lists( self._usemask_list, incremental=True)) slot = None cp = getattr(pkg, "cp", None) if cp is None: slot = dep_getslot(pkg) repo = dep_getrepo(pkg) pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo) cp = pkg.cp try: stable = pkg.stable except AttributeError: # KEYWORDS is unavailable (prior to "depend" phase) stable = False usemask = [] if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO: repos = [] try: repos.extend(repo.name for repo in self.repositories[pkg.repo].masters) except KeyError: pass repos.append(pkg.repo) for repo in repos: usemask.append(self._repo_usemask_dict.get(repo, {})) if stable: usemask.append(self._repo_usestablemask_dict.get(repo, {})) cpdict = self._repo_pusemask_dict.get(repo, {}).get(cp) if cpdict: pkg_usemask = ordered_by_atom_specificity(cpdict, pkg) if pkg_usemask: usemask.extend(pkg_usemask) if stable: cpdict = self._repo_pusestablemask_dict.get(repo, {}).get(cp) if cpdict: pkg_usemask = ordered_by_atom_specificity(cpdict, pkg) if pkg_usemask: usemask.extend(pkg_usemask) for i, pusemask_dict in enumerate(self._pusemask_list): if self._usemask_list[i]: usemask.append(self._usemask_list[i]) if stable and self._usestablemask_list[i]: usemask.append(self._usestablemask_list[i]) cpdict = pusemask_dict.get(cp) if cpdict: pkg_usemask = ordered_by_atom_specificity(cpdict, pkg) if pkg_usemask: usemask.extend(pkg_usemask) if stable: cpdict = self._pusestablemask_list[i].get(cp) if cpdict: pkg_usemask = ordered_by_atom_specificity(cpdict, pkg) if pkg_usemask: usemask.extend(pkg_usemask) return frozenset(stack_lists(usemask, incremental=True)) def getUseForce(self, pkg=None): if pkg is None: return frozenset(stack_lists( self._useforce_list, incremental=True)) cp = getattr(pkg, "cp", None) if cp is None: slot = dep_getslot(pkg) repo = dep_getrepo(pkg) pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo) cp = pkg.cp try: stable = pkg.stable except AttributeError: # KEYWORDS is unavailable (prior to "depend" phase) stable = False useforce = [] if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO: repos = [] try: repos.extend(repo.name for repo in self.repositories[pkg.repo].masters) except KeyError: pass repos.append(pkg.repo) for repo in repos: useforce.append(self._repo_useforce_dict.get(repo, {})) if stable: useforce.append(self._repo_usestableforce_dict.get(repo, {})) cpdict = self._repo_puseforce_dict.get(repo, {}).get(cp) if cpdict: pkg_useforce = ordered_by_atom_specificity(cpdict, pkg) if pkg_useforce: useforce.extend(pkg_useforce) if stable: cpdict = self._repo_pusestableforce_dict.get(repo, {}).get(cp) if cpdict: 
pkg_useforce = ordered_by_atom_specificity(cpdict, pkg) if pkg_useforce: useforce.extend(pkg_useforce) for i, puseforce_dict in enumerate(self._puseforce_list): if self._useforce_list[i]: useforce.append(self._useforce_list[i]) if stable and self._usestableforce_list[i]: useforce.append(self._usestableforce_list[i]) cpdict = puseforce_dict.get(cp) if cpdict: pkg_useforce = ordered_by_atom_specificity(cpdict, pkg) if pkg_useforce: useforce.extend(pkg_useforce) if stable: cpdict = self._pusestableforce_list[i].get(cp) if cpdict: pkg_useforce = ordered_by_atom_specificity(cpdict, pkg) if pkg_useforce: useforce.extend(pkg_useforce) return frozenset(stack_lists(useforce, incremental=True)) def getPUSE(self, pkg): cp = getattr(pkg, "cp", None) if cp is None: slot = dep_getslot(pkg) repo = dep_getrepo(pkg) pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo) cp = pkg.cp ret = "" cpdict = self._pusedict.get(cp) if cpdict: puse_matches = ordered_by_atom_specificity(cpdict, pkg) if puse_matches: puse_list = [] for x in puse_matches: puse_list.extend(x) ret = " ".join(puse_list) return ret def extract_global_USE_changes(self, old=""): ret = old cpdict = self._pusedict.get("*/*") if cpdict is not None: v = cpdict.pop("*/*", None) if v is not None: ret = " ".join(v) if old: ret = old + " " + ret if not cpdict: #No tokens left in atom_license_map, remove it. del self._pusedict["*/*"] return ret
from flask import Flask, render_template, abort

app = Flask(__name__)


@app.route('/', methods=["GET", "POST"])
def inicio():
    return render_template("inicio.html")


@app.route('/hola/')
@app.route('/hola/<nombre>')
def saluda(nombre=None):
    return render_template("template1.html", nombre=nombre)


@app.route('/suma/<num1>/<num2>')
def suma(num1, num2):
    try:
        resultado = int(num1) + int(num2)
    except ValueError:
        # Non-numeric path segments return a 404 instead of an unhandled 500.
        abort(404)
    return render_template("template2.html", num1=num1, num2=num2, resultado=resultado)


app.run(debug=True)
import os from pathlib import Path ''' This page is all about static data that won't be changed through the Tests. All the time this data should be static as-is like here written by: jiaul_islam ''' # ALL GLOBAL VARIABLE BASE_DIR = Path.cwd() READER_FILENAME = 'Request_CR.xlsx' WRITER_FILENAME = 'Output_CR.xlsx' CANCEL_CHANGE_FILENAME = "cancel.txt" CLOSE_CHANGE_FILENAME = "close.txt" class StaticData: USERNAME = os.environ.get("BMC_USER") # Get the username PASSWORD = os.environ.get("BMC_PASS") # Get the password IT_HOME = 'IT Home' READ_EXCEL_FILE = str(BASE_DIR.joinpath("data_driver", READER_FILENAME)) WRITE_EXCEL_FILE = str(BASE_DIR.joinpath("data_driver", WRITER_FILENAME)) CANCEL_CHANGE_TXT_FILE_PATH = str(BASE_DIR.joinpath("data_driver", CANCEL_CHANGE_FILENAME)) CLOSE_CHANGE_TXT_FILE_PATH = str(BASE_DIR.joinpath("data_driver", CLOSE_CHANGE_FILENAME)) VIEW_ATTACHMENT_DEFAULT_STATE = 'View Attachment Disabled' class BMCData: BMC_URL = 'http://itsm-web.robi.com.bd:8080/arsys/shared/login.jsp?/arsys/home' USERNAME = os.environ.get("BMC_USER") # Get the username PASSWORD = os.environ.get("BMC_PASS") # Get the password class LDMAData: LDMA_URL = 'http://ldma.robi.com.bd/view/common/login.php' LDMA_USERNAME = os.environ.get("LDMA_USER") LDMA_PASSWORD = os.environ.get("LDMA_PASS")
# -*- coding: utf-8 -*- """ Created on Mon Apr 1 20:58:36 2019 @author: Sneha """ # -*- coding: utf-8 -*- """ Created on Tue Mar 26 13:26:27 2019 @author: Sneha """ import tkinter as tk from tkinter import * from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg import numpy as np import matplotlib.pyplot as plt import math from collections import deque, namedtuple import sys from collections import defaultdict from heapq import * import matplotlib.animation as animation from shapely.geometry import Point, Polygon import time title='Click point in map to select Initial/Final point.' arr=[] root= tk.Tk() class Node: def __init__(self, node, cost,x,y): self.node = node self.parent = None self.x=x self.y=y self.cost = cost # Print the tree def PrintTree(self,ax): if self.parent: self.parent.PrintTree(ax) ax.scatter(self.x,self.y,s=10,c='b') init=[] final=[] resolution=1 radius=0 clearance=0 # we'll use infinity as a default distance to nodes. inf = float('inf') Edge = namedtuple('Edge', 'start, end, cost') def onpick(event): print(event.xdata,event.ydata) global init,final,title if(not(init)): print('init') init=[int(event.xdata),int(event.ydata)] else: print('final') final=[int(event.xdata),int(event.ydata)] title='Node Exploration' return True def updateMinMax(arr,minx,miny,maxx,maxy,d): if(maxx>arr[2]): # print('x max') arr[2]=maxx+1+d if(minx<arr[0]): # print('x min') arr[0]=minx-1-d if(maxy>arr[3]): # print('y max') arr[3]=maxy+1+d if(miny<arr[1]): # print('y min') arr[1]=miny-1-d def pathAvailability(x,y,arr,pol,maxPx,minPx,maxPy,minPy): """ Box """ global radius,clearance,resolution d=radius+clearance if(((y-((112.5/resolution)+d))<=0) and ((x-((100/resolution)+d))<=0) and ((-y+((67.5/resolution)-d))<=0) and ((-x+((50/resolution)-d))<=0)): maxBx=100 minBx=50 maxBy=112.5 minBy=67.5 updateMinMax(arr,minBx,minBy,maxBx,maxBy,d) return 0 # xpolygon=[120,158, 165,188,168,145]; # % ypolygon=[55,51,89,51,14,14]; # % Line 2 with coordinates (125,56) and (150,15) p2 = Point(x,y) for i in pol: coords = i poly = Polygon(i) inside2 = p2.within(poly) if(inside2==True): break if(inside2==True): updateMinMax(arr,minPx,minPy,maxPx,maxPy,d) return 0 if((((math.pow((x-(140/resolution)),2)/math.pow(((15/resolution)+d),2))+(math.pow((y-(120/resolution)),2)/math.pow(((6/resolution)+d),2))-1)<=0)): maxEx=140+15 minEx=140-15 maxEy=120+6 minEy=120-6 updateMinMax(arr,minEx,minEy,maxEx,maxEy,d) return 0 if((((math.pow((x-(190/resolution)),2))+(math.pow((y-(130/resolution)),2))-(math.pow(((15/resolution)+d),2)))<=0)): maxCx=190+15 minCx=190-15 maxCy=130+15 minCy=130-15 updateMinMax(arr,minCx,minCy,maxCx,maxCy,d) return 0 else: return 1 def make_edge(start, end, cost=1): return Edge(start, end, cost) def test(algo_type): print(algo_type) def sorting(vals): print(vals) return vals def getKey(item): return item[0] def dijkstra(graph,f,t,paths_to_goal,tempx,tempy,weightx,weighty,costw,final,pol): path=[] paths_to_goal=[] count=-1 path=0 queue=[] queue.append((tempx,tempy)) g = defaultdict(list) q,seen,mins,queue= [(0,Node(f,0,tempx,tempy))],set(), {f: 0},[(0,f)] nodes=[] count=0 nodes.append(Node(f,0,tempx,tempy)) node='' while (q and node!=t): (cost1,node)=heappop(queue) index= [i for ((c,y), i) in zip(q, range(len(q))) if node==y.node] (cost,v1) = q.pop(index[0]) temp_trav(weightx,weighty,costw,final,graph,g,q,queue,nodes,v1,seen,mins,pol) ans= [v for v in (nodes) if v.node == t] print(ans) if(len(ans)>0): return nodes,ans[0] else: return 'Initial/Final Point in Obstacle!!',0 def animate(listPnts): global 
title,root,final,init fig = plt.Figure(figsize=(5,4), dpi=100) ax = fig.add_subplot(111) scatter = FigureCanvasTkAgg(fig, root) scatter.get_tk_widget().pack(side=tk.LEFT, fill=tk.BOTH) ax.fill([250,0,0,250],[150,150,0,0], color = (0,0,0)) for i in (listPnts): ax.fill(i[0],i[1], color = i[2]) ax.legend() ax.set_title(title); ax.set_xlabel('X axis') ax.set_ylabel('Y axis') fig.canvas.mpl_connect('button_press_event',onpick) tk.Label(root, text="Enter Coordinates").pack() tk.Label(root, text="Initial point(comma separated x,y-no spaces)").pack() initial=Entry(root) if(init): init_str=str(init[0])+' '+str(init[1]) initial.insert(0,init_str) initial.pack() tk.Label(root, text="Final point(comma separated x,y-no spaces)").pack() final1=Entry(root) if(final): final_str=str(final[0])+' '+str(final[1]) final1.insert(0,final_str) final1.pack() tk.Button(root, text="Quit", command= lambda:quit(initial,final1)).pack() root.mainloop() return listPnts xdata=[] ydata=[] def animated(i,nodes,node,test): global xdata,ydata t, y = i.x,i.y xdata.append(t) ydata.append(y) xmin, xmax = ax.get_xlim() if t >= xmax: ax.set_xlim(xmin, 2*xmax) ax.figure.canvas.draw() line.set_data(xdata, ydata) if(((nodes[len(nodes)-1].x) == i.x) and (nodes[len(nodes)-1].y == i.y)): node.PrintTree(ax) return line, def quit(initial,final1): global root,init,final,radius,resolution,clearance,arr resolution=1 if(initial.get()): if(len((initial.get()).split(','))==2): x,y=(initial.get()).split(',') if(x and y and (int(x)) and (int(y))): init=[int(int(x)/resolution),int(int(y)/resolution)] else: root.quit() root.destroy() test=tk.Tk() test.geometry('400x300') label = Label(test, text= "Please enter valid Initial Point.") label.pack() test.mainloop() else: root.quit() root.destroy() test=tk.Tk() test.geometry('400x300') label = Label(test, text= "Please enter valid comma separated Initial Point.") label.pack() test.mainloop() elif(init): init=[int(init[0]/resolution),int(init[1]/resolution)] else: root.quit() root.destroy() test=tk.Tk() test.geometry('400x300') label = Label(test, text= "Please enter valid Initial Point.") label.pack() test.mainloop() if(final1.get()): if(len((final1.get()).split(','))==2): x1,y1=(final1.get()).split(',') if(x1 and y1 and (int(x1)) and (int(y1))): final=[int(int(x1)/resolution),int(int(y1)/resolution)] else: root.quit() root.destroy() test=tk.Tk() test.geometry('400x300') label = Label(test, text= "Please enter valid Final Point.") label.pack() test.mainloop() else: root.quit() root.destroy() test=tk.Tk() test.geometry('400x300') label = Label(test, text= "Please enter valid comma separated Final Point.") label.pack() test.mainloop() elif(final): final=[int(final[0]/resolution),int(final[1]/resolution)] else: root.quit() root.destroy() test=tk.Tk() test.geometry('400x300') label = Label(test, text= "Please enter valid Final Point.") label.pack() test.mainloop() radius=0 clearance=0 root.quit() root.destroy() minx = min(final[0],init[0])-1 miny = min(final[1],init[1])-1 maxx= max(final[0],init[0])+1 maxy= max(final[1],init[1])+1 arr=[minx,miny,maxx,maxy] def temp_trav(weightx,weighty,cost,final,graph,g,q,queue,nodes,parent,seen,mins,pol): global arr,maxPx,minPx,maxPy,minPy flag=0 tempx=parent.x tempy=parent.y global radius,clearance,resolution d=radius+clearance minx = min(final[0],init[0])-1 miny = min(final[1],init[1])-1 maxx= max(final[0],init[0])+1 maxy= max(final[1],init[1])+1 for i in range(8): x=tempx+weightx[i] y=tempy+weighty[i] costw=cost[i] a=str(tempx)+' '+str(tempy) b=str(x)+' '+str(y) 
tup=(a,b,costw) tupin=(b,a,costw) if((tup not in graph) and (tupin not in graph) and (x>=0 and x<=((250/resolution)+radius) and y>=0 and y<=((150/resolution)+radius)) and (((x+d)<=(250/resolution)) and ((y+d)<=(150/resolution)) and ((x-d)>=(0/resolution)) and ((y-d)>=(0/resolution)))): if(((pathAvailability(x,y,arr,pol,maxPx,minPx,maxPy,minPy))==1) and (x>=(arr[0]) and y>=(arr[1])) and ( x<=(arr[2]) and y<=(arr[3]) )): graph.append(tup) test.append((x,y)) if(b not in seen): seen.add(b) next = (costw+parent.cost) v2=(Node(b,(next),x,y)) v2.parent=parent mins[b] = next nodes.append(v2) q.append((next,v2)) heappush(queue, (next, b)) else: ans= [v for v in (nodes) if v.node == b] # ans1= [i for i, v in (queue) if v == b] prev = mins.get(b, None) next = (costw+parent.cost) if prev is None or next < prev: ans[0].parent=parent mins[b] = next # ans1[0]=next ans[0].cost=next else: minx=arr[0] miny=arr[1] maxx=arr[2] maxy=arr[3] t = np.linspace(0, 2*np.pi, 100) r = 15 n=190 #x-position of the center m=130 #radius on the y-axis u=140 #x-position of the center v=120 #y-position of the center a=15 #radius on the x-axis b=6 #radius on the y-axis p=n+r*np.cos(t) q=m+r*np.sin(t) r=u+a*np.cos(t) s=v+b*np.sin(t) x = [50, 100, 100, 50] y = [112.5, 112.5, 67.5, 67.5] px=[125,163,170,193,173,150] py=[56,52,90,52,15,15] fig, ax = plt.subplots() ax.grid(color=(0,0,0), linestyle='-', linewidth=1) test=[] xs=[] ys=[] uboxx=[] uboxy=[] for i in range(4): uboxx.append(x[i]+radius*np.cos(t)) uboxy.append(y[i]+radius*np.sin(t) ) upolx=[] upoly=[] for i in range(6): upolx.append(px[i]+radius*np.cos(t)) upoly.append(py[i]+radius*np.sin(t) ) ucirx=[] uciry=[] for i in range(len(r)): ucirx.append(p[i]+radius*np.cos(t)) uciry.append(q[i]+radius*np.sin(t)) uelpx=[] uelpy=[] for i in range(len(r)): uelpx.append(r[i]+radius*np.cos(t)) uelpy.append(s[i]+radius*np.sin(t)) listPnts=animate([[uboxx, uboxy,'b'],[x, y,'r'],[upolx, upoly,'b'], [px, py,'r'],[ucirx, uciry,'b'],[p,q,'r'],[uelpx, uelpy,'b'],[r,s,'r']]) r = 15/resolution n=190/resolution #x-position of the center m=130/resolution #radius on the y-axis u=140/resolution #x-position of the center v=120/resolution #y-position of the center a=15/resolution #radius on the x-axis b=6/resolution #radius on the y-axis #plt.gca().set_aspect('equal') p=n+r*np.cos(t) q=m+r*np.sin(t) r=u+a*np.cos(t) s=v+b*np.sin(t) x = [50/resolution, 100/resolution, 100/resolution, 50/resolution] y = [112.5/resolution, 112.5/resolution, 67.5/resolution, 67.5/resolution] px=[125/resolution,163/resolution,170/resolution,193/resolution,173/resolution,150/resolution] py=[56/resolution,52/resolution,90/resolution,52/resolution,15/resolution,15/resolution] uboxx=[] uboxy=[] for i in range(4): uboxx.append(x[i]+radius*np.cos(t)) uboxy.append(y[i]+radius*np.sin(t) ) upolx=[] upoly=[] in_x=[] in_y=[] for i in range(6): temp_x=px[i]+radius*np.cos(t) temp_y=py[i]+radius*np.sin(t) for j in temp_x: in_x.append(j) for k in temp_y: in_y.append(j) upolx.append(temp_x) upoly.append(temp_y) ucirx=[] uciry=[] for i in range(len(r)): ucirx.append(p[i]+radius*np.cos(t)) uciry.append(q[i]+radius*np.sin(t)) uelpx=[] uelpy=[] for i in range(len(r)): uelpx.append(r[i]+radius*np.cos(t)) uelpy.append(s[i]+radius*np.sin(t)) ax.fill(uboxx, uboxy,'b') ax.fill(x, y,'r') testing=ax.fill(upolx, upoly,'b') ax.fill(px, py,'r') ax.fill(ucirx, uciry,'b') ax.fill(p,q,'r') ax.fill(uelpx, uelpy,'b') ax.fill(r,s,'r') xs=[] ys=[] k=0 pol=[] for i in testing: array=i.get_xy() polygon_vertices=[] for j in array: 
polygon_vertices.append((j[0],j[1])) pol.append(polygon_vertices) maxPx=0 minPx=250 maxPy=0 minPy=150 for i in pol: coords = i poly = Polygon(i) for j in i: if(minPx>j[0]): minPx=j[0] if(maxPx<j[0]): maxPx=j[0] if(minPy>j[1]): minPy=j[1] if(maxPy<j[1]): maxPy=j[1] print(minPx,minPy,maxPx,maxPy) obstacles=[[uboxx, uboxy],[upolx, upoly],[ucirx, uciry],[uelpx, uelpy]] weightx=[0,1,1,1,0,-1,-1,-1] weighty=[1,1,0,-1,-1,-1,0,1] cost=[1,np.sqrt(2),1,np.sqrt(2),1,np.sqrt(2),1,np.sqrt(2)] graph=[] tempx=init[0] tempy=init[1] pathx=[] pathy=[] paths_to_goal=[] plt.tick_params(axis='both', which='major', labelsize=9) print("Processing.....") if(init and final): nodes,node=dijkstra(graph,str(init[0])+' '+str(init[1]), str(final[0])+' '+str(final[1]),paths_to_goal,tempx,tempy,weightx,weighty,cost,final,pol) if(node==0): test=tk.Tk() test.geometry('400x300') label = Label(test, text= nodes) label.pack() test.mainloop() else: listPnts=[[uboxx, uboxy,'b'],[x, y,'r'],[upolx, upoly,'b'], [px, py,'r'],[ucirx, uciry,'b'],[p,q,'r'],[uelpx, uelpy,'b'],[r,s,'r']] test=tk.Tk() fig = plt.Figure(figsize=(5,4), dpi=100) ax = fig.add_subplot(111) line, = ax.plot([], [], 'y.',lw=0.3, alpha=0.2) ax.grid() scatter = FigureCanvasTkAgg(fig, test) scatter.get_tk_widget().pack(side=tk.LEFT, fill=tk.BOTH) for i in (listPnts): ax.fill(i[0],i[1], color = i[2]) ax.legend() ax.grid(color=(0,0,0), linestyle='-', linewidth=1) ax.set_title(title); ax.set_xlabel('X axis') ax.set_ylabel('Y axis') ani = animation.FuncAnimation(fig, animated, nodes, fargs=(nodes, node,test), interval=10,repeat=False, blit=False) test.mainloop() else: test=tk.Tk() test.geometry('400x300') label = Label(test, text= "Please check validity if Initial/Goal Coordinates, Resolution, Radius or Clearance.") label.pack() test.mainloop()
x = int(input("Enter base: "))
y = int(input("Enter exponent: "))

# Multiply x by itself y times, i.e. compute x ** y.
z = 1
for m in range(y):
    z = z * x
print(z)
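# Quick illustrative check: for a non-negative integer exponent the loop above
# agrees with Python's built-in exponent operator and pow().
base, exp = 3, 4
result = 1
for _ in range(exp):
    result *= base
assert result == base ** exp == pow(base, exp)  # 81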
import datetime import flask_sqlalchemy db = flask_sqlalchemy.SQLAlchemy() class Player(db.Model): __tablename__ = 'players' id = db.Column(db.Integer, primary_key=True) first_name = db.Column(db.String(128)) last_name = db.Column(db.String(128)) nationality_id = db.Column(db.Integer, db.ForeignKey('countries.id')) current_club_id = db.Column(db.Integer, db.ForeignKey('clubs.id')) preferred_position = db.Column(db.String(64)) date_of_birth = db.Column(db.Date) last_modified = db.Column(db.DateTime, default=datetime.datetime.utcnow) class Country(db.Model): __tablename__ = 'countries' id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(128)) players = db.relationship('Player', backref='nationality') class Club(db.Model): __tablename__ = 'clubs' id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(128)) players = db.relationship('Player', backref='current_club')
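# A minimal, hypothetical sketch of using these models: bind the SQLAlchemy
# object to a Flask app and insert one row per table. The sqlite URI and the
# example names below are illustrative, not part of the original module.
from flask import Flask

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///players.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)

with app.app_context():
    db.create_all()
    brazil = Country(name='Brazil')
    club = Club(name='Example FC')
    db.session.add_all([brazil, club])
    db.session.flush()                 # assign ids before referencing them
    player = Player(first_name='Ana', last_name='Silva',
                    nationality_id=brazil.id, current_club_id=club.id,
                    preferred_position='GK')
    db.session.add(player)
    db.session.commit()
    print(Player.query.first().nationality.name)   # -> 'Brazil' via the backref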
# 30. itertools.permutations()
# > split() with no argument splits on runs of whitespace and discards all of it.
# > The difference between split() and split(" ") is that the latter keeps an
#   empty string for each extra separator in the resulting list.
#   e.g. "HI  2space".split() gives ['HI', '2space'] vs split(" ") giving ['HI', '', '2space'].
# > "HI".split() returns ['HI']; split('') raises an error (empty separator).

# 31. Polar Coordinates
# > complex() builds a complex number.
# > cmath.phase() returns its phase angle.
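# A small sketch illustrating the notes above.
import cmath
from itertools import permutations

print("HI  2space".split())      # ['HI', '2space']    - whitespace collapsed
print("HI  2space".split(" "))   # ['HI', '', '2space'] - empty string kept
print("HI".split())              # ['HI']
# "HI".split('') raises ValueError: empty separator

print(list(permutations('abc', 2)))  # [('a', 'b'), ('a', 'c'), ('b', 'a'), ...]

z = complex(1, 1)                # 1+1j
print(abs(z), cmath.phase(z))    # modulus and phase angle (pi/4)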
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # Client tests for SQL statement authorization # These tests verify the functionality of SHOW GRANT ROLE/USER. We # create several users and groups to verify clear separation. import grp import pytest from getpass import getuser from os import getenv from impala.dbapi import connect as impala_connect from tests.common.test_dimensions import create_uncompressed_text_dimension from tests.common.custom_cluster_test_suite import CustomClusterTestSuite SENTRY_CONFIG_FILE = getenv('IMPALA_HOME') + \ '/fe/src/test/resources/sentry-site_oo.xml' class TestShowGrant(CustomClusterTestSuite): @classmethod def add_test_dimensions(cls): super(TestShowGrant, cls).add_test_dimensions() cls.ImpalaTestMatrix.add_dimension( create_uncompressed_text_dimension(cls.get_workload())) @classmethod def get_workload(cls): return 'functional-query' def setup_method(self, method): super(TestShowGrant, self).setup_method(method) self.__test_cleanup() def teardown_method(self, method): self.__test_cleanup() self.client.execute('drop role sgu_test_admin') super(TestShowGrant, self).teardown_method(method) def __test_cleanup(self): # Clean up any old roles created by this test for role_name in self.client.execute('show roles').data: if 'sgu_test' in role_name: self.client.execute('drop role %s' % role_name) # Cleanup any other roles that were granted to this user. # TODO: Update Sentry Service config and authorization tests to use LocalGroupMapping # for resolving users -> groups. This way we can specify custom test users that don't # actually exist in the system. group_name = grp.getgrnam(getuser()).gr_name for role_name in self.client.execute('show role grant group `%s`' % group_name).data: self.client.execute('drop role %s' % role_name) # Create a temporary admin user so we can actually view/clean up the test db. 
self.client.execute('create role sgu_test_admin') self.client.execute('grant all on server to sgu_test_admin') self.client.execute('grant role sgu_test_admin to group `%s`' % group_name) @classmethod def restart_first_impalad(cls): impalad = cls.cluster.impalads[0] impalad.restart() cls.client = impalad.service.create_beeswax_client() @pytest.mark.execute_serially @CustomClusterTestSuite.with_args( impalad_args='--server_name=server1 --authorization_policy_provider_class=' 'org.apache.impala.service.CustomClusterResourceAuthorizationProvider ' '--sentry_config={0}'.format(SENTRY_CONFIG_FILE), catalogd_args='--sentry_config={0} --authorization_policy_provider_class=' 'org.apache.impala.service.CustomClusterResourceAuthorizationProvider' .format(SENTRY_CONFIG_FILE), sentry_config=SENTRY_CONFIG_FILE) def test_show_grant_user(self, vector, unique_database): group_name = grp.getgrnam(getuser()).gr_name self.client.execute('create role sgu_test_primary') self.client.execute('grant all on server to sgu_test_primary') self.client.execute('grant role sgu_test_primary to group `%s`' % group_name) self.run_test_case('QueryTest/show_grant_user', vector, use_db=unique_database) @pytest.mark.execute_serially @CustomClusterTestSuite.with_args( impalad_args='--server_name=server1 --authorization_policy_provider_class=' 'org.apache.impala.service.CustomClusterResourceAuthorizationProvider ' '--sentry_config={0}'.format(SENTRY_CONFIG_FILE), catalogd_args='--sentry_config={0} --authorization_policy_provider_class=' 'org.apache.impala.service.CustomClusterResourceAuthorizationProvider' .format(SENTRY_CONFIG_FILE), sentry_config=SENTRY_CONFIG_FILE) def test_show_grant_in_hs2(self, vector, unique_database): """IMPALA-7701: Test that all types in show grant commands are correct. Incorrect types can result in null/None values.""" role = 'sgu_test_primary' self.client.execute('create role %s' % role) self.client.execute('grant all on database %s to role %s' % (unique_database, role)) default_impalad = pytest.config.option.impalad.split(',')[0] impalad_host = default_impalad.split(':')[0] impalad_hs2_port = pytest.config.option.impalad_hs2_port with impala_connect(host=impalad_host, port=impalad_hs2_port) as conn: cursor = conn.cursor() cursor.execute('show grant user %s on database %s' % (getuser(), unique_database)) rows = cursor.fetchall() assert len(rows) == 1 cols = rows[0] assert len(cols) == 10 assert cols[0] == 'USER' # principal_type assert cols[1] == getuser() # principal_name assert cols[2] == 'database' # scope assert cols[3] == unique_database # database assert cols[4] == '' # table assert cols[5] == '' # column assert cols[6] == '' # uri assert cols[7] == 'owner' # privilege assert cols[8] # grant_option # We don't assert create_time since the value can be None or str depending on the # Sentry refresh. cursor.execute('show grant role %s on database %s' % (role, unique_database)) rows = cursor.fetchall() assert len(rows) == 1 cols = rows[0] assert len(cols) == 8 assert cols[0] == 'database' # scope assert cols[1] == unique_database # database assert cols[2] == '' # table assert cols[3] == '' # column assert cols[4] == '' # uri assert cols[5] == 'all' # privilege assert not cols[6] # grant_option # We don't assert create_time since the value can be None or str depending on the # Sentry refresh.
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sun Jun 30 17:34:28 2019 @author: amie """ import picdata as pic import numpy as np from numba import jit import time import math import matplotlib.pyplot as plt #from pca import PCA_face_example import pickle import cv2 class picprocess(): def __init__(self): self.strain = 1 self.sN_NEURONS = 100 self.sN_NEURONS_1 = 100 self.sETA = 1.0 self.sXI = 1.0 self.strain_round = 100 self.strainpath = 'trainpicpath.txt' self.stestpath = 'testpicpath.txt' value = self.readconfig('config_IG.txt') self.strain = int(value[0]) self.sN_NEURONS = int(value[1]) self.sN_NEURONS_1 = int(value[2]) self.sETA = float(value[3]) self.sXI = float(value[4]) self.strain_round = int(value[5]) self.strainpath =value[6] self.stestpath = value[7] def readconfig(self,configpath): value = [] with open(configpath,'r') as config: for line in config.readlines(): line = line.split('=') value.append(line[1].split('\n')[0]) return value def cosine_dis(self, x, y): num = (x*y).sum(axis=1) denom = np.linalg.norm(x) * np.linalg.norm(y,axis=1) return num/denom def predict(self,picpath): # initialize the parameters picpath = self.stestpath population_a = np.zeros((self.sN_NEURONS,1)) population_s = np.ones((self.sN_NEURONS,1))*0.045 wcross = np.random.uniform(0,1,(self.sN_NEURONS,self.sN_NEURONS_1)) population_Wcross = wcross / wcross.sum() population_Winput = np.random.random((self.sN_NEURONS,9))/10.0 population_Winput_1 = np.random.random((self.sN_NEURONS_1,1))/10.0 pic_data = pic.picprocess() picpath = '' with open(self.stestpath) as file: for line in file.readlines(): if line == '\n': continue picpath = line.strip() image = cv2.imread(picpath) pic_width = image.shape[0] pic_hight = image.shape[1] # load the test picture databoxe,data_xdirection,data_ydirection = pic_data.pix_singlechannel_cellload(self.stestpath,[3,3]) sensory_x = databoxe / 255.0 sensory_y = data_xdirection / 255.0 sensory_y = sensory_y.reshape(-1,1) # load the model with open('Weight data/Train_IG_weight/populations_Wcross599.pkl','rb') as file: population_Wcross = pickle.load(file) with open('Weight data/Train_IG_weight/populations_Winput599.pkl','rb') as file1: population_Winput = pickle.load(file1) with open('Weight data/Train_IG_weight/population_Winput_1599.pkl','rb') as file2: population_Winput_1 = pickle.load(file2) with open('Weight data/Train_IG_weight/populations_s599.pkl','rb') as file3: population_s = pickle.load(file3) # show the HL matrix plt.imshow(population_Wcross) x_drection = [] for i in range(sensory_x.shape[0]): input_sample = sensory_x[i].reshape(1,-1) temp = (np.power((input_sample - population_Winput),2).sum(axis=1)/100).reshape(-1,1) act_cur1 = (1/(np.sqrt(2*np.pi)*population_s))*np.exp(-temp/(2*np.power(population_s,2))) act_cur_sum = act_cur1.sum() if act_cur_sum == 0 : print('act_cur.sum() is less than 1e-323,ignore the update!') act_cur1 = act_cur1 / act_cur_sum population_a = act_cur1 # get the position of winner neuron win_pos = population_a[:,0].argmax() pre_pos = population_Wcross[win_pos,:].argmax() # decode the HL matrix a1 = population_Winput_1[pre_pos]*255 x_drection.append(a1) # rebuild the optical flow picture pic_data.createGpic(x_drection,[pic_width-2,pic_hight-2]) def parametrize_learning_law(self, v0, vf, t0, tf): y = np.zeros((tf-t0,1)) t = [i for i in range(1,tf+1)] B = (vf*tf - v0*t0)/(v0 - vf) A = v0*t0 + B*v0 y = [A/(t[i]+B) for i in range(len(t))] return y def speed_up_som(self): pic_data = pic.picprocess() # get the training data 
databoxe,data_xdirection,data_ydirection = pic_data.pix_singlechannel_cellload(self.strainpath,[3,3]) # Normalize the data sensory_x = databoxe / 255.0 sensory_y = data_xdirection / 255.0 sensory_y = sensory_y.reshape(-1,1) # initialize the parameters N_NEURONS = self.sN_NEURONS # sensor1 N_NEURONS_1 = self.sN_NEURONS_1 # sensor2 population_s = np.ones((N_NEURONS,1))*0.045 # sensor1 tuning curve population_a = np.zeros((N_NEURONS,1)) # sensor1 activation value wcross = np.random.uniform(0,1,(N_NEURONS,N_NEURONS_1)) population_Wcross = wcross / wcross.sum() # sensor1 HL matrix train_round = self.strain_round population_Winput = np.random.random((N_NEURONS,sensory_x.shape[1]))/10.0 # sensor1 weight sample_num = sensory_x.shape[0] sample_demension = sensory_x.shape[1] learning_sigmat = self.parametrize_learning_law(50,1,1,train_round) learning_alphat = self.parametrize_learning_law(0.1,0.001,1,train_round) ETA = 1.0 XI = 1e-3 hwi = np.zeros((N_NEURONS,1)) population_s_1 = np.ones((N_NEURONS_1,1))*0.045 # sensor2 tuning curve population_a_1 = np.zeros((N_NEURONS_1,1)) # sensor1 activation value wcross_1 = np.random.uniform(0,1,(N_NEURONS_1,N_NEURONS)) population_Wcross_1 = wcross_1 / wcross_1.sum() # sensor2 HL matrix print(sensory_y.shape) population_Winput_1 = np.random.random((N_NEURONS_1,sensory_y.shape[1]))/10.0 #初始权重 sample_num_1 = sensory_y.shape[0] sample_demension_1 = sensory_y.shape[1] ETA = 1.0 XI = 1e-3 hwi_1 = np.zeros((N_NEURONS_1,1)) hl_trainround = 100 avg_act = np.zeros((N_NEURONS,1)) avg_act_1 = np.zeros((N_NEURONS_1,1)) # training for t in range(hl_trainround + train_round): if t < train_round: for sample_index in range(sample_num): act_cur1 = np.zeros((N_NEURONS,1)) act_cur2 = np.zeros((N_NEURONS_1,1)) input_sample = sensory_x[sample_index].reshape(1,-1) input_sample_2 = sensory_y[sample_index].reshape(1,-1) temp = (np.power((input_sample - population_Winput),2).sum(axis=1)/sample_demension).reshape(-1,1) temp1 = (np.power((input_sample_2 - population_Winput_1),2).sum(axis=1)/sample_demension_1).reshape(-1,1) # matrix calculate.All activation values are updated together act_cur1 = (1/(np.sqrt(2*np.pi)*population_s))*np.exp(-temp/(2*np.power(population_s,2))) act_cur2 = (1/(np.sqrt(2*np.pi)*population_s_1))*np.exp(-temp1/(2*np.power(population_s_1,2))) act_cur_sum = act_cur1.sum() act_cur_sum1 = act_cur2.sum() if act_cur_sum == 0 or act_cur_sum1 == 0: print('act_cur.sum() is less than 1e-323,ignore the update!') continue act_cur1 = act_cur1 / act_cur_sum act_cur2 = act_cur2 / act_cur_sum1 population_a = (1-ETA)*population_a + ETA * act_cur1 population_a_1 = (1-ETA)*population_a_1 + ETA * act_cur2 win_pos = population_a[:,0].argmax() win_pos1 = population_a_1[:,0].argmax() pos_list = np.arange(0,N_NEURONS,1) pos_list_1 = np.arange(0,N_NEURONS_1,1) hwi = (np.exp(-np.power(pos_list - win_pos, 2) / (2 * np.power(learning_sigmat[t],2)))).reshape(N_NEURONS,1) hwi_1 = (np.exp(-np.power(pos_list_1 - win_pos1, 2) / (2 * np.power(learning_sigmat[t],2)))).reshape(N_NEURONS_1,1) # matrix calculate.All population_Winput values are updated together population_Winput = population_Winput+ \ learning_alphat[t] * hwi * (input_sample - population_Winput) population_Winput_1 = population_Winput_1+ \ learning_alphat[t] * hwi_1 * (input_sample_2 - population_Winput_1) # matrix calculate.All population_s values are updated together temp_s = (np.power((input_sample - population_Winput),2).sum(axis=1)/sample_demension).reshape(-1,1) population_s = population_s + \ learning_alphat[t] * 
(1/(np.sqrt(2*np.pi)*learning_sigmat[t])) * \ hwi * (temp_s - np.power(population_s,2)) temp_s_1 = (np.power((input_sample_2 - population_Winput_1),2).sum(axis=1)/sample_demension_1).reshape(-1,1) population_s_1 = population_s_1 + \ learning_alphat[t] * (1/(np.sqrt(2*np.pi)*learning_sigmat[t])) * \ hwi_1 * (temp_s_1 - np.power(population_s_1,2)) print('training:',t/(train_round+hl_trainround)) # HL matrix training for sample_index in range(sample_num): act_cur1 = np.zeros((N_NEURONS,1)) act_cur2 = np.zeros((N_NEURONS_1,1)) input_sample = sensory_x[sample_index].reshape(1,-1) #(1,1024) input_sample_2 = sensory_y[sample_index].reshape(1,-1) temp = (np.power((input_sample - population_Winput),2).sum(axis=1)/sample_demension).reshape(-1,1) temp1 = (np.power((input_sample_2 - population_Winput_1),2).sum(axis=1)/sample_demension_1).reshape(-1,1) # matrix calculate. All activation values are updated together act_cur1 = (1/(np.sqrt(2*np.pi)*population_s))*np.exp(-temp/(2*np.power(population_s,2))) act_cur2 = (1/(np.sqrt(2*np.pi)*population_s_1))*np.exp(-temp1/(2*np.power(population_s_1,2))) act_cur_sum = act_cur1.sum() act_cur_sum1 = act_cur2.sum() if act_cur_sum == 0 or act_cur_sum1 == 0: print('act_cur.sum() is less than 1e-323,ignore the update!') continue act_cur1 = act_cur1 / act_cur_sum act_cur2 = act_cur2 / act_cur_sum1 population_a = (1-ETA)*population_a + ETA * act_cur1 population_a_1 = (1-ETA)*population_a_1 + ETA * act_cur2 OMEGA = 0.002 + 0.998/(t+2) avg_act[:,0] = (1-OMEGA)*avg_act[:, 0] + OMEGA*population_a[:,0] avg_act_1[:,0] = (1-OMEGA)*avg_act_1[:, 0] + OMEGA*population_a_1[:,0] population_Wcross = (1-XI)*population_Wcross + XI*(population_a - avg_act[:, 0].reshape(N_NEURONS,1))*(population_a_1 - avg_act_1[:, 0].reshape(N_NEURONS_1,1)).T if t%50 == 49: # save the model with open('populations_Wcross{}.pkl'.format(t),'wb') as output: pickle.dump(population_Wcross,output) with open('populations_Winput{}.pkl'.format(t),'wb') as output1: pickle.dump(population_Winput,output1) with open('population_Winput_1{}.pkl'.format(t),'wb') as output2: pickle.dump(population_Winput_1,output2) with open('populations_s{}.pkl'.format(t),'wb') as output3: pickle.dump(population_s,output3) with open('populations_s_1{}.pkl'.format(t),'wb') as output4: pickle.dump(population_s_1,output4) if __name__ == '__main__': a = picprocess() start = time.time() if a.strain == 1: a.speed_up_som() else: a.predict(a.stestpath) print(time.time() - start)
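# --- Added sketch (not part of the original script): a quick numeric check of
# --- parametrize_learning_law(v0, vf, t0, tf) used above, which returns
# --- y(t) = A/(t+B) with B = (vf*tf - v0*t0)/(v0 - vf) and A = v0*t0 + B*v0,
# --- so the schedule starts at v0 when t = t0 and decays to vf at t = tf
# --- (e.g. the neighbourhood width learning_sigmat from 50 down to 1).
import numpy as np

def parametrize_learning_law_check(v0, vf, t0, tf):
    B = (vf * tf - v0 * t0) / (v0 - vf)
    A = v0 * t0 + B * v0
    t = np.arange(1, tf + 1)
    return A / (t + B)

y = parametrize_learning_law_check(50, 1, 1, 100)
print(round(y[0], 3), round(y[-1], 3))  # ~50.0 at the first step, ~1.0 at the last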
# coding=utf-8
import scrapy
from scrapy.spiders import CrawlSpider
from scrapy.selector import Selector
from scrapy.http import Request
from jianshu.items import JianshuItem
import urllib.request


class Jianshu(CrawlSpider):
    name = 'jianshu'
    start_urls = ['http://www.jianshu.com/trending/monthly']
    page = 1
    url = 'http://www.jianshu.com/trending/monthly'

    def parse(self, response):
        item = JianshuItem()
        selector = Selector(response)
        articles = selector.xpath('//ul[@class="note-list"]/li')

        for article in articles:
            title = article.xpath('div/a/text()').extract()
            url = article.xpath('div/a/@href').extract()
            author = article.xpath('div/div[1]/div/a/text()').extract()
            # Download the thumbnail of every trending article; note that some articles have no image
            try:  # /div/div[1]/a/img
                image = article.xpath("div/div[1]/a/img/@src").extract()[0]
                filename = 'images/%s-%s.jpg' % (author[0], title[0])
                print("文件名:" + filename)
                print("图片地址" + image)
                urllib.request.urlretrieve(image, filename)
            except:
                print('--no---image--')

            # //*[@id="note-9417518"]/div/div[2]/a[1] -> read count
            listtop = article.xpath('div/div[2]/a[1]/text()').extract()
            likeNum = article.xpath('div/div[2]/span[1]/text()').extract()
            # //*[@id="note-9417518"]/div/div[2]/a[2]/i
            # //*[@id="note-9417518"]/div/div[2]/a[2]
            readAndComment = article.xpath('div/div[2]/a[2]/text()')

            item['title'] = title
            item['url'] = 'http://www.jianshu.com/' + url[0]
            item['author'] = author
            item['readNum'] = listtop[1]
            # Some articles have comments disabled
            try:
                item['commentNum'] = readAndComment[1].extract()
            except:
                item['commentNum'] = ''
            item['likeNum'] = likeNum
            yield item

        # /html/body/div[1]/div/div[1]/a
        # next_link = selector.xpath('//a')
        if len(articles) > 0:
            self.page = self.page + 1
            next_link = self.url + "?page=" + str(self.page)
            print("----" + next_link)
            yield Request(next_link, callback=self.parse)
import tensorflow as tf

x_data = [[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]]
y_data = [[0], [0], [0], [1], [1], [1]]

X = tf.placeholder(tf.float32, shape=[None, 2])
Y = tf.placeholder(tf.float32, shape=[None, 1])

W = tf.Variable(tf.random_normal([2, 1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')

hypothesis = tf.sigmoid(tf.matmul(X, W) + b)

cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))
train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)

predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))

# Launch graph
with tf.Session() as sess:
    # Initialize TensorFlow variables
    sess.run(tf.global_variables_initializer())

    for step in range(10001):
        cost_val, _ = sess.run([cost, train], feed_dict={X: x_data, Y: y_data})
        if step % 200 == 0:
            print(step, cost_val)

    h, c, a = sess.run([hypothesis, predicted, accuracy], feed_dict={X: x_data, Y: y_data})
    print("\nHypothesis: ", h, "\nCorrect (Y): ", c, "\nAccuracy: ", a)
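# --- Added sketch (not part of the original script): a minimal NumPy check of the
# --- per-example binary cross-entropy that the `cost` tensor above averages,
# --- using illustrative values y=1 and y=0 with hypothesis h=0.8.
import numpy as np

def binary_cross_entropy(y, h):
    # cost = -[y*log(h) + (1-y)*log(1-h)], the per-example form of `cost` above
    return -(y * np.log(h) + (1 - y) * np.log(1 - h))

print(binary_cross_entropy(1.0, 0.8))  # ~0.223: confident and correct, small penalty
print(binary_cross_entropy(0.0, 0.8))  # ~1.609: confident and wrong, large penalty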
import sys
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.Qt import Qt


class MainWindow(QWidget):
    def __init__(self):
        super().__init__()

    def keyPressEvent(self, event):
        # print(event.key())   # Print the value of the key
        # print(event.text())  # Print the text of the key
        if event.key() == Qt.Key_Space:
            self.test_method()

    def test_method(self):
        print('Space key pressed')


if __name__ == '__main__':
    app = QApplication(sys.argv)
    demo = MainWindow()
    demo.show()
    sys.exit(app.exec_())
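# --- Added sketch (not part of the original demo, and not wired into it): the same
# --- handler can additionally check keyboard modifiers, e.g. reacting only to Ctrl+Space.
class MainWindowWithModifiers(MainWindow):
    def keyPressEvent(self, event):
        # Qt.ControlModifier is set in event.modifiers() while Ctrl is held down
        if event.key() == Qt.Key_Space and event.modifiers() & Qt.ControlModifier:
            self.test_method()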
# Generated by Django 2.2.7 on 2019-12-19 08:34

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('exhibition', '0004_auto_20191211_0145'),
    ]

    operations = [
        migrations.AlterField(
            model_name='exhibition',
            name='notopendate',
            field=models.DateField(blank=True, null=True),
        ),
    ]
from django.urls import path, include
from rest_framework.routers import DefaultRouter

# from watchlist_app.api.views import movie_list, movie_details
from watchlist_app.api.views import (ReviewList, ReviewDetail, ReviewCreate,
                                     WatchListAV, WatchDetailAV,
                                     StreamPlatformAV, StreamPlatformDetailAV,
                                     StreamPlatformVS, UserReview, WatchListGV)

router = DefaultRouter()
router.register('stream', StreamPlatformVS, basename='streamplatform')

urlpatterns = [
    path('list/', WatchListAV.as_view(), name='movie-list'),
    path('<int:pk>/', WatchDetailAV.as_view(), name='movie-detail'),
    path('list2/', WatchListGV.as_view(), name='watch-list'),

    path('', include(router.urls)),
    # path('stream/', StreamPlatformAV.as_view(), name='stream-list'),
    # path('stream/<int:pk>', StreamPlatformDetailAV.as_view(), name='stream-detail'),

    # path('review/', ReviewList.as_view(), name='review-list'),
    # path('review/<int:pk>', ReviewDetail.as_view(), name='review-detail'),

    path('<int:pk>/review-create/', ReviewCreate.as_view(), name='review-create'),
    path('<int:pk>/reviews/', ReviewList.as_view(), name='review-list'),
    path('review/<int:pk>/', ReviewDetail.as_view(), name='review-detail'),

    path('reviews/', UserReview.as_view(), name='user-review-detail'),
]
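# --- Added note (a sketch of standard DefaultRouter behaviour, assuming StreamPlatformVS
# --- is a regular viewset): the single register() call above expands to roughly
# ---   stream/        -> StreamPlatformVS  (name='streamplatform-list')
# ---   stream/<pk>/   -> StreamPlatformVS  (name='streamplatform-detail')
# --- plus an API-root view at '' for browsable-API navigation.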
nums = map(int, raw_input().split(" "))
nums.sort()
n = nums[-1]
sumVals = n*(n+1)/2
sumList = sum(nums)
print sumVals-sumList
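# --- Added sketch (assumption: the snippet above targets Python 2). A Python 3
# --- equivalent of the same idea: the missing value in 1..n is sum(1..n) - sum(input).
nums = sorted(map(int, input().split(" ")))
n = nums[-1]
print(n * (n + 1) // 2 - sum(nums))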
import math


def remove_all_space_in_string(string):
    return string.replace(' ', '')


def calculate_size_matrix(length):
    sqrt_length = math.sqrt(length)
    row = math.floor(sqrt_length)
    column = math.ceil(sqrt_length)
    if row * column < length:
        row += 1
    return int(row), int(column)


def string_without_space_to_array(string):
    return [symbol for symbol in string]


def create_matrix(row, column):
    return [[None] * column for _ in range(row)]


def add_string_to_matrix(string, matrix):
    column = len(matrix[0])
    start = 0
    for row, value in enumerate(matrix):
        matrix[row] = string_without_space_to_array(string[start:column + start])
        start += column
    return matrix


def add_encrypted_string_to_matrix(string, matrix):
    for column in range(len(matrix[0])):
        start = 0
        for row, value in enumerate(matrix):
            if len(string[column]) > row:
                matrix[row][column] = string[column][start]
                start += 1


def return_encrypted_string(matrix):
    cipher = ''
    for column in range(len(matrix[0])):
        for row in matrix:
            if len(row) > column:
                cipher += row[column]
        cipher += ' '
    return cipher.strip()


def return_decrypted_string(matrix):
    source_code = ''
    for row in matrix:
        source_code += ''.join(symbol if symbol is not None else '' for symbol in row)
    return source_code


def encrypted_string(string):
    string_without_space = remove_all_space_in_string(string)
    row, column = calculate_size_matrix(len(string_without_space))
    matrix = create_matrix(row, column)
    add_string_to_matrix(string_without_space, matrix)
    return return_encrypted_string(matrix)


def decrypted_string(string):
    string_without_space = remove_all_space_in_string(string)
    row, column = calculate_size_matrix(len(string_without_space))
    matrix = create_matrix(row, column)
    add_encrypted_string_to_matrix(string.split(), matrix)
    return return_decrypted_string(matrix)


print('отдай мою кроличью лапку - test1')
enc_str = encrypted_string('отдай мою кроличью лапку')
print(enc_str)
decr_str = decrypted_string(enc_str)
print(decr_str)
print()

print('Бобер нас раскрыли - test2')
enc_str = encrypted_string('Бобер нас раскрыли')
print(enc_str)
decr_str = decrypted_string(enc_str)
print(decr_str)
print()

print('Орел вылетел - test3')
enc_str = encrypted_string('Орел вылетел')
print(enc_str)
decr_str = decrypted_string(enc_str)
print(decr_str)
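# --- Added check (not part of the original script): how calculate_size_matrix sizes the
# --- grid for the first test string. 'отдай мою кроличью лапку' has 21 letters once
# --- spaces are removed; sqrt(21) ≈ 4.58 gives floor=4 rows and ceil=5 columns, but
# --- 4*5 = 20 < 21, so one extra row is added and the grid becomes 5x5.
assert calculate_size_matrix(21) == (5, 5)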
import glob from PIL import Image import numpy as np import os import sys from tqdm import tqdm import shutil if __name__ == '__main__': # arg_Model : which model to use # arg_DataRoot : path to the dataRoot # arg_thres : threshold of the image output from the model Thres = 200 dataRoot = 'output{}'.format(Thres) saveRoot = 'total{}'.format(Thres) dataRoot = os.path.join(dataRoot) max_width = 3704 max_height = 10000 img_paths = glob.glob(os.path.join(dataRoot, '*.png')) img_paths = sorted(img_paths) img_lst = [] print(img_paths) print('total number of patch Images : ',len(img_paths)) if os.path.isdir(saveRoot): shutil.rmtree(saveRoot) os.mkdir(saveRoot) else: os.mkdir(saveRoot) roadnames = [] for img_path in img_paths: fname = img_path.split('/')[-1] if len(fname.split('_')) == 7: roadname = '_'.join(fname.split('_')[:4]) roadnames.append(roadname) roadnames=list(set(roadnames)) print('total number of RoadNames : ',len(roadnames)) for roadname in tqdm(roadnames): # print(img_paths) back = Image.new('RGB', (max_width, max_height), color='black') fullName = '' for idx, img_path in enumerate(img_paths): fname=img_path.split('/')[1] fname_lst=fname.split('_') if len(fname_lst)==7: road = '_'.join(fname_lst[:4]) if roadname == road: patch_size = int(fname_lst[4][1:]) x_start = int(fname_lst[5]) y_start = int(fname_lst[6].split('.')[0]) fullName='{}'.format(roadname) img=Image.open(img_path) back.paste(img, (x_start, y_start)) back.save('{}/{}.png'.format(saveRoot,fullName)) print('All Full Road Images are written.')
from django.shortcuts import render
import requests
from django.http import HttpResponse
# Create your views here.
from django.views.decorators.csrf import csrf_exempt
from conversion.models import save_data


def conversion_page(request):
    return render(request, 'conversion_page.html')


@csrf_exempt
def conversion_api(request):
    # print(request.POST)
    amount_entered = request.POST.get('amount')
    try:
        amount = float(amount_entered)
    except:
        return HttpResponse('Invalid Amount')

    convert_from = request.POST.get('convert_from')
    convert_to = request.POST.get('convert_to')

    data = save_data(convert_from=convert_from, convert_to=convert_to, amount=amount_entered)
    data.save()

    if convert_from == convert_to:
        return HttpResponse(amount)
    else:
        params = {'base': convert_from, 'symbols': convert_to}
        api = requests.get('http://api.fixer.io/latest', params=params)
        json_format = api.json()

        rate = float(json_format['rates'][convert_to])
        print(rate)
        converted_amount = amount * rate
        context = {'rate': rate}
        return HttpResponse(converted_amount)
from PyCircuit import Vector, Or, Memory, Register, VFalse, VTrue, \ FeedbackVector, If, Enum, ConstantVector, And, Case class abstractDo: pass class doIf(abstractDo): def __init__(self, cond): self.cond = cond def do(self, val): assert len(self.cond) == 1 for r in val: if isinstance(r, abstractDo): r.do(val[r]) class doElif(abstractDo): def __init__(self, cond): self.cond = cond def do(self, val): assert len(self.cond) == 1 for r in val: r.do(val[r]) class doElse(abstractDo): pass def do(self, val): for r in val: if isinstance(r, abstractDo): r.do(val) class doCase(abstractDo): def __init__(self, cond): self.cond = cond def do(self, val): print("case", str(val)) cases = val for c in cases: print(" ", str(c), ":") for r in cases[c]: if isinstance(r, abstractDo): r.do(cases[c][r]) class do: def __init__(self, clk, dict): for r in dict: if isinstance(r, abstractDo): r.do(dict[r]) else: print(r) def doReg(r, val): if isinstance(r, doIf): r.do(val) elif isinstance(r, doCase): r.do(val) else: print(r, val) def simple_ram(clk, addr, data, isWrite, init=None): res = Memory(clk, addr, data, isWrite, init) return Register(clk, bits=len(res)).connect(res) """ // Cache Memory (4way 4word) // // i_ means input port // // o_ means output port // // _p_ means data exchange with processor // // _m_ means data exchange with memory // // Replacement policy is LRU (8bit) // `default_nettype none module cache(clk, rst, i_p_addr, i_p_byte_en, i_p_writedata, i_p_read, i_p_write, o_p_readdata, o_p_readdata_valid, o_p_waitrequest, o_m_addr, o_m_byte_en, o_m_writedata, o_m_read, o_m_write, i_m_readdata, i_m_readdata_valid, i_m_waitrequest, cnt_r, cnt_w, cnt_hit_r, cnt_hit_w, cnt_wb_r, cnt_wb_w); parameter cache_entry = 14; input wire clk, rst; input wire [24:0] i_p_addr; input wire [3:0] i_p_byte_en; input wire [31:0] i_p_writedata; input wire i_p_read, i_p_write; output reg [31:0] o_p_readdata; output reg o_p_readdata_valid; output wire o_p_waitrequest; output reg [25:0] o_m_addr; output wire [3:0] o_m_byte_en; output reg [127:0] o_m_writedata; output reg o_m_read, o_m_write; input wire [127:0] i_m_readdata; input wire i_m_readdata_valid; input wire i_m_waitrequest; output reg [31:0] cnt_r; output reg [31:0] cnt_w; output reg [31:0] cnt_hit_r; output reg [31:0] cnt_hit_w; output reg [31:0] cnt_wb_r; output reg [31:0] cnt_wb_w; wire [3:0] hit; wire [3:0] modify; wire [3:0] miss; wire [3:0] valid; wire [127:0] readdata0, readdata1, readdata2, readdata3; wire [127:0] writedata; wire write0, write1, write2, write3; wire [3:0] word_en; wire [3:0] byte_en; wire [22:0] addr; wire [22:0] wb_addr0, wb_addr1, wb_addr2, wb_addr3; wire [7:0] r_cm_data; wire [1:0] hit_num; reg [2:0] state; reg [127:0] writedata_buf; reg [24:0] write_addr_buf; reg [3:0] byte_en_buf; reg write_buf, read_buf; reg [3:0] write_set; reg [3:0] fetch_write; reg [7:0] w_cm_data; reg w_cm; localparam IDLE = 0; localparam COMP = 1; localparam HIT = 2; localparam FETCH1 = 3; localparam FETCH2 = 4; localparam FETCH3 = 5; localparam WB1 = 6; localparam WB2 = 7; `ifdef SIM integer i; initial begin for(i = 0; i <=(2**cache_entry-1); i=i+1) begin ram_hot.mem[i] = 0; end end `endif simple_ram #(.width(8), .widthad(cache_entry)) ram_hot(clk, addr[cache_entry-1:0], w_cm, w_cm_data, addr[cache_entry-1:0], r_cm_data); set #(.cache_entry(cache_entry)) set0(.clk(clk), .rst(rst), .entry(addr[cache_entry-1:0]), .o_tag(addr[22:cache_entry]), .writedata(writedata), .byte_en(byte_en), .write(write0), .word_en(word_en), // 4word r/w change .readdata(readdata0), 
.wb_addr(wb_addr0), .hit(hit[0]), .modify(modify[0]), .miss(miss[0]), .valid(valid[0]), .read_miss(read_buf)); set #(.cache_entry(cache_entry)) set1(.clk(clk), .rst(rst), .entry(addr[cache_entry-1:0]), .o_tag(addr[22:cache_entry]), .writedata(writedata), .byte_en(byte_en), .write(write1), .word_en(word_en), // 4word r/w change .readdata(readdata1), .wb_addr(wb_addr1), .hit(hit[1]), .modify(modify[1]), .miss(miss[1]), .valid(valid[1]), .read_miss(read_buf)); set #(.cache_entry(cache_entry)) set2(.clk(clk), .rst(rst), .entry(addr[cache_entry-1:0]), .o_tag(addr[22:cache_entry]), .writedata(writedata), .byte_en(byte_en), .write(write2), .word_en(word_en), // 4word r/w change .readdata(readdata2), .wb_addr(wb_addr2), .hit(hit[2]), .modify(modify[2]), .miss(miss[2]), .valid(valid[2]), .read_miss(read_buf)); set #(.cache_entry(cache_entry)) set3(.clk(clk), .rst(rst), .entry(addr[cache_entry-1:0]), .o_tag(addr[22:cache_entry]), .writedata(writedata), .byte_en(byte_en), .write(write3), .word_en(word_en), // 4word r/w change .readdata(readdata3), .wb_addr(wb_addr3), .hit(hit[3]), .modify(modify[3]), .miss(miss[3]), .valid(valid[3]), .read_miss(read_buf)); assign writedata = (|fetch_write) ? i_m_readdata : writedata_buf; //128bit assign write0 = (fetch_write[0]) ? i_m_readdata_valid : write_set[0]; assign write1 = (fetch_write[1]) ? i_m_readdata_valid : write_set[1]; assign write2 = (fetch_write[2]) ? i_m_readdata_valid : write_set[2]; assign write3 = (fetch_write[3]) ? i_m_readdata_valid : write_set[3]; assign addr = (o_p_waitrequest) ? write_addr_buf[24:2] : i_p_addr[24:2]; // set module input addr is 23bit assign byte_en = (|fetch_write) ? 4'b1111 : byte_en_buf; assign o_p_waitrequest = (state != IDLE); assign o_m_byte_en = 4'b1111; assign hit_num = (hit[0]) ? 0 : (hit[1]) ? 1 : (hit[2]) ? 2 : 3; assign word_en = (|fetch_write) ? 4'b1111 : (write_addr_buf[1:0] == 2'b00) ? 4'b0001 : (write_addr_buf[1:0] == 2'b01) ? 4'b0010 : (write_addr_buf[1:0] == 2'b10) ? 4'b0100 : 4'b1000; always @(posedge clk) begin if(rst) begin o_p_readdata_valid <= 0; {o_m_read, o_m_write} <= 0; o_m_addr <= 0; write_addr_buf <= 0; byte_en_buf <= 0; writedata_buf <= 0; {write_buf, read_buf} <= 0; write_set <= 0; fetch_write <= 0; {cnt_r, cnt_w} <= 0; {cnt_hit_r, cnt_hit_w} <= 0; {cnt_wb_r, cnt_wb_w} <= 0; state <= IDLE; end else begin case (state) IDLE: begin write_set <= 0; o_p_readdata_valid <= 0; writedata_buf <= {i_p_writedata, i_p_writedata, i_p_writedata, i_p_writedata}; write_addr_buf <= i_p_addr; byte_en_buf <= i_p_byte_en; write_buf <= i_p_write; read_buf <= i_p_read; if(i_p_read) begin state <= COMP; cnt_r <= cnt_r + 1; end else if(i_p_write) begin state <= COMP; cnt_w <= cnt_w + 1; end end COMP: begin if((|hit) && write_buf) begin state <= HIT; write_set <= hit; cnt_hit_w <= cnt_hit_w + 1; w_cm_data <= (r_cm_data[1:0] == hit_num) ? {r_cm_data[1:0], r_cm_data[7:2]} : (r_cm_data[3:2] == hit_num) ? {r_cm_data[3:2], r_cm_data[7:4], r_cm_data[1:0]} : (r_cm_data[5:4] == hit_num) ? {r_cm_data[5:4], r_cm_data[7:6], r_cm_data[3:0]} : r_cm_data; w_cm <= 1; end else if((|hit) && read_buf) begin case(write_addr_buf[1:0]) 2'b00: o_p_readdata <= (hit[0]) ? readdata0[31:0] : (hit[1]) ? readdata1[31:0] : (hit[2]) ? readdata2[31:0] : readdata3[31:0]; 2'b01: o_p_readdata <= (hit[0]) ? readdata0[63:32] : (hit[1]) ? readdata1[63:32] : (hit[2]) ? readdata2[63:32] : readdata3[63:32]; 2'b10: o_p_readdata <= (hit[0]) ? readdata0[95:64] : (hit[1]) ? readdata1[95:64] : (hit[2]) ? 
readdata2[95:64] : readdata3[95:64]; 2'b11: o_p_readdata <= (hit[0]) ? readdata0[127:96] : (hit[1]) ? readdata1[127:96] : (hit[2]) ? readdata2[127:96] : readdata3[127:96]; endcase o_p_readdata_valid <= 1; w_cm_data <= (r_cm_data[1:0] == hit_num) ? {r_cm_data[1:0], r_cm_data[7:2]} : (r_cm_data[3:2] == hit_num) ? {r_cm_data[3:2], r_cm_data[7:4], r_cm_data[1:0]} : (r_cm_data[5:4] == hit_num) ? {r_cm_data[5:4], r_cm_data[7:6], r_cm_data[3:0]} : r_cm_data; w_cm <= 1; cnt_hit_r <= cnt_hit_r + 1; state <= IDLE; end else if(!(&valid) || miss[r_cm_data[1:0]]) begin state <= FETCH1; if(!valid[0]) begin fetch_write <= 4'b0001; w_cm_data <= 8'b11100100; w_cm <= 1; end else if(!valid[1]) begin fetch_write <= 4'b0010; w_cm_data <= (r_cm_data[1:0] == 2'b01) ? {r_cm_data[1:0], r_cm_data[7:2]} : (r_cm_data[3:2] == 2'b01) ? {r_cm_data[3:2], r_cm_data[7:4], r_cm_data[1:0]} : (r_cm_data[5:4] == 2'b01) ? {r_cm_data[5:4], r_cm_data[7:6], r_cm_data[3:0]} : r_cm_data; w_cm <= 1; end else if(!valid[2]) begin fetch_write <= 4'b0100; w_cm_data <= (r_cm_data[1:0] == 2'b10) ? {r_cm_data[1:0], r_cm_data[7:2]} : (r_cm_data[3:2] == 2'b10) ? {r_cm_data[3:2], r_cm_data[7:4], r_cm_data[1:0]} : (r_cm_data[5:4] == 2'b10) ? {r_cm_data[5:4], r_cm_data[7:6], r_cm_data[3:0]} : r_cm_data; w_cm <= 1; end else if(!valid[3]) begin fetch_write <= 4'b1000; w_cm_data <= (r_cm_data[1:0] == 2'b11) ? {r_cm_data[1:0], r_cm_data[7:2]} : (r_cm_data[3:2] == 2'b11) ? {r_cm_data[3:2], r_cm_data[7:4], r_cm_data[1:0]} : (r_cm_data[5:4] == 2'b11) ? {r_cm_data[5:4], r_cm_data[7:6], r_cm_data[3:0]} : r_cm_data; w_cm <= 1; end else if(miss[r_cm_data[1:0]]) begin if(r_cm_data[1:0] == 2'b00) fetch_write <= 4'b0001; else if(r_cm_data[1:0] == 2'b01) fetch_write <= 4'b0010; else if(r_cm_data[1:0] == 2'b10) fetch_write <= 4'b0100; else if(r_cm_data[1:0] == 2'b11) fetch_write <= 4'b1000; w_cm_data <= {r_cm_data[1:0], r_cm_data[7:2]}; w_cm <= 1; end o_m_addr <= {write_addr_buf[24:2], 3'b000}; o_m_read <= 1; end else begin state <= WB1; if(r_cm_data[1:0] == 2'b00) fetch_write <= 4'b0001; else if(r_cm_data[1:0] == 2'b01) fetch_write <= 4'b0010; else if(r_cm_data[1:0] == 2'b10) fetch_write <= 4'b0100; else if(r_cm_data[1:0] == 2'b11) fetch_write <= 4'b1000; w_cm_data <= {r_cm_data[1:0], r_cm_data[7:2]}; w_cm <= 1; if(read_buf) cnt_wb_r <= cnt_wb_r + 1; else if(write_buf) cnt_wb_w <= cnt_wb_w + 1; end end HIT: begin w_cm <= 0; write_set <= 0; state <= IDLE; end //1/13 FETCH1: begin w_cm <= 0; if(!i_m_waitrequest) begin o_m_read <= 0; state <= FETCH2; end end FETCH2: begin if(i_m_readdata_valid) begin fetch_write <= 0; //add 3/9 if(write_buf) begin state <= FETCH3; write_set <= fetch_write; end else if(read_buf) begin state <= IDLE; o_p_readdata_valid <= 1; case(write_addr_buf[1:0]) 2'b00: o_p_readdata <= i_m_readdata[ 31: 0]; 2'b01: o_p_readdata <= i_m_readdata[ 63:32]; 2'b10: o_p_readdata <= i_m_readdata[ 95:64]; 2'b11: o_p_readdata <= i_m_readdata[127:96]; endcase end end end FETCH3: begin state <= IDLE; write_set <= 0; end WB1: begin w_cm <= 0; o_m_addr <= (fetch_write[0]) ? {wb_addr0, 3'b000} : (fetch_write[1]) ? {wb_addr1, 3'b000} : (fetch_write[2]) ? {wb_addr2, 3'b000} : {wb_addr3, 3'b000}; o_m_writedata <= (fetch_write[0]) ? readdata0 : (fetch_write[1]) ? readdata1 : (fetch_write[2]) ? 
readdata2 : readdata3; o_m_write <= 1; state <= WB2; end WB2: begin if(!i_m_waitrequest) begin o_m_write <= 0; o_m_addr <= {write_addr_buf[24:2], 3'b000}; o_m_read <= 1; state <= FETCH1; end end endcase // case (state) end end endmodule // cache """ # localparam IDLE = 0; # localparam COMP = 1; # localparam HIT = 2; # localparam FETCH1 = 3; # localparam FETCH2 = 4; # localparam FETCH3 = 5; # localparam WB1 = 6; # localparam WB2 = 7; STATES = Enum('IDLE', 'COMP', 'HIT', 'FETCH1', 'FETCH2', 'FETCH3', 'WB1', 'WB2') def Cache(clk, rst, i_p_addr, i_p_byte_en, i_p_writedata, i_p_read, i_p_write, i_m_readdata, i_m_readdata_valid, i_m_waitrequest ): # parameter cache_entry = 14; # input wire clk, rst; # input wire [24:0] i_p_addr; # input wire [3:0] i_p_byte_en; # input wire [31:0] i_p_writedata; # input wire i_p_read, i_p_write; # output reg [31:0] o_p_readdata; # output reg o_p_readdata_valid; # output wire o_p_waitrequest; # # output reg [25:0] o_m_addr; # output wire [3:0] o_m_byte_en; # output reg [127:0] o_m_writedata; # output reg o_m_read, o_m_write; # input wire [127:0] i_m_readdata; # input wire i_m_readdata_valid; # input wire i_m_waitrequest; # # output reg [31:0] cnt_r; # output reg [31:0] cnt_w; # output reg [31:0] cnt_hit_r; # output reg [31:0] cnt_hit_w; # output reg [31:0] cnt_wb_r; # output reg [31:0] cnt_wb_w; # counters cnt_r = Register(clk, 0, 32, name="cnt_r") cnt_w = Register(clk, 0, 32, name="cnt_w") cnt_hit_r = Register(clk, 0, 32, name="cnt_hit_r") cnt_hit_w = Register(clk, 0, 32, name="cnt_hit_w") cnt_wb_r = Register(clk, 0, 32, name="cnt_wb_r") cnt_wb_w = Register(clk, 0, 32, name="cnt_wb_w") cache_entry = 14 hit = FeedbackVector(0, 4, name="hit") modify = FeedbackVector(0, 4) miss = FeedbackVector(0, 4) valid = FeedbackVector(0, 4) readdata0 = FeedbackVector(0, 128) readdata1 = FeedbackVector(0, 128) readdata2 = FeedbackVector(0, 128) readdata3 = FeedbackVector(0, 128) word_en = FeedbackVector(0, 4) wb_addr0 = FeedbackVector(0, 22) wb_addr1 = FeedbackVector(0, 22) wb_addr2 = FeedbackVector(0, 22) wb_addr3 = FeedbackVector(0, 22) r_cm_data = FeedbackVector(0, 8) hit_num = FeedbackVector(0, 2) o_p_readdata = Register(clk, 0, 32) o_p_readdata_valid = Register(clk, name="o_p_readdata_valid") o_m_addr = Register(clk, 0, 26, name="o_m_addr") o_m_writedata = Register(clk, 0, 128, name="o_m_writedata") o_m_read = Register(clk, name="o_m_read") o_m_write = Register(clk, name="o_m_write") state = Register(clk, STATES['IDLE'], name="state") writedata_buf = Register(clk, 0, 128, name="writedata_buf") write_addr_buf = Register(clk, 0, 25, name="write_addr_buf") byte_en_buf = Register(clk, 0, 4, name="byte_en_buf") write_buf = Register(clk, 0, 1, name="write_buf") read_buf = Register(clk, 0, 1, name="read_buf") write_set = Register(clk, 0, 4, name="write_set") fetch_write = Register(clk, 0, 4, name="fetch_write") w_cm_data = Register(clk, 0, 8, name="w_cm_data") w_cm = Register(clk, 0, 1, name="w_cm") writedata = If(fetch_write.reduce(Or), i_m_readdata, writedata_buf) # 128bit write0 = If(fetch_write[0], i_m_readdata_valid, write_set[0]) write1 = If(fetch_write[1], i_m_readdata_valid, write_set[1]) write2 = If(fetch_write[2], i_m_readdata_valid, write_set[2]) write3 = If(fetch_write[3], i_m_readdata_valid, write_set[3]) o_p_waitrequest = (state != STATES['IDLE']) addr = If(o_p_waitrequest, write_addr_buf[2:25], i_p_addr[2:25]) # set module input addr is 23bit byte_en = If(fetch_write.reduce(Or), ConstantVector(-1, 4), byte_en_buf) o_m_byte_en = ConstantVector(-1, 4) hit_num = 
If(hit[0], ConstantVector(0, 2), If(hit[1], ConstantVector(1, 2), If(hit[2], ConstantVector(2, 2), ConstantVector(3, 2)))) word_en.connect(If(fetch_write.reduce(Or), ConstantVector(-1, 4), If(write_addr_buf[0:2] == ConstantVector(0, 2), ConstantVector(1, 4), If(write_addr_buf[0:2] == ConstantVector(1, 2), ConstantVector(2, 4), If(write_addr_buf[0:2] == ConstantVector(2, 2), ConstantVector(4, 4), ConstantVector(8, 4)))))) # simple_ram #(.width(8), .widthad(cache_entry)) ram_hot(clk, addr[cache_entry-1:0], w_cm, w_cm_data, addr[cache_entry-1:0], r_cm_data); # ram_hot entry = addr[0:cache_entry] tag = addr[cache_entry:23] r_cm_data = simple_ram(clk, entry, w_cm_data, w_cm) # set #(.cache_entry(cache_entry)) # set0(.clk(clk), # .rst(rst), # .entry(addr[cache_entry-1:0]), # .o_tag(addr[22:cache_entry]), # .writedata(writedata), # .byte_en(byte_en), # .write(write0), # .word_en(word_en), // 4word r/w change # .readdata(readdata0), # .wb_addr(wb_addr0), # .hit(hit[0]), # .modify(modify[0]), # .miss(miss[0]), # .valid(valid[0]), # .read_miss(read_buf)); set0 = Set(clk, entry=entry, o_tag=tag, writedata=writedata, byte_en=byte_en, write=write0, word_en=word_en, read_miss=read_buf) # # set #(.cache_entry(cache_entry)) # set1(.clk(clk), # .rst(rst), # .entry(addr[cache_entry-1:0]), # .o_tag(addr[22:cache_entry]), # .writedata(writedata), # .byte_en(byte_en), # .write(write1), # .word_en(word_en), // 4word r/w change # .readdata(readdata1), # .wb_addr(wb_addr1), # .hit(hit[1]), # .modify(modify[1]), # .miss(miss[1]), # .valid(valid[1]), # .read_miss(read_buf)); set1 = Set(clk, entry=entry, o_tag=tag, writedata=writedata, byte_en=byte_en, write=write1, word_en=word_en, read_miss=read_buf) # # set #(.cache_entry(cache_entry)) # set2(.clk(clk), # .rst(rst), # .entry(addr[cache_entry-1:0]), # .o_tag(addr[22:cache_entry]), # .writedata(writedata), # .byte_en(byte_en), # .write(write2), # .word_en(word_en), // 4word r/w change # .readdata(readdata2), # .wb_addr(wb_addr2), # .hit(hit[2]), # .modify(modify[2]), # .miss(miss[2]), # .valid(valid[2]), # .read_miss(read_buf)); set2 = Set(clk, entry=entry, o_tag=tag, writedata=writedata, byte_en=byte_en, write=write2, word_en=word_en, read_miss=read_buf) # # set #(.cache_entry(cache_entry)) # set3(.clk(clk), # .rst(rst), # .entry(addr[cache_entry-1:0]), # .o_tag(addr[22:cache_entry]), # .writedata(writedata), # .byte_en(byte_en), # .write(write3), # .word_en(word_en), // 4word r/w change # .readdata(readdata3), # .wb_addr(wb_addr3), # .hit(hit[3]), # .modify(modify[3]), # .miss(miss[3]), # .valid(valid[3]), # .read_miss(read_buf)); set3 = Set(clk, entry=entry, o_tag=tag, writedata=writedata, byte_en=byte_en, write=write3, word_en=word_en, read_miss=read_buf) readdata0_, wb_addr0_, hit0, modify0, miss0, valid0 = set0 readdata0.connect(readdata0_) wb_addr0.connect(wb_addr0_) readdata1_, wb_addr1_, hit1, modify1, miss1, valid1 = set1 readdata1.connect(readdata1_) wb_addr1.connect(wb_addr1_) readdata2_, wb_addr2_, hit2, modify2, miss2, valid2 = set2 readdata2.connect(readdata2_) wb_addr2.connect(wb_addr2_) readdata3_, wb_addr3_, hit3, modify3, miss3, valid3 = set3 readdata3.connect(readdata3_) wb_addr3.connect(wb_addr3_) hits = Vector.concatRev(hit0, hit1, hit2, hit3) hit.connect(hits) modify.connect(Vector.concatRev(modify0, modify1, modify2, modify3)) miss.connect(Vector.concatRev(miss0, miss1, miss2, miss3)) valid.connect(Vector.concatRev(valid0, valid1, valid2, valid3)) do(clk, { doIf(rst): { state: "IDLE" }, doElse(): { doCase(state): { "IDLE": { doIf(i_p_read): 
{state: "COMP"}, doElif(i_p_write): {state: "COMP"} }, "COMP": { doIf(hit.reduce(Or) & write_buf): { state: "HIT", }, doElif(hit.reduce(Or) & read_buf): { state: "IDLE" }, doElif(~valid.reduce(And) | miss[r_cm_data[0:2]]): { state: "FETCH1", }, doElse(): { state: "WB1", } }, "HIT": { state: "IDLE", }, "FETCH1": { doIf(~i_m_waitrequest): { state: "FETCH2", } }, "FETCH2": { doIf(i_m_readdata_valid): { doIf(write_buf): { state: "FETCH3", }, doElif(read_buf): { state: "IDLE", } } }, "FETCH3": { state: "IDLE", }, "WB1": { state: "WB2" }, "WB2": { state: "FETCH1" } } } }) state.connect(If(rst, "IDLE", Case(state, { "IDLE": If(i_p_read, "COMP", If(i_p_write, "COMP", state)), "COMP": If(hit.reduce(Or) & write_buf, "HIT", If(hit.reduce(Or) & read_buf, "IDLE", If(~valid.reduce(And) | miss[r_cm_data[0:2]], "FETCH1", "WB1"))), "HIT": "IDLE", "FETCH1": If(~i_m_waitrequest, "FETCH2", state), "FETCH2": If(i_m_readdata_valid, If(write_buf, "FETCH3", If(read_buf, "IDLE", state))), "FETCH3": "IDLE", "WB1": "WB2", "WB2": "FETCH1" } ))) do(clk, { doIf(rst): { o_p_readdata_valid: 0, o_m_read: 0, o_m_write: 0, o_m_addr: 0, write_addr_buf: 0, byte_en_buf: 0, writedata_buf: 0, write_buf: 0, read_buf: 0, write_set: 0, fetch_write: 0, cnt_r: 0, cnt_w: 0, cnt_hit_r: 0, cnt_hit_w: 0, cnt_wb_r: 0, cnt_wb_w: 0, state: "IDLE" }, doElse(): { doCase(state): { "IDLE": { write_set: 0, o_p_readdata_valid: 0, writedata_buf: {i_p_writedata, i_p_writedata, i_p_writedata, i_p_writedata}, write_addr_buf: i_p_addr, byte_en_buf: i_p_byte_en, write_buf: i_p_write, read_buf: i_p_read, doIf(i_p_read): { state: "COMP", cnt_r: cnt_r + 1 }, doElse(): { doIf(i_p_write): { state: "COMP", cnt_w: cnt_w + 1 } } }, "COMP": { doIf(hit.reduce(Or) & write_buf): { state: "HIT", write_set: hit, cnt_hit_w: cnt_hit_w + 1, w_cm_data: If(r_cm_data[0:2] == hit_num, Vector.concatRev(r_cm_data[0:2], r_cm_data[2:8]), If(r_cm_data[2:4] == hit_num, Vector.concatRev(r_cm_data[2:4], r_cm_data[4:8], r_cm_data[0:2]), If(r_cm_data[4:6] == hit_num, Vector.concatRev(r_cm_data[4:6], r_cm_data[6:8], r_cm_data[0:4]), r_cm_data))), w_cm: 1 }, doElif(hit.reduce(Or) & read_buf): { doCase(write_addr_buf[0:2]): { 0b00: {o_p_readdata: If(hit[0], readdata0[0:32], If(hit[1], readdata1[0:32], If(hit[2], readdata2[0:32], readdata3[0:32])))}, 0b01: {o_p_readdata: If(hit[0], readdata0[32:64], If(hit[1], readdata1[32:64], If(hit[2], readdata2[32:64], readdata3[32:64])))}, 0b10: {o_p_readdata: If(hit[0], readdata0[64:96], If(hit[1], readdata1[64:96], If(hit[2], readdata2[64:96], readdata3[64:96])))}, 0b11: {o_p_readdata: If(hit[0], readdata0[96:128], If(hit[1], readdata1[96:128], If(hit[2], readdata2[96:128], readdata3[96:128])))} }, o_p_readdata_valid: 1, w_cm_data: If(r_cm_data[0:2] == hit_num, Vector.concatRev(r_cm_data[0:2], r_cm_data[2:8]), If(r_cm_data[2:4] == hit_num, Vector.concatRev(r_cm_data[2:4], r_cm_data[4:8], r_cm_data[0:2]), If(r_cm_data[4:6] == hit_num, Vector.concatRev(r_cm_data[4:6], r_cm_data[6:8], r_cm_data[0:4]), r_cm_data))), w_cm: 1, cnt_hit_r: cnt_hit_r + 1, state: "IDLE" }, doElif(~valid.reduce(And) | miss[r_cm_data[0:2]]): { state: "FETCH1", doIf(~valid[0]): { fetch_write: 0b0001, w_cm_data: 0b11100100, w_cm: 1, }, doElif(~valid[1]): { fetch_write: 0b0010, w_cm_data: If(r_cm_data[0:2] == 0b01, Vector.concatRev(r_cm_data[0:2], r_cm_data[2:8]), If(r_cm_data[2:4] == 0b01, Vector.concatRev(r_cm_data[2:4], r_cm_data[4:8], r_cm_data[0:2]), If(r_cm_data[4:6] == 0b01, Vector.concatRev(r_cm_data[4:6], r_cm_data[6:8], r_cm_data[0:4]), r_cm_data))), w_cm: 1 }, 
doElif(~valid[2]): { fetch_write: 0b0100, w_cm_data: If(r_cm_data[0:2] == 0b10, Vector.concatRev(r_cm_data[0:2], r_cm_data[2:8]), If(r_cm_data[2:4] == 0b10, Vector.concatRev(r_cm_data[2:4], r_cm_data[4:8], r_cm_data[0:2]), If(r_cm_data[4:6] == 0b10, Vector.concatRev(r_cm_data[4:6], r_cm_data[6:8], r_cm_data[0:4]), r_cm_data))), w_cm: 1 }, doElif(~valid[3]): { fetch_write: 0b1000, w_cm_data: If(r_cm_data[0:2] == 0b11, Vector.concatRev(r_cm_data[0:2], r_cm_data[2:8]), If(r_cm_data[2:4] == 0b11, Vector.concatRev(r_cm_data[2:4], r_cm_data[4:8], r_cm_data[0:2]), If(r_cm_data[4:6] == 0b11, Vector.concatRev(r_cm_data[4:6], r_cm_data[6:8], r_cm_data[0:4]), r_cm_data))), w_cm: 1 }, doElif(miss[r_cm_data[0:2]]): { doIf(r_cm_data[0:2] == 0b00): {fetch_write: 0b0001}, doElif(r_cm_data[0:2] == 0b01): {fetch_write: 0b0010}, doElif(r_cm_data[0:2] == 0b10): {fetch_write: 0b0100}, doElif(r_cm_data[0:2] == 0b11): {fetch_write: 0b1000}, doElse(): { w_cm_data: Vector.concatRev(r_cm_data[0:2], r_cm_data[2:8]), w_cm: 1 } }, o_m_addr: write_addr_buf[2:25].extendTo(len(o_m_addr), LSB=True), o_m_read: 1 }, doElse(): { state: "WB1", doIf(r_cm_data[0:2] == 0b00): {fetch_write: 0b0001}, doElif(r_cm_data[0:2] == 0b01): {fetch_write: 0b0010}, doElif(r_cm_data[0:2] == 0b10): {fetch_write: 0b0100}, doElif(r_cm_data[0:2] == 0b11): {fetch_write: 0b1000}, w_cm_data: Vector.concatRev(r_cm_data[0:2], r_cm_data[2:8]), w_cm: 1, doIf(read_buf): {cnt_wb_r: cnt_wb_r + 1}, doElif(write_buf): {cnt_wb_w: cnt_wb_w + 1}, } }, "HIT": { w_cm: 0, write_set: 0, state: "IDLE", }, "FETCH1": { w_cm: 0, doIf(~i_m_waitrequest): { o_m_read: 0, state: "FETCH2", } }, "FETCH2": { doIf(i_m_readdata_valid): { fetch_write: 0, doIf(write_buf): { state: "FETCH3", write_set: fetch_write, }, doElif(read_buf): { state: "IDLE", o_p_readdata_valid: 1, doCase(write_addr_buf[0:2]): { 0b00: {o_p_readdata: i_m_readdata[0:32]}, 0b01: {o_p_readdata: i_m_readdata[32:64]}, 0b10: {o_p_readdata: i_m_readdata[64:96]}, 0b11: {o_p_readdata: i_m_readdata[96:128]} } } } }, "FETCH3": { state: "IDLE", write_set: 0 }, "WB1": { w_cm: 0, o_m_addr: If(fetch_write[0], wb_addr0.extendTo(len(o_m_addr), LSB=True), If(fetch_write[1], wb_addr1.extendTo(len(o_m_addr), LSB=True), If(fetch_write[2], wb_addr2.extendTo(len(o_m_addr), LSB=True), wb_addr3.extendTo(len(o_m_addr), LSB=True)))), o_m_writedata: If(fetch_write[0], readdata0, If(fetch_write[1], readdata1, If(fetch_write[2], readdata2, readdata3))), o_m_write: 1, state: "WB2" }, "WB2": { doIf(~i_m_waitrequest): { o_m_write: 0, o_m_addr: write_addr_buf[2:25].extendTo(len(o_m_addr), LSB=True), o_m_read: 1, state: "FETCH1" } } } } }) # # always @(posedge clk) begin # if(rst) begin # o_p_readdata_valid <= 0; # {o_m_read, o_m_write} <= 0; # o_m_addr <= 0; # write_addr_buf <= 0; # byte_en_buf <= 0; # writedata_buf <= 0; # {write_buf, read_buf} <= 0; # write_set <= 0; # fetch_write <= 0; # {cnt_r, cnt_w} <= 0; # {cnt_hit_r, cnt_hit_w} <= 0; # {cnt_wb_r, cnt_wb_w} <= 0; # state <= IDLE; # end # else begin # case (state) # IDLE: begin # write_set <= 0; # o_p_readdata_valid <= 0; # writedata_buf <= {i_p_writedata, i_p_writedata, i_p_writedata, i_p_writedata}; # write_addr_buf <= i_p_addr; # byte_en_buf <= i_p_byte_en; # write_buf <= i_p_write; # read_buf <= i_p_read; # if(i_p_read) begin # state <= COMP; # cnt_r <= cnt_r + 1; # end else if(i_p_write) begin # state <= COMP; # cnt_w <= cnt_w + 1; # end # end # COMP: begin # if((|hit) && write_buf) begin # state <= HIT; # write_set <= hit; # cnt_hit_w <= cnt_hit_w + 1; # w_cm_data <= 
(r_cm_data[1:0] == hit_num) ? {r_cm_data[1:0], r_cm_data[7:2]} : # (r_cm_data[3:2] == hit_num) ? {r_cm_data[3:2], r_cm_data[7:4], r_cm_data[1:0]} : # (r_cm_data[5:4] == hit_num) ? {r_cm_data[5:4], r_cm_data[7:6], r_cm_data[3:0]} : r_cm_data; # w_cm <= 1; # end else if((|hit) && read_buf) begin # case(write_addr_buf[1:0]) # 2'b00: o_p_readdata <= (hit[0]) ? readdata0[31:0] : (hit[1]) ? readdata1[31:0] : (hit[2]) ? readdata2[31:0] : readdata3[31:0]; # 2'b01: o_p_readdata <= (hit[0]) ? readdata0[63:32] : (hit[1]) ? readdata1[63:32] : (hit[2]) ? readdata2[63:32] : readdata3[63:32]; # 2'b10: o_p_readdata <= (hit[0]) ? readdata0[95:64] : (hit[1]) ? readdata1[95:64] : (hit[2]) ? readdata2[95:64] : readdata3[95:64]; # 2'b11: o_p_readdata <= (hit[0]) ? readdata0[127:96] : (hit[1]) ? readdata1[127:96] : (hit[2]) ? readdata2[127:96] : readdata3[127:96]; # endcase # o_p_readdata_valid <= 1; # w_cm_data <= (r_cm_data[1:0] == hit_num) ? {r_cm_data[1:0], r_cm_data[7:2]} : # (r_cm_data[3:2] == hit_num) ? {r_cm_data[3:2], r_cm_data[7:4], r_cm_data[1:0]} : # (r_cm_data[5:4] == hit_num) ? {r_cm_data[5:4], r_cm_data[7:6], r_cm_data[3:0]} : r_cm_data; # w_cm <= 1; # cnt_hit_r <= cnt_hit_r + 1; # state <= IDLE; # end else if(!(&valid) || miss[r_cm_data[1:0]]) begin # state <= FETCH1; # if(!valid[0]) begin # fetch_write <= 4'b0001; # w_cm_data <= 8'b11100100; # w_cm <= 1; # end else if(!valid[1]) begin # fetch_write <= 4'b0010; # w_cm_data <= (r_cm_data[1:0] == 2'b01) ? {r_cm_data[1:0], r_cm_data[7:2]} : # (r_cm_data[3:2] == 2'b01) ? {r_cm_data[3:2], r_cm_data[7:4], r_cm_data[1:0]} : # (r_cm_data[5:4] == 2'b01) ? {r_cm_data[5:4], r_cm_data[7:6], r_cm_data[3:0]} : r_cm_data; # w_cm <= 1; # end else if(!valid[2]) begin # fetch_write <= 4'b0100; # w_cm_data <= (r_cm_data[1:0] == 2'b10) ? {r_cm_data[1:0], r_cm_data[7:2]} : # (r_cm_data[3:2] == 2'b10) ? {r_cm_data[3:2], r_cm_data[7:4], r_cm_data[1:0]} : # (r_cm_data[5:4] == 2'b10) ? {r_cm_data[5:4], r_cm_data[7:6], r_cm_data[3:0]} : r_cm_data; # w_cm <= 1; # end else if(!valid[3]) begin # fetch_write <= 4'b1000; # w_cm_data <= (r_cm_data[1:0] == 2'b11) ? {r_cm_data[1:0], r_cm_data[7:2]} : # (r_cm_data[3:2] == 2'b11) ? {r_cm_data[3:2], r_cm_data[7:4], r_cm_data[1:0]} : # (r_cm_data[5:4] == 2'b11) ? 
{r_cm_data[5:4], r_cm_data[7:6], r_cm_data[3:0]} : r_cm_data; # w_cm <= 1; # end else if(miss[r_cm_data[1:0]]) begin # if(r_cm_data[1:0] == 2'b00) fetch_write <= 4'b0001; # else if(r_cm_data[1:0] == 2'b01) fetch_write <= 4'b0010; # else if(r_cm_data[1:0] == 2'b10) fetch_write <= 4'b0100; # else if(r_cm_data[1:0] == 2'b11) fetch_write <= 4'b1000; # w_cm_data <= {r_cm_data[1:0], r_cm_data[7:2]}; # w_cm <= 1; # end # o_m_addr <= {write_addr_buf[24:2], 3'b000}; # o_m_read <= 1; # end else begin # state <= WB1; # if(r_cm_data[1:0] == 2'b00) fetch_write <= 4'b0001; # else if(r_cm_data[1:0] == 2'b01) fetch_write <= 4'b0010; # else if(r_cm_data[1:0] == 2'b10) fetch_write <= 4'b0100; # else if(r_cm_data[1:0] == 2'b11) fetch_write <= 4'b1000; # w_cm_data <= {r_cm_data[1:0], r_cm_data[7:2]}; # w_cm <= 1; # if(read_buf) cnt_wb_r <= cnt_wb_r + 1; # else if(write_buf) cnt_wb_w <= cnt_wb_w + 1; # end # end # HIT: begin # w_cm <= 0; # write_set <= 0; # state <= IDLE; # end //1/13 # FETCH1: begin # w_cm <= 0; # if(!i_m_waitrequest) begin # o_m_read <= 0; # state <= FETCH2; # end # end # FETCH2: begin # if(i_m_readdata_valid) begin # fetch_write <= 0; //add 3/9 # if(write_buf) begin # state <= FETCH3; # write_set <= fetch_write; # end else if(read_buf) begin # state <= IDLE; # o_p_readdata_valid <= 1; # case(write_addr_buf[1:0]) # 2'b00: o_p_readdata <= i_m_readdata[ 31: 0]; # 2'b01: o_p_readdata <= i_m_readdata[ 63:32]; # 2'b10: o_p_readdata <= i_m_readdata[ 95:64]; # 2'b11: o_p_readdata <= i_m_readdata[127:96]; # endcase # end # end # end # FETCH3: begin # state <= IDLE; # write_set <= 0; # end # WB1: begin # w_cm <= 0; # o_m_addr <= (fetch_write[0]) ? {wb_addr0, 3'b000} : # (fetch_write[1]) ? {wb_addr1, 3'b000} : # (fetch_write[2]) ? {wb_addr2, 3'b000} : {wb_addr3, 3'b000}; # o_m_writedata <= (fetch_write[0]) ? readdata0 : # (fetch_write[1]) ? readdata1 : # (fetch_write[2]) ? 
readdata2 : readdata3; # o_m_write <= 1; # state <= WB2; # end # WB2: begin # if(!i_m_waitrequest) begin # o_m_write <= 0; # o_m_addr <= {write_addr_buf[24:2], 3'b000}; # o_m_read <= 1; # state <= FETCH1; # end # end # endcase // case (state) # end # end # return o_p_readdata, o_p_readdata_valid, o_p_waitrequest, o_m_addr, o_m_byte_en, o_m_writedata, o_m_read, o_m_write """ module set(clk, rst, entry, o_tag, writedata, byte_en, write, word_en, readdata, wb_addr, hit, modify, miss, valid, read_miss); parameter cache_entry = 14; input wire clk, rst; input wire [cache_entry-1:0] entry; input wire [22-cache_entry:0] o_tag; input wire [127:0] writedata; input wire [3:0] byte_en; input wire write; input wire [3:0] word_en; input wire read_miss; output wire [127:0] readdata; output wire [22:0] wb_addr; output wire hit, modify, miss, valid; wire [22-cache_entry:0] i_tag; wire dirty; wire [24-cache_entry:0] write_tag_data; assign hit = valid && (o_tag == i_tag); assign modify = valid && (o_tag != i_tag) && dirty; assign miss = !valid || ((o_tag != i_tag) && !dirty); assign wb_addr = {i_tag, entry}; //write -> [3:0] write, writedata/readdata 32bit -> 128bit simple_ram #(.width(8), .widthad(cache_entry)) ram11_3(clk, entry, write && word_en[3] && byte_en[3], writedata[127:120], entry, readdata[127:120]); simple_ram #(.width(8), .widthad(cache_entry)) ram11_2(clk, entry, write && word_en[3] && byte_en[2], writedata[119:112], entry, readdata[119:112]); simple_ram #(.width(8), .widthad(cache_entry)) ram11_1(clk, entry, write && word_en[3] && byte_en[1], writedata[111:104], entry, readdata[111:104]); simple_ram #(.width(8), .widthad(cache_entry)) ram11_0(clk, entry, write && word_en[3] && byte_en[0], writedata[103:96], entry, readdata[103:96]); simple_ram #(.width(8), .widthad(cache_entry)) ram10_3(clk, entry, write && word_en[2] && byte_en[3], writedata[95:88], entry, readdata[95:88]); simple_ram #(.width(8), .widthad(cache_entry)) ram10_2(clk, entry, write && word_en[2] && byte_en[2], writedata[87:80], entry, readdata[87:80]); simple_ram #(.width(8), .widthad(cache_entry)) ram10_1(clk, entry, write && word_en[2] && byte_en[1], writedata[79:72], entry, readdata[79:72]); simple_ram #(.width(8), .widthad(cache_entry)) ram10_0(clk, entry, write && word_en[2] && byte_en[0], writedata[71:64], entry, readdata[71:64]); simple_ram #(.width(8), .widthad(cache_entry)) ram01_3(clk, entry, write && word_en[1] && byte_en[3], writedata[63:56], entry, readdata[63:56]); simple_ram #(.width(8), .widthad(cache_entry)) ram01_2(clk, entry, write && word_en[1] && byte_en[2], writedata[55:48], entry, readdata[55:48]); simple_ram #(.width(8), .widthad(cache_entry)) ram01_1(clk, entry, write && word_en[1] && byte_en[1], writedata[47:40], entry, readdata[47:40]); simple_ram #(.width(8), .widthad(cache_entry)) ram01_0(clk, entry, write && word_en[1] && byte_en[0], writedata[39:32], entry, readdata[39:32]); simple_ram #(.width(8), .widthad(cache_entry)) ram00_3(clk, entry, write && word_en[0] && byte_en[3], writedata[31:24], entry, readdata[31:24]); simple_ram #(.width(8), .widthad(cache_entry)) ram00_2(clk, entry, write && word_en[0] && byte_en[2], writedata[23:16], entry, readdata[23:16]); simple_ram #(.width(8), .widthad(cache_entry)) ram00_1(clk, entry, write && word_en[0] && byte_en[1], writedata[15: 8], entry, readdata[15:8]); simple_ram #(.width(8), .widthad(cache_entry)) ram00_0(clk, entry, write && word_en[0] && byte_en[0], writedata[ 7: 0], entry, readdata[ 7:0]); assign write_tag_data = (read_miss) ? 
{1'b0, 1'b1, o_tag} : (modify || miss ) ? {1'b1, 1'b1, o_tag} : {1'b1, 1'b1, i_tag}; simple_ram #(.width(25-cache_entry), .widthad(cache_entry)) ram_tag(clk, entry, write, write_tag_data, entry, {dirty, valid, i_tag}); `ifdef SIM integer i; initial begin for(i = 0; i <=(2**cache_entry-1); i=i+1) begin ram_tag.mem[i] = 0; end end `endif endmodule """ def Set(clk, entry, o_tag, writedata, byte_en, write, word_en, read_miss): cache_entry = 14 # # input wire clk, rst; # input wire [cache_entry-1:0] entry; # input wire [22-cache_entry:0] o_tag; # input wire [127:0] writedata; # input wire [3:0] byte_en; # input wire write; # input wire [3:0] word_en; # input wire read_miss; # # output wire [127:0] readdata; # output wire [22:0] wb_addr; # output wire hit, modify, miss, valid; # # # # wire [22-cache_entry:0] i_tag; i_tag = FeedbackVector(0, 23 - cache_entry) # wire dirty; dirty = FeedbackVector(0) # wire [24-cache_entry:0] write_tag_data; valid = FeedbackVector(0) # hit = valid & (o_tag == i_tag) modify = valid & (o_tag != i_tag) & dirty miss = ~ valid & ((o_tag != i_tag) & ~ dirty) # wb_addr = Vector.concatRev(i_tag, entry) # //write -> [3:0] write, writedata/readdata 32bit -> 128bit # simple_ram #(.width(8), .widthad(cache_entry)) ram11_3(clk, entry, write && word_en[3] && byte_en[3], writedata[127:120], entry, readdata[127:120]); write_word_en_3 = write & word_en[3] ram11_3 = simple_ram(clk, entry, writedata[120:127], write_word_en_3 & byte_en[3]) # simple_ram #(.width(8), .widthad(cache_entry)) ram11_2(clk, entry, write && word_en[3] && byte_en[2], writedata[119:112], entry, readdata[119:112]); ram11_2 = simple_ram(clk, entry, writedata[112:120], write_word_en_3 & byte_en[2]) # simple_ram #(.width(8), .widthad(cache_entry)) ram11_1(clk, entry, write && word_en[3] && byte_en[1], writedata[111:104], entry, readdata[111:104]); ram11_1 = simple_ram(clk, entry, writedata[104:112], write_word_en_3 & byte_en[1]) # simple_ram #(.width(8), .widthad(cache_entry)) ram11_0(clk, entry, write && word_en[3] && byte_en[0], writedata[103:96], entry, readdata[103:96]); ram11_0 = simple_ram(clk, entry, writedata[96:104], write_word_en_3 & byte_en[0]) # # simple_ram #(.width(8), .widthad(cache_entry)) ram10_3(clk, entry, write && word_en[2] && byte_en[3], writedata[95:88], entry, readdata[95:88]); write_word_en_2 = write & word_en[2] ram10_3 = simple_ram(clk, entry, writedata[88:96], write_word_en_2 & byte_en[3]) # simple_ram #(.width(8), .widthad(cache_entry)) ram10_2(clk, entry, write && word_en[2] && byte_en[2], writedata[87:80], entry, readdata[87:80]); ram10_2 = simple_ram(clk, entry, writedata[80:88], write_word_en_2 & byte_en[2]) # simple_ram #(.width(8), .widthad(cache_entry)) ram10_1(clk, entry, write && word_en[2] && byte_en[1], writedata[79:72], entry, readdata[79:72]); ram10_1 = simple_ram(clk, entry, writedata[72:80], write_word_en_2 & byte_en[1]) # simple_ram #(.width(8), .widthad(cache_entry)) ram10_0(clk, entry, write && word_en[2] && byte_en[0], writedata[71:64], entry, readdata[71:64]); ram10_0 = simple_ram(clk, entry, writedata[64:72], write_word_en_2 & byte_en[0]) # # simple_ram #(.width(8), .widthad(cache_entry)) ram01_3(clk, entry, write && word_en[1] && byte_en[3], writedata[63:56], entry, readdata[63:56]); write_word_en_1 = write & word_en[1] ram01_3 = simple_ram(clk, entry, writedata[56:64], write_word_en_1 & byte_en[3]) # simple_ram #(.width(8), .widthad(cache_entry)) ram01_2(clk, entry, write && word_en[1] && byte_en[2], writedata[55:48], entry, readdata[55:48]); ram01_2 = 
simple_ram(clk, entry, writedata[48:56], write_word_en_1 & byte_en[2]) # simple_ram #(.width(8), .widthad(cache_entry)) ram01_1(clk, entry, write && word_en[1] && byte_en[1], writedata[47:40], entry, readdata[47:40]); ram01_1 = simple_ram(clk, entry, writedata[40:48], write_word_en_1 & byte_en[1]) # simple_ram #(.width(8), .widthad(cache_entry)) ram01_0(clk, entry, write && word_en[1] && byte_en[0], writedata[39:32], entry, readdata[39:32]); ram01_0 = simple_ram(clk, entry, writedata[32:40], write_word_en_1 & byte_en[0]) # # simple_ram #(.width(8), .widthad(cache_entry)) ram00_3(clk, entry, write && word_en[0] && byte_en[3], writedata[31:24], entry, readdata[31:24]); write_word_en_0 = write & word_en[0] ram00_3 = simple_ram(clk, entry, writedata[24:32], write_word_en_0 & byte_en[3]) # simple_ram #(.width(8), .widthad(cache_entry)) ram00_2(clk, entry, write && word_en[0] && byte_en[2], writedata[23:16], entry, readdata[23:16]); ram00_2 = simple_ram(clk, entry, writedata[16:24], write_word_en_0 & byte_en[2]) # simple_ram #(.width(8), .widthad(cache_entry)) ram00_1(clk, entry, write && word_en[0] && byte_en[1], writedata[15: 8], entry, readdata[15:8]); ram00_1 = simple_ram(clk, entry, writedata[8:16], write_word_en_0 & byte_en[1]) # simple_ram #(.width(8), .widthad(cache_entry)) ram00_0(clk, entry, write && word_en[0] && byte_en[0], writedata[ 7: 0], entry, readdata[ 7:0]); ram00_0 = simple_ram(clk, entry, writedata[0: 8], write_word_en_0 & byte_en[0]) readdata = Vector.concatRev(ram00_0, ram00_1, ram00_2, ram00_3, ram01_0, ram01_1, ram01_2, ram01_3, ram10_0, ram10_1, ram10_2, ram10_3, ram11_0, ram11_1, ram11_2, ram11_3) # # write_tag_data = If(modify | miss, If(read_miss, Vector.concatRev(VFalse, VTrue, o_tag), Vector.concatRev(VTrue, VTrue, o_tag)), Vector.concatRev(VTrue, VTrue, i_tag)) # simple_ram #(.width(25-cache_entry), .widthad(cache_entry)) ram_tag(clk, entry, write, write_tag_data, entry, {dirty, valid, i_tag}); ram_tag = simple_ram(clk, entry, write_tag_data, write) dirty.connect(ram_tag[0]) valid.connect(ram_tag[1]) i_tag.connect(ram_tag[2:]) # # `ifdef SIM # integer i; # # initial begin # for(i = 0; i <=(2**cache_entry-1); i=i+1) begin # ram_tag.mem[i] = 0; # end # end # `endif # # endmodule return readdata, wb_addr, hit, modify, miss, valid
# The superclass to implement selection operators.
# It is an abstract class.
class SelectionOperator:

    # Constructor
    # name: name of the selection operator
    def __init__(self, name: str = "Unspecified selection operator"):
        self.name = name

    # Accessor on the name of the operator
    def getName(self) -> str:
        return self.name

    # Select a good individual from anIndividualSet
    def select(self, anIndividualSet):
        return self.selectGood(anIndividualSet)

    # Select a good individual from anIndividualSet
    # Useful for a steady-state EA
    def selectGood(self, anIndividualSet):
        return self.__select__(anIndividualSet, True)

    # Select a bad individual from anIndividualSet
    # Useful for a steady-state EA
    def selectBad(self, anIndividualSet):
        return self.__select__(anIndividualSet, False)

    # Run this method once per generation, before any selection is done.
    # Useful for ranking the individuals
    def preProcess(self, anIndividualSet):
        raise NotImplementedError("Subclasses should implement this!")

    # Abstract method to perform the actual selection
    def __select__(self, anIndividualSet, aFlag):
        # aFlag == True for selecting good individuals,
        # aFlag == False for selecting bad individuals
        raise NotImplementedError("Subclasses should implement this!")

    # Method used for print()
    def __str__(self) -> str:
        return "name:\t\"" + self.name + "\""
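# --- Added sketch (not part of the original class; it assumes anIndividualSet is an
# --- indexable sequence of individuals): a minimal concrete operator that picks a
# --- uniformly random individual, just to show how a subclass fills in the abstract methods.
import random


class RandomSelectionOperator(SelectionOperator):

    def __init__(self):
        super().__init__("Random selection")

    # Nothing to precompute for uniform random selection
    def preProcess(self, anIndividualSet):
        pass

    # Good or bad does not matter when picking uniformly at random
    def __select__(self, anIndividualSet, aFlag):
        return random.choice(anIndividualSet)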
z1, z2, z3 = map(int, input().split())
w = (z1 / 2) * (2 * z2 + (z1 - 1) * z3)
print(int(w))
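# --- Added note (not in the original): the formula above is the arithmetic-series sum
# --- S = n/2 * (2a + (n-1)d) with n=z1 terms, first term a=z2 and common difference d=z3.
# --- Example: input "5 2 3" gives 5/2 * (4 + 4*3) = 2.5 * 16 = 40, i.e. 2+5+8+11+14.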
""" Given an array nums of integers, return how many of them contain an even number of digits. Example 1: Input: nums = [12,345,2,6,7896] Output: 2 Explanation: 12 contains 2 digits (even number of digits). 345 contains 3 digits (odd number of digits). 2 contains 1 digit (odd number of digits). 6 contains 1 digit (odd number of digits). 7896 contains 4 digits (even number of digits). Therefore only 12 and 7896 contain an even number of digits. """ from typing import List class Solution: def findNumbers(self, nums : List[int]) -> int: return sum(len(str(number)) % 2 == 0 for number in nums) if __name__ == "__main__": my_solution = Solution() print(my_solution.findNumbers([12, 345, 2, 6, 7896])) #print(sum([1,1,2,3]))
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Apriori

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the DataSet
dataset = pd.read_csv('Market_Basket_Optimisation.csv', header=None)
transactions = []
for i in range(0, 7501):
    transactions.append([str(dataset.values[i, j]) for j in range(0, 20)])

# Training Apriori on the dataset
from apyori import apriori
rules = apriori(transactions, min_support=0.003, min_confidence=0.2, min_lift=3, min_length=2)
# How min_support was chosen:
# 3 = the number of times a product is sold per day, multiplied by 7 (days in a week),
# divided by 7500, the number of transactions in the dataset:
# 3*7/7500 = 0.0028
# min_confidence = 0.2 = 20%

# Visualising the results
results = list(rules)

results_list = []
for i in range(0, len(results)):
    results_list.append('RULE:\t' + str(results[i][0]) + '\nSUPPORT:\t' + str(results[i][1]))

results_list_1 = []
for i in range(0, len(results)):
    results_list_1.append([str(results[i][0]), str(results[i][1]),
                           str(results[i][2][0][2]), str(results[i][2][0][3])])
results_list_1 = pd.DataFrame(data=results_list_1, columns=['RULE', 'SUPPORT', 'CONFIDENCE', 'LIFT'])
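# --- Added sketch (not part of the original script): the rules are easier to inspect when
# --- sorted by lift; LIFT was stored as a string above, so cast it back to float first.
results_list_1['LIFT'] = results_list_1['LIFT'].astype(float)
print(results_list_1.sort_values(by='LIFT', ascending=False).head(10))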
import sunspec2.mdef as mdef import json import copy import pytest def test_to_int(): assert mdef.to_int('4') == 4 assert isinstance(mdef.to_int('4'), int) assert isinstance(mdef.to_int(4.0), int) def test_to_str(): assert mdef.to_str(4) == '4' assert isinstance(mdef.to_str('4'), str) def test_to_float(): assert mdef.to_float('4') == 4.0 assert isinstance(mdef.to_float('4'), float) assert mdef.to_float('z') is None def test_to_number_type(): assert mdef.to_number_type('4') == 4 assert mdef.to_number_type('4.0') == 4.0 assert mdef.to_number_type('z') == 'z' def test_validate_find_point(): with open('sunspec2/models/json/model_702.json') as f: model_json = json.load(f) assert mdef.validate_find_point(model_json['group'], 'ID') == model_json['group']['points'][0] assert mdef.validate_find_point(model_json['group'], 'abc') is None def test_validate_attrs(): with open('sunspec2/models/json/model_701.json') as f: model_json = json.load(f) # model assert mdef.validate_attrs(model_json, mdef.model_attr) == '' model_unexp_attr_err = copy.deepcopy(model_json) model_unexp_attr_err['abc'] = 'def' assert mdef.validate_attrs(model_unexp_attr_err, mdef.model_attr)[0:37] == 'Unexpected model definition attribute' model_unexp_type_err = copy.deepcopy(model_json) model_unexp_type_err['id'] = '701' assert mdef.validate_attrs(model_unexp_type_err, mdef.model_attr)[0:15] == 'Unexpected type' model_attr_missing = copy.deepcopy(model_json) del model_attr_missing['id'] assert mdef.validate_attrs(model_attr_missing, mdef.model_attr)[0:27] == 'Mandatory attribute missing' # group assert mdef.validate_attrs(model_json['group'], mdef.group_attr) == '' group_unexp_attr_err = copy.deepcopy(model_json)['group'] group_unexp_attr_err['abc'] = 'def' assert mdef.validate_attrs(group_unexp_attr_err, mdef.group_attr)[0:37] == 'Unexpected model definition attribute' group_unexp_type_err = copy.deepcopy(model_json)['group'] group_unexp_type_err['name'] = 1 assert mdef.validate_attrs(group_unexp_type_err, mdef.group_attr)[0:15] == 'Unexpected type' group_attr_missing = copy.deepcopy(model_json)['group'] del group_attr_missing['name'] assert mdef.validate_attrs(group_attr_missing, mdef.group_attr)[0:27] == 'Mandatory attribute missing' # point assert mdef.validate_attrs(model_json['group']['points'][0], mdef.point_attr) == '' point_unexp_attr_err = copy.deepcopy(model_json)['group']['points'][0] point_unexp_attr_err['abc'] = 'def' assert mdef.validate_attrs(point_unexp_attr_err, mdef.point_attr)[0:37] == 'Unexpected model definition attribute' point_unexp_type_err = copy.deepcopy(model_json)['group']['points'][0] point_unexp_type_err['name'] = 1 assert mdef.validate_attrs(point_unexp_type_err, mdef.point_attr)[0:15] == 'Unexpected type' point_unexp_value_err = copy.deepcopy(model_json)['group']['points'][1] point_unexp_value_err['access'] = 'z' assert mdef.validate_attrs(point_unexp_value_err, mdef.point_attr)[0:16] == 'Unexpected value' point_attr_missing = copy.deepcopy(model_json)['group']['points'][0] del point_attr_missing['name'] assert mdef.validate_attrs(point_attr_missing, mdef.point_attr)[0:27] == 'Mandatory attribute missing' # symbol assert mdef.validate_attrs(model_json['group']['points'][2]['symbols'][0], mdef.symbol_attr) == '' symbol_unexp_attr_err = copy.deepcopy(model_json)['group']['points'][2]['symbols'][0] symbol_unexp_attr_err['abc'] = 'def' assert mdef.validate_attrs(symbol_unexp_attr_err, mdef.symbol_attr)[0:37] == 'Unexpected model definition attribute' symbol_unexp_type_err = 
copy.deepcopy(model_json)['group']['points'][2]['symbols'][0] symbol_unexp_type_err['name'] = 1 assert mdef.validate_attrs(symbol_unexp_type_err, mdef.symbol_attr)[0:15] == 'Unexpected type' symbol_attr_missing = copy.deepcopy(model_json)['group']['points'][2]['symbols'][0] del symbol_attr_missing['name'] assert mdef.validate_attrs(symbol_attr_missing, mdef.symbol_attr)[0:27] == 'Mandatory attribute missing' def test_validate_group_point_dup(): with open('sunspec2/models/json/model_704.json') as f: model_json = json.load(f) assert mdef.validate_group_point_dup(model_json['group']) == '' dup_group_id_model = copy.deepcopy(model_json) dup_group_id_group = dup_group_id_model['group'] dup_group_id_group['groups'][0]['name'] = 'PFWInjRvrt' assert mdef.validate_group_point_dup(dup_group_id_group)[0:18] == 'Duplicate group id' dup_group_point_id_model = copy.deepcopy(model_json) dup_group_point_id_group = dup_group_point_id_model['group'] dup_group_point_id_group['groups'][0]['name'] = 'PFWInjEna' assert mdef.validate_group_point_dup(dup_group_point_id_group)[0:28] == 'Duplicate group and point id' mand_attr_miss_model = copy.deepcopy(model_json) mand_attr_miss_group = mand_attr_miss_model['group'] del mand_attr_miss_group['groups'][0]['name'] assert mdef.validate_group_point_dup(mand_attr_miss_group)[0:32] == 'Mandatory name attribute missing' dup_point_id_model = copy.deepcopy(model_json) dup_point_id_group = dup_point_id_model['group'] dup_point_id_group['points'][1]['name'] = 'ID' assert mdef.validate_group_point_dup(dup_point_id_group)[0:30] == 'Duplicate point id ID in group' mand_attr_miss_point_model = copy.deepcopy(model_json) mand_attr_miss_point_group = mand_attr_miss_point_model['group'] del mand_attr_miss_point_group['points'][1]['name'] assert mdef.validate_group_point_dup(mand_attr_miss_point_group)[0:55] == 'Mandatory attribute missing in point ' \ 'definition element' def test_validate_symbols(): symbols = [ {'name': 'CAT_A', 'value': 1}, {'name': 'CAT_B', 'value': 2} ] assert mdef.validate_symbols(symbols, mdef.symbol_attr) == '' def test_validate_sf(): with open('sunspec2/models/json/model_702.json') as f: model_json = json.load(f) model_point = model_json['group']['points'][2] model_group = model_json['group'] model_group_arr = [model_group, model_group] assert mdef.validate_sf(model_point, 'W_SF', model_group_arr) == '' not_sf_type_model = copy.deepcopy(model_json) not_sf_type_point = not_sf_type_model['group']['points'][2] not_sf_type_group = not_sf_type_model['group'] not_sf_type_group_arr = [not_sf_type_group, not_sf_type_group] for point in not_sf_type_model['group']['points']: if point['name'] == 'W_SF': point['type'] = 'abc' assert mdef.validate_sf(not_sf_type_point, 'W_SF', not_sf_type_group_arr)[0:60] == 'Scale factor W_SF for point ' \ 'WMaxRtg is not scale factor ' \ 'type' sf_not_found_model = copy.deepcopy(model_json) sf_not_found_point = sf_not_found_model['group']['points'][2] sf_not_found_group = sf_not_found_model['group'] sf_not_found_group_arr = [sf_not_found_group, sf_not_found_group] assert mdef.validate_sf(sf_not_found_point, 'ABC', sf_not_found_group_arr)[0:44] == 'Scale factor ABC for point ' \ 'WMaxRtg not found' sf_out_range_model = copy.deepcopy(model_json) sf_out_range_point = sf_out_range_model['group']['points'][2] sf_out_range_group = sf_out_range_model['group'] sf_out_range_group_arr = [sf_out_range_group, sf_out_range_group] assert mdef.validate_sf(sf_out_range_point, 11, sf_out_range_group_arr)[0:46] == 'Scale factor 11 for point ' \ 'WMaxRtg 
out of range' sf_invalid_type_model = copy.deepcopy(model_json) sf_invalid_type_point = sf_invalid_type_model['group']['points'][2] sf_invalid_type_group = sf_invalid_type_model['group'] sf_invalid_type_group_arr = [sf_invalid_type_group, sf_invalid_type_group] assert mdef.validate_sf(sf_invalid_type_point, 4.0, sf_invalid_type_group_arr)[0:51] == 'Scale factor 4.0 for' \ ' point WMaxRtg has ' \ 'invalid type' def test_validate_point_def(): with open('sunspec2/models/json/model_702.json') as f: model_json = json.load(f) model_group = model_json['group'] group = model_json['group'] point = model_json['group']['points'][0] assert mdef.validate_point_def(point, model_group, group) == '' unk_point_type_model = copy.deepcopy(model_json) unk_point_type_model_group = unk_point_type_model['group'] unk_point_type_group = unk_point_type_model['group'] unk_point_type_point = unk_point_type_model['group']['points'][0] unk_point_type_point['type'] = 'abc' assert mdef.validate_point_def(unk_point_type_point, unk_point_type_model_group, unk_point_type_group)[0:35] == 'Unknown point type abc for point ID' dup_symbol_model = copy.deepcopy(model_json) dup_symbol_model_group = dup_symbol_model['group'] dup_symbol_group = dup_symbol_model['group'] dup_symbol_point = dup_symbol_model['group']['points'][21] dup_symbol_point['symbols'][0]['name'] = 'CAT_B' assert mdef.validate_point_def(dup_symbol_point, dup_symbol_model_group, dup_symbol_group)[0:19] == 'Duplicate symbol id' mand_attr_missing = copy.deepcopy(model_json) mand_attr_missing_model_group = mand_attr_missing['group'] mand_attr_missing_group = mand_attr_missing['group'] mand_attr_missing_point = mand_attr_missing['group']['points'][0] del mand_attr_missing_point['name'] assert mdef.validate_point_def(mand_attr_missing_point, mand_attr_missing_model_group, mand_attr_missing_group)[0:27] == 'Mandatory attribute missing' def test_validate_group_def(): with open('sunspec2/models/json/model_702.json') as f: model_json = json.load(f) assert mdef.validate_group_def(model_json['group'], model_json['group']) == '' def test_validate_model_group_def(): with open('sunspec2/models/json/model_702.json') as f: model_json = json.load(f) assert mdef.validate_model_group_def(model_json, model_json['group']) == '' missing_id_model = copy.deepcopy(model_json) missing_id_group = missing_id_model['group'] missing_id_group['points'][0]['name'] = 'abc' assert mdef.validate_model_group_def(missing_id_model, missing_id_group)[0:41] == 'First point in top-level' \ ' group must be ID' wrong_model_id_model = copy.deepcopy(model_json) wrong_model_id_group = wrong_model_id_model['group'] wrong_model_id_group['points'][0]['value'] = 0 assert mdef.validate_model_group_def(wrong_model_id_model, wrong_model_id_group)[0:42] == 'Model ID does not ' \ 'match top-level group ID' missing_len_model = copy.deepcopy(model_json) missing_len_group = missing_len_model['group'] missing_len_group['points'][1]['name'] = 'abc' assert mdef.validate_model_group_def(missing_len_model, missing_len_group)[0:41] == 'Second point in top-level ' \ 'group must be L' missing_two_p_model = copy.deepcopy(model_json) missing_two_p_group = missing_two_p_model['group'] missing_two_p_point = missing_two_p_group['points'][0] del missing_two_p_group['points'] missing_two_p_group['points'] = [missing_two_p_point] assert mdef.validate_model_group_def(missing_two_p_model, missing_two_p_group)[0:48] == 'Top-level group must' \ ' contain at least two ' \ 'points' missing_p_def_model = copy.deepcopy(model_json) 
missing_p_def_group = missing_p_def_model['group'] del missing_p_def_group['points'] assert mdef.validate_model_group_def(missing_p_def_model, missing_p_def_group)[0:41] == 'Top-level group' \ ' missing point definitions' def test_validate_model_def(): with open('sunspec2/models/json/model_702.json') as f: model_json = json.load(f) assert mdef.validate_model_def(model_json) == '' def test_from_json_str(): with open('sunspec2/models/json/model_63001.json') as f: model_json = json.load(f) model_json_str = json.dumps(model_json) assert isinstance(mdef.from_json_str(model_json_str), dict) def test_from_json_file(): assert isinstance(mdef.from_json_file('sunspec2/models/json/model_63001.json'), dict) def test_to_json_str(): with open('sunspec2/models/json/model_63001.json') as f: model_json = json.load(f) assert isinstance(mdef.to_json_str(model_json), str) def test_to_json_filename(): assert mdef.to_json_filename('63001') == 'model_63001.json' def test_to_json_file(tmp_path): with open('sunspec2/models/json/model_63001.json') as f: model_json = json.load(f) mdef.to_json_file(model_json, filedir=tmp_path) with open(tmp_path / 'model_63001.json') as f: model_json = json.load(f) assert isinstance(model_json, dict) def test_model_filename_to_id(): assert mdef.model_filename_to_id('model_00077.json') == 77 with pytest.raises(Exception) as exc: mdef.model_filename_to_id('model_abc.json') assert 'Error extracting model id from filename' in str(exc.value)
#This Is a Calculator Project import os import math print("Welcome To My First Simple Project(Calculator using Python3)") list_menu = ["Addition", "Subtraction", "Multiplication", "Division", "Modulo", "Raising to a power", "Square root", "Logarithm", "Sine", "Cosine", "Tangent"] # Menus def choose_menu(): os.system('clear') x=0 for i in list_menu: print(x, i) x +=1 choose = int(input("\nInsert number to select your operation : ")) return choose # Choose The Number def calculator_function(number): if number >10 : choose_menu() else: val1 = int(input("\nInsert Val1 : ")) val2 = int(input("\nInsert Val2 : ")) #Addition if number == 0 : print("\nThe result is : ",val1,"+", val2, "=", val1+val2, "\n") back = input("\nRecount? (y =recount; n = main menu; q=quit )") if back=='y': calculator_function(number) elif back=='n': print("\n") a=choose_menu() calculator_function(a) else: print("See you later") #run a = choose_menu() calculator_function(a)
import copy def one_hot(x, length): a = [] for i in range(length): if i == x: a.append(1) else: a.append(0) return a def normal_1(a): b = copy.deepcopy(a) total = 0 for i in b: total += i if total == 0: print('divide zero error') exit(1) for i in range(len(a)): b[i] /= total return b def average(a, b, alpha): return a * (1 - alpha) + b * alpha
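# Not part of the original module: a quick usage sketch of the three helpers above, with small
# literal inputs so the expected outputs are easy to verify by hand.
print(one_hot(2, 5))            # [0, 0, 1, 0, 0]
print(normal_1([1, 1, 2]))      # [0.25, 0.25, 0.5]
print(average(0.0, 10.0, 0.2))  # 2.0  (moves 20% of the way from a to b)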
#!/usr/bin/env python3 # -*- coding: utf-8 -*- __version__ = '1.0.1' delete_nvl_circle_element_query = """ UPDATE public.nvl_circle AS ncr SET deleted = TRUE, active = FALSE WHERE ($1::BIGINT is NULL OR ncr.user_id = $1::BIGINT) AND ncr.id = $2::BIGINT RETURNING *; """ # delete_nvl_circle_element_by_location_id_query = """ # UPDATE public.nvl_circle AS ncr SET deleted = TRUE, # active = FALSE WHERE ($1::BIGINT is NULL OR ncr.user_id = $1::BIGINT) AND ncr.location_id = $2::BIGINT RETURNING *; # """ delete_nvl_circle_element_by_location_id_query = """ DELETE FROM public.nvl_circle AS ncr WHERE ($1::BIGINT is NULL OR ncr.user_id = $1::BIGINT) AND ncr.location_id = $2::BIGINT RETURNING *; """
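# Not part of the original module: a hedged usage sketch.  The "$1::BIGINT" placeholder style
# suggests PostgreSQL via asyncpg, but the driver, pool, and schema are assumptions here, not
# something this file states.
import asyncpg  # assumption: asyncpg is the driver in use

async def delete_circle_element(pool: asyncpg.Pool, user_id, element_id):
    # user_id may be None, which the query above interprets as "match any user"
    async with pool.acquire() as conn:
        return await conn.fetchrow(delete_nvl_circle_element_query, user_id, element_id)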
# Generated by Django 2.0 on 2018-01-30 15:13 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('karbar', '0003_auto_20180130_1513'), ] operations = [ migrations.AlterField( model_name='myuser', name='user_type', field=models.IntegerField(choices=[(3, 'madadju'), (2, 'hamyar'), (0, 'modir'), (1, 'madadkar')]), ), ]
from flask_wtf import FlaskForm from wtforms import * from wtforms.validators import * from wtforms.widgets import HiddenInput from app.models import Category def unique_create_name(form, field): if field.data: if Category.query.filter_by(name=field.data).first(): raise ValidationError(f"A category with name '{field.data}' already exists.") else: field.data = None def unique_update_name(form, field): if field.data: if Category.query.filter(Category.id!=form.id.data).filter_by(name=field.data).first(): raise ValidationError(f"A category with name '{field.data}' already exists.") else: field.data = None class CreateCategoryForm(FlaskForm): name = StringField(validators=[DataRequired(), unique_create_name]) class UpdateCategoryForm(FlaskForm): id = IntegerField(validators=[DataRequired()], widget=HiddenInput()) name = StringField(validators=[DataRequired(), unique_update_name])
import pyterrier as pt import unittest import os import shutil import tempfile class TestTRECIndexer(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestTRECIndexer, self).__init__(*args, **kwargs) if not pt.started(): pt.init(logging="DEBUG") # else: # pt.setup_logging("DEBUG") self.here = os.path.dirname(os.path.realpath(__file__)) def setUp(self): # Create a temporary directory self.test_dir = tempfile.mkdtemp() def tearDown(self): # Remove the directory after the test shutil.rmtree(self.test_dir) def test_TREC_indexing(self): indexer = pt.TRECCollectionIndexer(self.test_dir) indexRef = indexer.index(pt.Utils.get_files_in_dir(self.here + "/fixtures/vaswani_npl/corpus/")) self.assertIsNotNone(indexRef) index = pt.IndexFactory.of(indexRef) self.assertEqual(11429, index.getCollectionStatistics().getNumberOfDocuments()) self.assertTrue(os.path.isfile(self.test_dir + '/data.direct.bf')) def test_TREC_indexing_singlepass(self): indexer = pt.TRECCollectionIndexer(self.test_dir, type=pt.IndexingType.SINGLEPASS) indexRef = indexer.index(pt.Utils.get_files_in_dir(self.here + "/fixtures/vaswani_npl/corpus/")) self.assertIsNotNone(indexRef) index = pt.IndexFactory.of(indexRef) self.assertEqual(11429, index.getCollectionStatistics().getNumberOfDocuments()) self.assertFalse(os.path.isfile(self.test_dir + '/data.direct.bf'))
__author__ = 'Supa'
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Foursquare venue scraper """ from scraper import BaseVenueScraper import foursquare import json # Foursquare venue scraper class Scraper_4SQVenues(BaseVenueScraper): source_name = '4sq' def __init__(self): """ Initialises foursquare api library and reads categories """ # init base super(Scraper_4SQVenues, self).__init__() # create 4sq api wrapper object self.fs = foursquare.Foursquare( client_id=self.config['client_id'], client_secret=self.config['client_secret'] ) # build category cache self._categories = self.get_categories() def get_categories(self): """ Queries foursquare categories and builds hierarchical structure of dicts containing category name and id as keys for faster navigation """ def proc(data, cats={}): for cat in data.get('categories') or []: cats[cat['name']] = proc(cat, {'_id': cat['id']}) return cats return proc(self.fs.venues.categories()) def get_category_id(self, cat): """ Finds category id. param cat is list of hierarchical category names """ cats = self._categories for name in cat: cats = cats[name] return cats['_id'] def transform_data(self, data, **kw): for venue in data.get('venues') or []: row = dict.fromkeys(self.staging_fields) loc = venue['location'] row.update( id=venue['id'], name=venue['name'], lat=loc['lat'], lng=loc['lng'], zip=loc.get('postalCode'), address=' '.join(loc.get('formattedAddress', [])), phone=venue['contact'].get('formattedPhone') ) row.update(kw) yield row def get_data(self, area, category, key_category): self.log.info("scraping area: %s, category: %s" % (area, category)) category_id = self.get_category_id(json.loads(category)) self.log.debug("categoryId: %s" % category_id) params = dict( json.loads(area), intent='browse', limit=self.config.get("venue_limit", '100'), categoryId=category_id ) data = self.fs.venues.search(params=params) self.log.info("%s venues found" % len(data.get("venues", []))) return self.transform_data(data, key_category=key_category) if __name__ == '__main__': scraper = Scraper_4SQVenues() scraper.run()
def wdm(talk): return ' '.join(x for x in talk.split() if x not in ('puke','hiccup')) ''' Fortunately, last weekend I met an utterly drunk old man. He was too drunk to be aggressive towards me. He was letting out everything he held, from both his mind and his stomach. Although I was a bit uncomfortable, the old man's broken words of wisdom caught my attention. But his talk was not continuous, as it was frequently interrupted by the involuntary contractions 'puke' and 'hiccup'. Now I am hiring you to clean up his 'puke' and 'hiccup' and tell me the old man's words of wisdom. Because a drunk man also needs to pause and take a deep breath, you have to remove those pauses (redundant/unnecessary spaces). '''
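# Not part of the original kata solution: a couple of example calls showing that the one-liner
# above drops the filler words and collapses extra whitespace, since split() already discards
# runs of spaces.
print(wdm('puke  carpe   hiccup diem  puke'))  # 'carpe diem'
print(wdm('   hiccup  puke   '))               # ''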
import socket import sys import time HOST = socket.gethostname() PORT = 5001 client_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM) client_socket.connect((HOST,PORT)) print(""" Welcome to file share server Choose a option from the menu To download a file enter: D To list all the files available enter: L To upload a file enter: U To quit: Q """) while True: command = input('> ') if command == 'Q': sys.exit() if command == 'L': client_socket.send(command.encode()) data = client_socket.recv(1024) for file in data.decode('utf-8').split(','): #if file ending mp3 bla bla bla print(file) if command == 'D': filename = input('enter the name of the file you want to download\n> ') client_socket.send(filename.encode()) data = client_socket.recv(1024) if data[:11].decode('utf-8') == 'File exists': filesize = data[11:].decode() message = input(f'File exists, {str(filesize)} Bytes, download? (Y/N)? -> ') if message == 'Y': client_socket.send('OK'.encode('utf-8')) f = open('new_' + filename, 'wb') data =client_socket.recv(1024) totalRecv = len(data) f.write(data) while totalRecv < int(filesize): data = client_socket.recv(1024) totalRecv += len(data) f.write(data) print("{0:.2f}".format((totalRecv/float(filesize))* 100)+\ "% Done") time.sleep(0.1) print('Download complete!') else: print('File does not exist') print('Bye. Welcome back!') client_socket.close()
import unittest from katas.beta.string_to_list_of_integers import string_to_int_list class StringToIntegerListTestCase(unittest.TestCase): def test_equal_1(self): self.assertEqual(string_to_int_list('1,2,3,4,5'), [1, 2, 3, 4, 5]) def test_equal_2(self): self.assertEqual(string_to_int_list('21,12,23,34,45'), [21, 12, 23, 34, 45]) def test_equal_3(self): self.assertEqual(string_to_int_list('-1,-2,3,-4,-5'), [-1, -2, 3, -4, -5]) def test_equal_4(self): self.assertEqual(string_to_int_list('1,2,3,,,4,,5,,,'), [1, 2, 3, 4, 5]) def test_equal_5(self): self.assertEqual(string_to_int_list(',,,,,1,2,3,,,4,,5,,,'), [1, 2, 3, 4, 5]) def test_equal_6(self): self.assertEqual(string_to_int_list(''), []) def test_equal_7(self): self.assertEqual(string_to_int_list(',,,,,,,,'), [])
import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from tensorflow import keras import sys, os sys.path.append(os.getcwd()) from utils.data import time, series, plot_series from utils.prepDataset import create_window_dataset, create_seq2seq_window_dataset from utils.modelForecast import model_forecast split_time = 1000 time_train = time[:split_time] # From element 0 to element 999 value_train = series[:split_time] time_valid = time[split_time:] # From element 1000 to 1460 value_valid = series[split_time:] tf.random.set_seed(42) np.random.seed(42) window_size = 30 # Shape of a batch [[1,2,3, ...], [...]] train_set = create_window_dataset(value_train, window_size, batch_size=128) valid_set = create_window_dataset(value_valid, window_size, batch_size=128) seq_train_set = create_seq2seq_window_dataset(value_train, window_size, batch_size=128) seq_valid_set = create_seq2seq_window_dataset(value_valid, window_size, batch_size=128) # for X_batch, Y_batch in create_window_dataset(tf.range(10), 3, # batch_size=1): # print("X:", X_batch.numpy()) # print("Y:", Y_batch.numpy()) # Output: X: [[4 5 6]] # Y: [7] def getPlotToFindTheBestLearningRate(model, train_set): lr_schedule = keras.callbacks.LearningRateScheduler( lambda epoch: 1e-7 * 10**(epoch / 20)) optimizer = keras.optimizers.SGD(lr=1e-7, momentum=0.9) model.compile(loss=keras.losses.Huber(), optimizer=optimizer, metrics=["mae"]) history = model.fit(train_set, epochs=100, callbacks=[lr_schedule]) plt.semilogx(history.history["lr"], history.history["loss"]) plt.axis([1e-7, 1e-4, 0, 30]) plt.show() simple_model = keras.models.Sequential([ keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1), # Add another dimension to the input input_shape=[None]), # Now a batch has shape [ [ [1], [2], ... ] ] keras.layers.SimpleRNN(100, return_sequences=True), keras.layers.SimpleRNN(100), keras.layers.Dense(1), keras.layers.Lambda(lambda x: x * 200.0) # Because the value of labels ranging from 40 -> 120 and the # output of the last Dense layer is between -1 and 1, we just # scale the final output so that the network can easily # compare it with the labels. 
You can choose some large number # like 120, 180, or 220 but here 200 is a good choice # as it leads to more accurate model ]) # getPlotToFindTheBestLearningRate(simple_model, train_set) # Making prediction and caculate the loss at every time step (sequence to sequence model) helps to improve # training speed seqToseqModel = keras.models.Sequential([ keras.layers.SimpleRNN(100, return_sequences=True, input_shape=[None, 1]), keras.layers.SimpleRNN(100, return_sequences=True), keras.layers.Dense(1), keras.layers.Lambda(lambda x: x * 200.0) ]) # getPlotToFindTheBestLearningRate(seqToseqModel, seq_train_set) def trainSimpleModel(): optimizer = keras.optimizers.SGD(lr=1e-6, momentum=0.9) model = simple_model model.compile(loss=keras.losses.Huber(), optimizer=optimizer, metrics=["mae"]) early_stopping = keras.callbacks.EarlyStopping(patience=50) model_checkpoint = keras.callbacks.ModelCheckpoint( os.getcwd() + "/RNN forecast/checkpoint.h5", save_best_only=True) model.fit(train_set, epochs=500, validation_data=valid_set, callbacks=[early_stopping, model_checkpoint]) rnn_forecast = model_forecast( model, # Must be split_time- window_size because we want to forecast from the element [1000] and # the model_forecast function split the series into many batches to fit with the model series[split_time - window_size:-1], window_size)[:, 0] plt.figure(figsize=(10, 6)) plot_series(time_valid, value_valid) plot_series(time_valid, rnn_forecast) plt.show() print(keras.metrics.mean_absolute_error(value_valid, rnn_forecast).numpy()) # for X_batch, Y_batch in create_seq2seq_window_dataset(tf.range(10), 3, # batch_size=1): # print("X:", X_batch.numpy()) # print("Y:", Y_batch.numpy()) def trainSeqToSeqModel(): optimizer = keras.optimizers.SGD(lr=5*1e-6, momentum=0.9) model = seqToseqModel model.compile(loss=keras.losses.Huber(), optimizer=optimizer, metrics=["mae"]) early_stopping = keras.callbacks.EarlyStopping(patience=10) model_checkpoint = keras.callbacks.ModelCheckpoint( os.getcwd() + "/RNN forecast/seq_checkpoint.h5", save_best_only=True) model.fit(seq_train_set, epochs=500, validation_data=seq_valid_set, callbacks=[early_stopping, model_checkpoint]) # Series[..., np.newaxis] add one more dimension to series. For example: [1,2,3,4] -> [[1],[2],[3],[4]] # Series[..., np.newaxis] is equivalent to tf.expand_dims(series, axis=-1) # We must preprocess series by using series[..., np.newaxis] because seqToSeqModel use preprocessed data with one more dimension rnn_forecast = model_forecast(model, series[..., np.newaxis], window_size) print(series[..., np.newaxis]) # Arr[1,2,3] is equivalent to arr[1][2][3] # (split_time - window_size : -1): Get the value in range (the array holding predicted values from the element [1000]) # Because the window element [split_time - window_size = 970] holding values from [970 - 999] # the rnn_forecast[split_time - window_size] holding predicted value of days [971 -> 1000]. In the same # way, the last elements in rnn_forecast holding predicted value of days [1432 - 1461], but we don't # need the predicted value of day 1461, then we use [split_time - window_size:-1], not [split_time - window_size]. # -1: Our model return a sequences of predicted values, but we only takes the last value [-1], # because that is the predicted value for 30-days interval. 
For ex: [] # 0: Because each value has shape (1,) rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0] plt.figure(figsize=(10, 6)) plot_series(time_valid, value_valid) plot_series(time_valid, rnn_forecast) plt.show() print(keras.metrics.mean_absolute_error(value_valid, rnn_forecast).numpy()) trainSeqToSeqModel()
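# Not part of the original script: a small arithmetic check of the slicing logic described in the
# comments above, using the same split_time=1000, window_size=30 and a series of length 1461
# (time_valid runs from element 1000 to 1460).  Window i covers days [i, i+window_size-1] and its
# last output predicts day i+window_size, so the first useful window starts at split_time - window_size.
split = 1000
window = 30
series_len = 1461
first_window = split - window          # 970  -> covers days 970..999, predicts day 1000
last_window = series_len - window - 1  # 1430 -> covers days 1430..1459, predicts day 1460
print(first_window, last_window)       # 970 1430
print(last_window - first_window + 1)  # 461 predictions, one per element of time_valid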
import os import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from player import Player player = Player() player.play(["files/start.mp3"]) input("Press Enter to stop...")
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sat Aug 11 23:29:27 2018 @author: ck807 """ import numpy as np import tensorflow as tf from residual import Residual from keras.models import Model import matplotlib.pyplot as plt import keras from keras.layers.convolutional import Conv2D, UpSampling2D from keras.layers.pooling import MaxPooling2D from keras.layers import Input, Dropout from keras.optimizers import SGD from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau from layers import initial_conv_block, bottleneck_block import keras.backend as K print('Building and compiling the model..') smooth = 1. def dice_coef(y_true, y_pred): y_true_f = K.flatten(y_true) y_pred_f = K.flatten(y_pred) intersection = K.sum(y_true_f * y_pred_f) return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) def dice_coef_loss(y_true, y_pred): return -dice_coef(y_true, y_pred) def jaccard_coef(y_true, y_pred): intersection = K.sum(y_true * y_pred, axis=[0, -1, -2]) sum_ = K.sum(y_true + y_pred, axis=[0, -1, -2]) jac = (intersection + smooth) / (sum_ - intersection + smooth) return K.mean(jac) def get_unet(): with tf.device('/device:GPU:0'): inputs = Input((192, 192, 3)) conv1 = initial_conv_block(inputs, weight_decay=5e-4) conv1 = bottleneck_block(conv1, filters=32, cardinality=32, strides=1, weight_decay=5e-4) pool1 = MaxPooling2D(pool_size=(2, 2))(conv1) conv2 = bottleneck_block(pool1, filters=32, cardinality=32, strides=1, weight_decay=5e-4) conv2 = bottleneck_block(conv2, filters=64, cardinality=32, strides=1, weight_decay=5e-4) pool2 = MaxPooling2D(pool_size=(2, 2))(conv2) conv3 = bottleneck_block(pool2, filters=64, cardinality=32, strides=1, weight_decay=5e-4) conv3 = bottleneck_block(conv3, filters=128, cardinality=32, strides=1, weight_decay=5e-4) pool3 = MaxPooling2D(pool_size=(2, 2))(conv3) conv4 = bottleneck_block(pool3, filters=128, cardinality=32, strides=1, weight_decay=5e-4) conv4 = bottleneck_block(conv4, filters=256, cardinality=32, strides=1, weight_decay=5e-4) drop4 = Dropout(0.2)(conv4) pool4 = MaxPooling2D(pool_size=(2, 2))(drop4) conv5 = bottleneck_block(pool4, filters=256, cardinality=32, strides=1, weight_decay=5e-4) conv5 = bottleneck_block(conv5, filters=256, cardinality=32, strides=1, weight_decay=5e-4) drop5 = Dropout(0.2)(conv5) with tf.device('/device:GPU:1'): up6 = Conv2D(128, (3,3), activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size = (2,2))(drop5)) merge6 = keras.layers.Concatenate()([drop4, up6]) conv6 = bottleneck_block(merge6, filters=128, cardinality=32, strides=1, weight_decay=5e-4) conv6 = bottleneck_block(conv6, filters=128, cardinality=32, strides=1, weight_decay=5e-4) up7 = Conv2D(64, (3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6)) merge7 = keras.layers.Concatenate()([conv3, up7]) conv7 = bottleneck_block(merge7, filters=64, cardinality=32, strides=1, weight_decay=5e-4) conv7 = bottleneck_block(conv7, filters=64, cardinality=32, strides=1, weight_decay=5e-4) up8 = Conv2D(32, (3,3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7)) merge8 = keras.layers.Concatenate()([conv2, up8]) conv8 = bottleneck_block(merge8, filters=32, cardinality=32, strides=1, weight_decay=5e-4) conv8 = bottleneck_block(conv8, filters=32, cardinality=32, strides=1, weight_decay=5e-4) up9 = Conv2D(16, (3,3), activation = 'relu', padding = 'same', kernel_initializer = 
'he_normal')(UpSampling2D(size = (2,2))(conv8)) merge9 = keras.layers.Concatenate()([conv1, up9]) conv9 = Residual(48, 16, merge9) conv9 = Residual(16, 4, conv9) conv9 = Residual(4, 1, conv9) conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9) model = Model(inputs, conv10) model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.03, momentum=0.9, nesterov=True), metrics=[dice_coef,jaccard_coef]) return model model = get_unet() model.summary()
# Required time complexity: better than O(n log n) # O(n log k) solution using a size-k min-heap import heapq from typing import List class Solution: def topKFrequent(self, nums: List[int], k: int) -> List[int]: res = [] cnt = {} for item in nums: c = cnt.get(item, 0) cnt[item] = c + 1 for key, val in cnt.items(): if len(res) < k: heapq.heappush(res, (val, key)) elif val > res[0][0]: heapq.heappop(res) heapq.heappush(res, (val, key)) ans = [] for i in range(len(res)): ans.append(res[i][1]) return ans
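# Not part of the original solution: example calls matching the classic problem statement,
# assuming heapq and typing.List are imported as above.
print(Solution().topKFrequent([1, 1, 1, 2, 2, 3], 2))  # e.g. [2, 1] (order of the top-k may vary)
print(Solution().topKFrequent([1], 1))                 # [1]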
import kivy kivy.require('1.9.1') from kivy.app import App from kivy.lang import Builder from kivy.uix.screenmanager import ScreenManager, Screen Builder.load_string(""" <ScreenOne>: BoxLayout: Button: text: "Go to Screen 2" on_press: root.manager.transition.direction = "left" root.manager.transition.duration = 1 root.manager.current = "screen_two" <ScreenTwo>: BoxLayout: Button: text: "Go to Screen 1" on_press: root.manager.transition.direction = "left" root.manager.transition.duration = 1 root.manager.current = "screen_one" """) class ScreenOne(Screen): pass class ScreenTwo(Screen): pass screen_manager = ScreenManager() screen_manager.add_widget(ScreenOne(name="screen_one")) screen_manager.add_widget(ScreenTwo(name="screen_two")) class KivyTut3App(App): def build(self): return screen_manager sample_app = KivyTut3App() sample_app.run()
from __future__ import annotations import sys from typing import ( Any, Callable, Mapping, Optional, Sequence, ) from tabulate import tabulate from ai.backend.client.cli.pretty import print_error, print_fail from ai.backend.client.cli.pagination import ( echo_via_pager, get_preferred_page_size, tabulate_items, ) from .types import FieldSpec, PaginatedResult, BaseOutputHandler class NoItems(Exception): pass class ConsoleOutputHandler(BaseOutputHandler): def print_item( self, item: Mapping[str, Any] | None, fields: Sequence[FieldSpec], ) -> None: if item is None: print_fail("No matching entry found.") return field_map = {f.field_name: f for f in fields} print(tabulate( [ ( field_map[k].humanized_name, field_map[k].formatter.format_console(v, field_map[k]), ) for k, v in item.items() ], headers=('Field', 'Value'), )) def print_items( self, items: Sequence[Mapping[str, Any]], fields: Sequence[FieldSpec], ) -> None: field_map = {f.field_name: f for f in fields} for idx, item in enumerate(items): if idx > 0: print("-" * 20) print(tabulate( [ ( field_map[k].humanized_name, field_map[k].formatter.format_console(v, field_map[k]), ) for k, v in item.items() ], headers=('Field', 'Value'), )) def print_list( self, items: Sequence[Mapping[str, Any]], fields: Sequence[FieldSpec], *, is_scalar: bool = False, ) -> None: if is_scalar: assert len(fields) == 1 if sys.stdout.isatty(): def infinite_fetch(): current_offset = 0 page_size = get_preferred_page_size() while True: if len(items) == 0: raise NoItems if is_scalar: yield from map( lambda v: {fields[0].field_name: v}, items[current_offset:current_offset + page_size], ) else: yield from items[current_offset:current_offset + page_size] current_offset += page_size if current_offset >= len(items): break try: echo_via_pager( tabulate_items( infinite_fetch(), fields, ), ) except NoItems: print("No matching items.") else: if is_scalar: for line in tabulate_items( map(lambda v: {fields[0].field_name: v}, items), # type: ignore fields, ): print(line, end="") else: for line in tabulate_items( items, # type: ignore fields, ): print(line, end="") def print_paginated_list( self, fetch_func: Callable[[int, int], PaginatedResult], initial_page_offset: int, page_size: int = None, ) -> None: if sys.stdout.isatty() and page_size is None: page_size = get_preferred_page_size() fields: Sequence[FieldSpec] = [] def infinite_fetch(): nonlocal fields current_offset = initial_page_offset while True: result = fetch_func(current_offset, page_size) if result.total_count == 0: raise NoItems current_offset += len(result.items) if not fields: fields.extend(result.fields) yield from result.items if current_offset >= result.total_count: break try: echo_via_pager( tabulate_items( infinite_fetch(), fields, ), ) except NoItems: print("No matching items.") else: page_size = page_size or 20 result = fetch_func(initial_page_offset, page_size) for line in tabulate_items( result.items, # type: ignore result.fields, ): print(line, end="") def print_mutation_result( self, item: Mapping[str, Any], item_name: Optional[str] = None, action_name: Optional[str] = None, extra_info: Mapping = {}, ) -> None: t = [ ['ok', item['ok']], ['msg', item['msg']], *[(k, v) for k, v in extra_info.items()], ] if action_name is not None: t += [['Action', action_name]] if item_name is not None: t += [(k, v) for k, v in item[item_name].items()] print(tabulate( t, headers=('Field', 'Value'), )) def print_mutation_error( self, error: Optional[Exception] = None, msg: str = 'Failed', item_name: Optional[str] = None, 
action_name: Optional[str] = None, extra_info: Mapping = {}, ) -> None: t = [ ['Message', msg], ] if item_name is not None: t += [['Item', item_name]] if action_name is not None: t += [['Action', action_name]] print(tabulate( t, headers=('Field', 'Value'), )) if error is not None: print_error(error) def print_error( self, error: Exception, ) -> None: print_error(error) def print_fail( self, message: str, ) -> None: print_fail(message)
import mysql.connector import logging import json formatStr = '%(asctime)s - %(message)s' logging.basicConfig(level=logging.INFO, filename='crypto.log', filemode='w', format=formatStr) logFormatter = logging.Formatter(formatStr) rootLogger = logging.getLogger() fileHandler = logging.FileHandler(filename='crypto.log', mode='w') fileHandler.setFormatter(logFormatter) rootLogger.addHandler(fileHandler) consoleHandler = logging.StreamHandler() consoleHandler.setFormatter(logFormatter) rootLogger.addHandler(consoleHandler) db = None cursor = None def db_connect(): global db global cursor db = mysql.connector.connect( host="localhost", user="root", passwd="MYsql123!", database="crypto", buffered=True ) cursor = db.cursor() assert cursor != None return (db, cursor) def insert(sql): cursor.execute(sql) db.commit() def select(sql): cursor.execute(sql) db.commit() return cursor.fetchall() db_connect()
print("about to import") from pyspark.sql import SQLContext from pyspark.sql import HiveContext from pyspark.sql.types import * #import steel_thread import ml_processing from pyspark import SparkContext #import forecast_data_v3 import forecast_data_v4 import numpy as np import pandas as pd print("finished importing") sc = SparkContext() hive_context = HiveContext(sc) sqlContext = SQLContext(sc) outageData=sc.textFile("file:///home/w205/w205_final_project/final_thread/outage_history.csv") weatherData=sc.textFile("file:///home/w205/w205_final_project/final_thread/weather_history_Rhode_Island.csv") riOutages = outageData.filter(lambda x: "Rhode Island" in x) riOutageRecords = riOutages.map(lambda r : r.split(",")) weatherRecords = weatherData.map(lambda r : r.split(",")) RI_Outages = riOutageRecords.map(lambda p: (p[2],p[4],p[5],p[8],p[12])) # I could not figure out how to properly parse this... RI_Weather = weatherRecords.map(lambda p: (p[5],p[6],p[26],p[27],p[28],p[30],p[37],p[38],p[39],p[40],p[41],p[42],p[43],p[44],p[46])) outageSchemaString = 'DATETIME HR MIN AREA NUMCUSTOMERS' # If the above gets updated, this would too (of course) weatherSchemaString = 'DTS ReportType maxTemp minTemp aveTemp aveHumidity WeatherCodes Precip Snowfall SnowDepth aveStationPressure aveSeaLevelPressure aveWindSpeed maxWindSpeed SustainedWindSpeed' outageFields = [StructField(field_name, StringType(), True) for field_name in outageSchemaString.split()] weatherFields = [StructField(field_name, StringType(), True) for field_name in weatherSchemaString.split()] outageSchema = StructType(outageFields) weatherSchema = StructType(weatherFields) schemaOutageData = sqlContext.createDataFrame(RI_Outages, outageSchema) schemaWeatherData = sqlContext.createDataFrame(RI_Weather, weatherSchema) schemaOutageData.registerTempTable('RI_Outages') schemaWeatherData.registerTempTable('RI_Weather') #results_weather = sqlContext.sql('SELECT * FROM RI_Weather WHERE ReportType="SOD" LIMIT 10').show() #results_outages = sqlContext.sql('SELECT DATETIME, AREA, NUMCUSTOMERS, CONCAT(HR, MIN) as DURATION FROM RI_Outages LIMIT 10') #results_outages.show() result_weatherOutage = sqlContext.sql('SELECT to_date(w.DTS) as DT ,w.maxTemp ,w.minTemp ,w.aveTemp ,w.aveHumidity ,w.WeatherCodes ,w.Precip ,w.Snowfall ,w.SnowDepth ,w.aveStationPressure ,w.aveSeaLevelPressure ,w.aveWindSpeed ,w.maxWindSpeed, w.SustainedWindSpeed ,case when o.DATETIME is null then 0 else 1 end as OutageIND FROM RI_Weather w left outer join RI_Outages o on to_date(w.DTS) = to_date(concat(substr(DATETIME,7,4),"-",substr(DATETIME,1,2),"-",substr(DATETIME,4,2))) WHERE w.ReportType="SOD" and year(to_date(w.DTS))=2016 and month(to_date(w.DTS))=2 ORDER BY DT LIMIT 100') #result_weatherOutage.show() #train_data = np.array(result_weatherOutage.select('aveWindSpeed').collect()) #train_labels = np.array(result_weatherOutage.select('OutageIND').collect()) train_data = [] train_labels = [] #data = result_weatherOutage.select('aveWindSpeed').collect() data = result_weatherOutage.select('aveWindSpeed','aveHumidity','maxTemp','minTemp').collect() #data = result_weatherOutage.select('aveWindSpeed','maxTemp','minTemp').collect() for a in data: aveWindSpeed = float('nan') maxTemp = float('nan') minTemp = float('nan') #aveHumidity = float('nan') if a.aveWindSpeed: aveWindSpeed = float(a.aveWindSpeed) if a.maxTemp: maxTemp = float(a.maxTemp) if a.minTemp: minTemp = float(a.minTemp) if a.aveHumidity: aveHumidity = float(a.aveHumidity) train_data.append([aveWindSpeed, maxTemp, minTemp]) data = 
result_weatherOutage.select('OutageIND').collect() for a in data: #train_labels.append(float(a.OutageIND)) if np.isnan(a.OutageIND): train_labels.append(float('nan')) else: train_labels.append(float(a.OutageIND)) #train_data_temp = np.array(train_data).reshape(-1,1) train_data_temp = np.array(train_data) train_labels_temp = np.array(train_labels).reshape(-1,1) #train_data = (np.array(train_data))[(~np.isnan(train_data_temp)) and (~np.isnan(train_labels_temp))] #train_labels = (np.array(train_labels))[(~np.isnan(train_data_temp)) and (~np.isnan(train_labels_temp))] #train_labels = train_labels_temp[~np.isnan(train_data_temp)] #train_data = train_data_temp[~np.isnan(train_data_temp)] #test_data = forecast_data_v3.get_forecast() train_labels = train_labels_temp[~np.isnan(train_data_temp).any(1)] train_data = train_data_temp[~np.isnan(train_data_temp).any(1)] test_data = forecast_data_v4.get_forecast() #train_data = train_data.reshape(-1,1) #train_labels = train_labels.reshape(-1,1) #test_data = test_data.reshape(-1,1) #print(steel_thread.random_prediction()) #prediction_results = steel_thread.steel_thread_prediction(train_data, train_labels, test_data) #print(prediction_results) #print(type(prediction_results)) result_probabilities, result_predictions = ml_processing.lr_prediction(train_data, train_labels, test_data) outage_probabilities = result_probabilities[:,1] prediction_results = result_predictions #dates = forecast_data_v3.get_dates() dates = forecast_data_v4.get_dates() # print(dates) # print(type(dates)) np.reshape(prediction_results,(4,1)) np.reshape(outage_probabilities, (4,1)) np.reshape(dates,(4,1)) # print(prediction_results.shape) # print(dates.shape) #test_results = [1,0,0,0] #t = np.asarray(test_results) #np.reshape(t,(4,1)) state = ['Rhode Island', 'Rhode Island', 'Rhode Island', 'Rhode Island'] city = ['Providence', 'Providence', 'Providence', 'Providence'] s = np.asarray(state) c = np.asarray(city) np.reshape(s,(4,1)) np.reshape(c,(4,1)) combined = np.vstack((s, c, dates, outage_probabilities, prediction_results)).T #combined = np.vstack((s, c, dates, outage_probabilities, t)).T final_df = pd.DataFrame(combined, columns = ['state', 'city', 'date', 'probability', 'outage']) final_df = hive_context.createDataFrame(final_df) final_df.write.mode("overwrite").saveAsTable('RI_Outage_Table') final_df.show() final_df.printSchema() #result = sqlContext.sql('SELECT outage, date, "Providence, RI" AS location FROM RI_Outage_Table') #result.show()
num1 = float(input ("enter a number: ")) print ("the cube is : " , num1**3) print ("the square is : ", num1**2)
# Simulation problem: walk the byte stream and validate each encoded character from typing import List class Solution: def validUtf8(self, data: List[int]) -> bool: n = len(data) i = 0 while i < n: if (data[i] >> 7) ^ 1: i += 1 else: cnt = 0 while (data[i] >> (7 - cnt)) & 1: cnt += 1 if cnt > 4: return False if cnt <= 1 or i + cnt > n: return False for j in range(i+1, i+cnt): if not ((data[j] >> 7) & 1 and (data[j] >> 6) ^ 1): return False i += cnt return True
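# Not part of the original solution: the two classic test cases for this problem, assuming the
# typing import added above.
s = Solution()
print(s.validUtf8([197, 130, 1]))  # True  -> a 2-byte char (110xxxxx 10xxxxxx) followed by a 1-byte char
print(s.validUtf8([235, 140, 4]))  # False -> a 3-byte lead without two valid continuation bytes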
# Copyright 2021-2022 NVIDIA Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import numpy as np import pytest import cunumeric as num from legate.core import LEGATE_MAX_DIM @pytest.mark.parametrize("ndim", range(LEGATE_MAX_DIM + 1)) @pytest.mark.parametrize("keepdims", [True, False]) def test_argmax_and_argmin(ndim, keepdims): shape = (5,) * ndim in_np = np.random.random(shape) in_num = num.array(in_np) for fn in ("argmax", "argmin"): fn_np = getattr(np, fn) fn_num = getattr(num, fn) assert np.array_equal( fn_np(in_np, keepdims=keepdims), fn_num(in_num, keepdims=keepdims) ) if in_num.ndim == 1: continue for axis in range(in_num.ndim): out_np = fn_np(in_np, axis=axis, keepdims=keepdims) out_num = fn_num(in_num, axis=axis, keepdims=keepdims) assert np.array_equal(out_np, out_num) if __name__ == "__main__": import sys sys.exit(pytest.main(sys.argv))
#!/usr/bin/env python #This script will pull ERA-Interim data from Dec. 1997 thru Dec. 2012 # #One input is accepted and sets the grid interval # #List of variables to get # 130 - Temperature (K) # 131 - Eastward wind component (m s^-1) # 132 - Westward wind component (m s^-1) # 133 - Specific Humidity (kg kg^-1) # 135 - Vertical Velocity (Pa s^-1) # 155 - Divergence (s^-1) # 157 - Relative Humidity # 246 - Cloud Liquid Water Content (kg kg^-1) # Standard atmospheric levels # 1000/925/850/700/500/400/300/250/200/150/100 import os import sys def julian_day_mod(year, month): days = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] if (year % 4) == 0: days[2] = days[2]+1 if (year % 100) == 0 and (year % 400) != 0: days[2] = days[2]-1 result = days[month] return result #Set grid interval if len(sys.argv) == 2: #Check for user input grid_int= str(sys.argv[1]) #Set user grid int else: grid_int = '1.5' #Set default grid int #Set other variables save_dir = '/Volumes/localdata/ERA_Interim/'; #save dir save_dir = save_dir+'RH_6H_600-400hPa_'+grid_int+'/'; #Save dir area = '90/0/-90/360' #Set the domain to use variables = '157' #Set variables to get levels = '600/550/500/450/400' #Set levels to get #Set date arrays for iteration year = [] #Initialize year month= [] #Initialize month for i in range(1979,2015): #Iterate over years for j in range(1,13): #Iterate over months year.append(i) #Append year to array month.append(j) #Append month to array #Check directory not exist, create it if not os.path.exists(save_dir): os.makedirs(save_dir) grid = grid_int+'/'+grid_int for date in range(len(year)): cur_year = year[date] cur_month= month[date] dd = julian_day_mod(cur_year, cur_month) if month[date] < 10: mm = '0'+str(cur_month) else: mm = str(cur_month) filename = save_dir+'RH_'+str(cur_year)+mm+'.nc' if os.path.isfile(filename): continue date = str(cur_year)+mm+'01/to/'+str(cur_year)+mm+str(dd) from ecmwfapi import ECMWFDataServer server = ECMWFDataServer() server.retrieve({ 'dataset' : "interim", 'step' : "00", 'time' : "00/06/12/18", 'stream' : "oper", 'levtype' : "pl", 'level' : levels, 'date' : date, 'origin' : "all", 'type' : "an", 'parm' : variables, 'grid' : grid, 'area' : area, 'format' : "netcdf", 'target' : filename })
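# Not part of the original script: a few spot checks of julian_day_mod, which returns the number of
# days in a month and applies the Gregorian century rule on top of the simple divisible-by-4 test.
print(julian_day_mod(2000, 2))  # 29 (divisible by 400, so still a leap year)
print(julian_day_mod(1900, 2))  # 28 (century year not divisible by 400)
print(julian_day_mod(2015, 4))  # 30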
# Author: Vyas K. Srinivasan # Bollinger Band Code - Calculate Bollinger Band import pandas as pd import numpy as np from utils import getData # Helper function to get rolling sum for a given lookback def getSMA(df_data, lookback=20): return df_data['Close'].rolling(lookback).mean().values # Helper function to get rolling std for a given lookback def getRollStd(df_data, lookback=20): return df_data['Close'].rolling(lookback).std().values # Helper function to get Bostian Volume Intensity def getBostian(df_data, lookback=20): np_dailyBostian = df_data['Volume']*(((2*df_data['Close'].values)-df_data['High'].values-df_data['Low'].values)/ \ (df_data['High'].values-df_data['Low'].values)) # just create a dataframe to get easy access to rolling df_temp = pd.DataFrame(np_dailyBostian) return np.squeeze(df_temp[[0]].rolling(lookback).sum().values) def getBollingerBandIndicator(df_data, lookback, band_width=2): np_prices = df_data['Close'].values np_sma = getSMA(df_data, lookback) np_rstd = getRollStd(df_data, lookback) np_top_bb_band = np_sma + (band_width*np_rstd) np_bottom_bb_band = np_sma - (band_width*np_rstd) np_bbp = (np_prices - np_bottom_bb_band) / (np_top_bb_band - np_bottom_bb_band) return np_bbp def getPositions(np_bbp, volume_indicator): np_decisions = np.zeros((np_bbp.shape[0])) i = 0 for bbp, vi in zip(np_bbp, volume_indicator): if(bbp < 0.0 and vi > 0.0): np_decisions[i] = 1 i = i + 1 continue if(bbp > 1.0 and vi < 0.0): np_decisions[i] = 0 i = i + 1 continue if(i != 0): np_decisions[i] = np_decisions[i-1] i = i + 1 return np_decisions def getPositions_bOnly(np_bbp): np_decisions = np.zeros((np_bbp.shape[0])) i = 0 for bbp in np_bbp: if(np.isnan(bbp)): i = i + 1 continue if(bbp < 0.0): np_decisions[i] = 1 i = i + 1 continue if(bbp > 1.0): np_decisions[i] = -1 i = i + 1 continue if(i != 0): np_decisions[i] = np_decisions[i-1] i = i + 1 return np_decisions def main(): df_data = getData() np_bbp = getBollingerBandIndicator(df_data, 20) np_vi = getBostian(df_data, 20) np_positions = getPositions(np_bbp, np_vi) if __name__ == "__main__": main()
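# Not part of the original module: a tiny numeric illustration of the %B value computed by
# getBollingerBandIndicator -- the close's position between the lower and upper bands.  The
# numbers are made up for the example.
sma, std, close = 100.0, 2.0, 105.0
top, bottom = sma + 2 * std, sma - 2 * std  # 104.0 and 96.0
bbp = (close - bottom) / (top - bottom)
print(bbp)  # 1.125 -> above the upper band, the region getPositions treats as an exit/short signal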
import tensorflow as tf mnist = tf.keras.datasets.mnist # Load the MNIST dataset (x_train, y_train),(x_test, y_test) = mnist.load_data() # Normalise pixel values to [0, 1] x_train, x_test = x_train / 255.0, x_test / 255.0 ''' Define the network: a flattened 28x28 input layer, one hidden layer with 128 neurons, and an output layer with 10 neurons ''' model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=(28, 28)), # input layer tf.keras.layers.Dense(128, activation='relu'), # hidden layer tf.keras.layers.Dense(10, activation='softmax') # output layer ]) ''' Define the loss function and the optimizer ''' model.compile(optimizer=tf.keras.optimizers.SGD(0.1), # optimizer loss=tf.keras.losses.sparse_categorical_crossentropy, # loss function metrics=['accuracy']) # report prediction accuracy ''' Train and save the model ''' model.fit(x_train, y_train, epochs=3) # train the model model.save('my_mnist_model.h5') # save the model
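# Not part of the original script: a short follow-up showing how the saved model could be loaded
# back and checked against the held-out test split prepared above.
restored = tf.keras.models.load_model('my_mnist_model.h5')
test_loss, test_acc = restored.evaluate(x_test, y_test, verbose=0)
print('test accuracy:', test_acc)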
import numpy as np import matplotlib.pyplot as plt def data_parse(filename, trainSplit = 0.5, returnMatches = False): ''' Data parsing function - usable for all datasets in Chen (2016) Input: filename - path to data file to read Output: matches - list of all individual matches in the dataset D - dataset "D" from Chen (2016) ''' f = open(filename, 'r') lines = f.readlines() matches_train = [] matches_test = [] D_train = [] D_test = [] matchCount = 0 for i, line in enumerate(lines): if (line[0:6]=='Player'): pass elif line[0:3]=='num': part1, part2 = line.split() if line[0:10] == 'numPlayers': numPlayers = int(part2) if line[0:8] == 'numGames': numGames = int(part2) else: part1, part2 = line.split() player1, player2 = part1.split(':') win1, win2 = part2.split(':') match_tuple = (int(player1), int(player2), int(win1), int(win2)) if matchCount < numGames*trainSplit: matches_train.append(match_tuple) else: matches_test.append(match_tuple) matchCount+=1 # Build training set winsA = [] winsB = [] matchUps = [] for match in matches_train: player1, player2, win1, win2 = match if (player1, player2) in matchUps: ind = matchUps.index((player1, player2)) winsA[ind] += win1 winsB[ind] += win2 else: matchUps.append((player1, player2)) winsA.append(win1) winsB.append(win2) # Build test set for i in range(len(matchUps)): playerA, playerB = matchUps[i] D_train.append((playerA, playerB, winsA[i], winsB[i])) winsA = [] winsB = [] matchUps = [] for match in matches_test: player1, player2, win1, win2 = match if (player1, player2) in matchUps: ind = matchUps.index((player1, player2)) winsA[ind] += win1 winsB[ind] += win2 else: matchUps.append((player1, player2)) winsA.append(win1) winsB.append(win2) for i in range(len(matchUps)): playerA, playerB = matchUps[i] D_test.append((playerA, playerB, winsA[i], winsB[i])) if returnMatches: return numPlayers, numGames, matches_train, D_train, matches_test, D_test else: return numPlayers, numGames, D_train, D_test def sigmoid(x): out = 1 / (1 + np.exp(-x)) return out def distance(theta1, theta2, mode = 'Euclidean'): ''' Inputs: theta1 - first vector (array like) theta2 - second vector (array like) mode - either 'Euclidean' or 'Riemannian' depending on which metric Output: dist - distance between theta1 and theta2 under given metric ''' if mode == 'Riemannian': a = 1 - np.linalg.norm(theta1)**2 b = 1 - np.linalg.norm(theta2)**2 dist = np.arccosh(1 + 2 * np.linalg.norm(theta1 - theta2)**2 / a / b) else: dist = np.linalg.norm(theta1 - theta2)**2 return dist def ddist(theta1, theta2, mode = 'Euclidean'): ''' Inputs: theta1 - first vector (array like) theta2 - second vector (array like) mode - either 'Euclidean' or 'Riemannian' depending on which metric Outputs: ddist1 - gradient of distance w.r.t. first input vector ddist2 - gradient of distance w.r.t. 
second input vector ''' if mode == 'Riemannian': # constants used in computing gradient alpha = 1 - np.linalg.norm(theta1)**2 beta = 1 - np.linalg.norm(theta2)**2 gamma = 1 + 2 / alpha / beta * np.linalg.norm(theta1 - theta2)**2 # compute gradients ddist1 = 4/beta/np.sqrt(gamma**2-1)*((np.linalg.norm(theta2)**2 - 2*theta1.dot(theta2) + 1)/alpha**2 * theta1 - 1 / alpha*theta2) ddist2 = 4/alpha/np.sqrt(gamma**2-1)*((np.linalg.norm(theta1)**2 - 2*theta1.dot(theta2) + 1)/beta**2 * theta2 - 1 / beta*theta1) else: ddist1 = 2 * (theta1 - theta2) ddist2 = -2 * (theta1 - theta2) return ddist1, ddist2 def matchup(playerA, playerB, mode = 'Euclidean'): ''' Inputs: playerA - tuple containing blade, chest, and strength parameter for playerA playerB - tuple containing blade, chest, and strength parameter for playerB mode - either 'Euclidean' (default) or 'Riemannian' for type of embedding Output: score - value of matchup function ''' bladeA, chestA, gammaA = playerA bladeB, chestB, gammaB = playerB score = distance(bladeB, chestA, mode = mode) - distance(bladeA, chestB, mode = mode) + gammaA - gammaB return score def dmatchup(playerA, playerB, mode = 'Euclidean'): ''' Inputs: playerA - tuple containing blade, chest, and strength parameter for playerA playerB - tuple containing blade, chest, and strength parameter for playerB mode - either 'Euclidean' or 'Riemannian' for type of embedding Outputs: playerAout - tuple containing derivatives for blade, chest, and strength parameter for A playerBout - tuple containing derivatives for blade, chest, and strength parameter for B ''' bladeA, chestA, gammaA = playerA bladeB, chestB, gammaB = playerB dchestA, dbladeB = ddist(chestA, bladeB, mode = mode) dbladeA, dchestB = ddist(bladeA, chestB, mode = mode) dbladeA *= -1 dchestB *= -1 dgammaA = 1 dgammaB = -1 playerAout = (dbladeA, dchestA, dgammaA) playerBout = (dbladeB, dchestB, dgammaB) return playerAout, playerBout def loss(playerA, playerB, wins, mode = 'Euclidean'): ''' Inputs: playerA - list of tuples containing parameters for the batch or minibatch for player A playerB - list of tuples containing parameters for the batch or minibatch for player A wins - list of tuples containing na and nb values Output: lossVal - value of the loss Note: function is overloaded to accommodate both batch updates/evaluations (batch GD) or single evaluations (for SGD) ''' if isinstance(playerA, list) and isinstance(playerB, list): lossVal = 0 for i in range(len(playerA)): match = matchup(playerA[i], playerB[i], mode = mode) na, nb = wins[i] lossVal += na*np.log(1 + np.exp(-match)) + nb*np.log(1 + np.exp(match)) else: match = matchup(playerA, playerB, mode = mode) na, nb = wins lossVal = na*np.log(1 + np.exp(-match)) + nb*np.log(1 + np.exp(match)) return lossVal def dloss(playerA, playerB, wins, mode = 'Euclidean'): ''' Inputs: playerA - list of tuples containing parameters for the batch or minibatch for player A playerB - list of tuples containing parameters for the batch or minibatch for player A wins - list of tuples containing na and nb values Output: dplayerA - gradients for player A's parameters (either list of tuples or a tuple) dplayerB - gradients for player B's parameters (either list of tuples or a tuple) ''' if isinstance(playerA, list) and isinstance(playerB, list): # Using recursion in the loop cleans up implementation for i in range(len(playerA)): dplayerA[i], dplayerB[i] = dloss(playerA[i], playerB[i], wins[i], mode = mode) else: match = matchup(playerA, playerB, mode = mode) na, nb = wins dl = -na * 
np.exp(-match) / (1 + np.exp(-match)) + nb * 1 / (1 + np.exp(-match)) dmatchA, dmatchB = dmatchup(playerA, playerB, mode = mode) dbladeA, dchestA, dgammaA = dmatchA dbladeB, dchestB, dgammaB = dmatchB dbladeA *= dl dchestA *= dl dgammaA *= dl dbladeB *= dl dchestB *= dl dgammaB *= dl dplayerA = (dbladeA, dchestA, dgammaA) dplayerB = (dbladeB, dchestB, dgammaB) return dplayerA, dplayerB def proj(theta, epsilon = 1e-3): ''' Inputs: theta - vector to scale epsilon - constant for numerical stability Output: thetaOut - scalaed theta ''' if np.linalg.norm(theta) >= 1: thetaOut = theta / np.linalg.norm(theta) - epsilon else: thetaOut = theta return thetaOut def SGD_update(theta, dtheta, alpha, mode = 'Euclidean'): ''' Inputs: theta - parameter at iteration t dtheta - gradient at iteration t alpha - learning rate at iteration t mode - 'Euclidean or 'Riemannian' Outputs: thetaNew - updated theta ''' if mode == 'Riemannian': thetaNew = proj(theta - alpha*(1 - np.linalg.norm(theta)**2)**2 / 4 * dtheta) else: thetaNew = theta - alpha*dtheta return thetaNew def evaluate(y, yhat): ''' Function to evaluate accuracy of predictions Inputs: y - true labels yhat - predicted labels Outputs: acc - accuracy ''' eqs = np.equal(y,yhat) acc = np.mean(eqs.astype(float)) return acc class blade_chest: ''' Class to hold and update blade-chest embedding parameters (blade/chest vectors and gamma's) ''' def __init__(self, numberOfPlayers, dim, bias = True, initParam = 1e-2, BT = False): ''' Initialize the blade_chest class Inputs: numberOfPlayers - total number of players for which to initialize parameters dim - dimensionality of the embeddings ''' self.BT = BT if self.BT: self.blades = np.zeros((numberOfPlayers,dim)) self.chests = np.zeros((numberOfPlayers,dim)) self.gammas = initParam*(np.random.uniform(size = (numberOfPlayers)) - 0.5) self.bias = bias else: self.blades = initParam*(np.random.uniform(size = (numberOfPlayers,dim)) - 0.5) self.chests = initParam*(np.random.uniform(size = (numberOfPlayers,dim)) - 0.5) self.gammas = np.zeros(numberOfPlayers) self.bias = True def SGD_optimizer(self, playerAnum, playerBnum, wins, alpha, mode = 'Euclidean', reg = 0): ''' Inputs: playerA - ID number of the first player playerB - ID number of the second player wins - tuple or list of tuples containing na and nb values mode - 'Euclidean' or 'Riemannian' for type of embedding Outputs ''' # Compute derivatives playerA = (self.blades[playerAnum,:], self.chests[playerAnum,:], self.gammas[playerAnum]) playerB = (self.blades[playerBnum,:], self.chests[playerBnum,:], self.gammas[playerBnum]) dplayerA, dplayerB = dloss(playerA, playerB, wins, mode = mode) # PLAYER A dblade, dchest, dgamma = dplayerA dRegBlade, dRegChest = ddist(self.blades[playerAnum,:], self.chests[playerAnum,:], mode = mode) if not self.BT: self.blades[playerAnum,:] = SGD_update(self.blades[playerAnum,:], dblade+reg*dRegBlade, alpha, mode = mode) self.chests[playerAnum,:] = SGD_update(self.chests[playerAnum,:], dchest+reg*dRegChest, alpha, mode = mode) if self.bias: self.gammas[playerAnum] = SGD_update(self.gammas[playerAnum], dgamma, alpha) # PLAYER B dblade, dchest, dgamma = dplayerB dRegBlade, dRegChest = ddist(self.blades[playerBnum,:], self.chests[playerBnum,:], mode = mode) if not self.BT: self.blades[playerBnum,:] = SGD_update(self.blades[playerBnum,:], dblade+reg*dRegBlade, alpha, mode = mode) self.chests[playerBnum,:] = SGD_update(self.chests[playerBnum,:], dchest+reg*dRegChest, alpha, mode = mode) if self.bias: self.gammas[playerBnum] = 
SGD_update(self.gammas[playerBnum], dgamma, alpha) def accuracy(self, D, mode = 'Euclidean'): ''' Inputs: D - data over which to evaluate the accuracy. List of tuples (playerA, playerB, na, nb) Outputs: acc - accuracy ''' acc = 0 Nprime = 0 for i in range(len(D)): playerAnum, playerBnum, na, nb = D[i] playerA = (self.blades[playerAnum,:], self.chests[playerAnum,:], self.gammas[playerAnum]) playerB = (self.blades[playerBnum,:], self.chests[playerBnum,:], self.gammas[playerBnum]) prob = sigmoid(matchup(playerA, playerB, mode = mode)) acc += na*int(prob > 0.5) + nb*int(prob <= 0.5) Nprime += na + nb return acc/Nprime def naive_train(self, numPlayers, Dtrain): ''' Input: D - datapoints for predictions ''' numer = np.ones((numPlayers, numPlayers)) denom = 2*np.ones((numPlayers, numPlayers)) for d in Dtrain: playerAnum, playerBnum, na, nb = d numer[playerAnum, playerBnum] += na denom[playerAnum, playerBnum] += na + nb numer[playerBnum, playerAnum] += nb denom[playerBnum, playerAnum] += na + nb self.naiveMtx = numer / denom def naive_eval(self, Deval): ''' Input: D - datapoints for predictions ''' matchMtx = self.naiveMtx acc = 0 Nprime = 0 for d in Deval: playerAnum, playerBnum, na, nb = d acc += na*int(matchMtx[playerAnum, playerBnum] > 0.5) + nb*int(matchMtx[playerAnum, playerBnum] <= 0.5) Nprime += na + nb return acc/Nprime
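# A minimal usage sketch, not part of the original module: it assumes the
# blade_chest class above plus the sigmoid/distance helpers defined earlier in
# the same file, and the toy match records and hyperparameters (dim, alpha,
# number of epochs) are invented purely for illustration.
if __name__ == '__main__':
    np.random.seed(0)
    # Hypothetical (playerA, playerB, wins_A, wins_B) records.
    toy_matches = [(0, 1, 3, 1), (1, 2, 2, 2), (0, 2, 1, 4)]
    model = blade_chest(numberOfPlayers=3, dim=2)
    for epoch in range(50):
        np.random.shuffle(toy_matches)
        for a, b, na, nb in toy_matches:
            model.SGD_optimizer(a, b, (na, nb), alpha=0.05, mode='Euclidean')
    print('toy training accuracy:', model.accuracy(toy_matches, mode='Euclidean'))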
# Write, append to, and then read back a small text file.
myFile = open('first.txt', 'w')           # Open the file for writing
myFile.write('hello Python\n')
myFile.close()

myFile = open('first.txt', 'a')           # Open the file to append something
myFile.write('hello Python 3.5 ')
myFile.close()

myFile = open('first.txt')
# print(myFile.readline())                # Read a single line from the file
for line in open('first.txt'):            # Using an iterator to read the file
    print(line, end='')
myFile.close()
import pymysql.cursors

# The connection credentials were redacted in the original; the placeholder
# strings below need to be replaced with real values.
connection = pymysql.connect(host='hostname',
                             user='username',
                             password='password',
                             db='dbname',
                             charset='utf8',
                             cursorclass=pymysql.cursors.DictCursor)
cur = connection.cursor()

cur.execute('select name from fixed_card_data where redirect_name not like ""')
names = cur.fetchall()
for name in names:
    # print(name)
    print(name["name"])
    cur.execute('select fixed_card_id from fixed_card_data where redirect_name = %s',
                (name["name"],))
    cardid = cur.fetchall()
    # print(name["name"] + "'s back-side card id is " + str(cardid[0]["fixed_card_id"]) + ".")
    # print(cardid[0])
    print(cardid[0]["fixed_card_id"])
    cur.execute('update fixed_card_data set redirect_id = %s where name = %s',
                (cardid[0]["fixed_card_id"], name["name"]))
connection.commit()
# from rest_framework import serializers from mywing.angel.models import Angel from mywing.task.serializers import TaskSerializer class AngelSerializer(serializers.ModelSerializer): owned_tasks = TaskSerializer(many=True, read_only=True) helped_tasks = TaskSerializer(many=True, read_only=True) class Meta: model = Angel fields = ('id', 'real_name', 'contribution', 'owned_tasks', 'helped_tasks') read_only_fields = ('id', 'real_name', 'contribution') class CASLoginSerializer(serializers.Serializer): domain = serializers.CharField() service = serializers.CharField() ticket = serializers.CharField()
import pandas as pd import pydeck import altair as alt import folium from vega_datasets import data COLOR_BREWER_BLUE_SCALE = [ [240, 249, 232], [204, 235, 197], [168, 221, 181], [123, 204, 196], [67, 162, 202], [8, 104, 172], ] mydf = pd.DataFrame({ 'name': ['Constanta', 'Turin', 'Madrid', 'Copenhagen', 'Berlin'], 'lat': [45.1598, 45.0703, 40.4168, 55.6761, 52.5200], 'long': [28.6348, 7.6869, -3.7038, 12.5683, 13.40] }) descriptions = ['Was born here!', 'Grew up here.', 'Studied psychology', 'Worked at the IT university', 'Home since 2020'] mydf['descr'] = descriptions mydf['tooltip'] = mydf.name + ' | ' + mydf.descr def get_map(): layer = pydeck.Layer( "HeatmapLayer", mydf, opacity=0.9, get_position=["long", "lat"], get_fill_color=[180, 0, 200, 140], color_range = COLOR_BREWER_BLUE_SCALE, get_weight=1) view_state = pydeck.ViewState( longitude=11.5820, latitude=48.1351, zoom=2, min_zoom=2, max_zoom=2, pitch=0, bearing=0) r = pydeck.Deck(layers=[layer], initial_view_state=view_state, map_style='light') return r def get_altair_map(tip_size=400): countries = alt.topo_feature(data.world_110m.url, 'countries') points = alt.Chart(mydf).mark_circle().encode( longitude='long', latitude='lat', size=alt.value(tip_size), tooltip='tooltip' ).project( type= 'mercator', scale= 350, center= [20,50], clipExtent= [[0, 0], [400, 300]], ).properties( width=500, height=400 ) background = alt.Chart(countries).mark_geoshape( fill='#CCCCCC', stroke='white' ).project( type= 'mercator', scale= 350, # Magnify center= [20,50], # [lon, lat] clipExtent= [[0, 0], [400, 300]], # [[left, top], [right, bottom]] ).properties( width=400, height=300 ) return background + points def get_folium_map(): # center on Liberty Bell z = 4 m = folium.Map(location=[48.1351, 11.5820], zoom_start=z, min_zoom=z, max_zoom=z, zoom_control=False) for i, row in mydf.iterrows(): folium.Marker([row['lat'], row['long']], tooltip=row['name'] + '. ' + row['descr'],).add_to(m) return m
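# A hedged usage sketch, not part of the original file: it renders each of the
# three maps above to an HTML file. The output file names are invented;
# pydeck.Deck.to_html, altair.Chart.save and folium.Map.save are the standard
# export methods of those libraries.
if __name__ == '__main__':
    get_map().to_html('pydeck_heatmap.html')
    get_altair_map().save('altair_map.html')
    get_folium_map().save('folium_map.html')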
from django.urls import path from .views import (PostListView, PostDetailView, PostCreateView, PostUpdateView, PostDeleteView, UserPostListView ) from . import views #. is for the current directory """ urlpatterns after being called from django_project blog.urls, it will come to this folder and run a path with an empty string. when it runs it, it will invoke views.home NOTE: views.home replaced with PostDetailView.as_view, which is showing Post blog-home: using PostDetailView class, this page contains the post post-detail: based on the clicked post. it will only show information on that individual post # NOTE: Once clicked, any reference in the template page itself will be referenced as "object". Also, the path to the template is <app>/<model>_<viewtype>.html, example: blog/post/1/.html post-create: Generate a new field convention: <app>/<model>_<form>.html, example: blog/post/1/.html post-update: update a post It will use the post template to update the post. convention: <app>/<model>_<form>.html, example: blog/post/1/update.html post-delete: delete a post It will use the post_confirm_delete template to update the post. convention: <app>/<model>_<form>.html, example: blog/post/1/update.html blog-about: routes to about page using views.about function """ urlpatterns = [ path('', PostListView.as_view(), name='blog-home'), path('user/<str:username>', UserPostListView.as_view(), name='user-posts'), path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'), path('post/new/', PostCreateView.as_view(), name='post-create'), path('post/<int:pk>/update/', PostUpdateView.as_view(), name='post-update'), path('post/<int:pk>/delete/', PostDeleteView.as_view(), name='post-delete'), path('about/', views.about, name='blog-about'), ] """ Reason for error when using PostListView.as_view() looking for a view by: <app>/<model>_<viewtype>.html """
# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import logging import sys import time import traceback import botocore.exceptions from oslo_log import log import six from tempest.lib import base from tempest.lib import exceptions import testtools from ec2api_tempest_plugin import botocoreclient from ec2api_tempest_plugin import config as cfg CONF = cfg.CONF LOG = log.getLogger(__name__) logging.getLogger('botocore').setLevel(logging.INFO) logging.getLogger( 'botocore.vendored.requests.packages.urllib3.connectionpool' ).setLevel(logging.WARNING) logging.getLogger('paramiko.transport').setLevel(logging.WARNING) class EC2Waiter(object): def __init__(self, wait_func): self.wait_func = wait_func self.default_timeout = CONF.aws.build_timeout self.default_check_interval = CONF.aws.build_interval def _state_wait(self, f, f_args=None, f_kwargs=None, final_set=set(), error_set=('error')): if not isinstance(final_set, set): final_set = set((final_set,)) if not isinstance(error_set, set): error_set = set((error_set,)) interval = self.default_check_interval start_time = time.time() args = f_args if f_args is not None else [] kwargs = f_kwargs if f_kwargs is not None else {} try: old_status = status = f(*args, **kwargs) except exceptions.NotFound: old_status = status = "NotFound" while True: if status != old_status: LOG.info('State transition "%s" ==> "%s" %d second', old_status, status, time.time() - start_time) if status in final_set: return status if error_set is not None and status in error_set: raise testtools.TestCase.failureException( 'State changes to error state! ' 'While waiting for %s at "%s"' % (final_set, status)) dtime = time.time() - start_time if dtime > self.default_timeout: raise testtools.TestCase.failureException( 'State change timeout exceeded! 
' '(%ds) While waiting for %s at "%s"' % (dtime, final_set, status)) time.sleep(interval) interval += self.default_check_interval old_status = status try: status = f(*args, **kwargs) except exceptions.NotFound: status = "NotFound" def _state_wait_gone(self, f, f_args=None, f_kwargs=None): interval = self.default_check_interval start_time = time.time() args = f_args if f_args is not None else [] kwargs = f_kwargs if f_kwargs is not None else {} try: old_status = status = f(*args, **kwargs) while True: if status != old_status: LOG.info('State transition "%s" ==> "%s" %d second', old_status, status, time.time() - start_time) dtime = time.time() - start_time if dtime > self.default_timeout: raise testtools.TestCase.failureException( "State change timeout exceeded while waiting" " for deleting") time.sleep(interval) interval += self.default_check_interval old_status = status status = f(*args, **kwargs) except exceptions.NotFound: pass def wait_available(self, obj_id, final_set=('available')): self._state_wait(self.wait_func, f_args=[obj_id], final_set=final_set) def wait_delete(self, obj_id): self._state_wait_gone(self.wait_func, f_args=[obj_id]) def wait_no_exception(self, *args, **kwargs): interval = self.default_check_interval start_time = time.time() while True: try: self.wait_func(*args, **kwargs) return except Exception: pass dtime = time.time() - start_time if dtime > self.default_timeout: raise testtools.TestCase.failureException( "Timeout exceeded while waiting") time.sleep(interval) interval += self.default_check_interval def wait_for_result(self, *args, **kwargs): interval = self.default_check_interval start_time = time.time() while True: result = self.wait_func(*args, **kwargs) if result: return result dtime = time.time() - start_time if dtime > self.default_timeout: raise testtools.TestCase.failureException( "Timeout exceeded while waiting") time.sleep(interval) interval += self.default_check_interval def safe_setup(f): """A decorator used to wrap the setUpClass for safe setup.""" def decorator(cls): try: f(cls) except Exception as se: exc_info = sys.exc_info() LOG.exception("setUpClass failed: %s" % se) try: cls.tearDownClass() except Exception as te: LOG.exception("tearDownClass failed: %s" % te) six.reraise(*exc_info) return decorator def get_device_name_prefix(device_name): """Return device name without device number. 
/dev/sda1 -> /dev/sd /dev/vda -> /dev/vd """ dev_num_pos = 0 while '0' <= device_name[dev_num_pos - 1] <= '9': dev_num_pos -= 1 return device_name[:dev_num_pos - 1] class TesterStateHolder(object): ec2_client = None _instance = None def __new__(cls, *args, **kwargs): if not cls._instance: cls._instance = super(TesterStateHolder, cls).__new__( cls, *args, **kwargs) return cls._instance _ec2_enabled = None _vpc_enabled = None def get_ec2_enabled(self): if self._ec2_enabled is None: self._fill_attributes() return self._ec2_enabled def get_vpc_enabled(self): if self._vpc_enabled is None: self._fill_attributes() return self._vpc_enabled def _fill_attributes(self): self._ec2_enabled = False self._vpc_enabled = False data = self.ec2_client.describe_account_attributes() for item in data.get('AccountAttributes', []): if item['AttributeName'] == 'supported-platforms': for value in item['AttributeValues']: if value['AttributeValue'] == 'VPC': self._vpc_enabled = True if value['AttributeValue'] == 'EC2': self._ec2_enabled = True def skip_without_ec2(*args, **kwargs): """A decorator useful to skip tests if EC2-classic is not supported.""" def decorator(f): @functools.wraps(f) def wrapper(self, *func_args, **func_kwargs): if not TesterStateHolder().get_ec2_enabled(): msg = "Skipped because EC2-classic is not enabled" raise testtools.TestCase.skipException(msg) return f(self, *func_args, **func_kwargs) return wrapper return decorator def skip_without_vpc(*args, **kwargs): """A decorator useful to skip tests if VPC is not supported.""" def decorator(f): @functools.wraps(f) def wrapper(self, *func_args, **func_kwargs): if not TesterStateHolder().get_vpc_enabled(): msg = "Skipped because VPC is disabled" raise testtools.TestCase.skipException(msg) return f(self, *func_args, **func_kwargs) return wrapper return decorator def check_vpnaas_enabled(): if not CONF.aws.vpnaas_enabled: msg = ("Skipped VPN test as vpnaas is not available") raise testtools.TestCase.skipException(msg) def skip_without_vpnaas_enabled(*args, **kwargs): """A decorator useful to skip tests without specified network extension.""" def decorator(f): @functools.wraps(f) def wrapper(self, *func_args, **func_kwargs): check_vpnaas_enabled() return f(self, *func_args, **func_kwargs) return wrapper return decorator class EC2TestCase(base.BaseTestCase): """Recommended to use as base class for boto related test.""" # The trash contains cleanup functions and paramaters in tuples # (function, *args, **kwargs) _global_resource_trash_bin = {} _global_sequence = -1 @classmethod @safe_setup def setUpClass(cls): super(EC2TestCase, cls).setUpClass() if not CONF.service_available.ec2api: raise cls.skipException("ec2api is disabled") cls.client = botocoreclient.get_ec2_client( CONF.aws.ec2_url, CONF.aws.aws_region, CONF.aws.aws_access, CONF.aws.aws_secret, CONF.aws.ca_bundle) cls.s3_client = botocoreclient.get_s3_client( CONF.aws.s3_url, CONF.aws.aws_region, CONF.aws.aws_access, CONF.aws.aws_secret, CONF.aws.ca_bundle) TesterStateHolder().ec2_client = cls.client @classmethod def addResourceCleanUpStatic(cls, function, *args, **kwargs): """Adds CleanUp callable, used by tearDownClass. Recommended to a use (deep)copy on the mutable args. 
""" tb = traceback.extract_stack(limit=2) cls._global_sequence = cls._global_sequence + 1 cls._global_resource_trash_bin[cls._global_sequence] = (function, args, kwargs, tb[0]) return cls._global_sequence def setUp(self): super(EC2TestCase, self).setUp() self._resource_trash_bin = {} self._sequence = -1 def tearDown(self): fail_count = self.cleanUp(self._resource_trash_bin) super(EC2TestCase, self).tearDown() if fail_count: raise exceptions.TempestException("%d cleanUp operation failed" % fail_count) def addResourceCleanUp(self, function, *args, **kwargs): """Adds CleanUp callable, used by tearDown. Recommended to a use (deep)copy on the mutable args. """ tb = traceback.extract_stack(limit=2)[0] self._sequence = self._sequence + 1 self._resource_trash_bin[self._sequence] = (function, args, kwargs, tb) LOG.debug("For cleaning up: %s\n From: %s" % (self.friendly_function_call_str(function, *args, **kwargs), str((tb[0], tb[1], tb[2])))) return self._sequence def cancelResourceCleanUp(self, key): """Cancel Clean up request.""" del self._resource_trash_bin[key] # NOTE(andrey-mp): if ERROR in responce_code then treat object as deleted _VALID_CLEANUP_ERRORS = [ 'NotFound', 'Gateway.NotAttached' ] # NOTE(andrey-mp): function must return boolean - should we retry # deleting or not _HOOKED_CLEANUP_ERRORS = { ('delete_vpc', 'DependencyViolation'): ( 'delete_vpc_failed', lambda kwargs: kwargs['VpcId']) } _CLEANUP_WAITERS = { 'delete_vpc': ( 'get_vpc_waiter', lambda kwargs: kwargs['VpcId']), 'delete_subnet': ( 'get_subnet_waiter', lambda kwargs: kwargs['SubnetId']), 'delete_network_interface': ( 'get_network_interface_waiter', lambda kwargs: kwargs['NetworkInterfaceId']), 'terminate_instances': ( 'get_instance_waiter', lambda kwargs: kwargs['InstanceIds'][0]), 'delete_volume': ( 'get_volume_waiter', lambda kwargs: kwargs['VolumeId']), 'detach_volume': ( 'get_volume_attachment_waiter', lambda kwargs: kwargs['VolumeId']), 'delete_snapshot': ( 'get_snapshot_waiter', lambda kwargs: kwargs['SnapshotId']), 'deregister_image': ( 'get_image_waiter', lambda kwargs: kwargs['ImageId']), 'detach_vpn_gateway': ( 'get_vpn_gateway_attachment_waiter', lambda kwargs: kwargs['VpnGatewayId']), 'delete_vpn_connection': ( 'get_vpn_connection_waiter', lambda kwargs: kwargs['VpnConnectionId']), 'delete_customer_gateway': ( 'get_customer_gateway_waiter', lambda kwargs: kwargs['CustomerGatewayId']), 'delete_vpn_gateway': ( 'get_vpn_gateway_waiter', lambda kwargs: kwargs['VpnGatewayId']), 'disassociate_address': ( 'get_address_assoc_waiter', lambda kwargs: kwargs), } @classmethod def tearDownClass(cls): fail_count = cls.cleanUp(cls._global_resource_trash_bin) super(EC2TestCase, cls).tearDownClass() if fail_count: raise exceptions.TempestException("%d cleanUp operation failed" % fail_count) @classmethod def cleanUp(cls, trash_bin): """Calls the callables added by addResourceCleanUp, when you overwire this function dont't forget to call this too. 
""" fail_count = 0 trash_keys = sorted(trash_bin, reverse=True) for key in trash_keys: (function, pos_args, kw_args, tb) = trash_bin[key] try: LOG.debug("Cleaning up: %s\n From: %s" % (cls.friendly_function_call_str(function, *pos_args, **kw_args), str((tb[0], tb[1], tb[2])))) res = cls.cleanUpItem(function, pos_args, kw_args) if not res: fail_count += 1 LOG.error('Failure in cleanup for: %s' % str(kw_args)) except BaseException: fail_count += 1 LOG.exception('Failure in cleanup for: %s' % str(kw_args)) finally: del trash_bin[key] return fail_count @classmethod def cleanUpItem(cls, function, pos_args, kw_args): attempts_left = 10 interval = 1 deleted = False while not deleted and attempts_left > 0: try: function(*pos_args, **kw_args) deleted = True key = function.__name__ if key in cls._CLEANUP_WAITERS: (waiter, obj_id) = cls._CLEANUP_WAITERS[key] waiter = getattr(cls, waiter) obj_id = obj_id(kw_args) try: waiter().wait_delete(obj_id) except botocore.exceptions.ClientError: LOG.exception('Exception occurred in cleanup waiting') return False except botocore.exceptions.ClientError as e: error_code = e.response['Error']['Code'] for err in cls._VALID_CLEANUP_ERRORS: if err in error_code: deleted = True break else: hook_res = False key = (function.__name__, error_code) if key in cls._HOOKED_CLEANUP_ERRORS: (hook, obj_id) = cls._HOOKED_CLEANUP_ERRORS[key] hook = getattr(cls, hook) obj_id = obj_id(kw_args) hook_res = hook(obj_id) if not hook_res: LOG.error('Cleanup failed: %s', e, exc_info=True) return False LOG.error('Retrying cleanup due to: %s', e) time.sleep(interval) attempts_left -= 1 interval += 1 return deleted @classmethod def friendly_function_name_simple(cls, call_able): name = "" if hasattr(call_able, "im_class"): name += call_able.im_class.__name__ + "." 
name += call_able.__name__ return name @classmethod def friendly_function_call_str(cls, call_able, *args, **kwargs): string = cls.friendly_function_name_simple(call_able) string += "(" + ", ".join(map(str, args)) if len(kwargs): if len(args): string += ", " string += ", ".join("=".join(map(str, (key, value))) for (key, value) in kwargs.items()) return string + ")" @classmethod def _vpc_get_state(cls, vpc_id): try: data = cls.client.describe_vpcs(VpcIds=[vpc_id]) if not data['Vpcs']: raise exceptions.NotFound() return data['Vpcs'][0]['State'] except botocore.exceptions.ClientError: error_code = sys.exc_info()[1].response['Error']['Code'] if error_code == 'InvalidVpcID.NotFound': raise exceptions.NotFound() raise @classmethod def get_vpc_waiter(cls): return EC2Waiter(cls._vpc_get_state) @classmethod def _subnet_get_state(cls, subnet_id): try: data = cls.client.describe_subnets(SubnetIds=[subnet_id]) if not data['Subnets']: raise exceptions.NotFound() return data['Subnets'][0]['State'] except botocore.exceptions.ClientError: error_code = sys.exc_info()[1].response['Error']['Code'] if error_code == 'InvalidSubnetID.NotFound': raise exceptions.NotFound() raise @classmethod def get_subnet_waiter(cls): return EC2Waiter(cls._subnet_get_state) @classmethod def _address_assoc_get_state(cls, kwargs): try: ip = kwargs.get('PublicIp') alloc_id = kwargs.get('AllocationId') assoc_id = kwargs.get('AssociationId') if ip: data = cls.client.describe_addresses(PublicIps=[ip]) elif alloc_id: data = cls.client.describe_addresses(AllocationIds=[alloc_id]) elif assoc_id: data = cls.client.describe_addresses( Filters=[{'Name': 'association-id', 'Values': [assoc_id]}]) LOG.debug('Addresses: %s' % str(data.get('Addresses'))) if ('Addresses' in data and len(data['Addresses']) == 1 and data['Addresses'][0].get('InstanceId')): return 'available' raise exceptions.NotFound() except botocore.exceptions.ClientError: raise exceptions.NotFound() @classmethod def get_address_assoc_waiter(cls): return EC2Waiter(cls._address_assoc_get_state) @classmethod def _instance_get_state(cls, instance_id): try: data = cls.client.describe_instances(InstanceIds=[instance_id]) if not data['Reservations']: raise exceptions.NotFound() if not data['Reservations'][0]['Instances']: raise exceptions.NotFound() state = data['Reservations'][0]['Instances'][0]['State']['Name'] if state != 'terminated': return state raise exceptions.NotFound() except botocore.exceptions.ClientError: error_code = sys.exc_info()[1].response['Error']['Code'] if error_code == 'InvalidInstanceID.NotFound': raise exceptions.NotFound() raise @classmethod def get_instance_waiter(cls): return EC2Waiter(cls._instance_get_state) @classmethod def _network_interface_get_state(cls, ni_id): try: data = cls.client.describe_network_interfaces( NetworkInterfaceIds=[ni_id]) if not data['NetworkInterfaces']: raise exceptions.NotFound() return data['NetworkInterfaces'][0]['Status'] except botocore.exceptions.ClientError: error_code = sys.exc_info()[1].response['Error']['Code'] if error_code == 'InvalidNetworkInterfaceID.NotFound': raise exceptions.NotFound() raise @classmethod def get_network_interface_waiter(cls): return EC2Waiter(cls._network_interface_get_state) @classmethod def _volume_get_state(cls, volume_id): try: data = cls.client.describe_volumes(VolumeIds=[volume_id]) if not data['Volumes']: raise exceptions.NotFound() return data['Volumes'][0]['State'] except botocore.exceptions.ClientError: error_code = sys.exc_info()[1].response['Error']['Code'] if error_code == 
'InvalidVolume.NotFound': raise exceptions.NotFound() raise @classmethod def get_volume_waiter(cls): return EC2Waiter(cls._volume_get_state) @classmethod def _volume_attachment_get_state(cls, volume_id): try: data = cls.client.describe_volumes(VolumeIds=[volume_id]) volume = data['Volumes'][0] if 'Attachments' in volume and len(volume['Attachments']) > 0: return volume['Attachments'][0]['State'] raise exceptions.NotFound() except botocore.exceptions.ClientError: error_code = sys.exc_info()[1].response['Error']['Code'] if error_code == 'InvalidVolume.NotFound': raise exceptions.NotFound() raise @classmethod def get_volume_attachment_waiter(cls): return EC2Waiter(cls._volume_attachment_get_state) @classmethod def _snapshot_get_state(cls, snapshot_id): try: data = cls.client.describe_snapshots(SnapshotIds=[snapshot_id]) if not data['Snapshots']: raise exceptions.NotFound() return data['Snapshots'][0]['State'] except botocore.exceptions.ClientError: error_code = sys.exc_info()[1].response['Error']['Code'] if error_code == 'InvalidSnapshot.NotFound': raise exceptions.NotFound() raise @classmethod def get_snapshot_waiter(cls): return EC2Waiter(cls._snapshot_get_state) @classmethod def _image_get_state(cls, image_id): try: data = cls.client.describe_images(ImageIds=[image_id]) if not data['Images']: raise exceptions.NotFound() return data['Images'][0]['State'] except botocore.exceptions.ClientError: error_code = sys.exc_info()[1].response['Error']['Code'] if error_code == 'InvalidAMIID.NotFound': raise exceptions.NotFound() raise @classmethod def get_image_waiter(cls): return EC2Waiter(cls._image_get_state) @classmethod def _vpn_gateway_get_attachment_state(cls, vpn_gateway_id): try: data = cls.client.describe_vpn_gateways( VpnGatewayIds=[vpn_gateway_id]) attachments = data['VpnGateways'][0].get('VpcAttachments') if (not attachments or attachments[0]['State'] == 'detached'): raise exceptions.NotFound() return attachments[0]['State'] except botocore.exceptions.ClientError as ex: error_code = ex.response['Error']['Code'] if error_code == 'InvalidVpnGatewayID.NotFound': raise exceptions.NotFound() raise @classmethod def get_vpn_gateway_attachment_waiter(cls): return EC2Waiter(cls._vpn_gateway_get_attachment_state) @classmethod def _vpn_object_get_state(cls, func, kwargs, data_key, error_not_found): # NOTE(andrey-mp): use this for vpn_connection, vpn_gateway, # customer_gateway due to similar states try: data = func(**kwargs) if not data[data_key]: raise exceptions.NotFound() if data[data_key][0]['State'] == 'deleted': raise exceptions.NotFound() return data[data_key][0]['State'] except botocore.exceptions.ClientError as ex: error_code = ex.response['Error']['Code'] if error_code == error_not_found: raise exceptions.NotFound() raise @classmethod def _vpn_connection_get_state(cls, vpn_connection_id): return cls._vpn_object_get_state( cls.client.describe_vpn_connections, {'VpnConnectionIds': [vpn_connection_id]}, 'VpnConnections', 'InvalidVpnConnectionID.NotFound') @classmethod def get_vpn_connection_waiter(cls): return EC2Waiter(cls._vpn_connection_get_state) @classmethod def _customer_gateway_get_state(cls, customer_gateway_id): return cls._vpn_object_get_state( cls.client.describe_customer_gateways, {'CustomerGatewayIds': [customer_gateway_id]}, 'CustomerGateways', 'InvalidCustomerGatewayID.NotFound') @classmethod def get_customer_gateway_waiter(cls): return EC2Waiter(cls._customer_gateway_get_state) @classmethod def _vpn_gateway_get_state(cls, vpn_gateway_id): return cls._vpn_object_get_state( 
cls.client.describe_vpn_gateways, {'VpnGatewayIds': [vpn_gateway_id]}, 'VpnGateways', 'InvalidVpnGatewayID.NotFound') @classmethod def get_vpn_gateway_waiter(cls): return EC2Waiter(cls._vpn_gateway_get_state) @classmethod def _vpn_connection_get_route_state(cls, vpn_connection_id, destination_cidr_block=None): try: data = cls.client.describe_vpn_connections( VpnConnectionIds=[vpn_connection_id]) try: route = next( r for r in data['VpnConnections'][0]['Routes'] if r['DestinationCidrBlock'] == destination_cidr_block) except StopIteration: raise exceptions.NotFound() if route['State'] == 'deleted': raise exceptions.NotFound() return route['State'] except botocore.exceptions.ClientError as ex: error_code = ex.response['Error']['Code'] if error_code == 'InvalidVpnGatewayID.NotFound': raise exceptions.NotFound() raise @classmethod def get_vpn_connection_route_waiter(cls, destination_cidr_block): return EC2Waiter( functools.partial(cls._vpn_connection_get_route_state, destination_cidr_block=destination_cidr_block)) @classmethod def _vpn_connection_get_tunnel_up_state(cls, vpn_connection_id): data = cls.client.describe_vpn_connections( VpnConnectionIds=[vpn_connection_id]) for item in data['VpnConnections'][0].get('VgwTelemetry', []): if 'UP' == item['Status']: return 'UP' raise exceptions.NotFound() @classmethod def get_vpn_connection_tunnel_waiter(cls): return EC2Waiter(cls._vpn_connection_get_tunnel_up_state) @classmethod def delete_vpc_failed(cls, vpc_id): try: LOG.warning('VpnGateways: ' + str(cls.client.describe_vpn_gateways( Filters=[{'Name': 'attachment.vpc-id', 'Values': [vpc_id]}] )['VpnGateways'])) LOG.warning('RouteTables: ' + str(cls.client.describe_route_tables( Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}] )['RouteTables'])) return True except Exception: LOG.exception('Error occurred during "delete_vpc_failed" hook') return False def assertEmpty(self, list_obj, msg=None): self.assertTrue(len(list_obj) == 0, msg) def assertNotEmpty(self, list_obj, msg=None): self.assertTrue(len(list_obj) > 0, msg) def assertRaises(self, error_code, fn, rollback_fn=None, **kwargs): try: fn_data = fn(**kwargs) if rollback_fn: try: rollback_fn(fn_data) except Exception: LOG.exception('Rollback failed') msg = ("%s hasn't returned exception for params %s" % (str(fn.__name__), str(kwargs))) raise self.failureException(msg) except botocore.exceptions.ClientError as e: self.assertEqual(error_code, e.response['Error']['Code']) # NOTE(andrey-mp): Helpers zone def get_instance(self, instance_id): data = self.client.describe_instances(InstanceIds=[instance_id]) self.assertEqual(1, len(data.get('Reservations', []))) instances = data['Reservations'][0].get('Instances', []) self.assertEqual(1, len(instances)) return instances[0] def get_instance_bdm(self, instance_id, device_name): # device_name=None means getting bdm of root instance device instance = self.get_instance(instance_id) if not device_name: device_name = instance.get('RootDeviceName') if not device_name: return None bdms = instance.get('BlockDeviceMappings') if bdms is None: return None bdt = [bdt for bdt in bdms if bdt['DeviceName'] == device_name] return None if len(bdt) == 0 else bdt[0] def run_instance(self, clean_dict=None, **kwargs): kwargs.setdefault('ImageId', CONF.aws.image_id) kwargs.setdefault('InstanceType', CONF.aws.instance_type) kwargs.setdefault('Placement', {'AvailabilityZone': CONF.aws.aws_zone}) kwargs['MinCount'] = 1 kwargs['MaxCount'] = 1 data = self.client.run_instances(*[], **kwargs) instance_id = 
data['Instances'][0]['InstanceId'] res_clean = self.addResourceCleanUp(self.client.terminate_instances, InstanceIds=[instance_id]) self.get_instance_waiter().wait_available(instance_id, final_set=('running')) if clean_dict is not None: clean_dict['instance'] = res_clean return instance_id def create_vpc_and_subnet(self, cidr): data = self.client.create_vpc(CidrBlock=cidr) vpc_id = data['Vpc']['VpcId'] self.addResourceCleanUp(self.client.delete_vpc, VpcId=vpc_id) self.get_vpc_waiter().wait_available(vpc_id) data = self.client.create_subnet(VpcId=vpc_id, CidrBlock=cidr, AvailabilityZone=CONF.aws.aws_zone) subnet_id = data['Subnet']['SubnetId'] self.addResourceCleanUp(self.client.delete_subnet, SubnetId=subnet_id) return vpc_id, subnet_id def prepare_route(self, vpc_id, gw_id): data = self.client.describe_route_tables( Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}]) self.assertEqual(1, len(data['RouteTables'])) route_table_id = data['RouteTables'][0]['RouteTableId'] kwargs = { 'DestinationCidrBlock': '0.0.0.0/0', 'RouteTableId': route_table_id, 'GatewayId': gw_id } self.client.create_route(*[], **kwargs) return route_table_id def create_and_attach_internet_gateway(self, vpc_id): data = self.client.create_internet_gateway() gw_id = data['InternetGateway']['InternetGatewayId'] self.addResourceCleanUp(self.client.delete_internet_gateway, InternetGatewayId=gw_id) data = self.client.attach_internet_gateway(VpcId=vpc_id, InternetGatewayId=gw_id) self.addResourceCleanUp(self.client.detach_internet_gateway, VpcId=vpc_id, InternetGatewayId=gw_id) return gw_id
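# A hedged illustration, not part of the original module: a sketch of how the
# EC2TestCase helpers above could be combined in a concrete test. The class
# name, test name and CIDR block are invented for illustration only.
class VpcInstanceExampleTest(EC2TestCase):

    @skip_without_vpc()
    def test_run_instance_in_new_vpc(self):
        vpc_id, subnet_id = self.create_vpc_and_subnet('10.7.0.0/24')
        gw_id = self.create_and_attach_internet_gateway(vpc_id)
        self.prepare_route(vpc_id, gw_id)
        instance_id = self.run_instance(SubnetId=subnet_id)
        instance = self.get_instance(instance_id)
        self.assertEqual(subnet_id, instance['SubnetId'])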
import collections.abc import difflib import io import mmap import platform from typing import BinaryIO, Callable, Collection, Sequence, TypeVar, Union import numpy as np import torch from torchvision._utils import sequence_to_str __all__ = [ "add_suggestion", "fromfile", "ReadOnlyTensorBuffer", ] def add_suggestion( msg: str, *, word: str, possibilities: Collection[str], close_match_hint: Callable[[str], str] = lambda close_match: f"Did you mean '{close_match}'?", alternative_hint: Callable[ [Sequence[str]], str ] = lambda possibilities: f"Can be {sequence_to_str(possibilities, separate_last='or ')}.", ) -> str: if not isinstance(possibilities, collections.abc.Sequence): possibilities = sorted(possibilities) suggestions = difflib.get_close_matches(word, possibilities, 1) hint = close_match_hint(suggestions[0]) if suggestions else alternative_hint(possibilities) if not hint: return msg return f"{msg.strip()} {hint}" D = TypeVar("D") def _read_mutable_buffer_fallback(file: BinaryIO, count: int, item_size: int) -> bytearray: # A plain file.read() will give a read-only bytes, so we convert it to bytearray to make it mutable return bytearray(file.read(-1 if count == -1 else count * item_size)) def fromfile( file: BinaryIO, *, dtype: torch.dtype, byte_order: str, count: int = -1, ) -> torch.Tensor: """Construct a tensor from a binary file. .. note:: This function is similar to :func:`numpy.fromfile` with two notable differences: 1. This function only accepts an open binary file, but not a path to it. 2. This function has an additional ``byte_order`` parameter, since PyTorch's ``dtype``'s do not support that concept. .. note:: If the ``file`` was opened in update mode, i.e. "r+b" or "w+b", reading data is much faster. Be aware that as long as the file is still open, inplace operations on the returned tensor will reflect back to the file. Args: file (IO): Open binary file. dtype (torch.dtype): Data type of the underlying data as well as of the returned tensor. byte_order (str): Byte order of the data. Can be "little" or "big" endian. count (int): Number of values of the returned tensor. If ``-1`` (default), will read the complete file. """ byte_order = "<" if byte_order == "little" else ">" char = "f" if dtype.is_floating_point else ("i" if dtype.is_signed else "u") item_size = (torch.finfo if dtype.is_floating_point else torch.iinfo)(dtype).bits // 8 np_dtype = byte_order + char + str(item_size) buffer: Union[memoryview, bytearray] if platform.system() != "Windows": # PyTorch does not support tensors with underlying read-only memory. In case # - the file has a .fileno(), # - the file was opened for updating, i.e. 'r+b' or 'w+b', # - the file is seekable # we can avoid copying the data for performance. Otherwise we fall back to simply .read() the data and copy it # to a mutable location afterwards. try: buffer = memoryview(mmap.mmap(file.fileno(), 0))[file.tell() :] # Reading from the memoryview does not advance the file cursor, so we have to do it manually. file.seek(*(0, io.SEEK_END) if count == -1 else (count * item_size, io.SEEK_CUR)) except (AttributeError, PermissionError, io.UnsupportedOperation): buffer = _read_mutable_buffer_fallback(file, count, item_size) else: # On Windows just trying to call mmap.mmap() on a file that does not support it, may corrupt the internal state # so no data can be read afterwards. Thus, we simply ignore the possible speed-up. 
buffer = _read_mutable_buffer_fallback(file, count, item_size) # We cannot use torch.frombuffer() directly, since it only supports the native byte order of the system. Thus, we # read the data with np.frombuffer() with the correct byte order and convert it to the native one with the # successive .astype() call. return torch.from_numpy(np.frombuffer(buffer, dtype=np_dtype, count=count).astype(np_dtype[1:], copy=False)) class ReadOnlyTensorBuffer: def __init__(self, tensor: torch.Tensor) -> None: self._memory = memoryview(tensor.numpy()) self._cursor: int = 0 def tell(self) -> int: return self._cursor def seek(self, offset: int, whence: int = io.SEEK_SET) -> int: if whence == io.SEEK_SET: self._cursor = offset elif whence == io.SEEK_CUR: self._cursor += offset pass elif whence == io.SEEK_END: self._cursor = len(self._memory) + offset else: raise ValueError( f"'whence' should be ``{io.SEEK_SET}``, ``{io.SEEK_CUR}``, or ``{io.SEEK_END}``, " f"but got {repr(whence)} instead" ) return self.tell() def read(self, size: int = -1) -> bytes: cursor = self.tell() offset, whence = (0, io.SEEK_END) if size == -1 else (size, io.SEEK_CUR) return self._memory[slice(cursor, self.seek(offset, whence))].tobytes()
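# A hedged usage sketch, not part of the original module: it writes a small
# big-endian float32 array to a temporary file with NumPy and reads it back
# through fromfile; the use of tempfile here is illustrative only.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryFile(mode="r+b") as f:
        np.arange(4, dtype=">f4").tofile(f)
        f.flush()
        f.seek(0)
        t = fromfile(f, dtype=torch.float32, byte_order="big")
        assert torch.equal(t, torch.arange(4, dtype=torch.float32))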
import sys import numpy as np import scipy as sp import math from scipy.io import loadmat from scipy.signal import medfilt from keras import backend as K from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers import MaxPooling1D, Conv1D, Activation from keras.optimizers import SGD from keras.utils import to_categorical from keras.callbacks import LearningRateScheduler from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt max_len = 96735 channel_num = 25 epochs = 3 def create_CNN(): global max_len global channel_num print('Build CNN model...') if K.image_data_format() == 'channels_first': shape_ord = (1, 60000) else: shape_ord = (60000, 1) model = Sequential(( Conv1D(32, 10, activation='relu', input_shape=shape_ord), Conv1D(64, 10, activation='relu'), MaxPooling1D(pool_size=10), Flatten(), Dense(64, activation='relu'), Dense(2, activation='sigmoid'), )) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) return model def create_perceptron(): global max_len global channel_num print('Build Perceptron model...') if K.image_data_format() == 'channels_first': shape_ord = (1, 60000) else: shape_ord = (60000, 1) model = Sequential() model.add(Flatten(input_shape=shape_ord)) model.add(Dense(128)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(128)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(256)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(256)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(256)) model.add(Activation('relu')) model.add(Dense(2)) model.add(Activation('sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) return model def load_data(v): global max_len subjects = [1,2,3,4,5,6,7,8,9] X = [] Y = [] count = 0 data_dir = '../data/four_class_motor_imagery' for i in subjects: D = loadmat(data_dir + '/P%02d' % i + v + '.mat' ) sessions = D['sessions'] for sess in sessions: X_sess = sess[0][0] if X_sess.shape[0] != max_len: continue ix = X_sess.shape[0] X_sess = np.vstack([X_sess, np.zeros((max_len - X_sess.shape[0], channel_num))]) if X_sess.shape[0] > max_len: continue gender = sess[0][1][0] if gender == 'male': gender = 0 else: gender = 1 print(X_sess.shape) print(gender) X.append(X_sess.T) Y.append([gender for x in range(25)]) count+=1 return np.array(X).reshape(count*channel_num,96735), np.array(Y).reshape(count*channel_num,1) def info(type, value, tb): if hasattr(sys, 'ps1') or not sys.stderr.isatty(): sys.__excepthook__(type, value, tb) else: import traceback, pdb traceback.print_exception(type, value, tb) print pdb.pm() def normalize(X): return (X - np.min(X))/(np.max(X) + np.min(X)) def step_decay(epoch): initial_lrate = 0.1 drop = 0.5 epochs_drop = 10.0 lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop)) return lrate model = create_CNN() #model = create_perceptron() sys.excepthook = info seed = 7 np.random.seed(seed) # for reproducibility # get training data X_train, Y_train = load_data('T') n_train = X_train.shape[0] # split into train/validation X_train, X_valid, Y_train, Y_valid = train_test_split(X_train, Y_train, test_size=0.33, random_state=seed) Y_train = to_categorical(Y_train, 2).reshape((Y_train.shape[0],2)) Y_valid = to_categorical(Y_valid, 2).reshape((Y_valid.shape[0],2)) # remove outliers X_train[X_train>80] = 80 X_train[X_train<-80] = -80 # mean and absolute min/max normalization across channels for i in 
range(15): X_train[i,:] = X_train[i,:] - np.mean(X_train[i,:]) ma = np.max(X_train[i,:]) mi = np.min(X_train[i,:]) if np.abs(ma) > np.abs(mi): X_train[i,:] = X_train[i,:]/np.abs(ma) else: X_train[i,:] = X_train[i,:]/np.abs(mi) # median filtering X_train = medfilt(X_train) def moving_average(a, n=3) : ret = np.cumsum(a, dtype=float) ret[n:] = ret[n:] - ret[:-n] return ret[n - 1:] / n #X_train = moving_average(X_train) # get testing data X_test, Y_test = load_data('E') n_test = X_test.shape[0] Y_test = to_categorical(Y_test, 2).reshape((Y_test.shape[0],2)) # outlier removal X_test[X_test>80] = 80 X_test[X_test<-80] = -80 # mean and max/min normalization across channels for i in range(15): X_test[i,:] = X_test[i,:] - np.mean(X_test[i,:]) ma = np.max(X_test[i,:]) mi = np.min(X_test[i,:]) if np.abs(ma) > np.abs(mi): X_test[i,:] = X_test[i,:]/np.abs(ma) else: X_test[i,:] = X_test[i,:]/np.abs(mi) # median filtering X_test = medfilt(X_test) def moving_average(a, n=3) : ret = np.cumsum(a, dtype=float) ret[n:] = ret[n:] - ret[:-n] return ret[n - 1:] / n #X_test = moving_average(X_test) # step decay for gradient descent lrate = LearningRateScheduler(step_decay) callbacks_list = [lrate] #X_train = X_train[:,20000:60000] #X_valid = X_valid[:,20000:60000] #X_test = X_test[:,20000:60000] print(X_train.shape,Y_train.shape) print(X_valid.shape,Y_valid.shape) print(X_test.shape, Y_test.shape) # train hist = model.fit(np.expand_dims(X_train,axis=2), Y_train, epochs=epochs, validation_data=(np.expand_dims(X_valid,axis=2), Y_valid)) # print training results fig_loss = plt.figure() plt.xlabel('Epochs') plt.ylabel('Loss') plt.plot(hist.history['loss']) plt.plot(hist.history['val_loss']) plt.legend(['Training', 'Validation']) fig_acc = plt.figure() plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.plot(hist.history['acc']) plt.plot(hist.history['val_acc']) plt.legend(['Training', 'Validation'], loc='lower right') # evaluating the model on the test data loss, accuracy = model.evaluate(np.expand_dims(X_test,axis=2), Y_test, verbose=0) print('Test Loss:', loss) print('Test Accuracy:', accuracy) fig_loss.savefig('fig_loss.png') fig_acc.savefig('fig_acc.png')
from divisors import divisors def test_one(): assert divisors(1)==[1] def test_two(): assert divisors(2)==[1,2] def test_three_to_nine(): assert divisors(3)==[1,3] assert divisors(4)==[1,2,4] assert divisors(5)==[1,5] assert divisors(6)==[1,2,3,6] assert divisors(7)==[1,7] assert divisors(8)==[1,2,4,8] assert divisors(9)==[1,3,9] def test_thirty_six(): assert divisors(36)==[1,2,3,4,6,9,12,18,36] #def test_no_2_in_3(): # assert divisors(3) # assert 2 in div_list == False # this ^^^ did not work... # gave NameError: name 'div_list' is not defined #def test_2_not_in_3(): # assert 2 in divisors(3).div_list == False # this ^^^ did not work either... # gave AttributeError: 'list' object has no attribute 'div_list' # aha! it goes like this: def test_for_not_divisors(): assert 2 not in divisors(3) assert 4 not in divisors(7) assert 5 not in divisors(9)
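# A hedged sketch, not part of the original test file: the divisors module
# itself is not shown, so this is only an assumption of an implementation that
# would satisfy the tests above (all divisors of n, in ascending order).
def divisors(n):
    # Every k in 1..n that divides n evenly.
    return [k for k in range(1, n + 1) if n % k == 0]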
num_alunos = 5
nomes = []
notas = []
media = 0
for i in range(num_alunos):
    nomes.append(input('Informe o nome do aluno: '))
    notas.append(float(input('Informe a nota de ' + nomes[i] + ': ')))
    media = media + notas[i]
media = media / num_alunos
print('A media da turma é: ', media)
for i in range(num_alunos):
    if notas[i] > media:
        print('Parabens', nomes[i])
import alpaca_trade_api as tradeapi import yfinance as yf import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' import coloredlogs import tensorflow as tf import numpy as np import time import sys from agent import Agent from methods import eval_model_new from utils import get_stock_data, get_state from get_all_tickers import get_tickers as gt import matplotlib.pyplot as plt #tf.get_logger().setLevel("ERROR") #import logging #tf.get_logger().setLevel(logging.ERROR) buy_agg = .5 sell_agg = .5 getters = gt ts = [input("stock ticker: ")] #ts = ["CIDM", "LI", "AAPL", "BA", "IVR", "XAN", "MSFT", "TSLA", "GE", "F", "GM", "TWTR", "GEVO", "H", "FB", "GOOG", "LXRX", "OCGN", "CTRM", "GOLD", "AMZN", "CANG", "BNGO", "SPCE", "SRPT", "NAKD", "NFLX", "SNDL"] #ts = gt.get_tickers_filtered(mktcap_min=1000) ts = sorted(list(set(ts)), key=lambda x: len(x)) d = {0: "HOLD", 1: "BUY", 2: "SELL"} tickers = [yf.Ticker(a) for a in ts] agent = Agent(30, model_name=sys.argv[1]) agent.first_iter = False buys = [] sells = [] for t in range(len(tickers)): hist = tickers[t].history(period="3mo") #tup = agent.act(hist, True) dc = {} for key in hist.keys(): #print(hist[key]) #print(key) dc[key] = list(hist[key])[-30:] hist = get_stock_data("", d=dc) #print(hist) state = get_state(hist, 30, 30) #print(state) tup = agent.act(state, True) mod = tf.keras.models.Model(inputs = agent.model.inputs, outputs=agent.model.layers[2].output) o = mod.predict(state)[0] #o_min, o_max = o.min(), o.max() #plt.imshow((o - o_min)/(o_max-o_min), cmap='gray') plt.imshow(o) plt.xticks([]) plt.yticks([]) plt.savefig('conv_output.png', dpi=400) #print(state) print("ticker: {} | {} | {}".format(ts[t], d[tup[0]], tup[1])) if tup[0]==1: buys.append((ts[t], tup[1])) elif tup[0]==2: sells.append((ts[t], tup[1])) buys.sort(key=lambda q: q[1], reverse=True)
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from pants.backend.build_files.fmt.base import FmtBuildFilesRequest from pants.backend.python.lint.black import subsystem as black_subsystem from pants.backend.python.lint.black.rules import _run_black from pants.backend.python.lint.black.subsystem import Black from pants.backend.python.subsystems.python_tool_base import get_lockfile_interpreter_constraints from pants.core.goals.fmt import FmtResult from pants.engine.rules import collect_rules, rule from pants.util.logging import LogLevel class BlackRequest(FmtBuildFilesRequest): tool_subsystem = Black @rule(desc="Format with Black", level=LogLevel.DEBUG) async def black_fmt(request: BlackRequest.Batch, black: Black) -> FmtResult: black_ics = await get_lockfile_interpreter_constraints(black) return await _run_black(request, black, black_ics) def rules(): return [ *collect_rules(), *BlackRequest.rules(), *black_subsystem.rules(), ]
""" Author : Lily Date : 2018-09-21 QQ : 339600718 赫妍 HERA Hera-s 抓取思路:所有页面在同一个页面上,直接解析页面即可 Url: http://www.hera.com/cn/zh/misc/store.html """ import requests import re import datetime from lxml import etree filename = "Hera-s" + re.sub('[^0-9]', '', str(datetime.datetime.now())) + ".csv" url = "http://www.hera.com/cn/zh/misc/store.html" html = requests.get(url).text html_lxml = etree.HTML(html) stores = html_lxml.xpath('//*[@id="main"]/div/div/div/div') with open(filename, 'w', encoding='utf-8') as f: f.write('name,address,lat,lon,\n') for store in stores: name = store.xpath('./ul/li[1]//text()')[0] address = store.xpath('./ul/li[2]//text()')[0] lat = store.xpath('./div/div/div/@data-lat')[0] lon = store.xpath('./div/div/div/@data-lng')[0] f.write(name + "," + address + "," + lat + "," + lon + ",\n") f.close()
import cv2
import segmentation as seg
import preprocessingImage as ppi


# Class containing logic for counting areas in a grayscale image; the image is
# processed in 2 stages:
# stage 1: pre-processing
# stage 2: segmentation
class count_area:

    # Display properties of a gray image (static: it does not use instance state).
    @staticmethod
    def properties_image(gray):
        print('Type of the image : ', type(gray))
        print('Shape of the image : {}'.format(gray.shape))
        print('Image Height {}'.format(gray.shape[0]))
        print('Image Width {}'.format(gray.shape[1]))
        print('Dimension of Image {}'.format(gray.ndim))
        print('Image size {}'.format(gray.size))

    def count_areas(self, fileName, height, width):
        # stage 1: pre-processing
        image, gray, isBin = ppi.ReadImage(fileName, int(height), int(width))
        levels = ppi.countDistinctGrayLevelsInImage(gray)
        # *** uncomment this code to view the original image ***
        # if isBin == 0:
        #     cv2.imshow("Original", image)
        # stage 2: segmentation
        countArr = seg.count_areas_per_shade(levels, isBin, image, gray)
        seg.display(countArr)
#!/usr/bin/env python # -*- coding: utf-8 -*- from datetime import datetime import csv class htls(object): """ HTLS """ def __init__(self, word = None): """ Do init. """ ## Read txt file. self.aw = open('./behao2.txt', 'r').read().split('\n')[:-1] ## set the HTLS vars. self.hts = ['成熟運','發展運','巔峰運','老化運','病變運','破滅運','谷底運','蘊釀運','吸收運','成長運'] self.htss = ['90-126','126-162','162-198','198-234','234-270','270-306','306-342','342-18','18-54','54-90'] self.lss = ['名','財','官','利','交','拜','衰','煞','絕'] ## Run. if word: self.s(word) def s(self, word): """ search Stroke """ total = 0 s_op = '' for r in word.decode('utf-8'): for i in self.aw: ii = i.split(' ') if r in ii[2].decode('utf-8'): total += int(ii[0]) s_op += '%s %2s %s\n' % (r.encode('utf-8'), ii[0], ii[1]) self.total = total self.s_op = s_op def ht(self, age): """ HT cal. """ o = self.hts ## ['成熟運','發展運','巔峰運','老化運','病變運','破滅運','谷底運','蘊釀運','吸收運','成長運'] oo = self.htss ## ['90-126','126-162','162-198','198-234','234-270','270-306','306-342','342-18','18-54','54-90'] behao = self.total % 10 if age < 10: age += 10 t_age = abs(age - behao) return '%s (%s)' % (o[t_age % 10],oo[t_age % 10]) def ls(self, year = datetime.today().year - 1911): """ LS cal. """ name = self.total % 9 ll = self.lss ## ['名','財','官','利','交','拜','衰','煞','絕'] if year < 9: year += 9 return '%s' % ll[abs(year - name) % 9] def all(self, age, year = datetime.today().year - 1911): """ All in one. age for ht, year for ls. """ re = self.s_op + '總計 ' + str(self.total) + '\n' if age:re += '河圖:' + self.ht(int(age)) + '\n' re += '洛書:' + self.ls(int(year)) return re def masscal(q): """ mass cal. q must be dict. """ re = ['姓名 年齡 流年 筆劃 河圖 角度 洛書'] times = 0 for i in q: try: if i: name, age, year = i.replace('\t', ' ').split(' ') cal = htls(name.encode('utf-8')) re.append('%s %s %s %s %s %s %s' % (name.encode('utf-8'), str(age), str(year), str(cal.total), str(cal.ht(int(age))), str(cal.ls(int(year))), str(cal.s_op.replace('\n', ' ')))) else: re.append('') except: re.append('Format Fault.') times += 1 return re,times def htexp(q): """ Explain ht result.""" csv_read = csv.reader(open('./htlssay2.csv','r')) ii = 1 re = {} for i in csv_read: print ii hh = '' for h in i[1:]: hh += '● ' + h + '\n' re[i[0]] = hh ii += 1 return re[q]
# The classic Hubot Shipit script. import random from espresso.main import robot squirrels = [ "http://images.cheezburger.com/completestore/2011/11/2/aa83c0c4-2123-4bd3-8097-966c9461b30c.jpg", "http://images.cheezburger.com/completestore/2011/11/2/46e81db3-bead-4e2e-a157-8edd0339192f.jpg", "http://28.media.tumblr.com/tumblr_lybw63nzPp1r5bvcto1_500.jpg", "http://i.imgur.com/DPVM1.png", "http://d2f8dzk2mhcqts.cloudfront.net/0772_PEW_Roundup/09_Squirrel.jpg", "http://www.cybersalt.org/images/funnypictures/s/supersquirrel.jpg", "http://www.zmescience.com/wp-content/uploads/2010/09/squirrel.jpg", "http://img70.imageshack.us/img70/4853/cutesquirrels27rn9.jpg", "http://img70.imageshack.us/img70/9615/cutesquirrels15ac7.jpg", "https://dl.dropboxusercontent.com/u/602885/github/sniper-squirrel.jpg", "http://1.bp.blogspot.com/_v0neUj-VDa4/TFBEbqFQcII/AAAAAAAAFBU/E8kPNmF1h1E/s640/squirrelbacca-thumb.jpg", "https://dl.dropboxusercontent.com/u/602885/github/soldier-squirrel.jpg", "https://dl.dropboxusercontent.com/u/602885/github/squirrelmobster.jpeg", ] @robot.hear(r'(?i)ship\s*it') def ship_it(res): res.send(random.choice(squirrels))
# -*- coding: utf-8 -*- """ @author: tut_group_50 """ import numpy as np import csv from sklearn.preprocessing import LabelEncoder def load_data(folder): """ Loads the data to numpy arrays Parameters: folder: foldername in working dir that contains the data or path to folder Returns: X - the provided learning data (np array) X_kaggle - the data that needs to be predicted (np array) y - labels for the provided learning data (encoded) groups - np array with block id for corresponding samples lenc - label encoder """ print("Loading training samples...") X = np.load(folder + '/X_train_kaggle.npy') print("Loading testing samples (Kaggle)...") X_test_kaggle = np.load(folder + '/X_test_kaggle.npy') y = [] groups = [] print("Loading labels & groups...") with open(folder + '/groups.csv') as csvfile: reader = csv.reader(csvfile, delimiter=',') for row in reader: if '#' in row[0]: continue else: groups.append(row[1]) y.append(row[2]) groups = np.array(list(map(int, groups))) print(X.shape) print(X_test_kaggle.shape) print('groups:') print(groups) print(y) print(len(y)) lenc = LabelEncoder() y = lenc.fit_transform(y) print("Data loading done.") return X, X_test_kaggle, y, groups, lenc
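# A hedged usage sketch, not part of the original module: the folder name
# 'dataset' is invented, and scikit-learn's GroupShuffleSplit is used so that
# samples from the same measurement block never end up in both splits.
if __name__ == '__main__':
    from sklearn.model_selection import GroupShuffleSplit

    X, X_test_kaggle, y, groups, lenc = load_data('dataset')
    splitter = GroupShuffleSplit(n_splits=1, test_size=0.2, random_state=0)
    train_idx, valid_idx = next(splitter.split(X, y, groups=groups))
    X_train, y_train = X[train_idx], y[train_idx]
    X_valid, y_valid = X[valid_idx], y[valid_idx]
    print(X_train.shape, X_valid.shape)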
import unittest import json from flask import current_app as app from unittest.mock import patch from app.test.base import BaseTestCase class TestHealthcheckBlueprint(BaseTestCase): def test_healthcheck_api_with_good_config(self): response = self.client.get('/healthcheck/') result = json.loads(response.data.decode()) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) self.assertEqual(True, all(result['result'])) class TestHealthcheckBlueprintWithBadConfig(BaseTestCase): def setUp(self): # Inject some bad config here # but don't call parent app.config.update({ 'ELASTICSEARCH_URL': 'bad', 'REDIS_HOST': 'bad', 'SQLALCHEMY_DATABASE_URI': 'bad' }) def tearDown(self): # Don't call parent pass def test_healthcheck_api_with_bad_config(self): response = self.client.get('/healthcheck/') result = json.loads(response.data.decode()) self.assertEqual('application/json', response.content_type) self.assertEqual(500, response.status_code) if __name__ == '__main__': unittest.main()
import socket
import time
from contextlib import closing
import datetime
import numpy as np


def main():
    host = '127.0.0.1'
    port = 4000
    count = 0
    dat = 1
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    n_cnt = 400
    time_range = n_cnt  # 24 * 30
    dates = [datetime.datetime(2017, 1, 1) + datetime.timedelta(hours=i) for i in range(time_range)]
    vals = [np.sin(2 * np.pi * i / 240) for i in range(time_range)]
    with closing(sock):
        while True:
            # message = "fgwrp"
            message = 'AT1 ' + str(dates[count]) + ' ' + str(vals[count])
            print(message)
            # sendto() needs bytes in Python 3, so encode the message first.
            sock.sendto(message.encode(), (host, port))
            count += 1
            dat *= -0.98
            if count > n_cnt - 1:
                count = 0
                dat = 1
            time.sleep(0.1)
    return


if __name__ == '__main__':
    main()
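# A hedged companion sketch, not part of the original script: a minimal UDP
# receiver for the 'AT1 <timestamp> <value>' datagrams sent by main() above,
# bound to the same host and port; purely illustrative.
def receive(host='127.0.0.1', port=4000):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind((host, port))
    with closing(sock):
        while True:
            data, addr = sock.recvfrom(1024)
            tag, payload = data.decode().split(' ', 1)
            print(tag, payload, 'from', addr)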
from selenium.webdriver import ActionChains


class MainMenu():
    def __init__(self, driver):
        self.driver = driver

    def goto_users_sub_menu(self):
        # Hover Admin -> User Management -> Users, then click.
        admin_menu = self.driver.find_element_by_xpath('//*[@id="menu_admin_viewAdminModule"]/b')
        user_management = self.driver.find_element_by_id('menu_admin_UserManagement')
        users = self.driver.find_element_by_id('menu_admin_viewSystemUsers')
        ActionChains(self.driver) \
            .move_to_element(admin_menu) \
            .move_to_element(user_management) \
            .move_to_element(users) \
            .click() \
            .perform()
get_age = int(input("Enter your age: "))
if get_age <= 13:
    print("Kids are allowed from Gate No. 3")
elif get_age <= 40:
    print("Adults are allowed from Gate No. 4")
else:
    print("Senior citizens are allowed from Gate No. 5")
import unittest from katas.kyu_7.batman_quotes import BatmanQuotes class BatmanQuotesTestCase(unittest.TestCase): def test_equals(self): self.assertEqual(BatmanQuotes.get_quote([ 'WHERE IS SHE?!', 'Holy haberdashery, Batman!', 'Let\'s put a smile on that faaaceee!'], 'Rob1n' ), 'Robin: Holy haberdashery, Batman!')
class Vec3: def __init__(self, x=0, y=0, z=0): self.x = x self.y = y self.z = z def __add__(self, rhs): c = self.clone() c += rhs return c def __iadd__(self, rhs): self.x += rhs.x self.y += rhs.y self.z += rhs.z return self def length(self): return self.lengthSqr() ** .5 def lengthSqr(self): return self.x * self.x + self.y * self.y + self.z * self.z def __mul__(self, k): c = self.clone() c *= k return c def __imul__(self, k): self.x *= k self.y *= k self.z *= k return self def clone(self): return Vec3(self.x, self.y, self.z) def __neg__(self): return Vec3(-self.x, -self.y, -self.z) def __sub__(self, rhs): return self.__add__(-rhs) def __isub__(self, rhs): return self.__iadd__(-rhs) def __repr__(self): return "Vec3(%s,%s,%s)"%(self.x,self.y,self.z) def __iter__(self): return iter((self.x, self.y, self.z)) def _map(self, func): self.x = func(self.x) self.y = func(self.y) self.z = func(self.z) def __cmp__(self, rhs): dx = self.x - rhs.x if dx != 0: return dx dy = self.y - rhs.y if dy != 0: return dy dz = self.z - rhs.z if dz != 0: return dz return 0 def __eq__(self, rhs): if self.x == rhs.x and self.y == rhs.y and self.z == rhs.z: return True return False def iround(self): self._map(lambda v:int(v+0.5)) def ifloor(self): self._map(int) def rotateLeft(self): self.x, self.z = self.z, -self.x def rotateRight(self): self.x, self.z = -self.z, self.x def testVec3(): # Note: It's not testing everything # 1.1 Test initialization it = Vec3(1, -2, 3) assert it.x == 1 assert it.y == -2 assert it.z == 3 assert it.x != -1 assert it.y != +2 assert it.z != -3 # 2.1 Test cloning and equality clone = it.clone() assert it == clone it.x += 1 assert it != clone # 3.1 Arithmetic a = Vec3(10, -3, 4) b = Vec3(-7, 1, 2) c = a + b assert c - a == b assert c - b == a assert a + a == a * 2 assert a - a == Vec3(0,0,0) assert a + (-a) == Vec3(0,0,0) # Test repr e = eval(repr(it)) assert e == it if __name__ == "__main__": testVec3()
import numpy as np
import glob
import os
import queue

from bbcliutils.rztdata import ExecContexts
from bbcliutils.rztdata.RZTData import RZTData


def generate_queue(source):
    queue_array = [filename for filename in glob.glob(source)]
    queue_array.sort()
    file_queue = queue.Queue()
    for i in range(len(queue_array)):
        file_queue.put(item=queue_array[i])
    return file_queue


def print_data():
    folder_queue = generate_queue(source="/home/sagnikb/sa_model_data_all/SAMPLE_*")
    for _ in range(int(folder_queue.qsize())):
        foldername = folder_queue.get()
        file_queue = generate_queue(source=str(foldername) + "/*.csv")
        for _ in range(int(file_queue.qsize())):
            filename = file_queue.get()
            base_name = os.path.basename(str(filename))
            ds_config = dict()
            filesize = os.path.getsize(str(filename)) / (1024 * 1024)
            ds_config["path"] = str(filename)
            ds_config["encoding"] = "utf-8"
            ds_config["header"] = "True"
            rzt_prod = RZTData(ExecContexts.prod)
            df = rzt_prod.read(ds_config)
            n_rows = df.count()
            n_cols = len(df.cols())
            # Only compute the per-column stats when both key columns are present.
            if "CUST_ID" in list(df.cols()) and "FORACID" in list(df.cols()):
                uniques_custid = len(df.unique(key="CUST_ID"))
                nas_custid = df["CUST_ID"].na_info()
                uniques_foracid = len(df.unique(key="FORACID"))
                nas_foracid = df["FORACID"].na_info()
            else:
                uniques_custid = 0
                nas_custid = 0
                uniques_foracid = 0
                nas_foracid = 0
            print(base_name, filesize, n_rows, n_cols,
                  uniques_custid, uniques_foracid, nas_custid, nas_foracid)


print_data()