text
stringlengths
8
6.05M
from enum import Enum
from itertools import combinations
import random


class Color(Enum):
    red = 1
    green = 2
    blue = 3


class Shape(Enum):
    circle = 1
    triangle = 2
    square = 3


class Number(Enum):
    one = 1
    two = 2
    three = 3


class Fill(Enum):
    full = 1
    half = 2
    none = 3


class Card(object):
    """One Set card: a value for each of the four attributes."""

    def __init__(self, _color, _shape, _number, _fill):
        self.color = _color
        self.shape = _shape
        self.number = _number
        self.fill = _fill

    def __str__(self):
        # Enum(member) is an identity lookup, kept from the original so the
        # string output stays "Color.red,Shape.circle,..." exactly.
        return str(Color(self.color)) + ',' + str(Shape(self.shape)) + ',' + \
            str(Number(self.number)) + ',' + str(Fill(self.fill))

    def getColor(self):
        return self.color

    def getShape(self):
        return self.shape

    def getNumber(self):
        return self.number

    def getFill(self):
        return self.fill

    def getAttributes(self):
        """Return the four attribute values in a fixed order."""
        return [self.getColor(), self.getShape(), self.getNumber(), self.getFill()]


class Deck(object):
    """Deck built from the cartesian product of the card attributes.

    number_of_attributes controls how many attributes actually vary; the
    rest stay pinned to their first enum value (a reduced practice deck).
    The default of 4 yields the full 81-card deck.
    """

    def __init__(self, number_of_attributes=4):
        self.attributes = [Color, Shape, Number, Fill]
        # Pin every attribute to its first value, then let the first
        # `number_of_attributes` pools range over their whole enum.
        pools = [[attr(1)] for attr in self.attributes]
        for i in range(number_of_attributes):
            pools[i] = self.attributes[i]
        self.cards = [Card(color, shape, number, fill)
                      for color in pools[0]
                      for shape in pools[1]
                      for number in pools[2]
                      for fill in pools[3]]

    def shuffle(self):
        random.shuffle(self.cards)

    def getCards(self):
        return self.cards

    def drawCards(self, num_of_cards):
        """Remove and return up to num_of_cards cards from the end of the deck.

        Fixed: the original returned the WHOLE deck for num_of_cards == 0,
        because cards[-0:] slices from the start.
        """
        if num_of_cards <= 0:
            return []
        cards = self.cards[-num_of_cards:]
        self.cards = self.cards[:-num_of_cards]
        return cards

    def isEmpty(self):
        return len(self.cards) == 0


class Set(object):
    """A candidate trio of cards (NOTE: name shadows the builtin `set`)."""

    def __init__(self, _card1, _card2, _card3):
        self.card1 = _card1
        self.card2 = _card2
        self.card3 = _card3
        self.attributes = ["color", "shape", "number", "fill"]

    def isAttributeValid(self, attribute):
        """True when the attribute is all-same or all-different on the trio.

        Equivalent to the original pairwise comparison: the set of the three
        values has size 1 (all equal) or 3 (all distinct); only size 2 fails.
        """
        values = {getattr(self.card1, attribute),
                  getattr(self.card2, attribute),
                  getattr(self.card3, attribute)}
        return len(values) != 2

    def isSetValid(self):
        """A set is valid when every attribute passes isAttributeValid."""
        return all(self.isAttributeValid(attribute) for attribute in self.attributes)


class Table(object):
    """The face-up cards currently in play."""

    def __init__(self, _cards):
        self.cards = _cards

    def getCards(self):
        return self.cards

    def removeCards(self, slots):
        # Delete from the highest index first so lower indices stay valid.
        for slot in sorted(slots, reverse=True):
            del self.cards[slot]

    def fillTable(self, deck):
        """Top the table back up to 12 cards, then add 3 at a time until a
        set is present or the deck runs out.

        Fixed: guard is now `> 0` — the original truthiness test also fired
        for a negative count (more than 12 cards on the table), which fed a
        negative number into drawCards.
        """
        num_of_cards = 12 - len(self.cards)
        if num_of_cards > 0:
            self.cards.extend(deck.drawCards(num_of_cards))
        while not self.hasSet() and not deck.isEmpty():
            self.cards.extend(deck.drawCards(3))

    def hasSet(self):
        """True when any 3-card combination on the table is a valid set."""
        return any(Set(a, b, c).isSetValid()
                   for a, b, c in combinations(self.cards, 3))


class SetGame(object):
    """Game state: a deck plus a table dealt with 12 cards."""

    def __init__(self, _deck):
        self.deck = _deck
        self.table = Table(self.deck.drawCards(12))
        self.winner = None

    def getTable(self):
        return self.table

    def getDeck(self):
        return self.deck

    def isActive(self):
        """The game ends once the deck is empty and no set remains."""
        return not (self.deck.isEmpty() and not self.table.hasSet())


if __name__ == '__main__':
    print("Start set_game.py to play the game")
import math

import numpy as np
import tensorflow as tf
from sklearn.base import BaseEstimator, TransformerMixin
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt

np.random.seed(14)
tf.random.set_seed(14)


class GAN(BaseEstimator, TransformerMixin):
    """Fully-connected GAN for tabular data, wrapped as a scikit-learn estimator.

    The two networks are described by parameter dicts holding 'dimensions',
    'activations', 'input_dim' and (generator only) 'output_dim'.
    Sigmoid outputs assume the real data is normalized to [0, 1] —
    TODO confirm against the caller's preprocessing.
    """

    def __init__(self, discriminator_network_params, generator_network_params,
                 epochs=2000, batch_size=256, seed=42, dummy_fields_indices=None):
        self.discriminator_network_params = discriminator_network_params
        self.generator_network_params = generator_network_params
        self.epochs = epochs
        self.batch_size = batch_size
        self.seed = seed
        # Column indices of one-hot fields to round to {0, 1} after generation.
        self.dummy_fields_indices = dummy_fields_indices
        self.generator_model = None
        self.discriminator_model = None
        self.GAN_model = None
        self.data = None
        self.GAN_loss = []
        self.gen_loss = []
        self.gen_acc = []
        self.disc_loss = []
        self.disc_acc = []
        self.best_GAN_loss = math.inf
        self.best_weights_GAN = None
        self.best_weights_discriminator = None
        self.best_weights_generator = None
        self.create_model()

    def _create_generator_inputs(self, n):
        """Sample a standard-normal noise matrix of shape [n, gen_input_dim]."""
        gen_input_dim = self.generator_network_params['input_dim']
        return np.random.normal(0, 1, size=[int(n), gen_input_dim])

    def _generate_n_samples(self, n):
        """Sample n rows from the generator's output distribution.

        Returns (samples, labels): an [n, n_features] matrix and all-zero
        ("fake") labels.
        """
        generator_input = self._create_generator_inputs(n)
        fake_samples = self.generator_model.predict(generator_input)
        # Rounding one-hot fields to 0 or 1
        if self.dummy_fields_indices is not None:
            for index in self.dummy_fields_indices:
                fake_samples[:, index] = np.where(fake_samples[:, index] > 0.5, 1, 0)
        # Fixed: labels are sized by n; the original always returned
        # batch_size/2 labels regardless of how many samples were generated.
        return fake_samples, np.zeros((int(n), 1))

    def _real_samples(self):
        """Sample a half-batch from the real data, with all-one ("real") labels."""
        real_samples_indices = np.random.choice(
            self.data.shape[0], size=int(self.batch_size / 2), replace=False)
        real_samples = self.data[real_samples_indices]
        return real_samples, np.ones((int(self.batch_size / 2), 1))

    def _build_generator_model(self):
        """Create the generator network (noise -> synthetic sample)."""
        dimensions = self.generator_network_params['dimensions']
        input_dim = self.generator_network_params['input_dim']
        output_dim = self.generator_network_params['output_dim']
        activations = self.generator_network_params['activations']

        model = Sequential()
        model.add(Dense(dimensions[0], activations[0], input_dim=input_dim))
        for dimension, activation in zip(dimensions[1:], activations[1:]):
            model.add(Dense(dimension, activation))
        model.add(Dense(output_dim, activation='sigmoid'))
        print("Generator Model:\n")
        model.summary()
        self.generator_model = model

    def _build_discriminator_model(self):
        """Create the discriminator network (sample -> real/fake probability)."""
        dimensions = self.discriminator_network_params['dimensions']
        input_dim = self.discriminator_network_params['input_dim']
        activations = self.discriminator_network_params['activations']

        model = Sequential()
        model.add(Dense(dimensions[0], activations[0], input_dim=input_dim))
        for dimension, activation in zip(dimensions[1:], activations[1:]):
            model.add(Dense(dimension, activation))
        model.add(Dense(1, activation='sigmoid'))
        # `learning_rate` replaces the deprecated `lr` keyword.
        model.compile(loss='binary_crossentropy',
                      optimizer=Adam(learning_rate=1e-5),
                      metrics=['accuracy'])
        print("Discriminator Model:\n")
        model.summary()
        self.discriminator_model = model

    def _build_GAN(self):
        """Chain generator and discriminator into the combined GAN.

        The discriminator is frozen here so GAN training updates only the
        generator's weights.
        """
        self.discriminator_model.trainable = False
        GAN_model = Sequential()
        GAN_model.add(self.generator_model)
        GAN_model.add(self.discriminator_model)
        GAN_model.compile(optimizer=Adam(learning_rate=1e-5),
                          loss="binary_crossentropy",
                          metrics=['accuracy'])
        self.GAN_model = GAN_model

    def create_model(self):
        """Build generator, discriminator and the combined GAN."""
        self._build_generator_model()
        self._build_discriminator_model()
        self._build_GAN()

    def _do_iteration(self, epoch):
        """Run one training iteration of the whole GAN.

        Returns (avg_discriminator_loss, generator_loss) for this epoch.
        """
        # Getting half-batch of real samples and generated samples
        real_X, real_y = self._real_samples()
        generated_X, generated_y = self._generate_n_samples(self.batch_size / 2)

        # Training the discriminator first
        loss_for_real, accuracy_for_real = self.discriminator_model.train_on_batch(real_X, real_y)
        loss_for_generated, accuracy_for_generated = self.discriminator_model.train_on_batch(generated_X, generated_y)
        curr_avg_disc_loss = (loss_for_real + loss_for_generated) / 2
        curr_avg_disc_accuracy = (accuracy_for_real + accuracy_for_generated) / 2
        self.disc_loss.append(curr_avg_disc_loss)
        self.disc_acc.append(curr_avg_disc_accuracy)

        # A full batch of noise, labelled "real" so the generator is
        # rewarded for fooling the (frozen) discriminator.
        GAN_X_1 = self._create_generator_inputs(self.batch_size / 2)
        GAN_X_2 = self._create_generator_inputs(self.batch_size / 2)
        GAN_X = np.concatenate((GAN_X_1, GAN_X_2))
        GAN_y = np.ones((self.batch_size, 1))

        # Training the generator
        curr_generator_loss, curr_generator_accuracy = self.GAN_model.train_on_batch(GAN_X, GAN_y)
        self.gen_loss.append(curr_generator_loss)
        self.gen_acc.append(curr_generator_accuracy)

        # Boosting step: whichever of G and D is currently weaker gets up to
        # 3 extra training iterations, to keep the two in balance.
        for i in range(3):
            if self.gen_loss[-1] > self.disc_loss[-1]:
                GAN_X_1 = self._create_generator_inputs(self.batch_size / 2)
                GAN_X_2 = self._create_generator_inputs(self.batch_size / 2)
                GAN_X = np.concatenate((GAN_X_1, GAN_X_2))
                GAN_y = np.ones((self.batch_size, 1))
                self.GAN_model.train_on_batch(GAN_X, GAN_y)
            else:
                real_X, real_y = self._real_samples()
                generated_X, generated_y = self._generate_n_samples(self.batch_size / 2)
                self.discriminator_model.train_on_batch(real_X, real_y)
                self.discriminator_model.train_on_batch(generated_X, generated_y)

        print(f"Finished epoch {epoch} with batch_size={self.batch_size}.\n"
              f"Generator-loss = {curr_generator_loss}\t"
              f"Generator-accuracy = {curr_generator_accuracy}\n"
              f"Discriminator-loss = {curr_avg_disc_loss}\t"
              f"Discriminator-accuracy = {curr_avg_disc_accuracy}\n")
        return curr_avg_disc_loss, curr_generator_loss

    def fit(self, X, patiance, y=None):
        """Train the GAN on X with early stopping and best-weight restore.

        :param X: real data matrix (rows are samples)
        :param patiance: early-stop patience in epochs (sic — parameter name
            kept for backward compatibility)
        :param y: ignored, kept for scikit-learn API compatibility
        :return: self
        """
        if self.GAN_model is None:
            raise Exception("You should create the GAN model first (call create_model())")
        self.data = X
        not_improved = 0
        for epoch in range(self.epochs):
            discriminator_loss, generator_loss = self._do_iteration(epoch)
            last_gen_loss = generator_loss
            last_disc_loss = discriminator_loss
            # GAN loss = sum of both losses plus the G/D gap, so balanced
            # models score better.
            gap_factor = abs(last_gen_loss - last_disc_loss)
            curr_GAN_loss = last_gen_loss + last_disc_loss + gap_factor
            if curr_GAN_loss < self.best_GAN_loss:
                self.best_GAN_loss = curr_GAN_loss
                self.epoch_with_best_loss = epoch
                not_improved = 0
                self.best_weights_GAN = self.GAN_model.get_weights()
                self.best_weights_discriminator = self.discriminator_model.get_weights()
                self.best_weights_generator = self.generator_model.get_weights()
            else:
                not_improved += 1
            # Early-stop
            if not_improved > patiance:
                break

        # Loading the weights of the best model
        self.GAN_model.set_weights(self.best_weights_GAN)
        self.discriminator_model.set_weights(self.best_weights_discriminator)
        self.generator_model.set_weights(self.best_weights_generator)
        self.check_model_performance(100)
        print(f"*** Epoch with best weights: {self.epoch_with_best_loss} ***")
        return self

    def plot_loss_graph(self, loss, model_type, dataset):
        """Plot and save a loss-per-epoch graph under figures/."""
        fig = plt.figure()
        plt.plot(range(len(loss)), loss)
        # Fixed: plt.title/xlabel/ylabel are functions — the original
        # *assigned* to them, silently replacing the pyplot functions
        # and never labelling the figure.
        plt.title(f"Loss function of {model_type}")
        plt.xlabel("Epoch")
        plt.ylabel("Loss")
        plt.savefig(fname=f"figures/{dataset}_{model_type}_loss")
        plt.close()

    def plot_acc_graph(self, acc, model_type, dataset):
        """Plot and save an accuracy-per-epoch graph under figures/."""
        fig = plt.figure()
        plt.plot(range(len(acc)), acc)
        # Same assignment-instead-of-call fix as plot_loss_graph.
        plt.title(f"Accuracy function of {model_type}")
        plt.xlabel("Epoch")
        plt.ylabel("Accuracy")
        plt.savefig(fname=f"figures/{dataset}_{model_type}_acc")
        plt.close()

    def check_model_performance(self, n_samples):
        """Evaluate the model by running the discriminator on generated
        samples and relating them to the real data by euclidean distance.

        Returns a list of per-sample tuples:
        (generated, closest_real, dist_from_all_real_mean, dist_to_closest,
         closest_real_dist_from_rest, fooled_flag).
        """
        # Generates N samples and run the discriminator on them.
        generated_samples = self._generate_n_samples(n_samples)[0]
        discriminator_pred = self.discriminator_model.predict(generated_samples)

        # Distance of each real sample from the mean of the remaining ones.
        real_euclidean_distances = [
            np.linalg.norm(self.data[i] - np.delete(self.data, [i], axis=0).mean(axis=0))
            for i in range(len(self.data))]
        real_euclidean_dist_std = np.std(real_euclidean_distances)
        real_euclidean_dist_mean = np.mean(real_euclidean_distances)

        # Trackers for the best fooling sample and the worst non-fooling one.
        global_minimal_euclidean_dist = np.inf
        best_fooled_sample = None
        global_maximal_euclidean_dist = -1
        worst_fooled_sample = None
        tuples = []
        for i in range(len(generated_samples)):
            minimal_euclidean_dist = np.inf
            closest_real_sample = None
            ind_of_closest_real_sample = None
            # For each generated sample, check if it fooled the discriminator.
            if discriminator_pred[i] > 0.5:
                disc_fooled = 1
            else:
                disc_fooled = 0
            # In addition, find its closest real sample.
            for j in range(len(self.data)):
                current_euclidean_dist = np.linalg.norm(generated_samples[i] - self.data[j])
                if current_euclidean_dist < minimal_euclidean_dist:
                    minimal_euclidean_dist = current_euclidean_dist
                    closest_real_sample = self.data[j]
                    ind_of_closest_real_sample = j
            # Distance from the mean of all real samples.
            generated_dist_from_all_real = np.linalg.norm(generated_samples[i] - self.data.mean(axis=0))
            all_data_wo_closest = np.delete(self.data, [ind_of_closest_real_sample], axis=0)
            all_data_wo_closest_mean = all_data_wo_closest.mean(axis=0)
            real_euclidean_dist_from_mean = np.linalg.norm(closest_real_sample - all_data_wo_closest_mean)
            # Data structure to store all this data for each generated sample.
            tup = (generated_samples[i], closest_real_sample, generated_dist_from_all_real,
                   minimal_euclidean_dist, real_euclidean_dist_from_mean, disc_fooled)
            tuples.append(tup)
            if disc_fooled and minimal_euclidean_dist < global_minimal_euclidean_dist:
                global_minimal_euclidean_dist = minimal_euclidean_dist
                best_fooled_sample = tup
            elif not disc_fooled and minimal_euclidean_dist > global_maximal_euclidean_dist:
                global_maximal_euclidean_dist = minimal_euclidean_dist
                worst_fooled_sample = tup

        total_fooled = sum([tup[5] for tup in tuples])
        print("***************************************************************************************************")
        print("***************************************************************************************************")
        print(f"Percentage of generated samples that fooled the Discriminator: {(total_fooled / n_samples) * 100}%\n")
        print(f"Mean euclidean distance of real samples: {real_euclidean_dist_mean}\n")
        print(f"Standard-Deviation of euclidean distances of real samples: {real_euclidean_dist_std}\n")
        # Fixed: guard against no sample having fooled the discriminator —
        # the original crashed subscripting None here.
        if best_fooled_sample is not None:
            print(f"Example of a normalized sample that fooled the Discriminator: {best_fooled_sample[0]}\nClosest real-sample: {best_fooled_sample[1]}\n"
                  f"Euclidean distance between generated and all real samples: {best_fooled_sample[2]}\n"
                  f"Euclidean distance between generated and real: {best_fooled_sample[3]}\n"
                  f"Euclidean distance between real and other real samples: {best_fooled_sample[4]}")
        if worst_fooled_sample is not None:
            print(f"\nExample of a normalized sample that DID NOT fooled the Discriminator: {worst_fooled_sample[0]}\nClosest real-sample: {worst_fooled_sample[1]}\n"
                  f"Euclidean distance between generated and all real samples: {worst_fooled_sample[2]}\n"
                  f"Euclidean distance between generated and real: {worst_fooled_sample[3]}\n"
                  f"Euclidean distance between real and other real samples: {worst_fooled_sample[4]}")
        print("***************************************************************************************************")
        print("***************************************************************************************************")
        return tuples
# Label the axes of the pivoted frame so the heatmap is self-describing.
heatmap_prep.index.name = 'year'
heatmap_prep.columns.name = 'month'

# Draw the heatmap onto the explicitly created axes
# (fixed: the fresh axes was only used implicitly via pyplot state).
fig, ax = plt.subplots()
ax = sns.heatmap(heatmap_prep, cmap='Reds', ax=ax)
from PyQt5 import QtWidgets

from bsp.leveleditor.math.Plane import Plane
from bsp.leveleditor.mapobject.Solid import Solid
from bsp.leveleditor.mapobject.SolidFace import SolidFace
from bsp.leveleditor.mapobject.SolidVertex import SolidVertex
from bsp.leveleditor import LEUtils


class BaseBrush:
    """Common behaviour for brush generators; subclasses implement create()."""

    Name = "Brush"
    CanRound = True

    def __init__(self):
        self.controls = []
        self.controlsGroup = QtWidgets.QGroupBox(self.Name + " Options")
        self.controlsGroup.setLayout(QtWidgets.QFormLayout())

    def addControl(self, ctrl):
        # Labelled controls get a two-column row; unlabelled ones span it.
        layout = self.controlsGroup.layout()
        if ctrl.label:
            layout.addRow(ctrl.label, ctrl.control)
        else:
            layout.addRow(ctrl.control)
        self.controls.append(ctrl)
        return ctrl

    def create(self, generator, mins, maxs, material, roundDecimals, temp = False):
        raise NotImplementedError

    def makeSolid(self, generator, faces, material, temp = False, color = None):
        """Assemble a Solid from a list of face vertex loops.

        Temporary solids are shown as previews and parented to the scene
        graph; permanent ones get their geometry finalized.
        """
        solid = Solid(generator.getNextID())
        solid.setTemporary(temp)
        if color is not None:
            solid.setColor(color)
        for loop in faces:
            plane = Plane.fromVertices(loop[0], loop[1], loop[2])
            face = SolidFace(generator.getNextFaceID(), plane, solid)
            face.setMaterial(material)
            for point in loop:
                face.vertices.append(SolidVertex(point, face))
            solid.faces.append(face)
            face.alignTextureToFace()
            if temp:
                face.setPreviewState()
            face.generate()
        if not temp:
            solid.setToSolidOrigin()
            solid.generateFaces()
            solid.recalcBoundingBox()
        else:
            # NOTE(review): `base` looks like the Panda3D ShowBase global —
            # confirm it is in scope at runtime.
            solid.reparentTo(base.render)
        return solid
import boto3
from datetime import datetime


def create_s3_bucket(bucket_name):
    """Create an S3 bucket, upload a sample object and attach a lifecycle rule."""
    conn = boto3.client('s3')
    response = conn.create_bucket(
        Bucket=bucket_name,
    )

    # put objects to bucket
    response = conn.put_object(
        Bucket=bucket_name,
        Key='generateClassifier.py',
        Body='sample'
    )

    # set lifecycle on the same bucket
    # (fixed: the bucket name was hard-coded to the placeholder 'string').
    response = conn.put_bucket_lifecycle(
        Bucket=bucket_name,
        LifecycleConfiguration={
            'Rules': [
                {
                    # NOTE(review): S3 treats Date / Days /
                    # ExpiredObjectDeleteMarker as mutually exclusive inside
                    # Expiration (and Date/Days inside Transition) — confirm
                    # which one is intended before this call can succeed.
                    'Expiration': {
                        'Date': datetime(2019, 10, 1),
                        'Days': 10,
                        'ExpiredObjectDeleteMarker': True
                    },
                    'Prefix': 'abc',
                    'Status': 'Enabled',
                    'Transition': {
                        'Date': datetime(2019, 11, 1),
                        'Days': 10,
                        'StorageClass': 'GLACIER'
                    },
                    'NoncurrentVersionTransition': {
                        'NoncurrentDays': 10,
                        'StorageClass': 'GLACIER'
                    },
                    'NoncurrentVersionExpiration': {
                        'NoncurrentDays': 10
                    },
                    'AbortIncompleteMultipartUpload': {
                        'DaysAfterInitiation': 10
                    }
                },
            ]
        }
    )


if __name__ == '__main__':
    # Script-entry guard so importing this module no longer creates a bucket.
    create_s3_bucket('126e1f3dv')
import random
import requests
from bs4 import BeautifulSoup
from imgurpython import ImgurClient


def aime(key):
    """Return five random image entries (link, description, shopee link)
    from the Imgur album(s) matching *key*."""
    # HACK: credentials are hard-coded — should live in configuration.
    client_id = 'c3e767d450a401e'
    client_secret = 'cdf5fb70e82bc00e65c0d1d1a4eed318ae82024c'
    client = ImgurClient(client_id, client_secret)

    # The 'Aime' keyword has two candidate albums; anything else uses one.
    if key in ('Aime', 'aime'):
        albums = ['hLZwL', 'Qt8En']
        chosen = random.randint(0, len(albums) - 1)
        images = client.get_album_images(albums[chosen])
    else:
        images = client.get_album_images('hoBxs')
    # Sample five distinct image positions from the chosen album.
    picks = random.sample(range(0, len(images)), 5)

    results = []
    for idx in picks:
        entry = dict()
        entry['imageLink'] = images[idx].link
        # Description text before the first URL is the title/price blob.
        description = images[idx].description.split('http')[0].strip('\n')
        entry['title&price'] = description
        # The shopee link trails the '$' marker in the description.
        entry['shopeeLink'] = images[idx].description.split('$')[1][3:].strip()
        results.append(entry)
    return results
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Make sure pdb is named as expected (shared between .cc files).
"""

import sys

import TestGyp

# MSVC-only behaviour; the test is a no-op on other platforms.
if sys.platform == 'win32':
    test = TestGyp.TestGyp()

    CHDIR = 'compiler-flags'
    test.run_gyp('pdbname-override.gyp', chdir=CHDIR)
    test.build('pdbname-override.gyp', test.ALL, chdir=CHDIR)

    # Confirm that the pdb generated by the compiler was renamed (and we also
    # have the linker generated one).
    test.built_file_must_exist('compiler_generated.pdb', chdir=CHDIR)
    test.built_file_must_exist('linker_generated.pdb', chdir=CHDIR)

    test.pass_test()
def alternate_sq_sum(arr):
    """Sum the elements of arr, taking even-indexed values as-is and
    squaring odd-indexed ones."""
    total = 0
    for index, value in enumerate(arr):
        total += value if index % 2 == 0 else value * value
    return total
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 30 14:14:32 2019

@author: kongweizhen
"""
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# Fixed: PreventUpdate was raised in the callback but never imported.
from dash.exceptions import PreventUpdate
import plotly.graph_objs as go
import pandas as pd

df = pd.read_excel('https://s3.amazonaws.com/programmingforanalytics/NBA_data.xlsx')

app = dash.Dash(__name__)
app.css.append_css({
    'external_url': 'https://codepen.io/chriddyp/pen/bWLwgP.css'
})

numerical_features = ['James Harden', 'Paul George', 'Giannis Antetokounmpo',
                      'Joel Embiid', 'LeBron James', 'Stephen Curry',
                      'Kawhi Leonard', 'Devin Booker', 'Kevin Durant',
                      'Anthony Davis', 'Damian Lillard', 'Kemba Walker',
                      'Bradley Beal', 'Blake Griffin', 'Karl-Anthony Towns',
                      'Kyrie Irving', 'Donovan Mitchell', 'Zach LaVine',
                      'Russell Westbrook', 'Klay Thompson']
# Fixed: the callback below indexes o["label"] / o["value"], so the options
# must be label/value dicts — the original kept them as bare strings, which
# raised TypeError on every search.
options = [{"label": player, "value": player} for player in numerical_features]

app.layout = html.Div(
    [
        html.Label(["Player 1", dcc.Dropdown(id="my-dynamic-dropdown")]),
        html.Label(
            ["Player 2",
             dcc.Dropdown(id="my-multi-dynamic-dropdown", multi=True),
             ]
        ),
    ]
)


@app.callback(
    dash.dependencies.Output("my-multi-dynamic-dropdown", "options"),
    [dash.dependencies.Input("my-multi-dynamic-dropdown", "search_value")],
    [dash.dependencies.State("my-multi-dynamic-dropdown", "value")],
)
def update_multi_options(search_value, value):
    """Filter the dropdown options by the typed search string."""
    if not search_value:
        raise PreventUpdate
    # Make sure that the set values are in the option list, else they will disappear
    # from the shown select list, but still part of the `value`.
    return [
        o for o in options
        if search_value in o["label"] or o["value"] in (value or [])
    ]


if __name__ == "__main__":
    app.run_server(debug=True)
import translator


class XorTranslator(translator.Translator):
    """One time pad translator"""

    def translate(self, cipher):
        """XOR each character of cipher with the key.

        zip() truncates to the shorter of the two sequences, which is
        exactly what the original's two redundant length branches did.
        """
        return "".join(chr(ord(x) ^ ord(y)) for (x, y) in zip(cipher, self.key))

    def encode(self, cipher):
        # XOR is symmetric, so encoding is the same operation as decoding.
        return self.translate(cipher)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys


class General(Exception):
    """Root of the demo exception hierarchy."""
    pass


class Specific1(General):
    pass


class Specific2(General):
    pass


def raiser0():
    raise General()


def raiser1():
    raise Specific1()


def raiser2():
    raise Specific2()


# Catching the base class also catches every subclass.
for func in (raiser0, raiser1, raiser2):
    try:
        func()
    except General:
        print('caught: ', sys.exc_info()[0])

# Same demonstration, but binding the exception instance instead of
# inspecting sys.exc_info().
for func in (raiser0, raiser1, raiser2):
    try:
        func()
    except General as err:
        print('caught: ', err.__class__)
# -*- coding: utf-8 -*-
# @Time : 2018/12/28 11:03
# @Author : Monica
# @Email : 498194410@qq.com
# @File : basepage.py
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from Common.my_log import MyLog
from datetime import datetime
from time import strftime
from Common.project_path import screenshot_path
import time
import win32gui
import win32con


# Shared page-object base class: wraps the common WebDriver operations
# with logging, exception handling and failure screenshots.
class BasePage:
    def __init__(self, driver):
        self.driver = driver

    # Wait until an element becomes visible.
    def wait_eleVisible(self, locator, times=30, poll_frequency=0.5, doc=""):
        """
        :param locator: locator tuple (by, value)
        :param times: timeout in seconds
        :param poll_frequency: polling interval, 0.5s by default
        :param doc: "module_page_action" label used in logs and screenshots
        :return: True when the element became visible, False otherwise
        """
        MyLog().my_log("INFO", "{}等待元素{}可见".format(doc, locator))
        try:
            start = datetime.now()
            WebDriverWait(self.driver, times, poll_frequency).until(
                EC.visibility_of_element_located(locator))
            end = datetime.now()
            # Log the wait duration in milliseconds.
            # Fixed: the original logged `.microseconds`, which wraps at one
            # second and ignores the seconds part entirely.
            MyLog().my_log("INFO", "元素等待结束,等待时间为:{}".format(
                int((end - start).total_seconds() * 1000)))
            time.sleep(0.5)
            return True
        except Exception:
            MyLog().my_log("exception", "等待元素可见失败!!!")
            self.save_current_screenshot(doc)
            return False

    # Wait until an element is present in the DOM (not necessarily visible).
    def wait_elePresence(self, locator, times=30, poll_frequency=0.5, doc=""):
        MyLog().my_log("INFO", "{}等待元素{}存在".format(doc, locator))
        try:
            WebDriverWait(self.driver, times, poll_frequency).until(
                EC.presence_of_element_located(locator))
            return True
        except Exception:
            MyLog().my_log("exception", "等待元素存在失败!!!")
            self.save_current_screenshot(doc)
            return False

    # Find a single element; screenshot and re-raise on failure.
    def get_element(self, locator, doc=""):
        MyLog().my_log("INFO", "{}查找元素:{}".format(doc, locator))
        try:
            return self.driver.find_element(*locator)
        except Exception:
            MyLog().my_log("exception", "查找元素失败!!!")
            self.save_current_screenshot(doc)
            raise

    # Find all matching elements; screenshot and re-raise on failure.
    def get_elements(self, locator, doc=""):
        MyLog().my_log("INFO", "{}查找元素:{}".format(doc, locator))
        try:
            return self.driver.find_elements(*locator)
        except Exception:
            MyLog().my_log("exception", "查找元素失败!!!")
            self.save_current_screenshot(doc)
            raise

    # Click an element.
    def click_element(self, locator, doc=""):
        MyLog().my_log("INFO", "{}点击元素:{}".format(doc, locator))
        ele = self.get_element(locator)
        try:
            ele.click()
        except Exception:
            MyLog().my_log("exception", "点击元素失败!!!")
            self.save_current_screenshot(doc)
            raise

    # Clear an input field.
    def clear_content(self, locator, doc=""):
        MyLog().my_log("INFO", "清空输入框内的数据")
        try:
            time.sleep(0.5)
            self.get_element(locator).clear()
        except Exception:
            MyLog().my_log("exception", "清空失败!!!")
            self.save_current_screenshot(doc)
            raise

    # Type text into an input field.
    def input_text(self, locator, text, doc=""):
        MyLog().my_log("INFO", "{}输入元素:{}".format(doc, locator))
        ele = self.get_element(locator)
        try:
            ele.send_keys(text)
        except Exception:
            MyLog().my_log("exception", "元素输入失败!!!")
            self.save_current_screenshot(doc)
            raise

    # Read an element's text content.
    def get_text(self, locator, doc=""):
        MyLog().my_log("INFO", "获取{}元素的文本内容".format(locator))
        ele = self.get_element(locator)
        try:
            return ele.text
        except Exception:
            MyLog().my_log("exception", "获取元素的文本内容失败!!!")
            self.save_current_screenshot(doc)
            raise

    # Read an attribute value from an element.
    def get_element_attribute(self, locator, attribute_text, doc=""):
        MyLog().my_log("INFO", "获取{}元素的属性".format(locator))
        ele = self.get_element(locator)
        try:
            return ele.get_attribute(attribute_text)
        except Exception:
            MyLog().my_log("exception", "获取元素的属性失败!!!")
            self.save_current_screenshot(doc)
            raise

    # Handle a JavaScript alert: accept, dismiss, or read its text.
    def alert_action(self, action='accept', doc=""):
        MyLog().my_log("INFO", "等待alert弹出框出现!")
        WebDriverWait(self.driver, 10).until(EC.alert_is_present())
        try:
            # The alert is not part of the HTML page; switch to it explicitly.
            alert = self.driver.switch_to.alert
            if action == "accept":
                alert.accept()
            elif action == "dismiss":
                alert.dismiss()
            elif action == "text":
                return alert.text
        except Exception:
            MyLog().my_log("exception", "alert处理执行失败!!!")
            self.save_current_screenshot(doc)
            raise

    # Switch into an iframe, back to the main document, or up one frame.
    def switch_iframe(self, locator, iframe_reference="enter", doc=""):
        try:
            if iframe_reference == "enter":
                WebDriverWait(self.driver, 10).until(
                    EC.frame_to_be_available_and_switch_to_it(locator))
                time.sleep(0.5)
            elif iframe_reference == "out_main":
                self.driver.switch_to.default_content()
            elif iframe_reference == "out_up":
                self.driver.switch_to.parent_frame()
        except Exception:
            MyLog().my_log("exception", "iframe_{}切换失败!!!".format(iframe_reference))
            self.save_current_screenshot(doc)
            raise

    # Drive the native Windows file-open dialog to upload a file.
    # Fixed: the original was defined inside the class without `self`, so
    # calling it as a method passed the instance as `filepath`; it is now
    # an explicit staticmethod.
    @staticmethod
    def upload_file(filepath, title="打开"):
        """
        :param filepath: path of the file to upload
        :param title: dialog window title — Chrome uses "打开",
            Firefox uses "文件上传"
        :return: None
        """
        # Top-level dialog window.
        dialog = win32gui.FindWindow("#32770", title)
        # Nested controls down to the filename edit box.
        comboxex32 = win32gui.FindWindowEx(dialog, 0, "ComboBoxEx32", None)
        combox = win32gui.FindWindowEx(comboxex32, 0, "ComboBox", None)
        edit = win32gui.FindWindowEx(combox, 0, "Edit", None)
        # The "Open" button on the dialog.
        button = win32gui.FindWindowEx(dialog, 0, "Button", "打开(&O)")
        # Type the file path, then press the button to submit.
        win32gui.SendMessage(edit, win32con.WM_SETTEXT, None, filepath)
        win32gui.SendMessage(dialog, win32con.WM_COMMAND, 1, button)

    # Save a screenshot of the current page.
    def save_current_screenshot(self, name):
        # File name: module_page_action_YYYY-MM-DD_HH_MM_SS.png
        file_name = screenshot_path + "\\" + "{}_{}.png".format(
            name, strftime('%Y-%m-%d_%H_%M_%S'))
        self.driver.save_screenshot(file_name)
        MyLog().my_log("INFO", "截取网页成功,文件路径为:{}".format(file_name))

    # Assert helper with logging and failure screenshot.
    # Fixed: the original evaluated `assert_sentence` as a bare expression,
    # so it never actually asserted anything.
    def assert_common(self, assert_sentence, doc="断言失败"):
        try:
            assert assert_sentence
        except AssertionError:
            MyLog().my_log("exception", "断言失败!!!")
            self.save_current_screenshot(doc)
            raise
# -*- coding: utf-8 -*-
import connection
import users
import logging

if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    con = connection.getConnection()
    try:
        # Dump every mail of every user to the log.
        for (uid, version) in users.UserDAO.findAll(con):
            logging.info('obteniendo mails del usuario : {}'.format(uid))
            for mail in users.MailDAO.findAll(con, uid):
                logging.info(mail.__dict__)
    finally:
        # Always release the connection, even if a DAO call fails.
        connection.closeConnection(con)
from select_factor import get_factors
from mylib import get_data
from mylib import get_data_fromDB  # fetch the data from the database
from mylib import train_test_split
from label_generator import generate_label
from mltool import method
from sklearn.metrics import classification_report

filename = 'E:/300354.csv'  # source CSV (unused when reading from the DB)
tablename = 'stock_' + '300354'  # database table to read

# high/low/dopen/close/vol: 1-D ndarrays
# high,low,dopen,close,vol=get_data(filename)
high, low, dopen, close, vol = get_data_fromDB(tablename)

# Build the factor matrix from the selected technical indicators.
datasets = get_factors(high, low, close, vol,
                       BBANDS=True, DEMA=True, EMA=True,
                       AD=True, ADOSC=True, OBV=True)
train_data, test_data = train_test_split(datasets)

label = generate_label(close, sign=1)
train_label, test_label = train_test_split(label)

# method[5] is the chosen classifier from the mltool registry.
classifier = method[5]
classifier.fit(train_data, train_label)
pred = classifier.predict(test_data)
print(classification_report(test_label, pred))
#!/usr/bin/python
import sys

from candy_split import *

# Use the file given on the command line, or fall back to the sample input.
filename = "sample.txt" if len(sys.argv) == 1 else sys.argv[1]
run(filename)
# -*- coding: utf-8 -*-
from collections import Counter


class Solution:
    def findPairs(self, nums, k):
        """Count unique pairs (a, b) in nums with b - a == k."""
        if k < 0:
            # Absolute differences can never be negative.
            return 0
        if k == 0:
            # Zero-diff pairs exist only for values appearing at least twice.
            return sum(1 for count in Counter(nums).values() if count > 1)
        # For k > 0 each distinct value contributes at most one pair.
        unique_values = set(nums)
        return sum(1 for value in unique_values if value + k in unique_values)


if __name__ == "__main__":
    solution = Solution()
    assert 2 == solution.findPairs([3, 1, 4, 1, 5], 2)
    assert 4 == solution.findPairs([1, 2, 3, 4, 5], 1)
    assert 1 == solution.findPairs([1, 3, 1, 5, 4], 0)
#coding:utf-8
from flask import render_template, redirect, url_for, session,\
    flash
# Fix: the flask.ext.* import shim was removed in Flask 1.0; import the
# extension package directly.
from flask_login import login_required, current_user
from . import main
from ..decorators import permission_required
from ..models import Permission, Role, Follow, User, db, Article


@main.route('/', methods=['GET'])
def index():
    """Landing page: the 5 newest articles and 5 most recently active users.

    Uses limit(5) so only five rows are fetched instead of loading the whole
    table and slicing in Python.
    """
    articles = Article.query.order_by(Article.publish_time.desc()).limit(5).all()
    users = User.query.order_by(User.last_login_time.desc()).limit(5).all()
    # Bug fix: both query results were computed but never handed to the
    # template; pass them through the rendering context.
    return render_template('index.html', articles=articles, users=users)


@main.app_errorhandler(403)
def permission_denied(e):
    """Custom 403 (forbidden) page."""
    return render_template('403.html'), 403


@main.app_errorhandler(404)
def page_not_found(e):
    """Custom 404 (not found) page."""
    return render_template('404.html'), 404


@main.app_errorhandler(413)
def file_too_large(e):
    """Custom 413 (payload too large) page, e.g. for oversized uploads."""
    return render_template('413.html'), 413


@main.app_errorhandler(500)
def server_error(e):
    """Custom 500 (internal server error) page."""
    return render_template('500.html'), 500
companies = [ "kellogg co.", "ihs markit ltd.", "tripadvisor", "toray industries", "advantest", "principal financial group", "kimberly-clark", "citrix systems", "asahi glass", "dainippon screen", "salesforce.com", "baxter international inc.", "netapp", "johnson controls international", "fidelity national information services", "fifth third bancorp", "discovery inc. class a", "varian medical systems", "tokyo gas", "concho resources", "facebook", "maxim integrated products inc", "t.co", "ajinomoto", "hilton worldwide holdings inc", "keysight technologies", "charles schwab corporation", "nissan motor", "isuzu motors", "konica minolta holdings", "seiko epson", "groupon", "royal caribbean cruises ltd", "carnival corp.", "keisei electric railway", "arthur j. gallagher & co.", "d. r. horton", "tokio marine holdings", "intl flavors & fragrances", "pentair plc", "american airlines group", "kobe steel", "unitedhealth group", "suzuki motor", "intel corp.", "stanley black & decker", "pfizer", "sumitomo heavy industries", "naver", "goldman sachs", "kyocera", "flir systems", "expeditors", "coty, inc", "news corp. class b", "jingdong mall", "costco wholesale corp.", "applied materials inc.", "bloomberg l.p.", "borgwarner", "equity residential", "maruha nichiro holdings", "mizuho financial group", "ameren corp", "cme group inc.", "facebook, inc.", "intuit inc. 
", "leggett & platt", "medtronic plc", "cadence design systems", "pnc financial services", "visa inc.", "discover financial services", "mondelez international", "fuji electric holdings", "vk", "bristol-myers squibb", "mcdonald's", "westrock", "haseko corp.", "tokyu", "cardinal health inc.", "secom", "henry schein", "windows live", "lilly (eli) & co.", "aliexpress", "emerson electric company", "public storage", "mattel inc.", "symantec corp.", "oneok", "google hong kong", "hess corporation", "sealed air", "capri holdings", "wellcare", "alphabet inc class a", "huntington ingalls industries", "general electric", "l brands inc.", "takeda pharmaceutical company", "ansys", "paypal", "cabot oil & gas", "odakyu electric railway", "dowdupont", "nec", "lam research", "weyerhaeuser", "avery dennison corp", "sl green realty", "twenty-first century fox class b", "ameriprise financial", "mylan n.v.", "snap-on", "delta air lines inc.", "booking holdings inc", "intuitive surgical inc.", "spotify", "vertex pharmaceuticals inc", "toho zinc", "united continental holdings", "merck & co.", "blackrock", "lennar corp.", "agilent technologies inc", "alliant energy corp", "yamaha motor", "paccar inc.", "fujifilm holdings", "kajima", "toyota tsusho", "lowe's cos.", "kikkoman", "pornhub", "matsui securities", "danaher corp.", "nielsen holdings", "resona holdings", "svb financial", "s&p global, inc.", "monster beverage", "davita inc.", "textron inc.", "nitto denko", "home depot", "corning inc.", "tyson foods", "nextera energy", "grainger (w.w.) inc.", "ametek inc.", "discovery inc. class c", "amada co. ltd. ", "shin-etsu chemical", "seven & i holdings", "thermo fisher scientific", "international business machines", "united rentals, inc.", "denso", "tokyo electron", "j. front retailing", "willis towers watson", "chugai pharmaceutical", "laboratory corp. 
of america holding", "duke energy", "valero energy", "google germany", "nippon paper group, inc.", "concordia financial group", "dover corp.", "sky perfect jsat holdings inc.", "skyworks solutions", "anthem inc.", "cboe global markets", "hino motors", "centerpoint energy", "analog devices, inc.", "simon property group inc", "fanuc", "wabtec corporation", "honda motor", "republic services inc", "fastenal co", "itochu", "edison int'l", "wells fargo", "hitachi", "google france", "consolidated edison", "charter communications", "nvidia corporation", "union pacific", "pepsico inc.", "casio computer", "walmart", "cognizant technology solutions", "kubota", "sumitomo", "digital realty trust inc", "illinois tool works", "walgreens boots alliance", "waste management inc.", "msci inc", "toyo seikan kaisha", "isetan mitsukoshi holdings", "v.f. corp.", "lkq corporation", "amazon.com inc.", "paychex inc.", "cisco systems", "kroger co.", "oji paper", "t. rowe price group", "komatsu", "sekisui house", "cvs health", "mitsui mining & smelting", "molson coors brewing company", "assurant", "chevron", "furukawa electric", "lockheed martin corp.", "mitsui engineering & shipbuilding", "mitsubishi ufj financial group", "raytheon co.", "automatic data processing", "gs yuasa", "netflix", "csx corp.", "jacobs engineering group", "uber", "regeneron", "furukawa", "nippon steel", "american international group", "martin marietta materials", "phillips 66", "tencent", "everest re group ltd.", "daikin industries", "polo ralph lauren corp.", "yahoo! japan", "realty income corporation", "canon", "airbnb", "tokai carbon", "tokyo electric power", "jxtg holdings", "sumco", "amazon", "fortune brands home & security", "entergy corp.", "ebay", "synchrony financial", "global payments inc.", "noble energy inc", "sumitomo chemical", "alps electric", "yahoo japan", "a.o. 
smith corp", "affiliated managers group inc", "resmed", "arconic inc.", "mgm resorts international", "nsk", "shionogi", "dish network", "amp project", "perrigo", "hcp inc.", "twenty-first century fox class a", "fluor corp.", "general motors", "dowa holdings", "tosoh", "j. b. hunt transport services", "the travelers companies inc.", "mitsui o.s.k. lines", "nippon meat packers", "dte energy co.", "sempra energy", "instagram", "ntn", "caterpillar", "goldman sachs group", "workday", "evergy", "dai nippon printing", "metlife inc.", "becton dickinson", "flipkart", "amgen inc.", "familymart uny holdings", "pioneer natural resources", "toto ltd.", "intercontinental exchange", "chubu electric power", "the home depot", "tokyo dome", "livejasmin", "abbott laboratories", "showa denko", "toho", "haosou", "ppg industries", "aes corp", "sumitomo mitsui financial group", "hca holdings", "exxon mobil corp.", "microsoft office", "unitika", "american electric power", "northrop grumman corp.", "kyowa hakko kirin", "sumitomo dainippon pharma", "minebea", "eastman chemical", "pinnacle west capital", "verisk analytics", "github", "fortinet", "deere & co.", "humana inc.", "ebay inc.", "honeywell int'l inc.", "shinsei bank", "verizon", "kddi", "sumitomo metal mining", "hasbro inc.", "american express co", "hollyfrontier corp", "xcel energy inc", "carmax inc", "credit saison", "yahoo!", "terumo", "nektar therapeutics", "advance auto parts", "jm smucker", "public serv. 
enterprise inc.", "linde plc", "sherwin-williams", "united parcel service", "united health group inc.", "mettler toledo", "united technologies", "t&d holdings", "target corp.", "hp inc.", "ebara", "nrg energy", "marui group", "inpex", "denki kagaku kogyo", "zoetis", "mitsubishi estate", "te connectivity ltd.", "nichirei", "boeing company", "lamb weston holdings inc", "kirin brewery", "chiyoda", "all nippon airways", "helmerich & payne", "sumitomo mitsui trust holdings", "air products & chemicals inc", "f5 networks", "sojitz", "whirlpool corp.", "alphabet inc.", "marubeni", "campbell soup", "microsoft corp.", "xnxx", "ms&ad insurance group", "transdigm group", "fortive corp", "nissan chemical industries", "google brazil", "wynn resorts ltd", "o'reilly automotive", "garmin ltd.", "iqvia holdings inc.", "take-two interactive", "eisai", "google", "meituan-dianping", "3m", "meiji holdings", "berkshire hathaway", "bank of america corp", "copart inc", "taiheiyo cement", "ngk insulators", "parker-hannifin", "aptiv plc", "yamato holdings", "baidu", "l-3 communications holdings", "caterpillar inc.", "halliburton co.", "cbre group", "procter & gamble", "ford motor", "taiyo yuden", "unum group", "mccormick & co.", "sumitomo osaka cement", "shenma", "waters corporation", "the cooper companies", "booking", "mitsubishi motors", "aeon", "odnoklassniki", "host hotels & resorts", "morgan stanley", "eog resources", "microchip technology", "verizon communications", "astellas pharma", "oki electric industry", "adobe systems inc", "ipg photonics corp.", "nippon express", "cms energy", "nikon", "mitsubishi", "netflix inc.", "general mills", "perkinelmer", "google india", "stack overflow", "atmos energy corp", "porn555", "japan tobacco", "pfizer inc.", "xylem inc.", "youtube", "marathon oil corp.", "nippon suisan kaisha", "wikipedia", "southern co.", "norwegian cruise line", "cintas corporation", "viacom inc.", "sina weibo", "welltower inc.", "visa", "e*trade", "expedia group", "suntrust 
banks", "arista networks", "yamaha", "sony financial", "linkedin", "xhamster", "obayashi", "activision blizzard", "citizens financial group", "american tower corp.", "dollar tree", "3m company", "avalonbay communities, inc.", "panasonic", "equifax inc.", "conocophillips", "technipfmc", "microsoft", "hitachi zōsen", "cf industries holdings inc", "gartner inc", "qorvo", "advanced micro devices inc", "micron technology", "lyondellbasell", "nike", "regency centers corporation", "gap inc.", "mastercard inc.", "quest diagnostics", "sogou", "flowserve corporation", "edwards lifesciences", "rollins inc.", "red hat inc.", "mitsubishi materials", "dxc technology", "autodesk inc.", "sapporo holdings", "jefferies financial group", "fedex corporation", "fukuoka financial group", "jtekt", "citigroup inc.", "first republic bank", "kansas city southern", "kawasaki heavy industries", "sina corp", "gilead sciences", "nippon kayaku", "okuma holdings", "genuine parts", "estee lauder cos.", "edreams odigeo", "juniper networks", "jpmorgan chase & co.", "nisshinbo holdings", "albemarle corp", "ntt docomo", "jack henry & associates inc", "stryker corp.", "tractor supply company", "xerox", "mazda motor", "mohawk industries", "imdb", "starbucks corp.", "kohl's corp.", "sumitomo electric industries", "essex property trust, inc.", "newell brands", "diamondback energy", "aon plc", "pulte homes inc.", "franklin resources", "zions bancorp", "williams cos.", "hitachi construction machinery", "jgc corporation", "accuweather", "xilinx", "ulta beauty", "harris corporation", "asahi breweries", "b2w", "ibm", "block h&r", "nippon electric glass", "apache corporation", "google spain", "google italy", "schlumberger ltd.", "samsung", "biogen inc.", "nordstrom", "kraft heinz co", "nippon sheet glass", "western union co", "travelers", "walt disney", "southwest airlines", "toppan printing", "trend micro", "apartment investment & management ", "the bank of new york mellon corp.", "jpmorgan chase", 
"freeport-mcmoran inc.", "constellation brands", "citizen holdings", "comerica inc.", "nippon yusen", "northern trust corp.", "idexx laboratories", "pioneer", "otsuka holdings co.", "mitsui", "brighthouse financial inc", "mcdonald's corp.", "nasdaq, inc.", "nisshin seifun group", "cincinnati financial", "coca-cola company", "kuraray", "sysco corp.", "intel", "robert half international", "softbank", "brown-forman corp.", "fleetcor technologies inc", "mitsui chemicals", "mitsui fudosan", "boeing", "jfe holdings", "bitly", "pvh corp.", "konami", "toyobo", "duke realty corp", "mckesson corp.", "exelon corp.", "dentsu", "alaska air group inc", "torchmark corp.", "allegion", "centene corporation", "nisshin steel", "the clorox company", "interpublic group", "broadridge financial solutions", "american express", "quanta services inc.", "comcast corp.", "exxonmobil", "fujitsu", "shizuoka bank", "allstate corp", "nomura holdings", "apple", "hewlett packard enterprise", "alphabet inc class c", "takara holdings", "yandex", "tencent qq", "bridgestone", "ecolab inc.", "boston properties", "electronic arts", "aozora bank", "east japan railway company", "udr inc", "invesco ltd.", "osaka gas", "total system services", "illumina inc", "dai-ichi life", "tiffany & co.", "national oilwell varco inc.", "regions financial corp.", "incyte", "ihi", "alipay", "fiserv inc", "ingersoll-rand plc", "tokuyama corporation", "marathon petroleum", "ball corp", "boston scientific", "roper technologies", "progressive corp.", "cbs corp.", "pinterest", "loews corp.", "state street corp.", "iron mountain incorporated", "showa shell sekiyu", "twitter, inc.", "mitsubishi heavy industries", "west japan railway", "taobao", "reddit", "bing", "texas instruments", "amazon japan", "hanesbrands inc", "under armour class a", "yum! 
brands inc", "amerisourcebergen corp", "at&t inc.", "teijin", "asahi kasei", "daiwa house industry", "chevron corp.", "eaton corporation", "cummins inc.", "the mosaic company", "tmall", "olympus", "takashimaya", "whatsapp", "moody's corp", "rakuten", "news corp. class a", "dentsply sirona", "google japan", "newmont mining corporation", "american water works company inc", "asos.com", "sompo holdings", "kansai electric power", "dena", "celgene corp.", "broadcom", "harley-davidson", "zimmer biomet holdings", "people's united financial", "anadarko petroleum corp", "twitch", "extra space storage", "tapestry, inc.", "baker hughes, a ge company", "apple inc.", "archer-daniels-midland co", "alexion pharmaceuticals", "m&t bank corp.", "sohu", "abbvie inc.", "raymond james financial inc.", "cigna corp.", "darden restaurants", "western digital", "kawasaki kisen kaisha", "hartford financial svc.gp.", "vornado realty trust", "equinix", "yokogawa electric", "fast retailing", "align technology", "dominion energy", "tdk", "mail.ru", "mid-america apartments", "macerich", "shiseido", "kimco realty", "chubb limited", "daiichi sankyo", "tokyo tatemono", "keio", "japan post holdings", "sba communications", "norfolk southern corp.", "nucor corp.", "mitsubishi logistics", "ross stores", "ube industries", "coca-cola", "international paper", "ntt data", "mitsubishi chemical holdings", "yokohama rubber", "universal health services, inc.", "colgate-palmolive", "quora", "fujikura", "abiomed inc", "prologis", "ricoh", "autozone inc", "rockwell automation inc.", "msn", "celanese corp.", "qualcomm inc.", "verisign inc.", "blogspot", "prudential financial", "fmc corporation", "eversource energy", "xvideos", "capital one financial", "yaskawa electric corporation", "google turkey", "general dynamics", "subaru", "bb&t corporation", "synopsys inc.", "twitter", "kao corp.", "google uk", "japan steel works", "chiba bank", "recruit holdings", "oracle corp.", "the walt disney company", "packaging 
corporation of america", "cimarex energy", "merck & company", "lincoln national", "hologic", "akamai technologies inc", "netease", "google russia", "daiwa securities group", "federal realty investment trust", "jd.com", "dollar general", "allergan, plc", "hormel foods corp.", "tjx companies inc.", "comsys holdings", "mitsubishi electric", "foot locker inc", "sony", "church & dwight", "crown castle international corp.", "conagra brands", "tobu railway", "devon energy", "the hershey company", "mail.ru", "accenture plc", "masco corp.", "teleflex inc", "u.s. bancorp", "taisei", "marsh & mclennan", "aflac inc", "alliance data systems", "firstenergy corp", "wec energy group inc", "sumitomo realty & development", "shimizu", "johnson & johnson", "motorola solutions inc.", "vulcan materials", "ppl corp.", "alibaba", "philip morris international", "nisource inc.", "keycorp", "expedia", "kla-tencor corp.", "c. h. robinson worldwide", "macy's inc.", "best buy co. inc.", "toyota motor", "zalando", "seagate technology", "nippon light metal", "cerner", "occidental petroleum", "tokyu land", "altria group inc", "csdn", "amphenol corp", "alexandria real estate equities", "huntington bancshares", "pacific metals", "under armour class c", "nippon telegraph and telephone", "ventas inc", "kinder morgan", "chipotle mexican grill", "omnicom group", "central japan railway company", "marriott int'l.", "centurylink inc" ]
import numpy as np
from scipy import stats

# The 105 supported pitch names: octaves 3-5, letters A-G, each with the five
# accidental spellings '' / '-' / '--' / '#' / '##'.  Generated in exactly the
# same order as the original hand-written 105-element literal.
note_status = ['{}{}{}'.format(letter, accidental, octave)
               for octave in ('3', '4', '5')
               for letter in 'ABCDEFG'
               for accidental in ('', '-', '--', '#', '##')]

interval_status = ["whole", "half", "quarter", "eighth", "16th", "32nd", "64th"]


def srocc(output, target):
    """Spearman rank-order correlation coefficient of two equal-length sequences."""
    return stats.spearmanr(output, target)[0]


def _encode(notes, intervals):
    """Map parallel note/interval name lists to one integer code per event.

    NOTE(review): the multiplier is 6 although len(interval_status) == 7, so a
    '64th' interval (index 6) collides with the next note's code.  Preserved
    as-is because every compared sequence uses the same encoding.
    """
    return [note_status.index(notes[i]) * 6 + interval_status.index(intervals[i])
            for i in range(len(notes))]


def _novelty_count(x):
    """Count bigrams (gap 1) and skip-bigrams (gap 2) that are not repeated
    later in the sequence, i.e. each pattern's final occurrence."""
    n = len(x)
    cnt = 0
    for gap in (1, 2):
        for i in range(n - gap):
            repeated = any(x[i] == x[j] and x[i + gap] == x[j + gap]
                           for j in range(i + 1, n - gap))
            if not repeated:
                cnt += 1
    return cnt


def _overlap_weight(x):
    """Accumulate j - i for every pair i < j where the subsequence starting at
    j matches the one starting at i for j - i steps (truncated at the end of
    the sequence).  Starts at 1 so the final division can never be by zero."""
    n = len(x)
    total = 1
    for i in range(n):
        for j in range(i + 1, n):
            match = True
            for k in range(j - i):
                if j + k >= n:
                    break
                if x[i + k] != x[j + k]:
                    match = False
                    break
            if match:
                total += j - i
    return total


def evaluate(note_gen, interval_gen, note_ori, interval_ori):
    """Score a generated melody against a single reference melody.

    The m best srocc values over all length-m windows of the generated piece
    are summed, scaled by the novelty count and damped by the self-overlap
    weight.  Requires n - m + 1 >= m windows; otherwise an IndexError is
    raised, as in the original formulation.
    """
    n, m = len(note_gen), len(note_ori)
    x = _encode(note_gen, interval_gen)
    y = _encode(note_ori, interval_ori)
    score = [srocc(x[i:i + m], y) for i in range(n - m + 1)]
    score.sort(reverse=True)
    result = sum(score[i] for i in range(m))
    return result * _novelty_count(x) / n / _overlap_weight(x)


def evaluate2(note_gen, interval_gen, note_ori, interval_ori, note_ori2, interval_ori2):
    """Score a generated melody against two references, taking the better
    window correlation of the two at each position and summing the top
    m + m2 scores (same novelty/overlap scaling as evaluate)."""
    n, m, m2 = len(note_gen), len(note_ori), len(note_ori2)
    x = _encode(note_gen, interval_gen)
    y = _encode(note_ori, interval_ori)
    z = _encode(note_ori2, interval_ori2)
    # Size the score buffer for the longer window list (smaller reference).
    if m < m2:
        score = [-233] * (n - m + 1)
    else:
        score = [-233] * (n - m2 + 1)
    for i in range(n - m + 1):
        score[i] = srocc(x[i:i + m], y)
    for i in range(n - m2 + 1):
        val = srocc(x[i:i + m2], z)
        if val > score[i]:
            score[i] = val
    score.sort(reverse=True)
    result = sum(score[i] for i in range(m + m2))
    return result * _novelty_count(x) / n / _overlap_weight(x)


if __name__ == '__main__':
    note_list1 = ['G4','B-4','A4','B-4', 'G4','D4','A4','F#4', 'D4','G4','E-4','C4','A3', 'D4','B-3', 'G3', 'C4', 'A3', 'D4', 'B-3','A3','G3']
    interval_list1 = ['quarter','eighth','eighth','quarter','eighth','eighth','quarter','eighth','eighth','half', 'quarter','eighth','eighth','quarter','eighth','eighth','eighth','eighth','quarter','quarter','eighth','eighth']
    note_list2 = ['G4','B-4','A4','B-4', 'G4','D4','A4']
    interval_list2 = ['quarter','eighth','eighth','quarter','eighth','eighth','quarter']
    print(evaluate(note_list1,interval_list1,note_list2,interval_list2))
# Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
#
"""Markdown extension for xrefs and reference to other Markdown files.

Xref is a reference of form [`symbol`][] or [text][`symbol`], where symbol is
expected to be one of the following:

* package:-scheme URI - it will be resolved using .packages file in the root
  directory
* file path
* C++ symbol - will be resolved through xref.json file (see README.md)

Xrefs are converted to GitHub links.

Additionally this extension retargets links pointing to markdown files to the
html files produced from these markdown files.

Usage: markdown.markdown(extensions=[XrefExtension()])
"""

import json
import logging
import os
import re
import xml.etree.ElementTree as etree

from typing import Dict, Optional
from urllib.parse import urlparse

from cpp_indexer import SymbolsIndex, load_index
from markdown.extensions import Extension
from markdown.inlinepatterns import InlineProcessor
from markdown.treeprocessors import Treeprocessor


class _XrefPattern(InlineProcessor):
    """Converts xrefs into GitHub links.

    Recognizes [`symbol`][] and [text][`symbol`] link formats where symbol is
    expected to be one of the following:

    * Fully qualified reference to a C++ class, method or function;
    * Package URI pointing to one of the packages included in the SDK checkout.
    * File reference to one of the file in the SDK.
    """

    # First alternative matches [`symbol`][], second matches [text][`symbol`].
    # NOTE(review): the '?' after the first closing '\]' makes that bracket
    # optional, which looks unintended - confirm against the upstream source.
    XREF_RE = r'\[`(?P<symbol>[^]]+)`\]?\[\]|\[(?P<text>[^]]*)\]\[`(?P<target>[^]]+)`\]'

    def __init__(self, md, symbols_index: SymbolsIndex, packages: Dict[str, str]):
        super().__init__(_XrefPattern.XREF_RE)
        self.symbols_index = symbols_index  # C++ symbol -> source location index
        self.packages = packages            # package name -> checkout-relative path
        self.md = md                        # owning Markdown instance (holds md.xrefs)

    def handleMatch(self, m, data):
        # Either the 'symbol'/'target' group is set depending on which
        # alternative of XREF_RE matched; 'text' is only set for [text][`sym`].
        text = m.group('text')
        symbol = m.group('symbol')
        if symbol is None:
            symbol = m.group('target')
        uri = self._resolve_ref(symbol) or '#broken-link'
        # Remember this xref. build process can later use this information
        # to produce xref reference section at the end of the markdown file.
        self.md.xrefs[f"`{symbol}`"] = uri
        # Create <a href='uri'>text</a> element. If text is not defined
        # simply use a slightly sanitized symbol name.
        anchor = etree.Element('a')
        anchor.attrib['href'] = uri
        anchor.attrib['target'] = '_blank'
        if text is not None:
            anchor.text = text
        else:
            # Strip the leading dart:: namespace for readability.
            code = etree.Element('code')
            code.text = re.sub(r'^dart::', '', symbol)
            anchor.append(code)
        # Replace the whole pattern match with anchor element.
        return anchor, m.start(0), m.end(0)

    def _resolve_ref(self, ref: str) -> Optional[str]:
        """Resolve a symbol to a GitHub URI, or None when nothing matches."""
        if ref.startswith('package:'):
            # Resolve as package uri via .packages.
            uri = urlparse(ref)
            # Split into package name and optional path inside the package.
            (package_name, *path_to_file) = uri.path.split('/', 1)
            package_path = self.packages[package_name]
            if len(path_to_file) == 0:
                return self._make_github_uri(package_path)
            else:
                return self._make_github_uri(
                    os.path.join(package_path, path_to_file[0]))
        elif os.path.exists(ref):
            # Resolve as a file link.
            return self._make_github_uri(ref)
        else:
            # Resolve as a symbol.
            loc = self.symbols_index.try_resolve(ref)
            if loc is not None:
                return self._make_github_uri(loc.filename, loc.lineno)
            logging.error('Failed to resolve xref %s', ref)
            return None

    def _make_github_uri(self, file: str, lineno: Optional[int] = None) -> str:
        """Generates source link pointing to GitHub"""
        # Pin the link to the commit recorded in the symbols index so line
        # numbers stay valid.
        fragment = f'#L{lineno}' if lineno is not None else ''
        return f'https://github.com/dart-lang/sdk/blob/{self.symbols_index.commit}/{file}{fragment}'


class _MdLinkFixerTreeprocessor(Treeprocessor):
    """Redirects links pointing to .md files to .html files built from them."""

    def run(self, root):
        for elem in root.iter('a'):
            href = elem.get('href')
            if href is None:
                continue
            parsed_href = urlparse(href)
            if parsed_href.path.endswith('.md'):
                # Swap the .md suffix for .html, keeping query/fragment intact.
                elem.set(
                    'href',
                    parsed_href._replace(path=parsed_href.path[:-3] +
                                         '.html').geturl())


class XrefExtension(Extension):
    """Markdown extension which handles xrefs and links to markdown files."""
    symbols_index: SymbolsIndex
    packages: Dict[str, str]

    def __init__(self) -> None:
        super().__init__()
        self.symbols_index = load_index('xref.json')
        self.packages = XrefExtension._load_package_config()

    def extendMarkdown(self, md):
        # md.xrefs collects every xref seen during rendering (see _XrefPattern).
        md.xrefs = {}
        md.treeprocessors.register(_MdLinkFixerTreeprocessor(), 'mdlinkfixer', 0)
        md.inlinePatterns.register(
            _XrefPattern(md, self.symbols_index, self.packages), 'xref', 200)

    @staticmethod
    def _load_package_config() -> Dict[str, str]:
        # Load package_config.json file into a dictionary.
        with open('.dart_tool/package_config.json',
                  encoding='utf-8') as package_config_file:
            package_config = json.load(package_config_file)
        return dict([(pkg['name'],
                      os.path.normpath(
                          os.path.join('.dart_tool/', pkg['rootUri'],
                                       pkg['packageUri'])))
                     for pkg in package_config['packages']
                     if 'packageUri' in pkg])
from multiprocessing import Pool
import time


def down_load(movie_name):
    """Simulate downloading a movie, reporting progress once per second."""
    for step in range(5):
        print('电影:{},下载进度{}%'.format(movie_name, (step / 4 * 100)))
        time.sleep(1)
    return movie_name


def alert(movie_name):
    """Completion callback fired by the pool with down_load's return value."""
    print('恭喜{}下载完成了。。。'.format(movie_name))


if __name__ == '__main__':
    movie_lst = ['西红柿首富', '功夫小子', '功夫熊猫', '叶问', '功夫', '战郎', '红海行动']
    # Three worker processes download concurrently; alert runs per completion.
    pool = Pool(3)
    for title in movie_lst:
        pool.apply_async(down_load, (title,), callback=alert)
    pool.close()
    pool.join()
# URL configuration for the books app.
from django.conf.urls import url
from . import views
from django.contrib.auth.views import login

# Namespace for reversing, e.g. {% url 'books:index' %}.
app_name = 'books'

urlpatterns = [
    # Landing page.
    url(r'^$', views.index, name='index'),
    # Account registration (class-based view).
    url(r'^register/$', views.UserFormView.as_view(), name='register'),
    # Session logout.
    url(r'^logout/$', views.Logout, name='logout'),
    # Built-in function-based login view rendered with a custom template.
    url(r'^login/$', login, {'template_name': 'home/login.html'}, name='login'),
]
#!/usr/bin/env python3
"""Demonstrate three ways of reading /etc/passwd: as one string, as a list of
lines, and line by line via an explicit iterator.

Fix: the first two sections opened the file with open()/close(); they now use
`with` context managers so the handle is released even if a read raises.
Printed output is unchanged.
"""

# Technique 1: read the whole file into a single string.
with open("/etc/passwd", "r") as hFile:
    strfiletext = hFile.read()
print(strfiletext)
print("")
print(type(strfiletext))
print("")
print(f"len() function counts the amount of characters: {len(strfiletext)}")
print("\nUse this technique to display the output of a file as a string and read the length of the string\n")

# Technique 2: read the file as a list of lines (newlines preserved).
with open("/etc/passwd", "r") as hFile:
    listfiletext = hFile.readlines()
print(listfiletext)
print("")
print(type(listfiletext))
print("")
print(f"len() function counts the amount of list items: {len(listfiletext)}")
print("\nUse this technique when trying to display the output of a file in a list while counting the amount of list items\n")

# Technique 3: pull lines one at a time with next(), totalling characters;
# StopIteration marks end-of-file.
gecosCharacters = 0
with open('/etc/passwd', 'r') as passwdFile:
    try:
        while True:
            currentGecos = next(passwdFile)
            print(f"The value of currentGecos is:\n{currentGecos}")
            gecosCharacters = gecosCharacters + len(currentGecos)
    except StopIteration:
        print(f"The total number of characters is {gecosCharacters}")
print("\nUse this technique when writing a password cracker or sorting through a file list\n")
from socket import *
import time,os, random
import boto
from boto.s3.key import Key
import pylibmc
import random
import sys


class Server():
    """Front-end cache server (Python 2): serves Get(key) requests from a
    memcached caching layer, falling back to an S3 bucket on a miss.

    Talks to an external cache manager over a plain TCP socket to discover
    the memcached machines and a 'special' memcached instance that tracks
    recently kept keys and global hit/miss counters.
    """

    def __init__(self, cache_manager_address, client_address=('', 5000), maxClient=1):
        # Setup cache_manager_socket
        self.cache_manager_socket = socket(AF_INET, SOCK_STREAM)
        self.cache_manager_socket.connect(cache_manager_address)

        # Get special memcached instance that keeps track of the last 20% of keys
        self.cache_manager_socket.send("Get_special_memcached_instance")
        special_ip = self.cache_manager_socket.recv(1024).decode()
        if not special_ip:
            print "didn't get the special memcached ip"
        else:
            print "got special memcached ip"
        # CAS behaviour enabled so hit/miss counters can be updated atomically.
        self.special_instance = pylibmc.Client([special_ip], binary=False, behaviors={"cas": True})

        # Get cache machine IPs
        self.cache_list = []
        self.GetCacheList()

        # connect to S3
        self.conn = boto.connect_s3()
        self.bucket = self.conn.create_bucket('magicalunicorn')

        # Populate the memcached list, one client per cache machine, and give
        # each machine an empty kept-keys list in the special instance.
        self.memcached = []
        for ip in self.cache_list:
            temp = pylibmc.Client([ip])
            self.memcached.append(temp)
            self.special_instance[ip] = []

        # Client hit and miss counter
        self.hits = 0
        self.misses = 0

    def GetCacheList(self):
        """Refresh self.cache_list/self.memcached from the cache manager,
        reusing existing client objects for machines that are still listed."""
        self.cache_manager_socket.send("Retrieve_cache_list")
        data = self.cache_manager_socket.recv(1024).decode()
        if not data:
            print "didn't get the list"
        else:
            print "got cache list"
        print data
        caches = data.split(",")
        new_cache_list = []
        new_memcached = []
        for cache in caches:
            new_cache_list.append(cache)
            if cache in self.cache_list:
                # Known machine: keep its existing pylibmc client.
                new_memcached.append(self.memcached[self.cache_list.index(cache)])
            else:
                # New machine: fresh client plus an empty kept-keys list.
                # NOTE(review): indentation inferred - the kept-keys reset is
                # assumed to apply to new machines only; confirm.
                new_memcached.append(pylibmc.Client([cache]))
                self.special_instance[cache] = []
        # Reassign the cache and memcached lists
        self.cache_list = new_cache_list
        self.memcached = new_memcached
        print self.cache_list

    def Get(self, key):
        """Return the value for key from the caching layer, falling back to
        S3 (and repopulating a random cache machine) on a miss; None if the
        key exists nowhere.  Updates self.hits/self.misses."""
        # print key
        value = None
        deactivated_memcaches = []
        # Contact all servers
        for mem in self.memcached:
            try:
                if mem.get(key):
                    # found value for key
                    #print "found key in caching layer"
                    value = mem.get(key)
                    self.hits = self.hits + 1
                    break
            except pylibmc.Error:
                # print "Removing memcache machine"
                deactivated_memcaches.append(mem)
        # Remove deactivated_memcaches from the cache list
        for deactivated_cache in deactivated_memcaches:
            # find index of memcached
            index = self.memcached.index(deactivated_cache)
            del self.memcached[index]
            del self.cache_list[index]
        if not value:
            # value not in caching layer
            # Randomly contact a memcached server to insert
            index = random.randint(0, len(self.memcached) - 1)
            cache_machine = self.memcached[index]
            # check if key exists in S3
            possible_key = self.bucket.get_key(int(key))
            # not sure of response when key does not exist in S3
            if possible_key:
                # print key + "retrieved key from S3"
                value = possible_key.get_contents_as_string()
                # insert value into caching layer
                #cache_machine[str(key)] = value
                self.setMemcacheKey(cache_machine, str(key), value)
                # determine whether or not to perform
                self.KeepCacheKey(self.cache_list[index], key)
            # else:
            #     print "key %s is not in S3" % key
            # increment miss counter
            self.misses = self.misses + 1
        return value

    def KeepCacheKey(self, ip, key):
        """Append key to the machine's kept-keys list in the special
        instance, trimming the list to its 100 most recent entries."""
        # print "in keep cache key"
        keys= self.special_instance[str(ip)]
        #print keys
        keys.append(key)
        if len(keys) > 100:
            # remove keys until there is only 100
            remove_index = len(keys) - 100
            keys = keys[remove_index:]
        self.special_instance[ip] = keys

    def ConnectToNewCacheMachine(self, IpAddress):
        """Register a newly available cache machine and open a client to it."""
        self.cache_list.append(IpAddress)
        self.memcached.append(pylibmc.Client([IpAddress]))

    def UpdateHitsMisses(self):
        # update hits and misses in the special instance and reset
        # (gets/cas retry loops make the read-modify-write atomic).
        print self.special_instance
        set_misses = False
        while not set_misses:
            curr_miss_value = self.special_instance.gets("misses")
            set_misses = self.special_instance.cas("misses", self.misses + curr_miss_value[0], curr_miss_value[1])
        set_hits = False
        while not set_hits:
            curr_hit_value = self.special_instance.gets("hits")
            set_hits = self.special_instance.cas("hits", self.hits + curr_hit_value[0], curr_hit_value[1])
        # reset hits and misses counters to 0
        self.hits = 0
        self.misses = 0

    def setMemcacheKey(self, client, key, value):
        """Store key=value on the given client, retrying until the write
        succeeds (pylibmc errors are swallowed and retried)."""
        value_set = False
        while not value_set:
            try:
                client[key] = value
                value_set = True
            except pylibmc.Error:
                pass


# Driver: replay a recorded key trace against the cache, refreshing the
# cache list and flushing hit/miss counters every 50 requests.
Stupid = Server(('localhost', 5001))
counter = 0
with open('wifi_data_repeat_pattern_2_2_randomized.txt', 'r') as ins:
    start = time.time()
    for line in ins:
        # print list(line[:-2])
        Stupid.Get(line[:-2])
        # Update the cache list every 200 requests
        counter += 1
        if counter % 50 == 0:
            print line
            print time.time()
            print "Updating the cache list"
            Stupid.GetCacheList()
            Stupid.UpdateHitsMisses()
    end = time.time()
    print end - start
from django.shortcuts import render
from websocket import create_connection
from .settings.base import *
from django.http import HttpResponse
from .models import *
import json


class ObjectDetailMixin:
    # View mixin: renders `template` (set by the concrete view) for normal
    # requests, or returns the book list as JSON for AJAX requests.
    template = None

    def get(self, request):
        """Handle GET: full page render, or a JSON book payload for AJAX callers."""
        # bc = "WS_test"
        # ws = create_connection('ws://localhost:8000/')
        # ws.send("books?")
        # bc = str(ws.recv().format())
        books_query = Book.objects.all().values()
        # Materialize the QuerySet of dicts so it is JSON-serializable below.
        books = list(books_query)
        if not request.is_ajax():
            # NOTE(review): HttpRequest.is_ajax() is deprecated since Django 3.1
            # and removed in 4.0 — confirm this project's Django version.
            # PHONE_NUMBER, E_MAIL, etc. come from the star-import of settings.base.
            return render(request, self.template,
                          context={'phone_number': PHONE_NUMBER, 'e_mail': E_MAIL,
                                   'daily_offer': DAILY_OFFER, 'title': TITLE,
                                   'about': ABOUT, 'contacts': CONTACTS,
                                   'address': ADDRESS, 'website': WEBSITE,
                                   'book_count': '0', 'books': books})
        else:
            return HttpResponse(json.dumps({'books': books}), content_type='application/json')
#!/usr/bin/env dls-python from pkg_resources import require require('numpy') require('cothread') import sys import numpy from tmbf import TMBF tmbf = TMBF(sys.argv[1]) # For ADC offsets should be enough to turn DAC output off and grab a single # ADC waveform. Note that we need to set the ADC skew to 0ns first (or else we # need to apply intelligence to our measurements), as the offsets are applied to # the raw input. tmbf.set_save('DAC:ENABLE_S', 'Off') tmbf.set_save('ADC:DELAY_S', '0 ns') current_offset = tmbf.get('ADC:OFFSET_S') # Configure DDR capture for single shot soft capture. tmbf.set('DDR:INPUT_S', 'ADC') tmbf.set('TRG:DDR:SEL_S', 'Soft 1') tmbf.set('TRG:S1:MODE_S', 'One Shot') # tmbf.set('TRG:S1:FIRE_S.PROC', 0) tmbf.s1.arm() adc = tmbf.get('DDR:SHORTWF') offsets = -numpy.round(adc.reshape(-1, 4).mean(0)) new_offset = current_offset + offsets print 'Setting offsets by', offsets, 'to', new_offset tmbf.set('ADC:OFFSET_S', new_offset) tmbf.restore_saved()
from cx_Freeze import setup, Executable

# Freeze the Qt GUI entry point into a standalone distributable.
APP_METADATA = {
    "name": "21",
    "version": "0.1",
    "description": "Recognition",
    "executables": [Executable("Qfile_uic.py")],
}

setup(**APP_METADATA)
"""Unit tests for the file_structure module (pytest + pytest-mock)."""
from datetime import datetime
from unittest.mock import sentinel

import pytest

from . import file_structure as module


@pytest.fixture
def mock_path_exists(mocker):
    return mocker.patch("os.path.exists")


@pytest.fixture
def mock_path_join(mocker):
    return mocker.patch("os.path.join")


@pytest.fixture
def mock_makedirs(mocker):
    return mocker.patch("os.makedirs")


@pytest.fixture
def mock_listdir(mocker):
    return mocker.patch("os.listdir")


class TestCreateOutputDirectory:  # renamed: fixed "Ouput" typo
    def test_returns_output_directory_path(
        self, mock_path_exists, mock_path_join, mock_makedirs
    ):
        mock_path_join.return_value = sentinel.expected_output_directory_path

        actual_output_directory_path = module.create_output_directory("/foo/bar", "baz")

        assert actual_output_directory_path == sentinel.expected_output_directory_path

    def test_creates_output_directory_if_doesnt_exist(
        self, mock_path_exists, mock_path_join, mock_makedirs
    ):
        mock_path_exists.return_value = False
        mock_path_join.return_value = sentinel.expected_output_directory_path

        module.create_output_directory("/foo/bar", "baz")

        mock_makedirs.assert_called_with(sentinel.expected_output_directory_path)

    def test_doesnt_create_output_directory_if_exists(
        self, mock_path_exists, mock_path_join, mock_makedirs
    ):
        mock_path_exists.return_value = True

        module.create_output_directory("/foo/bar", "baz")

        mock_makedirs.assert_not_called()


def _mock_path_join(path, filename):
    # Deterministic fake join so the tests below can assert exact path strings.
    # (Repaired: the f-string must interpolate the filename.)
    return f"os-path-for-{path}/{filename}"


@pytest.mark.parametrize(
    "name, directory_contents",
    [
        ("returns filepaths", ["1.jpeg", "2.jpeg"]),
        ("sorts filepaths", ["2.jpeg", "1.jpeg"]),
        ("filters to extension", ["1.jpeg", "2.jpeg", "1.dng", "2.png"]),
    ],
)
def test_get_files_with_extension(
    mock_listdir, mock_path_join, name, directory_contents
):
    mock_listdir.return_value = directory_contents
    mock_path_join.side_effect = _mock_path_join

    actual = module.get_files_with_extension("/foo/bar", ".jpeg")

    expected = ["os-path-for-/foo/bar/1.jpeg", "os-path-for-/foo/bar/2.jpeg"]
    assert actual == expected


class TestIsoDatetimeForFilename:
    def test_returns_iso_ish_string(self):
        actual = module.iso_datetime_for_filename(datetime(2018, 1, 2, 13, 14, 15))
        expected = "2018-01-02--13-14-15"
        assert actual == expected

    def test_result_length_matches_constant(self):
        actual = module.iso_datetime_for_filename(datetime(2018, 1, 2, 13, 14, 15))
        assert len(actual) == module.FILENAME_TIMESTAMP_LENGTH


class TestIsoDatetimeAndRestFromFilename:
    def test_returns_datetime(self):
        actual = module.datetime_from_filename(
            "2018-01-02--13-14-15-something-something.jpeg"
        )
        expected = datetime(2018, 1, 2, 13, 14, 15)
        assert actual == expected


class TestFilenameHasFormat:
    @pytest.mark.parametrize(
        "filename, truthiness",
        [
            ("2018-01-02--13-14-15-something-something.jpeg", True),
            ("2018-01-02--13-aa-15-something-something.jpeg", False),
            ("2018-01-02--13-14-1-hi-hi.jpeg", False),
            ("prefix-2018-01-02--13-14-15something-something.jpeg", False),
        ],
    )
    def test_filename_has_correct_datetime_format(self, filename, truthiness):
        assert module.filename_has_correct_datetime_format(filename) is truthiness


class TestAlteringFilepath:
    def test_append_suffix_to_filepath_before_extension(self):
        actual = module.append_suffix_to_filepath_before_extension(
            "/dir/dir/image.jpeg", "_i_am_a_filepath_suffix"
        )
        expected = "/dir/dir/image_i_am_a_filepath_suffix.jpeg"
        assert actual == expected

    def test_replace_extension(self):
        actual = module.replace_extension("/dir/dir/image.jpeg", ".tiff")
        expected = "/dir/dir/image.tiff"
        assert actual == expected
import os
import sys

import numpy as np

import extensions
import path_utilities
# (fixed: path_utilities was imported twice)

# FIRST ARGUMENT: the root directory path (the path of the dataset)


#=========1=========2=========3=========4=========5=========6=========7=
def DFS(path):
    """Return a list of the full paths of every file under `path`.

    Directories are expanded depth-first with an explicit stack; entries that
    are neither directories nor regular files (broken links etc.) are skipped.
    """
    stack = [path]
    all_paths = []
    while stack:
        current = stack.pop()
        if os.path.isdir(current):
            # Push every child of this directory for later expansion.
            for item in os.listdir(current):
                stack.append(os.path.join(current, item))
        elif os.path.isfile(current):
            all_paths.append(current)
    return all_paths


#=========1=========2=========3=========4=========5=========6=========7=
def extension_indexer(dataset_path, n, write_path):
    """Return a dict mapping extension names (e.g. "csv") to lists of the full
    paths of files with those extensions in the dataset.

    Also writes the index to "extension_index_<dataset>.npy" under
    `write_path`; `n` is forwarded to extensions.count_and_sort_exts.
    """
    allpaths = DFS(dataset_path)
    # a list of all the filenames (without paths)
    filenames = [path_utilities.get_fname_from_path(path) for path in allpaths]
    filenames_no_ext, exts = extensions.remove_all_extensions(filenames)
    sorted_exts, sorted_counts = extensions.count_and_sort_exts(
        exts, n, write_path, dataset_path)
    top_n_exts = sorted_exts  # UNCOMMENT FOR ONLY CONVERTING TOP N EXTS: [:n]
    # makes a dictionary key for each of the top extensions
    ext_locations = {extension: [] for extension in top_n_exts}
    # checks every file and saves the paths of those with the top extensions
    for fp in allpaths:
        fn = path_utilities.get_fname_from_path(fp)
        # Skip AppleDouble resource-fork files ("._foo").
        if fn[:2] != "._":
            ext = path_utilities.get_single_extension(fn)
            if ext in top_n_exts:
                ext_locations[ext].append(fp)
    dataset_name = path_utilities.get_last_dir_from_path(dataset_path)
    ext_write_path = os.path.join(
        write_path, "extension_index_" + dataset_name + ".npy")
    np.save(ext_write_path, ext_locations)
    return ext_locations


#=========1=========2=========3=========4=========5=========6=========7=
def main():
    """CLI entry: argv[1] = dataset root, argv[2] = number of slices (unused)."""
    dataset_path = sys.argv[1]
    num_slices = sys.argv[2]


if __name__ == "__main__":
    # stuff only to run when not called via 'import' here
    main()
from locale import setlocale, LC_ALL, format_string
from textwrap import wrap

from PySide2.QtCore import Qt
from PySide2.QtGui import QIcon
from PySide2.QtWidgets import QWidget, QLabel, QGroupBox, QGridLayout, QLineEdit, QPushButton
from pyperclip import copy

from utilities.ManageLng import ManageLng
from utilities.PopupWindow import PopupWindow
from utilities.Validator import is_empty, is_correct_prefix, is_correct_mask, is_correct_ip_with_prefix


class IpInformation(QWidget):
    """Tab widget: prefix<->mask conversion plus a detailed IP/subnet report."""

    def __init__(self):
        super(IpInformation, self).__init__()
        # Use language settings
        self.ml = ManageLng()
        main_layout = QGridLayout()
        self.setLayout(main_layout)
        # Prefix-mask-conversion group box
        self.prefix_mask_box = QGroupBox(self.ml.get_tr_text("tab_ip_information_prefix_mask_conv"))
        self.prefix_mask_box.setMaximumHeight(90)
        main_layout.addWidget(self.prefix_mask_box, 0, 0)
        prefix_mask_box_layout = QGridLayout()
        prefix_mask_box_layout.setContentsMargins(200, 20, 200, 20)
        prefix_mask_box_layout.setHorizontalSpacing(30)
        self.prefix_mask_box.setLayout(prefix_mask_box_layout)
        self.prefix_input = QLineEdit()
        self.prefix_input.setMaxLength(3)
        self.prefix_input.setAlignment(Qt.AlignCenter)
        self.prefix_input.setPlaceholderText(self.ml.get_tr_text("tab_ip_information_prefix_ptext"))
        prefix_mask_box_layout.addWidget(self.prefix_input, 0, 0)
        self.mask_input = QLineEdit()
        self.mask_input.setMaxLength(15)
        self.mask_input.setAlignment(Qt.AlignCenter)
        self.mask_input.setPlaceholderText(self.ml.get_tr_text("tab_ip_information_mask_ptext"))
        prefix_mask_box_layout.addWidget(self.mask_input, 0, 1)
        self.convert_btn = QPushButton(self.ml.get_tr_text("tab_ip_information_conv_btn"))
        self.convert_btn.setIcon(QIcon("static/images/exchange.png"))
        self.convert_btn.clicked.connect(self.convert_action)
        self.prefix_input.returnPressed.connect(self.convert_action)
        self.mask_input.returnPressed.connect(self.convert_action)
        prefix_mask_box_layout.addWidget(self.convert_btn, 0, 2, alignment=Qt.AlignLeft)
        # IP information group box
        self.ip_information_box = QGroupBox(self.ml.get_tr_text("tab_ip_information_ipinfo_gbox"))
        main_layout.addWidget(self.ip_information_box, 1, 0)
        ip_information_box_layout = QGridLayout()
        ip_information_box_layout.setContentsMargins(80, 80, 80, 80)
        ip_information_box_layout.setHorizontalSpacing(30)
        ip_information_box_layout.setVerticalSpacing(15)
        self.ip_information_box.setLayout(ip_information_box_layout)
        # Row 0: IP/prefix input + "get info" button.
        self.ip_address_label = QLabel(self.ml.get_tr_text("tab_ip_information_ipadd_lab"))
        ip_information_box_layout.addWidget(self.ip_address_label, 0, 0, alignment=Qt.AlignCenter)
        self.ip_address_textfield = QLineEdit()
        self.ip_address_textfield.setPlaceholderText("192.168.1.100/24")
        self.ip_address_textfield.setAlignment(Qt.AlignCenter)
        self.ip_address_textfield.setMaxLength(18)
        ip_information_box_layout.addWidget(self.ip_address_textfield, 0, 1)
        self.get_info_btn = QPushButton(self.ml.get_tr_text("tab_ip_information_getinfo_btn"))
        self.get_info_btn.setIcon(QIcon("static/images/get_info.png"))
        self.get_info_btn.clicked.connect(self.get_info_action)
        self.ip_address_textfield.returnPressed.connect(self.get_info_action)
        ip_information_box_layout.addWidget(self.get_info_btn, 0, 2)
        # Rows 1-9: one read-only result field + copy-to-clipboard button each.
        self.ip_class_label = QLabel(self.ml.get_tr_text("tab_ip_information_ipclass_lab"))
        ip_information_box_layout.addWidget(self.ip_class_label, 1, 0, alignment=Qt.AlignCenter)
        self.ip_class_textfield = QLineEdit()
        self.ip_class_textfield.setReadOnly(True)
        self.ip_class_textfield.setAlignment(Qt.AlignCenter)
        ip_information_box_layout.addWidget(self.ip_class_textfield, 1, 1)
        self.ip_class_copy_btn = QPushButton(self.ml.get_tr_text("tab_ip_information_copy_btn"))
        self.ip_class_copy_btn.setIcon(QIcon("static/images/copy_clipboard.png"))
        self.ip_class_copy_btn.clicked.connect(lambda: copy_action(self.ip_class_textfield.text()))
        ip_information_box_layout.addWidget(self.ip_class_copy_btn, 1, 2)
        self.ip_type_label = QLabel(self.ml.get_tr_text("tab_ip_information_iptype_lab"))
        ip_information_box_layout.addWidget(self.ip_type_label, 2, 0, alignment=Qt.AlignCenter)
        self.ip_type_textfield = QLineEdit()
        self.ip_type_textfield.setReadOnly(True)
        self.ip_type_textfield.setAlignment(Qt.AlignCenter)
        ip_information_box_layout.addWidget(self.ip_type_textfield, 2, 1)
        self.ip_type_copy_btn = QPushButton(self.ml.get_tr_text("tab_ip_information_copy_btn"))
        self.ip_type_copy_btn.setIcon(QIcon("static/images/copy_clipboard.png"))
        self.ip_type_copy_btn.clicked.connect(lambda: copy_action(self.ip_type_textfield.text()))
        ip_information_box_layout.addWidget(self.ip_type_copy_btn, 2, 2)
        self.network_address_label = QLabel(self.ml.get_tr_text("tab_ip_information_netadd_lab"))
        ip_information_box_layout.addWidget(self.network_address_label, 3, 0, alignment=Qt.AlignCenter)
        self.network_address_textfield = QLineEdit()
        self.network_address_textfield.setReadOnly(True)
        self.network_address_textfield.setAlignment(Qt.AlignCenter)
        ip_information_box_layout.addWidget(self.network_address_textfield, 3, 1)
        self.network_address_copy_btn = QPushButton(self.ml.get_tr_text("tab_ip_information_copy_btn"))
        self.network_address_copy_btn.setIcon(QIcon("static/images/copy_clipboard.png"))
        self.network_address_copy_btn.clicked.connect(lambda: copy_action(self.network_address_textfield.text()))
        ip_information_box_layout.addWidget(self.network_address_copy_btn, 3, 2)
        self.subnet_mask_label = QLabel(self.ml.get_tr_text("tab_ip_information_mask_lab"))
        ip_information_box_layout.addWidget(self.subnet_mask_label, 4, 0, alignment=Qt.AlignCenter)
        self.subnet_mask_textfield = QLineEdit()
        self.subnet_mask_textfield.setReadOnly(True)
        self.subnet_mask_textfield.setAlignment(Qt.AlignCenter)
        ip_information_box_layout.addWidget(self.subnet_mask_textfield, 4, 1)
        self.subnet_mask_copy_btn = QPushButton(self.ml.get_tr_text("tab_ip_information_copy_btn"))
        self.subnet_mask_copy_btn.setIcon(QIcon("static/images/copy_clipboard.png"))
        self.subnet_mask_copy_btn.clicked.connect(lambda: copy_action(self.subnet_mask_textfield.text()))
        ip_information_box_layout.addWidget(self.subnet_mask_copy_btn, 4, 2)
        self.first_addressable_ip_label = QLabel(self.ml.get_tr_text("tab_ip_information_firstip_lab"))
        ip_information_box_layout.addWidget(self.first_addressable_ip_label, 5, 0, alignment=Qt.AlignCenter)
        self.first_addressable_ip_textfield = QLineEdit()
        self.first_addressable_ip_textfield.setReadOnly(True)
        self.first_addressable_ip_textfield.setAlignment(Qt.AlignCenter)
        ip_information_box_layout.addWidget(self.first_addressable_ip_textfield, 5, 1)
        self.first_addressable_ip_copy_btn = QPushButton(self.ml.get_tr_text("tab_ip_information_copy_btn"))
        self.first_addressable_ip_copy_btn.setIcon(QIcon("static/images/copy_clipboard.png"))
        self.first_addressable_ip_copy_btn.clicked.connect(lambda: copy_action(self.first_addressable_ip_textfield.text()))
        ip_information_box_layout.addWidget(self.first_addressable_ip_copy_btn, 5, 2)
        self.last_addressable_ip_label = QLabel(self.ml.get_tr_text("tab_ip_information_lastip_lab"))
        ip_information_box_layout.addWidget(self.last_addressable_ip_label, 6, 0, alignment=Qt.AlignCenter)
        self.last_addressable_ip_textfield = QLineEdit()
        self.last_addressable_ip_textfield.setReadOnly(True)
        self.last_addressable_ip_textfield.setAlignment(Qt.AlignCenter)
        ip_information_box_layout.addWidget(self.last_addressable_ip_textfield, 6, 1)
        self.last_addressable_ip_copy_btn = QPushButton(self.ml.get_tr_text("tab_ip_information_copy_btn"))
        self.last_addressable_ip_copy_btn.setIcon(QIcon("static/images/copy_clipboard.png"))
        self.last_addressable_ip_copy_btn.clicked.connect(lambda: copy_action(self.last_addressable_ip_textfield.text()))
        ip_information_box_layout.addWidget(self.last_addressable_ip_copy_btn, 6, 2)
        self.broadcast_address_label = QLabel(self.ml.get_tr_text("tab_ip_information_bcastip_lab"))
        ip_information_box_layout.addWidget(self.broadcast_address_label, 7, 0, alignment=Qt.AlignCenter)
        self.broadcast_address_textfield = QLineEdit()
        self.broadcast_address_textfield.setReadOnly(True)
        self.broadcast_address_textfield.setAlignment(Qt.AlignCenter)
        ip_information_box_layout.addWidget(self.broadcast_address_textfield, 7, 1)
        self.broadcast_address_copy_btn = QPushButton(self.ml.get_tr_text("tab_ip_information_copy_btn"))
        self.broadcast_address_copy_btn.setIcon(QIcon("static/images/copy_clipboard.png"))
        self.broadcast_address_copy_btn.clicked.connect(lambda: copy_action(self.broadcast_address_textfield.text()))
        ip_information_box_layout.addWidget(self.broadcast_address_copy_btn, 7, 2)
        self.next_network_address_label = QLabel(self.ml.get_tr_text("tab_ip_information_nextnetip_lab"))
        ip_information_box_layout.addWidget(self.next_network_address_label, 8, 0, alignment=Qt.AlignCenter)
        self.next_network_address_textfield = QLineEdit()
        self.next_network_address_textfield.setReadOnly(True)
        self.next_network_address_textfield.setAlignment(Qt.AlignCenter)
        ip_information_box_layout.addWidget(self.next_network_address_textfield, 8, 1)
        self.next_network_address_copy_btn = QPushButton(self.ml.get_tr_text("tab_ip_information_copy_btn"))
        self.next_network_address_copy_btn.setIcon(QIcon("static/images/copy_clipboard.png"))
        self.next_network_address_copy_btn.clicked.connect(lambda: copy_action(self.next_network_address_textfield.text()))
        ip_information_box_layout.addWidget(self.next_network_address_copy_btn, 8, 2)
        self.max_endpoint_label = QLabel(self.ml.get_tr_text("tab_ip_information_maxend_lab"))
        ip_information_box_layout.addWidget(self.max_endpoint_label, 9, 0, alignment=Qt.AlignCenter)
        self.max_endpoint_textfield = QLineEdit()
        self.max_endpoint_textfield.setReadOnly(True)
        self.max_endpoint_textfield.setAlignment(Qt.AlignCenter)
        ip_information_box_layout.addWidget(self.max_endpoint_textfield, 9, 1)
        self.max_endpoint_copy_btn = QPushButton(self.ml.get_tr_text("tab_ip_information_copy_btn"))
        self.max_endpoint_copy_btn.setIcon(QIcon("static/images/copy_clipboard.png"))
        self.max_endpoint_copy_btn.clicked.connect(lambda: copy_action(self.max_endpoint_textfield.text()))
        ip_information_box_layout.addWidget(self.max_endpoint_copy_btn, 9, 2)

    def convert_action(self):
        """Convert prefix -> mask (or mask -> prefix when only a mask is given).

        When both fields are filled, the prefix wins and overwrites the mask.
        Invalid or empty input pops a translated warning instead.
        """
        if is_empty(self.prefix_input.text()) and \
                is_empty(self.mask_input.text()):
            PopupWindow("warning", self.ml.get_tr_text("tab_ip_information_warning01"), self.prefix_input)
        elif not is_empty(self.prefix_input.text()) and \
                not is_empty(self.mask_input.text()):
            if is_correct_prefix(self.prefix_input.text()):
                # Accept "/24" or "\24" as well as bare "24".
                prefix_corrected = self.prefix_input.text().replace("/", "").replace("\\", "")
                self.mask_input.setText(get_mask_from_prefix(prefix_corrected))
            else:
                PopupWindow("warning", self.ml.get_tr_text("tab_ip_information_warning02"), self.prefix_input)
        else:
            if self.prefix_input.text():
                if is_correct_prefix(self.prefix_input.text()):
                    prefix_corrected = self.prefix_input.text().replace("/", "").replace("\\", "")
                    self.mask_input.setText(get_mask_from_prefix(prefix_corrected))
                else:
                    PopupWindow("warning", self.ml.get_tr_text("tab_ip_information_warning02"), self.prefix_input)
            else:
                if is_correct_mask(self.mask_input.text()):
                    self.prefix_input.setText(f"/{get_prefix_from_mask(self.mask_input.text())}")
                else:
                    PopupWindow("warning", self.ml.get_tr_text("tab_ip_information_warning03"), self.mask_input)

    def get_info_action(self):
        """Validate the "a.b.c.d/p" input and fill all result fields.

        Loopback (127), multicast (224-239), experimental (240-254) and 255
        first octets are rejected with warnings before any computation.
        """
        if is_empty(self.ip_address_textfield.text()):
            PopupWindow("warning", self.ml.get_tr_text("tab_ip_information_warning04"), self.ip_address_textfield)
        elif is_correct_ip_with_prefix(self.ip_address_textfield.text()):
            ip = self.ip_address_textfield.text().split("/")[0]
            ip_first_octet = int(str(ip).split(".")[0])
            prefix = self.ip_address_textfield.text().split("/")[1]
            if ip_first_octet == 127:
                PopupWindow("warning", self.ml.get_tr_text("tab_ip_information_warning05"), self.ip_address_textfield)
            elif 224 <= ip_first_octet <= 239:
                PopupWindow("warning", self.ml.get_tr_text("tab_ip_information_warning06"), self.ip_address_textfield)
            elif 240 <= ip_first_octet <= 254:
                PopupWindow("warning", self.ml.get_tr_text("tab_ip_information_warning07"), self.ip_address_textfield)
            elif ip_first_octet == 255:
                PopupWindow("warning", self.ml.get_tr_text("tab_ip_information_warning08"), self.ip_address_textfield)
            else:
                self.ip_class_textfield.setText(self.ml.get_tr_text(get_ip_class(ip)))
                self.ip_type_textfield.setText(self.ml.get_tr_text(get_ip_type(ip, prefix)))
                self.network_address_textfield.setText(get_network_address(ip, prefix))
                self.subnet_mask_textfield.setText(get_mask_from_prefix(prefix))
                self.first_addressable_ip_textfield.setText(get_first_addressable_ip(ip, prefix))
                self.last_addressable_ip_textfield.setText(get_last_addressable_ip(ip, prefix))
                self.broadcast_address_textfield.setText(get_broadcast_ip(ip, prefix))
                self.next_network_address_textfield.setText(get_next_network_ip(ip, prefix))
                self.max_endpoint_textfield.setText(get_max_endpoint_formatted(prefix))
        else:
            PopupWindow("warning", self.ml.get_tr_text("tab_ip_information_warning09"), self.ip_address_textfield)

    def re_translate_ui(self, lang):
        """Re-apply every translated label/title after a language change."""
        self.ml = ManageLng(lang)
        if self.ip_address_textfield.text():
            ip = self.ip_address_textfield.text().split("/")[0]
            prefix = self.ip_address_textfield.text().split("/")[1]
        else:
            ip = None
            prefix = None
        self.prefix_mask_box.setTitle(self.ml.get_tr_text("tab_ip_information_prefix_mask_conv"))
        self.prefix_input.setPlaceholderText(self.ml.get_tr_text("tab_ip_information_prefix_ptext"))
        self.mask_input.setPlaceholderText(self.ml.get_tr_text("tab_ip_information_mask_ptext"))
        self.convert_btn.setText(self.ml.get_tr_text("tab_ip_information_conv_btn"))
        self.ip_information_box.setTitle(self.ml.get_tr_text("tab_ip_information_ipinfo_gbox"))
        self.ip_address_label.setText(self.ml.get_tr_text("tab_ip_information_ipadd_lab"))
        self.get_info_btn.setText(self.ml.get_tr_text("tab_ip_information_getinfo_btn"))
        self.ip_class_label.setText(self.ml.get_tr_text("tab_ip_information_ipclass_lab"))
        self.ip_class_copy_btn.setText(self.ml.get_tr_text("tab_ip_information_copy_btn"))
        self.ip_type_label.setText(self.ml.get_tr_text("tab_ip_information_iptype_lab"))
        self.ip_type_copy_btn.setText(self.ml.get_tr_text("tab_ip_information_copy_btn"))
        self.network_address_label.setText(self.ml.get_tr_text("tab_ip_information_netadd_lab"))
        self.network_address_copy_btn.setText(self.ml.get_tr_text("tab_ip_information_copy_btn"))
        self.subnet_mask_label.setText(self.ml.get_tr_text("tab_ip_information_mask_lab"))
        self.subnet_mask_copy_btn.setText(self.ml.get_tr_text("tab_ip_information_copy_btn"))
        self.first_addressable_ip_label.setText(self.ml.get_tr_text("tab_ip_information_firstip_lab"))
        self.first_addressable_ip_copy_btn.setText(self.ml.get_tr_text("tab_ip_information_copy_btn"))
        self.last_addressable_ip_label.setText(self.ml.get_tr_text("tab_ip_information_lastip_lab"))
        self.last_addressable_ip_copy_btn.setText(self.ml.get_tr_text("tab_ip_information_copy_btn"))
        self.broadcast_address_label.setText(self.ml.get_tr_text("tab_ip_information_bcastip_lab"))
        self.broadcast_address_copy_btn.setText(self.ml.get_tr_text("tab_ip_information_copy_btn"))
        self.next_network_address_label.setText(self.ml.get_tr_text("tab_ip_information_nextnetip_lab"))
        self.next_network_address_copy_btn.setText(self.ml.get_tr_text("tab_ip_information_copy_btn"))
        self.max_endpoint_label.setText(self.ml.get_tr_text("tab_ip_information_maxend_lab"))
        self.max_endpoint_copy_btn.setText(self.ml.get_tr_text("tab_ip_information_copy_btn"))
        # Re-translate computed class/type values if a lookup was already done.
        if self.ip_class_textfield.text() and self.ip_type_textfield.text():
            self.ip_class_textfield.setText(self.ml.get_tr_text(get_ip_class(ip)))
            self.ip_type_textfield.setText(self.ml.get_tr_text(get_ip_type(ip, prefix)))


def copy_action(input_field_text):
    """Copy the given text to the system clipboard (no-op for empty text)."""
    if input_field_text:
        copy(input_field_text)


def get_32bit_format(ip_address):
    """Return the 32-character binary-string form of a dotted-decimal IP."""
    format_32bit = ""
    for octet in ip_address.split("."):
        format_32bit += f'{bin(int(octet)).replace("0b", "").rjust(8, "0")}'
    return format_32bit


# NOTE(review): the next function's header continues on the following source
# line — this copy of the file was reflowed mid-definition.
def
def get_ip_from_32bit_format(format_32bit):
    """Convert a 32-character binary string back to dotted-decimal notation."""
    return ".".join(str(int(octet, 2)) for octet in wrap(format_32bit, 8))


def get_mask_from_prefix(prefix):
    """Return the dotted-decimal subnet mask for a prefix length (str or int)."""
    mask_bits = (int(prefix) * "1").ljust(32, "0")
    return ".".join(str(int(octet, 2)) for octet in wrap(mask_bits, 8))


def get_prefix_from_mask(mask):
    """Return the prefix length (int) of a dotted-decimal subnet mask."""
    # Popcount of the whole mask == number of leading 1-bits for a valid mask.
    return sum(bin(int(octet)).count("1") for octet in mask.split("."))


def get_ip_class(ip_address):
    """Return the translation key for the IP's class and public/private status.

    Returns None for first octets >= 224 — the UI rejects multicast and
    experimental ranges before calling this.
    """
    first_octet = int(str(ip_address).split(".")[0])
    second_octet = int(str(ip_address).split(".")[1])
    if 0 <= first_octet <= 127:
        if first_octet == 10:
            return "tab_ip_information_ipclass_a_class_pri"
        return "tab_ip_information_ipclass_a_class_pub"
    elif 128 <= first_octet <= 191:
        # 172.16-31.x.x is RFC1918 private; 169.254.x.x (link-local) is
        # reported as private here as well.
        if (first_octet == 172 and 16 <= second_octet <= 31) or (first_octet == 169 and second_octet == 254):
            return "tab_ip_information_ipclass_b_class_pri"
        return "tab_ip_information_ipclass_b_class_pub"
    elif 192 <= first_octet <= 223:
        if first_octet == 192 and second_octet == 168:
            return "tab_ip_information_ipclass_c_class_pri"
        return "tab_ip_information_ipclass_c_class_pub"
    return None  # was an implicit fall-through; made explicit


def get_ip_type(ip_address, prefix):
    """Classify the IP as network, broadcast, or host address (translation key)."""
    if ip_address == get_network_address(ip_address, prefix):
        return "tab_ip_information_iptype_netadd"
    if ip_address == get_broadcast_ip(ip_address, prefix):
        return "tab_ip_information_iptype_broadd"
    return "tab_ip_information_iptype_hostadd"


def get_network_address(ip_address, prefix):
    """Return the network address of ip_address/prefix in dotted-decimal form."""
    mask_32bit = get_32bit_format(get_mask_from_prefix(prefix))
    ip_address_32bit = get_32bit_format(ip_address)
    host_bits = mask_32bit.count("0")
    # Fixed: the original used ip[:-host_bits], which returns "" for /32
    # (slice [:-0]); slicing by explicit length handles every prefix.
    network_address_32bit = ip_address_32bit[:32 - host_bits] + "0" * host_bits
    return get_ip_from_32bit_format(network_address_32bit)


def get_first_addressable_ip(ip_address, prefix):
    """Return the first host address: network address + 1."""
    network = int(get_32bit_format(get_network_address(ip_address, prefix)), 2)
    return get_ip_from_32bit_format(format(network + 1, "032b"))


def get_last_addressable_ip(ip_address, prefix):
    """Return the last host address: network address + max host count."""
    network = int(get_32bit_format(get_network_address(ip_address, prefix)), 2)
    # (Simplified: the original converted the count int -> bin -> int again.)
    return get_ip_from_32bit_format(format(network + int(get_max_endpoint(prefix)), "032b"))


def get_broadcast_ip(ip_address, prefix):
    """Return the broadcast address: last host address + 1."""
    last_ip = int(get_32bit_format(get_last_addressable_ip(ip_address, prefix)), 2)
    return get_ip_from_32bit_format(format(last_ip + 1, "032b"))


def get_next_network_ip(ip_address, prefix):
    """Return the next network's address, or "-" at the top of the usable ranges."""
    broadcast_ip_32bit = get_32bit_format(get_broadcast_ip(ip_address, prefix))
    # 223.255.255.255 ends the class-C space; 126.255.255.255 is the last
    # address before the 127/8 loopback block — neither has a "next network".
    if broadcast_ip_32bit in ("11011111111111111111111111111111",
                              "01111110111111111111111111111111"):
        return "-"
    return get_ip_from_32bit_format(format(int(broadcast_ip_32bit, 2) + 1, "032b"))


def get_max_endpoint(prefix):
    """Return (as str) the number of addressable hosts: 2**hostbits - 2."""
    hostbits = 32 - int(prefix)
    return str(2 ** hostbits - 2)


def get_max_endpoint_formatted(prefix):
    """Return the host count formatted with locale-aware digit grouping."""
    # (Removed a no-op str.format(...) call that discarded its arguments.)
    setlocale(LC_ALL, "")
    return format_string("%d", int(get_max_endpoint(prefix)), grouping=True)
# -*- coding: utf-8 -*- """ Created on Tue Apr 28 17:05:11 2020 @author: FAHMI-PC """ #import library import os import sys import cv2 import csv import smtplib import mimetypes import datetime import numpy as np import pandas as pd import time from do_something import * from email import encoders from PIL import Image,ImageTk from email.mime.text import MIMEText from email.mime.base import MIMEBase from email.message import EmailMessage from tkinter.filedialog import askopenfilename from email.mime.multipart import MIMEMultipart from PIL import Image, ImageTk try: import Tkinter as tk except ImportError: import tkinter as tk try: import ttk py3 = False except ImportError: import tkinter.ttk as ttk py3 = True def vp_start_gui(): '''Starting point when module is the main routine.''' global val, w, root root = tk.Tk() top = mainScreen (root) root.mainloop() w = None def create_mainScreen(root, *args, **kwargs): '''Starting point when module is imported by another program.''' global w, w_win, rt rt = root w = tk.Toplevel (root) top = mainScreen (w) AMS_support.init(w, top, *args, **kwargs) return (w, top) def destroy_mainScreen(): global w w.destroy() w = None class mainScreen: def __init__(self, top=None): '''This class configures and populates the toplevel window. 
top is the toplevel containing window.''' _bgcolor = '#d9d9d9' # X11 color: 'gray85' _fgcolor = '#000000' # X11 color: 'black' _compcolor = '#d9d9d9' # X11 color: 'gray85' _ana1color = '#d9d9d9' # X11 color: 'gray85' _ana2color = '#ececec' # Closest X11 color: 'gray92' font9 = "-family {SF Pro Display} -size 14 -weight bold -slant" \ " roman -underline 0 -overstrike 0" font10 = "-family {SF Pro Display} -size 14 -weight bold " \ "-slant roman -underline 0 -overstrike 0" font11 = "-family {SF Pro Display} -size 14 -weight bold " \ "-slant roman -underline 0 -overstrike 0" font12 = "-family {SF Pro Display} -size 12 -weight bold " \ "-slant roman -underline 0 -overstrike 0" def deleteID(): self.studentID.delete(first=0, last=30) def deleteName(): self.studentName.delete(first=0, last=30) def testVal(inStr, acttyp): if acttyp == '1': if not inStr.isdigit(): return False return True def takeImage(): entryOne = self.studentID.get() entryTwo = self.studentName.get() if entryOne == "": self.Notification.configure(background="#800000") self.Notification.configure(foreground="#FFFFFF") self.Notification.configure(text="Please enter ID!") elif entryTwo == "": self.Notification.configure(background="#800000") self.Notification.configure(foreground="#FFFFFF") self.Notification.configure(text="Please enter Name!") else: try: cam = cv2.VideoCapture(0) detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') ID = self.studentID.get() Name = self.studentName.get() sampleNum = 0 while (True): ret, img = cam.read() gray = cv2.cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = detector.detectMultiScale(gray, 1.3, 5) for (x, y, w, h) in faces: cv2.rectangle(img, (x, y), (x + w, y + h), (255,255,255), 5) sampleNum += 1 cv2.imwrite("TrainingImage/ " + Name + "." + ID + "." 
+ str(sampleNum) + ".png", gray[y:y + h, x:x + w]) cv2.imshow("Taking images for student " + self.studentName.get(), img) if 0xFF == ord('Q') & cv2.waitKey(1): break elif sampleNum >= 100: break cam.release() cv2.destroyAllWindows() ts = time.time() ######################Check for errors below###################### Date = datetime.datetime.fromtimestamp(ts).strftime("%d/%m/%Y") Time = datetime.datetime.fromtimestamp(ts).strftime("%H:%M:%S") row = [ID, Name, Date, Time] with open("StudentDetails.csv", "a+") as csvFile: writer = csv.writer(csvFile, delimiter=',') writer.writerow(row) csvFile.close() res = "Images Saved for ID : " + ID + " Name : " + Name self.Notification.configure(text=res, bg="#008000", width=64, font=('SF Pro Display', 16, 'bold')) self.Notification.place(x=92, y=430) except FileExistsError as F: f = 'Student Data already exists' self.Notification.configure(text=f, bg="Red", width=64) self.Notification.place(x=92, y=430) def trainImage(): recognizer = cv2.face.LBPHFaceRecognizer_create() global detector detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml") global faces,Id faces, Id = getImagesAndLabels("TrainingImage") recognizer.train(faces, np.array(Id)) recognizer.write('TrainingImageLabel/Trainer.yml') res = "Student has been trained by the software." 
self.Notification.configure(text=res, bg="#008000", width=64, font=('SF Pro Display', 16, 'bold')) self.Notification.place(x=92, y=430) def getImagesAndLabels(path): imagePaths = [os.path.join(path, f) for f in os.listdir(path)] faceSamples = [] IDS = [] for imagePath in imagePaths: pilImage = Image.open(imagePath).convert('L') imageNp = np.array(pilImage, 'uint8') Id = int(os.path.split(imagePath)[-1].split(".")[1]) faces = detector.detectMultiScale(imageNp) for (x, y, w, h) in faces: faceSamples.append(imageNp[y:y + h, x:x + w]) IDS.append(Id) return faceSamples, IDS def autoAttendance(): def fillAttendance(): SubjectEntry = self.subjectEntry.get() now = time.time() future = now + 25 if time.time() < future: if SubjectEntry == "": self.welcomeMessageAuto.configure(background="#800000") self.welcomeMessageAuto.configure(foreground="#FFFFFF") self.welcomeMessageAuto.configure(text="Please enter subject!") else: recognizer = cv2.face.LBPHFaceRecognizer_create() try: recognizer.read("TrainingImageLabel\Trainer.yml") except: self.welcomeMessageAuto.configure(text='Please make a folder names "TrainingImage"') harcascadePath = "haarcascade_frontalface_default.xml" faceCascade = cv2.CascadeClassifier(harcascadePath) df = pd.read_csv("StudentDetails.csv") cam = cv2.VideoCapture(0) font = cv2.FONT_HERSHEY_SIMPLEX colNames = ['ID','Date','Time'] attendance = pd.DataFrame(columns = colNames) while True: ret, im = cam.read() gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale(gray, 1.2, 5) for (x, y, w, h) in faces: global Id Id, conf = recognizer.predict(gray[y:y + h, x:x + w]) if conf <= 100: print (conf) global Subject global aa global date global timeStamp Subject = self.subjectEntry.get() ts = time.time() date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d') timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S') aa = df.loc[df['ID'] == Id].values tt = "ID: " + str(Id) attendance.loc[len(attendance)] = [Id, date, 
timeStamp] cv2.rectangle(im, (x,y), (x + w, y + h), (250, 250, 250), 7) cv2.putText(im, (tt), (x + h, y), font, 1, (255, 255, 0,), 4) else: ID = "Unknown" cv2.rectangle(im, (x, y), (x + w, y + h), (0, 25, 255), 7) cv2.putText(im, str(ID), (x + h, y), font, 1, (0, 25, 255), 4) if time.time() > future: break attendance = attendance.drop_duplicates(['ID'], keep = 'first') cv2.imshow("Filling attedance ...", im) key = cv2.waitKey(30) &0xFF if key == 27: break ts = time.time() date = datetime.datetime.fromtimestamp(ts).strftime("%d_%m_%Y") timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S') Hour, Minute, Second = timeStamp.split(":") fileName = "Attendance/" + self.subjectEntry.get() + "_" + date + "_Time_" + Hour + "_" + Second + ".csv" attendance = attendance.drop_duplicates(['ID'], keep = "first") print (attendance) ######################Check for errors below###################### attendance.to_csv(fileName, index = False) dateForDB = datetime.datetime.fromtimestamp(ts).strftime('%Y_%m_%d') dbTableName = str(Subject + "_" + dateForDB + "_Time_" + Hour + "_" + Minute + "_" + Second) import mysql.connector try: connection = mysql.connector.connect(host='localhost', user='root', password='root', database='ams') cursor = connection.cursor() except Exception as e: print (e) sql = "CREATE TABLE " + dbTableName + """(SrNo INT NOT NULL AUTO_INCREMENT, ID varchar(100) NOT NULL, Name VARCHAR(50) NOT NULL, Date VARCHAR(20) NOT NULL, Time VARCHAR(20) NOT NULL, PRIMARY KEY (SrNo)); """ insertData = "INSERT INTO " + dbTableName + " (SrNo, ID, Name, Date, Time) VALUES (0, "+str(Id)+", "+str(aa)+", "+str(date)+", "+str(timeStamp)+");" try: cursor.execute(sql) cursor.execute(insertData) except Exception as ex: print(ex) self.welcomeMessageAuto.configure(text="Attendance filled Successfully") cam.release() cv2.destroyAllWindows() root = tk.Tk() root.title("Attendance of " + Subject) root.configure(background="#1B1B1B") root.configure(highlightbackground="#d9d9d9") 
root.configure(highlightcolor="black") root.iconbitmap("mainIcon.ico") root.focus_force() cs = 'D:/FOLDER KHUSUS NGAMPUS/SEMESTER 6/SISTEM TERSEBAR/UTS/UTS/FIX/Multiprocessing/Smart Absensi/Smart Absensi' + fileName with open(cs, newline="") as file: reader = csv.reader(file) r = 0 for col in reader: c = 0 for row in col: label = tk.Label(root, width=8, height=1, fg="black", font=('SF Pro Display', 15, ' bold '), bg="#008000", text=row) label.grid(row=r, column=c) c += 1 r += 1 root.mainloop() subjectScreen = tk.Tk() subjectScreen.iconbitmap("icon.ico") subjectScreen.title("Enter Subject for Automatic Attendance") subjectScreen.geometry("585x325+316+165") subjectScreen.resizable(0,0) subjectScreen.configure(background="#1B1B1B") subjectScreen.focus_force() self.welcomeMessageAuto = tk.Message(subjectScreen) self.welcomeMessageAuto.place(relx=0.12, rely=0.591, relheight=0.102, relwidth=0.742) self.welcomeMessageAuto.configure(background="#008000") self.welcomeMessageAuto.configure(font=font9) self.welcomeMessageAuto.configure(foreground="#FFFFFF") self.welcomeMessageAuto.configure(highlightbackground="#d9d9d9") self.welcomeMessageAuto.configure(highlightcolor="black") self.welcomeMessageAuto.configure(text='''Welcome, +Username''') self.welcomeMessageAuto.configure(width=434) self.enterSubject = tk.Label(subjectScreen) self.enterSubject.place(relx=0.12, rely=0.431, height=29, width=132) self.enterSubject.configure(activebackground="#f9f9f9") self.enterSubject.configure(activeforeground="black") self.enterSubject.configure(background="#1B1B1B") self.enterSubject.configure(disabledforeground="#a3a3a3") self.enterSubject.configure(font="-family {SF Pro Display} -size 14 -weight bold") self.enterSubject.configure(foreground="#FFFFFF") self.enterSubject.configure(highlightbackground="#d9d9d9") self.enterSubject.configure(highlightcolor="black") self.enterSubject.configure(text='''Enter Subject:''') self.subjectEntry = tk.Entry(subjectScreen) 
self.subjectEntry.place(relx=0.41, rely=0.431, height=27, relwidth=0.451) self.subjectEntry.configure(background="#D9D9D9") self.subjectEntry.configure(disabledforeground="#a3a3a3") self.subjectEntry.configure(font="-family {SF Pro Display} -size 14 -weight bold") self.subjectEntry.configure(foreground="#000000") self.subjectEntry.configure(highlightbackground="#d9d9d9") self.subjectEntry.configure(highlightcolor="black") self.subjectEntry.configure(insertbackground="black") self.subjectEntry.configure(selectbackground="#c4c4c4") self.subjectEntry.configure(selectforeground="black") self.fillAttendanceBtnAuto = tk.Button(subjectScreen) self.fillAttendanceBtnAuto.place(relx=0.12, rely=0.769, height=38, width=154) self.fillAttendanceBtnAuto.configure(activebackground="#ececec") self.fillAttendanceBtnAuto.configure(activeforeground="#000000") self.fillAttendanceBtnAuto.configure(background="#2E2E2E") self.fillAttendanceBtnAuto.configure(disabledforeground="#a3a3a3") self.fillAttendanceBtnAuto.configure(font="-family {SF Pro Display} -size 14 -weight bold") self.fillAttendanceBtnAuto.configure(foreground="#FFFFFF") self.fillAttendanceBtnAuto.configure(highlightbackground="#d9d9d9") self.fillAttendanceBtnAuto.configure(highlightcolor="black") self.fillAttendanceBtnAuto.configure(pady="0") self.fillAttendanceBtnAuto.configure(text='''Fill Attendance''') self.fillAttendanceBtnAuto.configure(command=fillAttendance) subjectScreen.mainloop() self.enterSubject = tk.Label(subName) self.enterSubject.place(relx=0.12, rely=0.431, height=29, width=132) self.enterSubject.configure(background="#1B1B1B") self.enterSubject.configure(disabledforeground="#a3a3a3") self.enterSubject.configure(font=font9) self.enterSubject.configure(foreground="#FFFFFF") self.enterSubject.configure(text='''Enter Subject:''') self.subjectEntry = tk.Entry(subName) self.subjectEntry.place(relx=0.41, rely=0.431, height=27, relwidth=0.451) self.subjectEntry.configure(background="#D9D9D9") 
self.subjectEntry.configure(disabledforeground="#a3a3a3") self.subjectEntry.configure(font=font9) self.subjectEntry.configure(foreground="#000000") self.subjectEntry.configure(insertbackground="black") self.fillAttendanceBtn = tk.Button(subName) self.fillAttendanceBtn.place(relx=0.598, rely=0.769, height=38, width=154) self.fillAttendanceBtn.configure(activebackground="#ececec") self.fillAttendanceBtn.configure(activeforeground="#000000") self.fillAttendanceBtn.configure(background="#2E2E2E") self.fillAttendanceBtn.configure(disabledforeground="#a3a3a3") self.fillAttendanceBtn.configure(font=font9) self.fillAttendanceBtn.configure(foreground="#FFFFFF") self.fillAttendanceBtn.configure(highlightbackground="#d9d9d9") self.fillAttendanceBtn.configure(highlightcolor="black") self.fillAttendanceBtn.configure(pady="0") self.fillAttendanceBtn.configure(text='''Fill Attendance''') self.fillAttendanceBtn.configure(command=fillAttendanceManual) self.chooseSubject = tk.Message(subName) self.chooseSubject.place(relx=0.0, rely=0.062, relheight=0.217, relwidth=1.009) self.chooseSubject.configure(background="#2E2E2E") self.chooseSubject.configure(font="-family {SF Pro Display} -size 36 -weight bold") self.chooseSubject.configure(foreground="#FFFFFF") self.chooseSubject.configure(highlightbackground="#d9d9d9") self.chooseSubject.configure(highlightcolor="black") self.chooseSubject.configure(text='''Choose Subject''') self.chooseSubject.configure(width=585) self.welcomeMessageSubject = tk.Message(subName) self.welcomeMessageSubject.place(relx=0.12, rely=0.591, relheight=0.102, relwidth=0.742) self.welcomeMessageSubject.configure(background="#008000") self.welcomeMessageSubject.configure(font="-family {SF Pro Display} -size 14 -weight bold") self.welcomeMessageSubject.configure(foreground="#FFFFFF") self.welcomeMessageSubject.configure(highlightbackground="#d9d9d9") self.welcomeMessageSubject.configure(highlightcolor="black") self.welcomeMessageSubject.configure(text='''Welcome, 
+Username''') self.welcomeMessageSubject.configure(width=434) subName.mainloop() top.geometry("1367x696+-9+0") top.minsize(120, 1) top.maxsize(1370, 749) top.resizable(0, 0) top.iconbitmap("icon.ico") top.focus_force() top.title("SMART ABSENSI - FACE 2 UNLOCK") top.configure(background="#1B1B1B") top.configure(highlightbackground="#d9d9d9") top.configure(highlightcolor="black") self.Title = tk.Message(top) self.Title.place(relx=-0.007, rely=0.042, relheight=0.134, relwidth=1.005) self.Title.configure(background="#2E2E2E") self.Title.configure(font="-family {SF Pro Display} -size 36 -weight bold") self.Title.configure(foreground="#FFFFFF") self.Title.configure(highlightbackground="#D9D9D9") self.Title.configure(highlightcolor="black") self.Title.configure(text='''FACE 2 UNLOCK - SMART ABSENSI''') self.Title.configure(width=1374) self.studentID = tk.Entry(top) self.studentID.place(relx=0.338, rely=0.345,height=33, relwidth=0.237) self.studentID.configure(background="#D9D9D9") self.studentID.configure(disabledforeground="#a3a3a3") self.studentID.configure(font="-family {SF Pro Display} -size 18 -weight bold") self.studentID.configure(foreground="#000000") self.studentID.configure(highlightbackground="#d9d9d9") self.studentID.configure(highlightcolor="black") self.studentID.configure(insertbackground="black") self.studentID.configure(relief="flat") self.studentID.configure(selectbackground="#007878d7d777") self.studentID.configure(selectforeground="black") self.studentID.configure(validate='key') self.studentID['validatecommand'] = (self.studentID.register(testVal), '%P', '%d') self.labelStudentID = tk.Label(top) self.labelStudentID.place(relx=0.067, rely=0.348, height=31, width=204) self.labelStudentID.configure(activebackground="#f9f9f9") self.labelStudentID.configure(activeforeground="black") self.labelStudentID.configure(background="#1B1B1B") self.labelStudentID.configure(disabledforeground="#a3a3a3") self.labelStudentID.configure(font="-family {SF Pro Display} 
-size 18 -weight bold") self.labelStudentID.configure(foreground="#FFFFFF") self.labelStudentID.configure(highlightbackground="#d9d9d9") self.labelStudentID.configure(highlightcolor="black") self.labelStudentID.configure(text='''Enter Student ID:''') self.labelStudentName = tk.Label(top) self.labelStudentName.place(relx=0.067, rely=0.454, height=35, width=245) self.labelStudentName.configure(activebackground="#f9f9f9") self.labelStudentName.configure(activeforeground="black") self.labelStudentName.configure(background="#1B1B1B") self.labelStudentName.configure(disabledforeground="#a3a3a3") self.labelStudentName.configure(font="-family {SF Pro Display} -size 18 -weight bold") self.labelStudentName.configure(foreground="#FFFFFF") self.labelStudentName.configure(highlightbackground="#d9d9d9") self.labelStudentName.configure(highlightcolor="black") self.labelStudentName.configure(text='''Enter Student Name:''') self.studentName = tk.Entry(top) self.studentName.place(relx=0.338, rely=0.46,height=33, relwidth=0.237) self.studentName.configure(background="#D9D9D9") self.studentName.configure(disabledforeground="#a3a3a3") self.studentName.configure(font="-family {SF Pro Display} -size 18 -weight bold") self.studentName.configure(foreground="#000000") self.studentName.configure(highlightbackground="#d9d9d9") self.studentName.configure(highlightcolor="black") self.studentName.configure(insertbackground="black") self.studentName.configure(selectbackground="#c4c4c4") self.studentName.configure(selectforeground="black") self.clearID = tk.Button(top) self.clearID.place(relx=0.636, rely=0.345, height=38, width=66) self.clearID.configure(activebackground="#ececec") self.clearID.configure(activeforeground="#000000") self.clearID.configure(background="#2E2E2E") self.clearID.configure(disabledforeground="#a3a3a3") self.clearID.configure(font="-family {SF Pro Display} -size 14 -weight bold") self.clearID.configure(foreground="#FFFFFF") 
self.clearID.configure(highlightbackground="#d9d9d9") self.clearID.configure(highlightcolor="black") self.clearID.configure(pady="0") self.clearID.configure(text='''Clear''') self.clearID.configure(command=deleteID) self.clearName = tk.Button(top) self.clearName.place(relx=0.636, rely=0.46, height=38, width=66) self.clearName.configure(activebackground="#ececec") self.clearName.configure(activeforeground="#000000") self.clearName.configure(background="#2E2E2E") self.clearName.configure(disabledforeground="#a3a3a3") self.clearName.configure(font="-family {SF Pro Display} -size 14 -weight bold") self.clearName.configure(foreground="#FFFFFF") self.clearName.configure(highlightbackground="#d9d9d9") self.clearName.configure(highlightcolor="black") self.clearName.configure(pady="0") self.clearName.configure(text='''Clear''') self.clearName.configure(command=deleteName) self.Notification = tk.Label(top) self.Notification.configure(text="Welcome, + Username") self.Notification.configure(background="#008000") self.Notification.configure(foreground="#FFFFFF") self.Notification.configure(width=64, height=2) self.Notification.configure(font="-family {SF Pro Display} -size 16 -weight bold") self.Notification.place(x=92, y=430) self.takeImages = tk.Button(top) self.takeImages.place(relx=0.150, rely=0.818, height=38, width=133) self.takeImages.configure(activebackground="#ececec") self.takeImages.configure(activeforeground="#000000") self.takeImages.configure(background="#2E2E2E") self.takeImages.configure(disabledforeground="#a3a3a3") self.takeImages.configure(font=font10) self.takeImages.configure(foreground="#FFFFFF") self.takeImages.configure(highlightbackground="#d9d9d9") self.takeImages.configure(highlightcolor="black") self.takeImages.configure(pady="0") self.takeImages.configure(text='''Take Images''') self.takeImages.configure(command=takeImage) self.trainStudent = tk.Button(top) self.trainStudent.place(relx=0.300, rely=0.818, height=38, width=139) 
self.trainStudent.configure(activebackground="#ececec") self.trainStudent.configure(activeforeground="#000000") self.trainStudent.configure(background="#2E2E2E") self.trainStudent.configure(disabledforeground="#a3a3a3") self.trainStudent.configure(font=font11) self.trainStudent.configure(foreground="#FFFFFF") self.trainStudent.configure(highlightbackground="#d9d9d9") self.trainStudent.configure(highlightcolor="black") self.trainStudent.configure(pady="0") self.trainStudent.configure(text='''Train Student''') self.trainStudent.configure(command=trainImage) self.automaticAttendance = tk.Button(top) self.automaticAttendance.place(relx=0.450, rely=0.818, height=38, width=220) self.automaticAttendance.configure(activebackground="#ececec") self.automaticAttendance.configure(activeforeground="#000000") self.automaticAttendance.configure(background="#2E2E2E") self.automaticAttendance.configure(disabledforeground="#a3a3a3") self.automaticAttendance.configure(font=font11) self.automaticAttendance.configure(foreground="#FFFFFF") self.automaticAttendance.configure(highlightbackground="#d9d9d9") self.automaticAttendance.configure(highlightcolor="black") self.automaticAttendance.configure(pady="0") self.automaticAttendance.configure(text='''Automatic Attendance''') self.automaticAttendance.configure(command=autoAttendance) self.authorDetails = tk.Message(top) self.authorDetails.place(relx=0.753, rely=0.46, relheight=0.407, relwidth=0.19) self.authorDetails.configure(background="#2E2E2E") self.authorDetails.configure(font=font12) self.authorDetails.configure(foreground="#ffffff") self.authorDetails.configure(highlightbackground="#d9d9d9") self.authorDetails.configure(highlightcolor="black") self.authorDetails.configure(text='''FACE 2 UNLOCK - TUGAS BESAR SISTEM PAKAR 1. Muhammad Fahmi - 1174021 2. M Dzihan Al-Banna - 1174095 3. M Tomy Nur Maulidy - 1174031 4. Choirul Anam - 1174004 5. Damara Benedikta S - 1174012 6. 
Dezha Aidil Martha - 1174025''') self.authorDetails.configure(width=260) if __name__ == '__main__': vp_start_gui()
"""Utilities for turning per-slice 2D predictions into globally-labeled 3D
segmentations: per-slice labeling with globally unique ids, IoU-based matching
of ids between slices, and relabeling via a merge mapping."""
import numpy as np
from .seg_util import seg_iou2d
from ..io import readVol
from .region_graph import merge_id
from skimage.morphology import remove_small_objects
from tqdm import tqdm


def predToSeg2d(seg, th_opt = [0, 0.9*255]):
    """Label each z-slice of a prediction volume independently, offsetting the
    ids of every slice so they are unique across the whole volume.

    th_opt[0] selects the method: 0 = connected components on a threshold
    (th_opt[1] is the threshold), 1 = watershed via imToSeg_2d
    (th_opt[1:4] = hole/small thresholds and seed footprint), -1 = slices are
    already label maps and are used as-is.
    Returns a uint32 volume of per-slice labels with globally unique ids.
    NOTE(review): the default is a mutable list shared across calls; it is
    never mutated here, but callers should not rely on that.
    """
    # https://www.frontiersin.org/articles/10.3389/fnana.2018.00092/full
    if th_opt[0] == 0:  # cc
        from skimage.measure import label
    elif th_opt[0] == 1:  # watershed
        from .seg import imToSeg_2d
    print('find global id')
    seg_cc=np.zeros(seg.shape, np.uint32)
    mid=0  # running maximum id so far; added to each new slice's ids
    for z in range(seg.shape[0]):
        if th_opt[0] == 0:  # cc: threshold then 4-connected components
            th_pred = th_opt[1]
            tmp = label(seg[z] > th_pred, 4)
        elif th_opt[0] == 1:  # watershed
            th_hole, th_small,seed_footprint = th_opt[1], th_opt[2], th_opt[3]
            tmp = imToSeg_2d(seg[z], th_hole, th_small,seed_footprint)
        elif th_opt[0] == -1:  # direct assignment: slice is already a label map
            tmp = seg[z]
        # shift this slice's foreground ids past every id used so far
        tmp[tmp>0] += mid
        seg_cc[z] = tmp
        mid = tmp.max()
    return seg_cc


def seg2dToGlobalId(fn_seg, im_id):
    """Compute, for slices stored one-file-per-z (fn_seg % zz), the cumulative
    max-id offsets: out[i] is the id offset to add to slice i's local ids.
    Returns an int array of length len(im_id)+1 (out[0] == 0)."""
    out = np.zeros(1+len(im_id), int)
    for i,zz in enumerate(tqdm(im_id)):
        out[i+1] = readVol(fn_seg % zz).max()
    out = np.cumsum(out)
    return out


def iouToMatches(fn_iou, im_id, global_id=None, th_iou=0.1):
    """Collect id pairs whose IoU exceeds th_iou from precomputed per-slice
    IoU tables (fn_iou % im_id[z]).

    Assumes IoU rows are [id_a, id_b, area_a, area_b, intersection] — the
    score below is intersection / union.  TODO confirm against seg_iou2d.
    If global_id (from seg2dToGlobalId) is given, local ids are shifted to
    global ids.  Returns a 2 x M array of matched id pairs.
    """
    # assume each 2d seg id is not overlapped
    mm=[None]*(len(im_id))
    for z in tqdm(range(len(im_id))):
        iou = readVol(fn_iou % im_id[z])
        # IoU = intersection / (area_a + area_b - intersection)
        sc = iou[:,4].astype(float)/(iou[:,2]+iou[:,3]-iou[:,4])
        gid = sc>th_iou
        mm[z] = iou[gid,:2].T
        if global_id is not None:
            mm[z][0] += global_id[z]
            mm[z][1] += global_id[z+1]
    return np.hstack(mm)


def seg2dToMatches(seg, th_iou=0.1):
    """Same as iouToMatches but computes the IoU tables in-memory between
    consecutive slices of a label volume whose ids are already globally
    unique.  Returns a 2 x M array of matched id pairs."""
    # assume each 2d seg id is not overlapped
    mm=[None]*(seg.shape[0]-1)
    for z in tqdm(range(1, seg.shape[0])):
        iou = seg_iou2d(seg[z-1],seg[z])
        sc = iou[:,4].astype(float)/(iou[:,2]+iou[:,3]-iou[:,4])
        gid = sc>th_iou
        mm[z-1] = iou[gid,:2].T
        print(z-1,mm[z-1].shape[1])
    return np.hstack(mm)


def seg2dMapping(seg, mapping):
    """Relabel seg in place: ids covered by `mapping` are looked up through
    it; ids beyond the mapping are shifted down so the id space stays
    compact.  NOTE: mutates and returns the input array."""
    mapping_len = np.uint64(len(mapping))
    mapping_max = mapping.max()
    ind = seg<mapping_len
    seg[ind] = mapping[seg[ind]]  # if within mapping: relabel
    seg[np.logical_not(ind)] -= (mapping_len-mapping_max)  # if beyond mapping range, shift left
    return seg


def seg2dToGlobal(seg, mapping=None, mid=None, th_sz=-1):
    """Merge per-slice ids into global objects.

    Either pass an explicit `mapping`, or pass `mid` (2 x M matched id pairs,
    e.g. from seg2dToMatches) from which a merge mapping is built via
    union-find (merge_id).  Objects smaller than th_sz voxels are removed
    when th_sz > 0.  Mutates and returns `seg`.
    """
    if mapping is None:
        mid = mid.astype(np.uint32)
        mapping = merge_id(mid[0],mid[1])
    seg = seg2dMapping(seg, mapping)
    if th_sz>0:
        seg=remove_small_objects(seg, th_sz)
    return seg
# coding: utf-8

"""
Copyright 2016 SmartBear Software

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

   Ref: https://github.com/swagger-api/swagger-codegen
"""

from pprint import pformat
import re


class NfsNlmLocksLock(object):
    """Swagger model for a single NFS NLM lock.

    NOTE: This class is auto generated by the swagger code generator
    program.  Structure (attribute names/types) must not change; this
    revision fixes ``__eq__`` to tolerate comparison against arbitrary
    objects and replaces ``six.iteritems`` with the equivalent
    ``dict.items()`` so the module has no third-party dependency.
    """

    def __init__(self):
        """Initialize all model attributes to None.

        :param dict swagger_types: attribute name -> attribute type.
        :param dict attribute_map: attribute name -> JSON key in definition.
        """
        self.swagger_types = {
            'client': 'str',
            'client_id': 'str',
            'created': 'int',
            'id': 'str',
            'lin': 'str',
            'lock_type': 'str',
            'path': 'str',
            'range': 'list[int]'
        }

        self.attribute_map = {
            'client': 'client',
            'client_id': 'client_id',
            'created': 'created',
            'id': 'id',
            'lin': 'lin',
            'lock_type': 'lock_type',
            'path': 'path',
            'range': 'range'
        }

        self._client = None
        self._client_id = None
        self._created = None
        self._id = None
        self._lin = None
        self._lock_type = None
        self._path = None
        self._range = None

    @property
    def client(self):
        """The client host name.

        :rtype: str
        """
        return self._client

    @client.setter
    def client(self, client):
        """Sets the client host name."""
        self._client = client

    @property
    def client_id(self):
        """The client ID.

        :rtype: str
        """
        return self._client_id

    @client_id.setter
    def client_id(self, client_id):
        """Sets the client ID."""
        self._client_id = client_id

    @property
    def created(self):
        """Time of lock creation.

        :rtype: int
        """
        return self._created

    @created.setter
    def created(self, created):
        """Sets the time of lock creation."""
        self._created = created

    @property
    def id(self):
        """The lock ID.

        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the lock ID."""
        self._id = id

    @property
    def lin(self):
        """The LIN in ifs that is locked.

        :rtype: str
        """
        return self._lin

    @lin.setter
    def lin(self, lin):
        """Sets the LIN in ifs that is locked."""
        self._lin = lin

    @property
    def lock_type(self):
        """The type of lock.

        :rtype: str
        """
        return self._lock_type

    @lock_type.setter
    def lock_type(self, lock_type):
        """Sets the type of lock."""
        self._lock_type = lock_type

    @property
    def path(self):
        """The path in ifs that is locked.

        :rtype: str
        """
        return self._path

    @path.setter
    def path(self, path):
        """Sets the path in ifs that is locked."""
        self._path = path

    @property
    def range(self):
        """The byte range within the file that is locked.

        :rtype: list[int]
        """
        return self._range

    @range.setter
    def range(self, range):
        """Sets the byte range within the file that is locked."""
        self._range = range

    def to_dict(self):
        """Returns the model properties as a dict, recursing into nested
        models that themselves expose ``to_dict``."""
        result = {}

        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    x.to_dict() if hasattr(x, "to_dict") else x
                    for x in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal.

        Bug fix: previously accessed ``other.__dict__`` unconditionally,
        raising AttributeError for objects without a ``__dict__`` (e.g.
        strings, ints).  Non-model objects now compare unequal.
        """
        if not isinstance(other, NfsNlmLocksLock):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
import math
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd


# matches sb index to gate: two gates per street block (sb = gate // 2)
def match_sb(visitors):
    """Add 'sb1'/'sb2' columns mapping the gate columns 'g1'/'g2' of the
    *visitors* DataFrame to street-block indices.  Mutates and returns the
    input frame."""
    visitors['sb1'] = visitors['g1'].apply(lambda x: math.floor(x / 2))
    visitors['sb2'] = visitors['g2'].apply(lambda x: math.floor(x / 2))
    return visitors


def total_visitors_per_sb(visitors):
    """Bar-plot and return the total visitor count per destination street
    block ('sb2').

    Bug fix: this function previously ended with ``return result`` — a name
    never defined in the function (it resolved to the unrelated module-level
    gate-location list, or raised NameError).  It now returns the computed
    per-block sums.
    """
    # keep only rows with visitors; last 3 columns are [visitors, sb1, sb2]
    notnull_visitors = match_sb(visitors)[visitors.visitors > 0].iloc[:, -3:]
    sum_visitors = notnull_visitors.groupby('sb2')['visitors'].sum()
    sns.set()
    plt.bar(sum_visitors.index, sum_visitors)
    plt.show()
    return sum_visitors


def heatmap(visitors):
    """Plot and return a (sb1 x sb2) heatmap matrix of summed visitor
    counts between street blocks."""
    visitors = match_sb(visitors)
    visitors = visitors.iloc[:, -4:]
    grouped_visitors = visitors.groupby(['sb1', 'sb2']).agg('sum').reset_index()
    matrix = grouped_visitors.pivot(index='sb1', columns='sb2', values='visitors')
    plt.figure(figsize=(20, 10))
    sns.heatmap(matrix, cmap="BuPu", linewidths=.5)
    plt.show()
    return matrix


# constructs grid
def draw_gates(street_width, block_width, num_sb, visitors, centers):
    """Compute and scatter-plot the (x, y) positions of all gates on a
    square grid of ``num_sb`` street blocks (two gates per block per row).

    ``visitors`` and ``centers`` are currently unused — kept for interface
    compatibility with existing callers.  Returns the list of (x, y) gate
    locations, row by row.
    """
    gates_p_row = int(np.sqrt(num_sb) * 2)  # e.g. 6
    gates_p_col = int(np.sqrt(num_sb))      # e.g. 3
    locations = []
    for index in range(gates_p_col):
        street_counter = 1
        block_counter = 0
        for index2 in range(gates_p_row):
            if index == 0 and index2 == 0:
                # first gate of the first row sits just past the first street
                locations.append(tuple([street_width, block_width / 2 + street_width]))
            elif index2 % 2 != 0 and index == 0:
                # odd gate in first row: advance one block, then one street
                block_counter += 1
                locations.append(tuple(
                    [block_counter * block_width + street_counter * street_width,
                     block_width / 2 + street_width]))
                street_counter += 1
            elif index2 % 2 != 0 and index != 0:
                # odd gate in later rows: same x logic, y offset by row index
                block_counter += 1
                locations.append(tuple(
                    [block_counter * block_width + street_counter * street_width,
                     block_width / 2 + street_width + index * (block_width + street_width)]))
                street_counter += 1
            elif index2 % 2 == 0 and index == 0:
                locations.append(tuple(
                    [block_counter * block_width + street_counter * street_width,
                     block_width / 2 + street_width]))
            else:  # index2 % 2 == 0 and index != 0
                locations.append(tuple(
                    [block_counter * block_width + street_counter * street_width,
                     block_width / 2 + street_width + index * (block_width + street_width)]))

    sns.set()
    plt.figure(figsize=(100, 100))
    plt.xlim(0, 4000)
    plt.ylim(0, 4000)
    plt.plot(20, 0)
    for x, y in locations:
        plt.scatter(x, y)
    plt.show()
    return locations


result = draw_gates(20, 1000, 9, 0, 0)
print(result)
# Copyright (c) 2020 Adam Souzis
# SPDX-License-Identifier: MIT
import six
from ..configurator import Configurator, Status
from ..result import ResultsMap
from ..util import registerShortNames

# need to define these now because these configurators are lazily imported
# and so won't register themselves through AutoRegisterClass
registerShortNames(
    {
        name: "unfurl.configurators.%s.%sConfigurator" % (name.lower(), name)
        for name in "Ansible Shell Supervisor Terraform".split()
    }
)


class TemplateConfigurator(Configurator):
    """Configurator whose run result is produced by evaluating the task's
    "run"/"dryrun" inputs and an optional "resultTemplate" expression."""

    def processResultTemplate(self, task, result):
        """Evaluate the task's "resultTemplate" input against *result* and
        apply any resource updates it yields.

        for both the ansible and shell configurators
        result can include: "returncode", "msg", "error", "stdout", "stderr"
        Ansible also includes "outputs"
        """
        # get the resultTemplate without evaluating it
        resultTemplate = task.inputs._attributes.get("resultTemplate")
        if resultTemplate:  # evaluate it now with the result
            # a bare string is treated as a jinja template expression
            if isinstance(resultTemplate, six.string_types):
                query = dict(template=resultTemplate)
            else:
                query = resultTemplate

            # workaround for jinja template processing setting Result when getting items
            if not isinstance(result, ResultsMap):
                vars = ResultsMap(result, task.inputs.context)
                vars.doFullResolve = True
            else:
                vars = result

            results = task.query({"eval": query}, vars=vars)
            if results:
                task.updateResources(results)

    def canDryRun(self, task):
        # dry run is supported only when a "dryrun" input was provided
        # (double negation coerces the input to a bool)
        return not not task.inputs.get("dryrun")

    def run(self, task):
        """Produce the task result from the "run" (or "dryrun") input,
        feed it through the result template, then finish with the "done"
        input's keyword arguments.  Generator-based task protocol."""
        if task.dryRun:
            runResult = task.inputs.get("dryrun")
            # a non-dict "dryrun" input is just a flag; fall back to "run"
            if not isinstance(runResult, dict):
                runResult = task.inputs.get("run")
        else:
            runResult = task.inputs.get("run")
        done = task.inputs.get("done", {})
        if "result" not in done:
            # non-dict results are wrapped so the result is always a mapping
            if not isinstance(runResult, dict):
                done["result"] = {"run": runResult}
            else:
                done["result"] = runResult
        self.processResultTemplate(task, done.get("result"))
        yield task.done(**done)


class DelegateConfigurator(Configurator):
    """Configurator that delegates its work to a subtask running the
    operation named in the "operation" input (optionally on "target")."""

    def canDryRun(self, task):
        return True  # ok because this will also be called on the subtask

    def run(self, task):
        """Create and yield a subtask for the configured operation, then
        yield the subtask's result as this task's result."""
        subtaskRequest = task.createSubTask(
            task.inputs["operation"], task.inputs.get("target")
        )
        assert subtaskRequest
        # note: this will call canRun() and if needed canDryRun() on subtask but not shouldRun()
        subtask = yield subtaskRequest
        yield subtask.result
# coding: utf-8
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import RSLPStemmer
import concurrent.futures
import string
import numpy


class LSA:
    """Latent Semantic Analysis over Portuguese utterances.

    Builds a TF-IDF vocabulary from *phrases*, concatenates utterance and
    keyword TF-IDF matrices, and reduces them with truncated SVD keeping
    enough components to cover a fraction *p_eig* of the (normalized)
    singular-value mass.
    """

    def __init__(self, ngram_max, min_freq, p_eig, phrases):
        """
        :param ngram_max: upper bound of the n-gram range (lower bound is 1)
        :param min_freq: min document frequency for the utterance vocabulary
        :param p_eig: fraction (0..1] of singular-value mass to keep
        :param phrases: corpus used to fit the utterance vocabulary
        """
        self.ngram_max = ngram_max
        self.min_freq = min_freq
        self.p_eig = p_eig
        self.ngram_min = 1
        self.stopwords = stopwords.words("portuguese")
        self.stopwords.append('é')
        # Perf fix: a single stemmer instance; previously a new RSLPStemmer
        # was constructed for every token inside tokenize().
        self.stemmer = RSLPStemmer()
        self.phrases = phrases
        self.features_utterance = self.get_features_utterance()
        print("Parameters: Min_freq =", min_freq,"NGram_max =", ngram_max, "P_eig =", p_eig*100)

    @staticmethod
    def normalizer(x_abnormal):
        """Min-max normalize an array to [0, 1]; constant arrays are
        returned unchanged to avoid division by zero."""
        minimum = x_abnormal.min()
        maximum = x_abnormal.max()
        if minimum == maximum:
            return x_abnormal
        return (x_abnormal - minimum) / (maximum - minimum)

    def tokenize(self, t):
        """Lowercase, tokenize, drop stopwords/punctuation and stem *t*.
        A *t* that is itself a stopword yields no tokens."""
        if t in self.stopwords:
            return []
        words = word_tokenize(t.lower())
        # (the original built the same list twice via an extra copy loop)
        return [self.stemmer.stem(word.lower())
                for word in words
                if word not in self.stopwords and word not in string.punctuation]

    def manage_keywords(self, keywords):
        """Tokenize/stem each keyword (multi-token keywords become one
        space-joined entry) and return them deduplicated, order preserved."""
        tokens = []
        for i in keywords:
            t = self.tokenize(i)
            if len(t) > 1:
                tokens.append(' '.join(t))
            else:
                tokens.extend(t)
        vocabulary = []
        for i in tokens:
            if i not in vocabulary:
                vocabulary.append(i)
        return vocabulary

    def get_features_utterance(self):
        """Fit a TF-IDF vectorizer on the corpus and return its feature
        names (the utterance vocabulary)."""
        vec = TfidfVectorizer(min_df=self.min_freq, stop_words=self.stopwords,
                              tokenizer=self.tokenize,
                              ngram_range=(self.ngram_min, self.ngram_max))
        vec.fit_transform(self.phrases)
        # NOTE(review): get_feature_names() is removed in scikit-learn >= 1.2
        # in favour of get_feature_names_out(); kept for the pinned version.
        return vec.get_feature_names()

    def tf_idf(self, examples, keywords):
        """TF-IDF matrix of *examples* restricted to the fixed vocabulary
        *keywords*; returned dense."""
        vec = TfidfVectorizer(stop_words=self.stopwords, vocabulary=keywords,
                              tokenizer=self.tokenize,
                              ngram_range=(self.ngram_min, self.ngram_max))
        x = vec.fit_transform(examples)
        return x.todense()

    def eliminate_dimensions(self, tfidf):
        """Low-rank reconstruction of *tfidf*: keep the smallest number of
        singular components whose normalized singular values sum to at
        least p_eig, then project back to the original space."""
        eigen = numpy.linalg.svd(tfidf, compute_uv=False)
        normalized_eigenvalues = eigen / numpy.sum(eigen)
        res = 0
        for i in range(0, len(eigen)):
            res += normalized_eigenvalues[i]
            if res >= self.p_eig:
                svd = TruncatedSVD(n_components=(i + 1), algorithm="arpack", tol=0)
                svd.fit(tfidf)
                u = svd.transform(tfidf)
                return numpy.matrix.dot(u, svd.components_)
        # Robustness fix: float round-off can keep the cumulative sum just
        # below p_eig when p_eig == 1.0; the original then fell off the loop
        # and returned None.  Full rank reconstructs the input, so return it.
        return tfidf

    def process_phrase(self, index, keywords, set_):
        """Compute the LSA vector of one phrase of *set_* against the corpus
        and store it via set_.set_lsa_result."""
        examples = []
        examples.extend(self.phrases)
        examples.append(set_.phrases[index])
        tfidf_utterance = numpy.array(self.tf_idf(examples, self.features_utterance))
        tfidf_keywords = numpy.array(self.tf_idf(examples, keywords))
        x = numpy.round(self.eliminate_dimensions(
            numpy.concatenate([tfidf_utterance, tfidf_keywords], axis=1)), 10)
        # the appended phrase is the last row of the reconstruction
        set_.set_lsa_result(index, x[-1])

    def process_examples(self, keywords, set_):
        """Run process_phrase over every phrase of *set_* on a small thread
        pool; the with-block joins all workers before returning."""
        with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
            for i in range(len(set_.phrases)):
                executor.submit(self.process_phrase, i, keywords, set_)
        return executor

    def train_lsa(self, keywords, set_):
        """Compute the reduced LSA matrix for all phrases of *set_* at once
        and store each row via set_.set_lsa_result."""
        tfidf_utterance = numpy.array(self.tf_idf(set_.phrases, self.features_utterance))
        tfidf_keywords = numpy.array(self.tf_idf(set_.phrases, keywords))
        x = numpy.round(self.eliminate_dimensions(
            numpy.concatenate([tfidf_utterance, tfidf_keywords], axis=1)), 10)
        for i in range(len(set_.phrases)):
            set_.set_lsa_result(i, x[i])
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC, SVR
import math
import Regression as regression

# Number of rows held out at the end of the frame and predicted on.
forecast_days = 10

# NOTE(review): this module-level pipeline is never trained — fit() builds
# its own.  Kept so any external importers of `svm_clf` keep working.
svm_clf = Pipeline([
    ("scaler", StandardScaler()),
    ("linear_svc", LinearSVC(C=1, loss="hinge"))
])


def fit(df: pd.DataFrame):
    """Train a linear SVM classifier on all but the last `forecast_days`
    rows of *df* and return its predictions for those final rows.

    Side effect: (re)writes df['label'] via createLabel.
    Prints the prediction and the held-out test score.
    """
    df['label'] = df.apply(createLabel, axis=1)
    # Bug fix: `df.drop(['label'], 1)` used the positional `axis` argument,
    # which was deprecated and removed in pandas 2.0 — use the keyword form.
    X_temp = np.array(df.drop(columns=['label']))
    X = X_temp[:-forecast_days]
    X_lately = X_temp[-forecast_days:]
    y = np.array(df['label'])[:-forecast_days]

    X_train, X_test, y_train, y_test = regression.get_train_test_split(X, y, 0.2)
    # sanity checks on the split shapes
    assert len(X_train) == len(y_train)
    assert len(X_test) == len(y_test)

    # local name `clf` avoids shadowing the module-level `svm_clf`
    clf = Pipeline([
        ("scaler", StandardScaler()),
        ("linear_svc", LinearSVC(C=1, loss="hinge"))
    ])
    clf.fit(X_train, y_train)
    print("Svm : " + str(clf.predict(X_lately)))
    print("Score: " + str(clf.score(X_test, y_test)))
    return clf.predict(X_lately)


def createLabel(x):
    """Map a row to +1 (existing label above 'Adj Close'), -1 otherwise,
    or NaN when the label is NaN.

    NOTE(review): reads x['label'], so the frame must already carry a
    'label' column (e.g. a raw target from upstream preprocessing) before
    fit() overwrites it — TODO confirm with the caller.
    """
    # You can create a label based on
    # 1. Max (High price) for next 10 days
    # 2. Average of (High-low) for next 10 days
    if math.isnan(x['label']):
        return np.nan
    elif x['label'] > x['Adj Close']:
        return 1
    else:
        return -1
import sys import glob import collections import os import math input_path = sys.argv[1] file_list = glob.glob(os.path.join(input_path,'*/*/*/*.txt')) training_data = collections.defaultdict(list) for file in file_list: class1, class2, fold, file_name = file.split("/")[-4:] training_data[class1 + class2].append(file) def new_word_format(word): new_word = word.lower().strip() char_list = [] for char in new_word: if char not in ". , /": char_list.append(char) return "".join(char_list) def read_words_from_files(sentiment, file_data): word_dict = {} word_counter = 0 num_file = 0 for key in file_data: if sentiment in key: file_list = file_data[key] for file_text in file_list: num_file += 1 current_file = open(file_text, "r") for line in current_file: word_list = line.split(" ") for word in word_list: word = new_word_format(word) word_counter += 1 word_dict[word] = word_dict.get(word, 0) + 1 current_file.close() return word_dict, word_counter, num_file # ...WordDict{[word]:this word's number}, num...Word int(), num...File int() word_dict_total, num_word_toal, num_file_total = read_words_from_files("",training_data) word_dict_positive, num_word_positive, num_file_positive = read_words_from_files("positive", training_data) word_dict_negative, num_word_negative, num_file_negative = read_words_from_files("negative", training_data) word_dict_truthful, num_word_truthful, num_file_truthful = read_words_from_files("truthful", training_data) word_dict_deceptive, num_word_deceptive, num_file_deceptive = read_words_from_files("deceptive", training_data) abs_V = len(word_dict_total) # logprior c log_prior_positive = math.log(num_file_positive/num_file_total) log_prior_negative = math.log(num_file_negative/num_file_total) log_prior_truthful = math.log(num_file_truthful/num_file_total) log_prior_deceptive = math.log(num_file_deceptive/num_file_total) #print(log_prior_positive, log_prior_negative, log_prior_truthful, log_prior_deceptive) # calculate p(w/c) OR loglikelihood[w, c] def 
likelihood(word_dict_total, word_dict_sentiment, num_word_sentiment, abs_V): likelihood_dict = {} for word in word_dict_total: likelihood_dict[word] = math.log((word_dict_sentiment.get(word, 0) + 1)/(num_word_sentiment+abs_V)) return likelihood_dict likelihood_positive = likelihood(word_dict_total, word_dict_positive, num_word_positive, abs_V) likelihood_negative = likelihood(word_dict_total, word_dict_negative, num_word_negative, abs_V) likelihood_truthful = likelihood(word_dict_total, word_dict_truthful, num_word_truthful, abs_V) likelihood_deceptive = likelihood(word_dict_total, word_dict_deceptive, num_word_deceptive, abs_V) model_file = "nbmodel.txt" fileOut = open(model_file, "w") line = ",".join([str(log_prior_positive),str(log_prior_negative), str(log_prior_truthful), str(log_prior_deceptive)]) fileOut.writelines(line + "\n") for word in likelihood_positive: elementList = ["positive", word, str(likelihood_positive[word])] line = ",".join(elementList) fileOut.writelines(line + "\n") for word in likelihood_negative: elementList = ["negative", word, str(likelihood_negative[word])] line = ",".join(elementList) fileOut.writelines(line + "\n") for word in likelihood_truthful: elementList = ["truthful", word, str(likelihood_truthful[word])] line = ",".join(elementList) fileOut.writelines(line + "\n") for word in likelihood_deceptive: elementList = ["deceptive", word, str(likelihood_deceptive[word])] line = ",".join(elementList) fileOut.writelines(line + "\n") fileOut.close()
import boto3 def create_efs(token_name): """ A function to create efs file system """ conn = boto3.client('efs', region_name='ap-south-1') # create the file system response = conn.create_file_system( CreationToken=token_name, PerformanceMode='generalPurpose', Encrypted=False, ThroughputMode='bursting', ) # get the created file system info print(response) create_efs('tok1')
from bs4 import BeautifulSoup import requests from sqlalchemy import create_engine, Table, Column, Integer, String from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base name = 'postgres' database = 'stops' password = '1' db = create_engine('postgres://' + name + ':' + password + '@localhost:5432/' + database) db.execute('DROP TABLE IF EXISTS stops_list;') db.execute("CREATE TABLE stops_list(id NUMERIC CONSTRAINT stops_list_pk PRIMARY KEY," "name VARCHAR(50) CONSTRAINT list_name_nn NOT NULL," "stop_id NUMERIC CONSTRAINT list_stop_gz CHECK (stop_id > 0)," "city VARCHAR(30) CONSTRAINT list_city_nn NOT NULL," "link TEXT CONSTRAINT list_link_nn NOT NULL);") Base = declarative_base() class Stop(Base): __tablename__ = 'stops_list' id = Column(Integer, primary_key=True) name = Column(String) stop_id = Column(Integer) city = Column(String) link = Column(String) def __repr__(self): return "<Stop(id ='%s' name='%s', stop_id='%s', city='%s', link = '%s)>" % (self.id, self.name, self.stop_id, self.city, self.link) page = 'http://www.ztm.waw.pl/' r = requests.get('http://www.ztm.waw.pl/rozklad_nowy.php?c=183&l=1') r.encoding = 'utf-8' soup = BeautifulSoup(r.text, "html.parser") stops_list = soup.findAll('div', {'class': 'PrzystanekList'}) session = sessionmaker(bind=db) Session = session() i = 1 for stops in stops_list: for stop in stops: for sto in stop: if len(sto) > 1: print(i, sto.contents[0], sto.get('href')[-4:], sto.find('em').contents[0][1:-1], sto.get('href')) tStop = Stop(id=i, name=sto.contents[0], stop_id=sto.get('href')[-4:], city=sto.find('em').contents[0][1:-1], link=page+sto.get('href')) Session.add(tStop) i += 1 Session.commit()
import os import random import subprocess import sys ORACLE = "./oracle.out" # known working solution to produce expected output TARGET = "./target.out" # unreliable solution to test sys.setrecursionlimit(10 ** 9) def gen(): # returns a list of lines # to retry, return gen() recursively return [] def run(sol, lines): input_bytes = bytes(os.linesep.join(lines), "utf8") p = subprocess.Popen(sol, stdin=subprocess.PIPE, stdout=subprocess.PIPE) got, _ = p.communicate(input=input_bytes) return str(got, "utf8") cases = 0 while True: generated = gen() want, got = run(ORACLE, generated), run(TARGET, generated) if want != got: print("wrong output") print("input:") print("\n".join(generated)) print() print("expected:") print(want) print() print("got:") print(got) break cases += 1 if cases % 100 == 0: print(f"ran {cases} cases")
import sys import argparse import os import re MEM_FILE_NAME = 'memdump0.mem' # por consola(?) NEW_FILE_NAME = 'newtestcase.v' def generar_dump(verilog_f): f = open(MEM_FILE_NAME, 'w+') verilog_f.seek(0) verilog_txt = verilog_f.read() # Busca en el archivo original y devuelve los aciertos. El tercer grupo es el bloque donde se inicializa la memoria. # Si no hubo acierto, error de indice, entonces cierra el archivo. matches = re.findall(r' reg \[(.*)\] (\S*) \[(.*)\];\n initial begin\n(( \S*\[\S*\] = \S*;\n)*) end\n', verilog_txt) try: matches = matches[0][3] except IndexError: f.close() os.remove(MEM_FILE_NAME) verilog_f.close() sys.exit('Error en el archivo verilog') # Copia solo los valores al nuevo archivo. matches = matches.split('\n') for match in matches: try: match = re.search(r'\'\S(.*);', match).group(1) f.write(match+'\n') except AttributeError: continue f.close() def generar_nueva_sintaxis(verilog_f): # Abro el nuevo archivo a generar y guardo cada linea de texto del original f = open(NEW_FILE_NAME, 'w+') verilog_f.seek(0) verilog_txt = verilog_f.read().split('\n') rplcmnt = ' $readmemh("%s", mem);\n'%MEM_FILE_NAME # Itero sobre cada linea del texto original y va copiandolas al nuevo archivo; Si encuentra la sentencia tipo reg [] mem [], # entonces entra en el bloque de codigo de iniciacion de memoria. Lo primero que hace es copiar la nueva sentencia # y luego se saltea todo el bloque desde initial begin hasta end. mem_block = False for line in verilog_txt: if re.match(r' reg \[(.*)\] (\S*) \[(.*)\];', line) or mem_block: mem_block = True if re.match(r' initial begin', line) or re.match(r' \S*\[\S*\] = \S*;', line): continue if re.match(r' end', line): mem_block = False continue else: f.write(line + '\n' + rplcmnt) else: f.write(line + '\n') f.close() def expected(): # Valida si los nuevos archivos generados son iguales a los esperados en la carpeta expected. 
#with open(os.path.join(os.getcwd(), 'expected/expected.v')) as exp_f: with open('expected/expected.v') as exp_f, open(NEW_FILE_NAME) as f, open('expected/memdump0.mem') as expm_f, open(MEM_FILE_NAME) as fm: if f.read() == exp_f.read() and fm.read() == expm_f.read(): return True else: #os.remove(MEM_FILE_NAME) #os.remove(NEW_FILE_NAME) return False def main(file): try: verilog_f = open(file, 'r') except FileNotFoundError: sys.exit('Archivo no encontrado') generar_dump(verilog_f) generar_nueva_sintaxis(verilog_f) #assert expected() verilog_f.close() if __name__ == '__main__': args_parser = argparse.ArgumentParser() args_parser.add_argument('file', metavar = 'Nombre del archivo verilog a modificar') args = args_parser.parse_args() main(args.file)
# A variável __ name __ representa o nome do módulo. # Entretanto, quando o módulo é executado por si só como um programa __name__ é definido para main # diferente de quando o módulo é importado, no qual o valor fica de fato igual ao nome do módulo def testa_name() : print("Variável name contém:", __name__) # O if é muito utilizado quando seu programa é todo executado através de funções if(__name__ == "__main__"): testa_name()
def build_profile(first,last,**user_info): profile={} profile['first_name']=first profile['last_name']=last for key,value in user_info.items(): profile[key]=value return profile user_profile=build_profile("Harry","Potter",city='Beijing',country="China") print(user_profile)
""" This script preprocesses the 2020 NYC 311 data to allow for increased dashboard refresh speed """ import pandas as pd # read in cleaned 2020 data nyc_2020=pd.read_csv("../nyc_2020_final.csv") #create list of all zipcode unique_zips=list(nyc_2020['zipcode'].unique()) #remove nan from menu list del unique_zips[3] #convert all zipcodes into strings ans sort unique_zips = sorted([ str(int(x)) for x in unique_zips ]) #calculate averages of all zipcodes and store in a dictionary zip_avgs_dict ={} for z in unique_zips: df=nyc_2020.groupby('zipcode').get_group(float(z)) cur_zip=[] for i in range(1,10): try: cur_zip.append(df.groupby('month').get_group(i)['response_time'].mean()) except KeyError: cur_zip.append(0) zip_avgs_dict[z]=cur_zip # save df of monthly averages by zipcode pre_df=pd.DataFrame.from_dict(zip_avgs_dict) pre_df.to_csv('preprocessed.csv', index=False) # calculate overall response time averages by month avg_2020={} monthly_avgs=[] for i in range(1,10): monthly_avgs.append(nyc_2020.groupby('month').get_group(i)['response_time'].mean()) avg_2020['2020']=monthly_avgs df_2020=pd.DataFrame.from_dict(avg_2020) # save df of overall monthly averages df_2020.to_csv('all_avgs.csv', index=False)
# Copyright 2021 Richard Behan-Howell # Written by Richard Behan-Howell # All rights reserved from ..common import db, Field, T, auth import json from datetime import datetime from yatl.helpers import SCRIPT, XML import re from py4web import action, request, abort, redirect, URL, Field, DAL, response import os from .. import settings ########################################################### ### USAGE ########################################################## # Put this file in /<project folder>/libs/ # Recomended to use a format field correctly set in reference tables. E.g. format='%(name)s', # in the controller, use this format, for example a video table. """ MODELS.PY: ######################################## db.define_table('group', Field('name', type='string', unique=True, label=T('Name')), Field('f_desc', type='string', label=T('Description')), format='%(name)s', ) db.define_table('t_video', Field('f_name', type='string', label=T('Name')), Field('f_desc', type='string', label=T('Description')), Field('group', type='reference group', label=T('Group')), Field('f_vimeoid', type='string', label=T('VimeoID')), format='%(f_name)s', ) CONTROLLER.PY: ######################################## from .libs.datatables_API import dteditor_data @action("video", method=["GET", "POST"]) @action.uses(session, db, auth.user, "videos.html") def video(): page_title = 'Videos' page_subtitle = 'Videos' return dict(page_title=page_title, page_subtitle= page_subtitle) @action('video_data', method=['GET', 'POST']) @action.uses(session, db, auth.user) def video_data(): table = db.t_video return dteditor_data(table, request) TEMPLATE: questions.html: You need to lay out the datatables in javascript, E.G. 
Download and install Editor ######################################## <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs4-4.1.1/jq-3.3.1/jszip-2.5.0/dt-1.10.24/af-2.3.5/b-1.7.0/b-colvis-1.7.0/b-html5-1.7.0/b-print-1.7.0/cr-1.5.3/date-1.0.3/fc-3.3.2/fh-3.1.8/kt-2.6.1/r-2.2.7/rg-1.1.2/rr-1.2.7/sc-2.0.3/sb-1.0.1/sp-1.2.2/sl-1.3.3/datatables.min.css"/> <link rel="stylesheet" type="text/css" href="DataTables/Editor-2.0.1/css/editor.bootstrap4.css"/> <div class="container-fluid"> <div class="row"> <div class="col-sm w-98 p-2 border"> <h1>questions</h1> <table id="questions" class="table table-bordered table-sm table-hover" style="width:100%"> <thead> <tr > <th data-toggle="tooltip" data-placement="top" title="Name">Name</th> <th data-toggle="tooltip" data-placement="top" title="Description">Desc</th> <th data-toggle="tooltip" data-placement="top" title="Group">Group</th> <th data-toggle="tooltip" data-placement="top" title="Picture">Picture</th> </tr> </thead> </table> </div> </div> </div> <script> var editor; // use a global for the submit and return data rendering in the examples $(document).ready(function() { /////////////////////////////////////////////////////// //question /////////////////////////////////////////////////////// var questionEditor = new $.fn.dataTable.Editor ({ ajax: { url: "/MindTrax/question_data", data: function (d) { var selected = questionTable.row({selected: true}); if (selected.any()) { d.question = selected.data().t_question.id; } } }, table: '#questions', idSrc: "t_question.id", fields: [ { label: "Name:", name: "t_question.f_name", }, { label: "Desc:", name: "t_question.f_desc", }, { label: "group:", name: "group.name", type: "select", }, { label: "Picture:", name: "t_question.picture", type: "upload", display: function (file_id) { return file_id ? 
'<img src="'+ '/MindTrax/downloadfile/t_question_'+ file_id.toString() + '.png" width="25" height="25">' : null; }, }, ] } ); var questionTable = $('#questions').DataTable ( { dom: 'BfrtipQ', ajax: "/MindTrax/question_data", idSrc: "t_question.id", columns: [ { data: 't_question.f_name', title: "Name", }, { data: 't_question.f_desc', title: "Description", }, { data: "group.name" }, { data: "t_question.picture", render: function ( file_id ) { return file_id ? '<img src="/MindTrax/downloadfile/t_question_' + file_id.toString() + '.png" width="25" height="25">' : null; }, defaultContent: "No image", title: "Image", }, ], select: { style: 'single' }, buttons: [ { extend: 'create', editor: questionEditor }, { extend: 'edit', editor: questionEditor }, { extend: 'remove', editor: questionEditor }, ], } ); }); </script> <script type="text/javascript" src="https://cdn.datatables.net/v/bs4-4.1.1/jq-3.3.1/jszip-2.5.0/dt-1.10.24/af-2.3.5/b-1.7.0/b-colvis-1.7.0/b-html5-1.7.0/b-print-1.7.0/cr-1.5.3/date-1.0.3/fc-3.3.2/fh-3.1.8/kt-2.6.1/r-2.2.7/rg-1.1.2/rr-1.2.7/sc-2.0.3/sb-1.0.1/sp-1.2.2/sl-1.3.3/datatables.min.js"></script> <script type="text/javascript" src="DataTables/Editor-2.0.1/js/dataTables.editor.js"></script> <script type="text/javascript" src="DataTables/Editor-2.0.1/js/editor.bootstrap4.js"></script> """ ################################### #Helper functions ################################### def format(table, row): if not row: return T('Unknown') elif isinstance(table._format, str): return table._format % row elif callable(table._format): return table._format(row) else: return '#' + str(row.id) def plural(table): return table._plural or pluralize(table._singular.lower()).capitalize() def right(s, amount): if s == None: return None elif amount == None: return None # Or throw a missing argument error s = str(s) if amount > len(s): return s elif amount == 0: return "" else: return s[-amount:] ################################### # table_format_reference_list(table): # # 
Pass in a table e.g. db.group # Assumes that the table has a format field correctly set. E.g. format='%(name)s', # Generates a JSON list of lookup references from a table. format and id. # E.G. [{'label': 'Inadequate', 'value': 2}, {'label': 'Confused2', 'value': 3}, {'label': 'Hurt', 'value': 4}] # ################################### def table_format_reference_list(table): #print("table list: "+ table._tablename+ "\r") #print("table format:" + db[table]._format) list = [] query = (table.id > 0) if db(table).isempty(): data = ["empty"] else: if db(table.id > 0).count() > 0: for z in db(query).select(): list.append({'label': format(table, z), 'value': z.id}) print("list:" + str(list)) return list """ def record_data(table, z): if z: return { "DT_RowId":"row_" + str(z.id), table._tablename: { "id":z.id, "name":z.name, "code":z.code, } } else: return [] """ ################################### # files_record_data(table, z): # # Pass in a table e.g. db.group and row # Assumes that the table has a format field correctly set. E.g. format='%(name)s', #################################### def files_record_data(table, z): if z: return { str(z.id): { "id": str(z.id), "name": format(table, z), "filename": z.filename, "filesize": z.filesize, "web_path": z.web_path, "system_path": z.system_path, } } else: return [] ################################################################################################### ## OPTIONS #################################################################################################### def generic_options(table): # Takes a Table, finds any reference fields and populates a dictioanry of the [possible names and id for each reference field. 
#return: {"grower.name":table_format_reference_list(db.grower),"variety.name":table_format_reference_list(db.variety),"branch.name":table_format_reference_list(db.branch)} fields = [dict(name=f.name, type=f.type) for f in table if f.readable] optiondict = {} for f in fields: if 'reference' in f.get('type'): # get the name of the Field. I.e. grower_id fieldname = f.get('type').split()[0] # get the name of the reference table. I.e. Grower reftable = f.get('type').split()[1] #Get a list of name and id from that table using helper function table_format_reference_list #print("reftable.format: " + str(db[reftable]._format) + '\r') for t in db: if reftable == t._tablename and reftable != 'upload' and reftable != 'auth_user': print("t._tablename: " + str(t._tablename) + '\r') print("table: " + str(db[reftable]._tablename) + '\r') # print("%(name)s" % db[reftable].name) optiondict.update({f.get('name')+str('.name'): table_format_reference_list(db[reftable])}) # {reftable: {'name': getattr(z, f.get('name')).name if getattr(z, f.get('name')) else 'Null'}}) elif reftable == t._tablename and reftable == 'auth_user': print("t._tablename: " + str(t._tablename) + '\r') print("table: " + str(db[reftable]._tablename) + '\r') optiondict.update({f.get('name') + str('.username'): table_format_reference_list(db[reftable])}) # {reftable: {'name': getattr(z, f.get('name')).name if getattr(z, f.get('name')) else 'Null'}}) print("options tablelist: " + str(optiondict) + '\r') return optiondict ############################################################################ # generic_record_data ############################################################################# def generic_record_data(table, z): fieldlist = [] if z: fields = [dict(name=f.name, type=f.type) for f in table if f.readable] record = {} reflist = [] for f in fields: #finalise all types of fields if (f.get('type') == "datetime"): try: record.update({f.get('name'): z[f.get('name')].strftime('%Y-%m-%d')}) except: 
record.update({f.get('name'): ""}) elif f.get('type') == "boolean": record.update({f.get('name'): '1' if getattr(z, f.get('name')) else '0'}) elif 'reference' in f.get('type'): record.update({f.get('name'): getattr(z, f.get('name'))}) reftable = f.get('type').split()[1] # grower=dict(name=z.grower_id.name if z.grower_id else 'Null'), f.get('name') reflist.append( {f.get('name'): {'name': getattr(z, f.get('name')).name if getattr(z, f.get('name')) else 'Null'}}) else: record.update({f.get('name'): getattr(z, f.get('name'))}) #print("record: " + str(record) + '\r') #print("reflist: " + str(reflist) + '\r') data = {} data.update({'DT_RowId':'row_' + str(z.id), table._tablename: record}) #Iterate through all reference fields and fill in their "name option. for ref in reflist: data.update(ref) return data else: return [] def dteditor_data(table, request, query=None): """ datatables.net makes an ajax call to this method to get the data 1. read Send back JSON about the table with all records and the referenced fields in the format: 2. create 3. edit 4. remove :return: """ print("dteditor_data request method" + str(request.method)) data = [] files = [] files_dict = {} option = [] if query is None: query = (table.id > 0) if request.method == 'GET': #iterate through any fields here that are reference fields. 
options = generic_options(table) #{"grower.name":table_format_reference_list(db.grower),"variety.name":table_format_reference_list(db.variety),"branch.name":table_format_reference_list(db.branch)} if db(table).isempty(): data = [] files = [] else: if db(table.id > 0).count() > 0: data = [generic_record_data(table,z) for z in db(query).select()] for z in db(db.upload.table == table).select(): files_dict.update(files_record_data(db.upload,z)) files = {"files": files_dict} #formulate the response: return json.dumps(dict(data=data,options=options,files=files), indent=4, sort_keys=False, default=str) elif request.method == 'POST': action = request.forms.get("action") print("action: " +str(action)) if action=="create": kwargs = {} #Need to iterate through the table fields looking for data for each, if nothing exists then ignore it, if it does then set it. fields = [dict(name=f.name, type=f.type) for f in table if f.readable] for f in fields: # finalise for all types of fields # datetime, date, time if (f.get('type') == "datetime") or (f.get('type') == "date") or (f.get('type') == "time"): for key in request.forms.keys(): if "data[0]["+table._tablename+"][" + f.get('name') + "]" in key: print("if " + "data[0]["+table._tablename+"][" + f.get('name') + "]" + " in key") print("True key:" + key) try: dt = datetime.strptime(request.forms.get("data[0]["+table._tablename+"][" + f.get('name') + "]"), '%Y-%m-%d').date() kwargs.update({f.get('name'): dt}) except: print("Datetime format error:" + request.forms.get("data[0]["+table._tablename+"][" + f.get('name') + "]")) pass elif (f.get('type') == "boolean"): for key in request.forms.keys(): if "data[0]["+table._tablename+"][" + f.get('name') + "]" in key: print("if " + "data[0]["+table._tablename+"][" + f.get('name') + "]" + " in key") print("True key:" + key) try: kwargs.update({f.get('name'): True if ( request.forms.get("data[0]["+table._tablename+"][" + f.get('name') + "]") == "1") else False}) except: pass elif 'reference' in 
f.get('type'): # reftable = f.get('type').split()[1] for key in request.forms.keys(): if "data[0][" + f.get('name') + "][name]" in key: print("if " + "data[0][" + f.get('name') + "][name]" + " in key") print("True key:" + key) try: kwargs.update({f.get('name'): int(request.forms.get("data[0][" + f.get('name') + "][name]"))}) except: pass #Integers, elif ((f.get('type') == "blob") or (f.get('type') == "integer") or (f.get('type') == "double") or (f.get('type') == "bigint") or ('decimal' in f.get('type'))): # blob, integer, double, decimal(n, m), bigint for key in request.forms.keys(): if "data[0]["+table._tablename+"][" + f.get('name') + "]" in key: print("if " + "data[0]["+table._tablename+"][" + f.get('name') + "]" + " in key") print("True key:" + key) try: kwargs.update({f.get('name'): int(request.forms.get("data[0]["+table._tablename+"][" + f.get('name') + "]"))}) except: pass else: #Stinrg, Text, password, json for key in request.forms.keys(): if "data[0]["+table._tablename+"][" + f.get('name') + "]" in key: print("if " + "data[0]["+table._tablename+"][" + f.get('name') + "]" + " in key") print("True key:" + key) try: kwargs.update({f.get('name'): request.forms.get("data[0]["+table._tablename+"][" + f.get('name') + "]")}) except: pass print("kwargs:" + str(kwargs)+"\r\r") #grower_id = request.forms.get("data[0][grower_id][name]") #variety_id = request.forms.get("data[0][variety_id][name]") #branch_id = request.forms.get("data[0][branch_id][name]") #commencement_dt = datetime.strptime(request.forms.get("data[0][growercontract][commencement_dt]"), '%Y-%m-%d').date() #expiry_dt = datetime.strptime(request.forms.get("data[0][growercontract][expiry_dt]"), '%Y-%m-%d').date() #name = db.grower[grower_id].name + ',' + db.variety[variety_id].name + ',' + db.branch[branch_id].name #contract_id = request.forms.get("data[0][growercontract][contract_id]") #contractsigned = True if (request.forms.get("data[0][growercontract][contractsigned]")=="1") else False #Create new #Do 
other specific error checking here. newid = table.insert(**kwargs) """ newid = table.insert( name = name, grower_id=grower_id, variety_id=variety_id, branch_id=branch_id, commencement_dt=commencement_dt, expiry_dt=expiry_dt, contract_id=contract_id, contractsigned=contractsigned, ) """ print("newid: "+str(newid)) db.commit() z = db(table.id == int(newid)).select().first() data = [generic_record_data(table,z)] return json.dumps(dict(data=data), indent=4, sort_keys=True, default=str) elif action == "edit": id = 0 for key in request.forms.keys(): if "data" in key: try: a = key.split('[') #print(str(a)) id = int(a[1].strip("][")) print("id:"+str(id)) break except: return #id = request.forms.get("growercontract") kwargs = {} # Need to iterate through the table fields looking for data for each, if nothing exists then ignore it, if it does then set it. fields = [dict(name=f.name, type=f.type) for f in table if f.readable] for f in fields: # finalise for all types of fields # datetime, date, time if (f.get('type') == "datetime") or (f.get('type') == "date") or (f.get('type') == "time"): for key in request.forms.keys(): if "data["+str(id)+"][" + table._tablename + "][" + f.get('name') + "]" in key: print("if " + "data["+str(id)+"][" + table._tablename + "][" + f.get('name') + "]" + " in key") print("True key:" + key) try: dt = datetime.strptime( request.forms.get("data["+str(id)+"][" + table._tablename + "][" + f.get('name') + "]"), '%Y-%m-%d').date() kwargs.update({f.get('name'): dt}) except: print("Datetime format error:" + request.forms.get( "data["+str(id)+"][" + table._tablename + "][" + f.get('name') + "]")) pass elif (f.get('type') == "boolean"): for key in request.forms.keys(): if "data["+str(id)+"][" + table._tablename + "][" + f.get('name') + "]" in key: print("if " + "data["+str(id)+"][" + table._tablename + "][" + f.get('name') + "]" + " in key") print("True key:" + key) try: kwargs.update({f.get('name'): True if (request.forms.get( "data["+str(id)+"][" + 
table._tablename + "][" + f.get('name') + "]") == "1") else False}) except: pass elif 'reference' in f.get('type'): # reftable = f.get('type').split()[1] for key in request.forms.keys(): if "data["+str(id)+"][" + f.get('name') + "][name]" in key: print("if " + "data["+str(id)+"][" + f.get('name') + "][name]" + " in key") print("True key:" + key) try: kwargs.update( {f.get('name'): int(request.forms.get("data["+str(id)+"][" + f.get('name') + "][name]"))}) except: pass # Integers, elif ((f.get('type') == "blob") or (f.get('type') == "integer") or (f.get('type') == "double") or (f.get('type') == "bigint") or ('decimal' in f.get('type'))): # blob, integer, double, decimal(n, m), bigint for key in request.forms.keys(): if "data["+str(id)+"][" + table._tablename + "][" + f.get('name') + "]" in key: print("if " + "data["+str(id)+"][" + table._tablename + "][" + f.get('name') + "]" + " in key") print("True key:" + key) try: kwargs.update({f.get('name'): int( request.forms.get("data["+str(id)+"][" + table._tablename + "][" + f.get('name') + "]"))}) except: pass else: # Stinrg, Text, password, json for key in request.forms.keys(): if "data["+str(id)+"][" + table._tablename + "][" + f.get('name') + "]" in key: print("STRING if " + "data["+str(id)+"][" + table._tablename + "][" + f.get('name') + "]" + " in key") print("True key:" + key) try: kwargs.update({f.get('name'): request.forms.get( "data["+str(id)+"][" + table._tablename + "][" + f.get('name') + "]")}) except: pass print("kwargs:" + str(kwargs) + "\r\r") """ grower_id = request.forms.get("data["+str(id)+"][grower_id][name]") variety_id = request.forms.get("data["+str(id)+"][variety_id][name]") branch_id = request.forms.get("data["+str(id)+"][branch_id][name]") commencement_dt = datetime.strptime(request.forms.get("data["+str(id)+"][growercontract][commencement_dt]"), '%Y-%m-%d').date() expiry_dt = datetime.strptime(request.forms.get("data["+str(id)+"][growercontract][expiry_dt]"), '%Y-%m-%d').date() name = 
request.forms.get("data["+str(id)+"][growercontract][name]") contract_id = request.forms.get("data["+str(id)+"][growercontract][contract_id]") contractsigned = True if (request.forms.get("data["+str(id)+"][growercontract][contractsigned]")=="1") else False """ gc_query = (table.id == id) z = db(gc_query).select().first() z.update_record(**kwargs) """ name=name, grower_id=grower_id, variety_id=variety_id, branch_id=branch_id, commencement_dt=commencement_dt, contract_id=contract_id, expiry_dt=expiry_dt, contractsigned=contractsigned, ) """ db.commit() z = db(gc_query).select().first() data = [ generic_record_data(table,z) ] print("data: "+str(data)) return json.dumps(dict(data=data), indent=4, sort_keys=True, default=str) elif action == "remove": id = 0 for key in request.forms.keys(): if "data" in key: try: a = key.split('[') # print(str(a)) id = int(a[1].strip("][")) print("id:" + str(id)) break except: return # id = request.forms.get("growercontract") result = db(table.id == id).delete() print("result:"+str(result)) return json.dumps(dict(data={})) elif action == "upload": """Handle file upload form""" upload = request.files.get('upload') print("upload: "+str(upload.filename)+' '+str(upload.name)) # only allow upload of pdf files #if upload.content_type != 'application/pdf':#'text/plain': # return "Only PDF files allowed" newid = db.upload.insert( name = '', filename = '', filesize = -1, web_path = '', system_path = '', table = str(table), ) db.commit() print("newid:"+str(newid)) upload.filename = table._tablename + '-' + str(newid) + right(upload.filename, 4) web_path = str(URL('downloadfile/'+ upload.filename)) save_path = os.path.join(settings.UPLOAD_PATH,upload.filename) print("webpath:" + str(web_path))#os.path.join('downloadfile', upload.filename)))) print("save_path:" + str(save_path))#os.path.join('downloadfile', upload.filename)))) print("newid:" + str(newid))#os.path.join('downloadfile', upload.filename)))) z = db(db.upload.id == 
newid).select(db.upload.ALL).first() print("z.id:" + str(z.id))#os.path.join('downloadfile', upload.filename)))) ret = z.update_record( name = upload.filename, filename = upload.filename, filesize = upload.content_length, web_path = web_path, system_path = save_path, table=str(table), ) db.commit() z = db(db.upload.id == newid).select(db.upload.ALL).first() print("newid:" + str(newid))#os.path.join('downloadfile', upload.filename)))) print("com z.id:" + str(z.id))#os.path.join('downloadfile', upload.filename)))) print("com z.filename:" + str(z.filename))#os.path.join('downloadfile', upload.filename)))) print("com z.webpath:" + str(z.web_path))#os.path.join('downloadfile', upload.filename)))) upload.save(save_path) #formulate the response: data = [] files = {"uploadedfiles": {str(z.id): { "id": str(z.id), "filename": z.filename, "filesize": z.filesize, "web_path": z.web_path, "system_path": z.system_path, } }} upload = {"id": z.id,"filename":z.filename } return json.dumps(dict(data=data, files=files, upload=upload), indent=4, sort_keys=True, default=str) else: return json.dumps(dict(data={}), indent=4, sort_keys=True, default=str)
# This file is part of beets. # Copyright 2016, Peter Schnebel and Johann Klähn. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. import mpd import time import os from beets import ui from beets import config from beets import plugins from beets import library from beets.util import displayable_path from beets.dbcore import types # If we lose the connection, how many times do we want to retry and how # much time should we wait between retries? RETRIES = 10 RETRY_INTERVAL = 5 mpd_config = config['mpd'] def is_url(path): """Try to determine if the path is an URL. """ if isinstance(path, bytes): # if it's bytes, then it's a path return False return path.split('://', 1)[0] in ['http', 'https'] class MPDClientWrapper: def __init__(self, log): self._log = log self.music_directory = mpd_config['music_directory'].as_str() self.strip_path = mpd_config['strip_path'].as_str() # Ensure strip_path end with '/' if not self.strip_path.endswith('/'): self.strip_path += '/' self._log.debug('music_directory: {0}', self.music_directory) self._log.debug('strip_path: {0}', self.strip_path) self.client = mpd.MPDClient() def connect(self): """Connect to the MPD. 
""" host = mpd_config['host'].as_str() port = mpd_config['port'].get(int) if host[0] in ['/', '~']: host = os.path.expanduser(host) self._log.info('connecting to {0}:{1}', host, port) try: self.client.connect(host, port) except OSError as e: raise ui.UserError(f'could not connect to MPD: {e}') password = mpd_config['password'].as_str() if password: try: self.client.password(password) except mpd.CommandError as e: raise ui.UserError( f'could not authenticate to MPD: {e}' ) def disconnect(self): """Disconnect from the MPD. """ self.client.close() self.client.disconnect() def get(self, command, retries=RETRIES): """Wrapper for requests to the MPD server. Tries to re-connect if the connection was lost (f.ex. during MPD's library refresh). """ try: return getattr(self.client, command)() except (OSError, mpd.ConnectionError) as err: self._log.error('{0}', err) if retries <= 0: # if we exited without breaking, we couldn't reconnect in time :( raise ui.UserError('communication with MPD server failed') time.sleep(RETRY_INTERVAL) try: self.disconnect() except mpd.ConnectionError: pass self.connect() return self.get(command, retries=retries - 1) def currentsong(self): """Return the path to the currently playing song, along with its songid. Prefixes paths with the music_directory, to get the absolute path. In some cases, we need to remove the local path from MPD server, we replace 'strip_path' with ''. `strip_path` defaults to ''. """ result = None entry = self.get('currentsong') if 'file' in entry: if not is_url(entry['file']): file = entry['file'] if file.startswith(self.strip_path): file = file[len(self.strip_path):] result = os.path.join(self.music_directory, file) else: result = entry['file'] self._log.debug('returning: {0}', result) return result, entry.get('id') def status(self): """Return the current status of the MPD. """ return self.get('status') def events(self): """Return list of events. This may block a long time while waiting for an answer from MPD. 
""" return self.get('idle') class MPDStats: def __init__(self, lib, log): self.lib = lib self._log = log self.do_rating = mpd_config['rating'].get(bool) self.rating_mix = mpd_config['rating_mix'].get(float) self.time_threshold = 10.0 # TODO: maybe add config option? self.now_playing = None self.mpd = MPDClientWrapper(log) def rating(self, play_count, skip_count, rating, skipped): """Calculate a new rating for a song based on play count, skip count, old rating and the fact if it was skipped or not. """ if skipped: rolling = (rating - rating / 2.0) else: rolling = (rating + (1.0 - rating) / 2.0) stable = (play_count + 1.0) / (play_count + skip_count + 2.0) return (self.rating_mix * stable + (1.0 - self.rating_mix) * rolling) def get_item(self, path): """Return the beets item related to path. """ query = library.PathQuery('path', path) item = self.lib.items(query).get() if item: return item else: self._log.info('item not found: {0}', displayable_path(path)) def update_item(self, item, attribute, value=None, increment=None): """Update the beets item. Set attribute to value or increment the value of attribute. If the increment argument is used the value is cast to the corresponding type. """ if item is None: return if increment is not None: item.load() value = type(increment)(item.get(attribute, 0)) + increment if value is not None: item[attribute] = value item.store() self._log.debug('updated: {0} = {1} [{2}]', attribute, item[attribute], displayable_path(item.path)) def update_rating(self, item, skipped): """Update the rating for a beets item. The `item` can either be a beets `Item` or None. If the item is None, nothing changes. """ if item is None: return item.load() rating = self.rating( int(item.get('play_count', 0)), int(item.get('skip_count', 0)), float(item.get('rating', 0.5)), skipped) self.update_item(item, 'rating', rating) def handle_song_change(self, song): """Determine if a song was skipped or not and update its attributes. 
To this end the difference between the song's supposed end time and the current time is calculated. If it's greater than a threshold, the song is considered skipped. Returns whether the change was manual (skipped previous song or not) """ diff = abs(song['remaining'] - (time.time() - song['started'])) skipped = diff >= self.time_threshold if skipped: self.handle_skipped(song) else: self.handle_played(song) if self.do_rating: self.update_rating(song['beets_item'], skipped) return skipped def handle_played(self, song): """Updates the play count of a song. """ self.update_item(song['beets_item'], 'play_count', increment=1) self._log.info('played {0}', displayable_path(song['path'])) def handle_skipped(self, song): """Updates the skip count of a song. """ self.update_item(song['beets_item'], 'skip_count', increment=1) self._log.info('skipped {0}', displayable_path(song['path'])) def on_stop(self, status): self._log.info('stop') # if the current song stays the same it means that we stopped on the # current track and should not record a skip. if self.now_playing and self.now_playing['id'] != status.get('songid'): self.handle_song_change(self.now_playing) self.now_playing = None def on_pause(self, status): self._log.info('pause') self.now_playing = None def on_play(self, status): path, songid = self.mpd.currentsong() if not path: return played, duration = map(int, status['time'].split(':', 1)) remaining = duration - played if self.now_playing: if self.now_playing['path'] != path: self.handle_song_change(self.now_playing) else: # In case we got mpd play event with same song playing # multiple times, # assume low diff means redundant second play event # after natural song start. 
diff = abs(time.time() - self.now_playing['started']) if diff <= self.time_threshold: return if self.now_playing['path'] == path and played == 0: self.handle_song_change(self.now_playing) if is_url(path): self._log.info('playing stream {0}', displayable_path(path)) self.now_playing = None return self._log.info('playing {0}', displayable_path(path)) self.now_playing = { 'started': time.time(), 'remaining': remaining, 'path': path, 'id': songid, 'beets_item': self.get_item(path), } self.update_item(self.now_playing['beets_item'], 'last_played', value=int(time.time())) def run(self): self.mpd.connect() events = ['player'] while True: if 'player' in events: status = self.mpd.status() handler = getattr(self, 'on_' + status['state'], None) if handler: handler(status) else: self._log.debug('unhandled status "{0}"', status) events = self.mpd.events() class MPDStatsPlugin(plugins.BeetsPlugin): item_types = { 'play_count': types.INTEGER, 'skip_count': types.INTEGER, 'last_played': library.DateType(), 'rating': types.FLOAT, } def __init__(self): super().__init__() mpd_config.add({ 'music_directory': config['directory'].as_filename(), 'strip_path': '', 'rating': True, 'rating_mix': 0.75, 'host': os.environ.get('MPD_HOST', 'localhost'), 'port': int(os.environ.get('MPD_PORT', 6600)), 'password': '', }) mpd_config['password'].redact = True def commands(self): cmd = ui.Subcommand( 'mpdstats', help='run a MPD client to gather play statistics') cmd.parser.add_option( '--host', dest='host', type='string', help='set the hostname of the server to connect to') cmd.parser.add_option( '--port', dest='port', type='int', help='set the port of the MPD server to connect to') cmd.parser.add_option( '--password', dest='password', type='string', help='set the password of the MPD server to connect to') def func(lib, opts, args): mpd_config.set_args(opts) # Overrides for MPD settings. 
if opts.host: mpd_config['host'] = opts.host.decode('utf-8') if opts.port: mpd_config['host'] = int(opts.port) if opts.password: mpd_config['password'] = opts.password.decode('utf-8') try: MPDStats(lib, self._log).run() except KeyboardInterrupt: pass cmd.func = func return [cmd]
import base64 from datetime import datetime from io import BytesIO import os from urllib.parse import urlencode, urlparse, parse_qsl, urlunparse from flask import abort, flash, make_response, redirect, render_template, \ request, Response, session, url_for, get_flashed_messages from flask_login import current_user, login_user, logout_user from flask_jwt_extended import create_access_token, create_refresh_token, \ set_access_cookies, unset_jwt_cookies from flask_mail import Message import pyotp import qrcode import i18n from qwc_services_core.database import DatabaseEngine from qwc_services_core.config_models import ConfigModels from qwc_services_core.runtime_config import RuntimeConfig from forms import LoginForm, NewPasswordForm, EditPasswordForm, VerifyForm POST_PARAM_LOGIN = os.environ.get("POST_PARAM_LOGIN", default="False") if POST_PARAM_LOGIN.lower() in ("f", "false"): POST_PARAM_LOGIN = False # max number of failed login attempts before sign in is blocked MAX_LOGIN_ATTEMPTS = int(os.environ.get('MAX_LOGIN_ATTEMPTS', 20)) # enable two factor authentication using TOTP TOTP_ENABLED = os.environ.get('TOTP_ENABLED', 'False') == 'True' # issuer name for QR code URI TOTP_ISSUER_NAME = os.environ.get('TOTP_ISSUER_NAME', 'QWC Services') class DBAuth: """DBAuth class Provide user login and password reset with local user database. 
""" # name of default admin user DEFAULT_ADMIN_USER = 'admin' # authentication form fields USERNAME = 'username' PASSWORD = 'password' def __init__(self, tenant, mail, app): """Constructor :param str tenant: Tenant ID :param flask_mail.Mail mail: Application mailer :param App app: Flask application """ self.tenant = tenant self.mail = mail self.app = app self.logger = app.logger config_handler = RuntimeConfig("dbAuth", self.logger) config = config_handler.tenant_config(tenant) db_url = config.get('db_url') # get password constraints from config self.password_constraints = { 'min_length': config.get('password_min_length', 8), 'max_length': config.get('password_max_length', -1), 'constraints': config.get('password_constraints', []), 'min_constraints': config.get('password_min_constraints', 0), 'constraints_message': config.get( 'password_constraints_message', "Password does not match constraints" ) } db_engine = DatabaseEngine() self.config_models = ConfigModels(db_engine, db_url) self.User = self.config_models.model('users') def tenant_prefix(self): """URL prefix for tentant""" # Updates config['JWT_ACCESS_COOKIE_PATH'] as side effect return self.app.session_interface.get_cookie_path(self.app) def login(self): """Authorize user and sign in.""" target_url = url_path(request.args.get('url', self.tenant_prefix())) retry_target_url = url_path(request.args.get('url', None)) if POST_PARAM_LOGIN: # Pass additional parameter specified req = request.form queryvals = {} for key, val in req.items(): if key not in (self.USERNAME, self.PASSWORD): queryvals[key] = val parts = urlparse(target_url) target_query = dict(parse_qsl(parts.query)) target_query.update(queryvals) parts = parts._replace(query=urlencode(target_query)) target_url = urlunparse(parts) self.clear_verify_session() # create session for ConfigDB db_session = self.db_session() if POST_PARAM_LOGIN: username = req.get(self.USERNAME) password = req.get(self.PASSWORD) if username: user = self.find_user(db_session, 
name=username) if self.__user_is_authorized(user, password, db_session): return self.response( self.__login_response(user, target_url), db_session ) else: self.logger.info( "POST_PARAM_LOGIN: Invalid username or password") return self.response( redirect(url_for('login', url=retry_target_url)), db_session ) form = LoginForm(meta=wft_locales()) if form.validate_on_submit(): user = self.find_user(db_session, name=form.username.data) # force password change on first sign in of default admin user # NOTE: user.last_sign_in_at will be set after successful auth force_password_change = ( user and user.name == self.DEFAULT_ADMIN_USER and user.last_sign_in_at is None ) if self.__user_is_authorized(user, form.password.data, db_session): if not force_password_change: if TOTP_ENABLED: session['login_uid'] = user.id session['target_url'] = target_url if user.totp_secret: # show form for verification token return self.response( self.__verify(db_session, False), db_session ) else: # show form for TOTP setup on first sign in return self.response( self.__setup_totp(db_session, False), db_session ) else: # login successful return self.response( self.__login_response(user, target_url), db_session ) else: return self.response( self.require_password_change( user, target_url, db_session ), db_session ) else: form.username.errors.append(i18n.t('auth.auth_failed')) form.password.errors.append(i18n.t('auth.auth_failed')) # Maybe different message when # user.failed_sign_in_count >= MAX_LOGIN_ATTEMPTS return self.response( render_template('login.html', form=form, i18n=i18n, title=i18n.t("auth.login_page_title")), db_session ) def verify(self): """Handle submit of form for TOTP verification token.""" # create session for ConfigDB db_session = self.db_session() return self.response(self.__verify(db_session), db_session) def __verify(self, db_session, submit=True): """Show form for TOTP verification token. 
:param Session db_session: DB session :param bool submit: Whether form was submitted (False if shown after login form) """ if not TOTP_ENABLED or 'login_uid' not in session: # TOTP not enabled or not in login process return redirect(url_for('login')) user = self.find_user(db_session, id=session.get('login_uid', None)) if user is None: # user not found return redirect(url_for('login')) form = VerifyForm(meta=wft_locales()) if submit and form.validate_on_submit(): if self.user_totp_is_valid(user, form.token.data, db_session): # TOTP verified target_url = session.pop('target_url', self.tenant_prefix()) self.clear_verify_session() return self.__login_response(user, target_url) else: flash(i18n.t('auth.verfication_invalid')) form.token.errors.append(i18n.t('auth.verfication_invalid')) form.token.data = None if user.failed_sign_in_count >= MAX_LOGIN_ATTEMPTS: # redirect to login after too many login attempts return redirect(url_for('login')) return render_template('verify.html', form=form, i18n=i18n, title=i18n.t("auth.verify_page_title")) def logout(self): """Sign out.""" target_url = url_path(request.args.get('url', self.tenant_prefix())) self.clear_verify_session() resp = make_response(redirect(target_url)) unset_jwt_cookies(resp) logout_user() return resp def setup_totp(self): """Handle submit of form with TOTP QR Code and token confirmation.""" # create session for ConfigDB db_session = self.db_session() return self.response(self.__setup_totp(db_session), db_session) def __setup_totp(self, db_session, submit=True): """Show form with TOTP QR Code and token confirmation. 
:param Session db_session: DB session :param bool submit: Whether form was submitted (False if shown after login form) """ if not TOTP_ENABLED or 'login_uid' not in session: # TOTP not enabled or not in login process return redirect(url_for('login')) user = self.find_user(db_session, id=session.get('login_uid', None)) if user is None: # user not found return redirect(url_for('login')) totp_secret = session.get('totp_secret', None) if totp_secret is None: # generate new secret totp_secret = pyotp.random_base32() # store temp secret in session session['totp_secret'] = totp_secret form = VerifyForm(meta=wft_locales()) if submit and form.validate_on_submit(): if pyotp.totp.TOTP(totp_secret).verify( form.token.data, valid_window=1 ): # TOTP confirmed # save TOTP secret user.totp_secret = totp_secret # update last sign in timestamp and reset failed attempts # counter user.last_sign_in_at = datetime.utcnow() user.failed_sign_in_count = 0 db_session.commit() target_url = session.pop('target_url', self.tenant_prefix()) self.clear_verify_session() return self.__login_response(user, target_url) else: flash(i18n.t('auth.verfication_invalid')) form.token.errors.append(i18n.t('auth.verfication_invalid')) form.token.data = None # enable one-time loading of QR code image session['show_qrcode'] = True # show form resp = make_response(render_template( 'qrcode.html', form=form, i18n=i18n, title=i18n.t("auth.qrcode_page_title"), totp_secret=totp_secret )) # do not cache in browser resp.headers.set( 'Cache-Control', 'no-cache, no-store, must-revalidate' ) resp.headers.set('Pragma', 'no-cache') resp.headers.set('Expires', '0') return resp def qrcode(self): """Return TOTP QR code.""" if not TOTP_ENABLED or 'login_uid' not in session: # TOTP not enabled or not in login process abort(404) # check presence of show_qrcode # to allow one-time loading from TOTP setup form if 'show_qrcode' not in session: # not in TOTP setup form abort(404) # remove show_qrcode from session 
session.pop('show_qrcode', None) totp_secret = session.get('totp_secret', None) if totp_secret is None: # temp secret not set abort(404) # create session for ConfigDB db_session = self.db_session() # find user by ID user = self.find_user(db_session, id=session.get('login_uid', None)) # close session db_session.close() if user is None: # user not found abort(404) # generate TOTP URI email = user.email or user.name uri = pyotp.totp.TOTP(totp_secret).provisioning_uri( email, issuer_name=TOTP_ISSUER_NAME ) # generate QR code img = qrcode.make(uri, box_size=6, border=1) stream = BytesIO() img.save(stream, 'PNG') return Response( stream.getvalue(), content_type='image/png', headers={ # do not cache in browser 'Cache-Control': 'no-cache, no-store, must-revalidate', 'Pragma': 'no-cache', 'Expires': '0' }, status=200 ) def new_password(self): """Show form and send reset password instructions.""" form = NewPasswordForm(meta=wft_locales()) if form.validate_on_submit(): # create session for ConfigDB db_session = self.db_session() user = self.find_user(db_session, email=form.email.data) if user: # generate and save reset token user.reset_password_token = self.generate_token() db_session.commit() # send password reset instructions try: self.send_reset_passwort_instructions(user) except Exception as e: self.logger.error( "Could not send reset password instructions to " "user '%s':\n%s" % (user.email, e) ) flash(i18n.t("auth.reset_mail_failed")) return self.response( render_template( 'new_password.html', form=form, i18n=i18n, title=i18n.t("auth.new_password_page_title") ), db_session ) # NOTE: show message anyway even if email not found flash(i18n.t("auth.reset_message")) return self.response( redirect(url_for('login')), db_session ) return render_template( 'new_password.html', form=form, i18n=i18n, title=i18n.t("auth.new_password_page_title") ) def edit_password(self, token): """Show form and reset password. 
:param str: Password reset token """ form = self.edit_password_form() if form.validate_on_submit(): # create session for ConfigDB db_session = self.db_session() user = self.find_user( db_session, reset_password_token=form.reset_password_token.data ) if user: # save new password user.set_password(form.password.data) # clear token user.reset_password_token = None if user.last_sign_in_at is None: # set last sign in timestamp after required password change # to mark as password changed user.last_sign_in_at = datetime.utcnow() db_session.commit() flash(i18n.t("auth.edit_password_successful")) return self.response( redirect(url_for('login')), db_session ) else: # invalid reset token flash(i18n.t("auth.edit_password_invalid_token")) return self.response( render_template( 'edit_password.html', form=form, i18n=i18n, title=i18n.t("auth.edit_password_page_title") ), db_session ) if token: # set hidden field form.reset_password_token.data = token return render_template( 'edit_password.html', form=form, i18n=i18n, title=i18n.t("auth.edit_password_page_title") ) def require_password_change(self, user, target_url, db_session): """Show form for required password change. 
:param User user: User instance :param str target_url: URL for redirect :param Session db_session: DB session """ # clear last sign in timestamp and generate reset token # to mark as requiring password change user.last_sign_in_at = None user.reset_password_token = self.generate_token() db_session.commit() # show password reset form form = self.edit_password_form() # set hidden field form.reset_password_token.data = user.reset_password_token flash(i18n.t('auth.edit_password_message')) return render_template( 'edit_password.html', form=form, i18n=i18n, title=i18n.t("auth.edit_password_page_title") ) def edit_password_form(self): """Return password reset form with constraints from config.""" return EditPasswordForm( self.password_constraints['min_length'], self.password_constraints['max_length'], self.password_constraints['constraints'], self.password_constraints['min_constraints'], self.password_constraints['constraints_message'], meta=wft_locales() ) def db_session(self): """Return new session for ConfigDB.""" return self.config_models.session() def response(self, response, db_session): """Helper for closing DB session before returning response. :param obj response: Response :param Session db_session: DB session """ # close session db_session.close() return response def find_user(self, db_session, **kwargs): """Find user by filter. :param Session db_session: DB session :param **kwargs: keyword arguments for filter (e.g. name=username) """ return db_session.query(self.User).filter_by(**kwargs).first() def load_user(self, id): """Load user by id. :param int id: User ID """ # create session for ConfigDB db_session = self.db_session() # find user by ID user = self.find_user(db_session, id=id) # close session db_session.close() return user def token_exists(self, token): """Check if password reset token exists. 
:param str: Password reset token """ # create session for ConfigDB db_session = self.db_session() # find user by password reset token user = self.find_user(db_session, reset_password_token=token) # close session db_session.close() return user is not None def __user_is_authorized(self, user, password, db_session): """Check credentials, update user sign in fields and return whether user is authorized. :param User user: User instance :param str password: Password :param Session db_session: DB session """ if user is None or user.password_hash is None: # invalid username or no password set return False elif user.check_password(password): # valid credentials if user.failed_sign_in_count < MAX_LOGIN_ATTEMPTS: if not TOTP_ENABLED: # update last sign in timestamp and reset failed attempts # counter user.last_sign_in_at = datetime.utcnow() user.failed_sign_in_count = 0 db_session.commit() return True else: # block sign in due to too many login attempts return False else: # invalid password # increase failed login attempts counter user.failed_sign_in_count += 1 db_session.commit() return False def user_totp_is_valid(self, user, token, db_session): """Check TOTP token, update user sign in fields and return whether user is authorized. 
:param User user: User instance :param str token: TOTP token :param Session db_session: DB session """ if user is None or not user.totp_secret: # invalid user ID or blank TOTP secret return False elif pyotp.totp.TOTP(user.totp_secret).verify(token, valid_window=1): # valid token # update last sign in timestamp and reset failed attempts counter user.last_sign_in_at = datetime.utcnow() user.failed_sign_in_count = 0 db_session.commit() return True else: # invalid token # increase failed login attempts counter user.failed_sign_in_count += 1 db_session.commit() return False def clear_verify_session(self): """Clear session values for TOTP verification.""" session.pop('login_uid', None) session.pop('target_url', None) session.pop('totp_secret', None) session.pop('show_qrcode', None) def __login_response(self, user, target_url): self.logger.info("Logging in as user '%s'" % user.name) # flask_login stores user in session login_user(user) # Create the tokens we will be sending back to the user access_token = create_access_token(identity=user.name) # refresh_token = create_refresh_token(identity=username) resp = make_response(redirect(target_url)) # Set the JWTs and the CSRF double submit protection cookies # in this response set_access_cookies(resp, access_token) return resp def generate_token(self): """Generate new token.""" token = None while token is None: # generate token token = base64.urlsafe_b64encode(os.urandom(15)). \ rstrip(b'=').decode('ascii') # check uniqueness of token if self.token_exists(token): # token already present token = None return token def send_reset_passwort_instructions(self, user): """Send mail with reset password instructions to user. 
:param User user: User instance """ # generate full reset password URL reset_url = url_for( 'edit_password', reset_password_token=user.reset_password_token, _external=True ) msg = Message( i18n.t('auth.reset_mail_subject'), recipients=[user.email] ) # set message body from template msg.body = render_template( 'reset_password_instructions.%s.txt' % i18n.get('locale'), user=user, reset_url=reset_url ) # send message self.logger.debug(msg) self.mail.send(msg) def wft_locales(): return {'locales': [i18n.get('locale')]} def url_path(url): """ Extract path and query parameters from URL """ o = urlparse(url) parts = list(filter(None, [o.path, o.query])) return '?'.join(parts)
#기본자료형 : int, float, str, bool #list, tuple, dict, set #c언어의 배열 jumsu = [100, 97, 99, 85, 91] print(jumsu) a = [] #비어있는 리스트 b = [1, 2, 3] #정수형 리스트 c = ["like", "python", "happy"] #문자열 리스트 d = [1, 2, 3, "Like", "Happy"] #혼합형 리스트 e = [1, 2, 3, ["Like", "Happy"]] #리스트는 모든 자료형의 데이터를 다 저장할 수 있다. print(a) print(b) print(c) print(d) print(e)
#CNN from __future__ import print_function import numpy as np import matplotlib.pyplot as plt get_ipython().magic('matplotlib inline') import keras from keras.datasets import mnist from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D from keras import backend as K # set paramters batch_size = 128 num_classes = 10 epochs = 10 img_rows, img_cols = 28, 28 # load MNIST data (x_train, y_train), (x_test, y_test) = mnist.load_data() # data preprocessing if K.image_data_format() == 'channels_first': x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols) x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols) input_shape = (1, img_rows, img_cols) else: x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1) x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1) input_shape = (img_rows, img_cols, 1) x_train = x_train.astype('float32') x_test = x_test.astype('float32') scale = np.max(x_train) # 255 x_train /= scale x_test /= scale mean = np.std(x_train) x_train -= mean x_test -= mean print('x_train shape:', x_train.shape) # (60000, 28, 28, 1) print(x_train.shape[0], 'train samples') # 60000 print(x_test.shape[0], 'test samples') # 10000 y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) # construct CNN model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',input_shape=input_shape)) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation='softmax')) model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) # train CNN history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, 
y_test)) # evaluate on test set score = model.evaluate(x_test, y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) # visualize the loss function in each epoch history_dict = history.history loss_values = history_dict['loss'] val_loss_values = history_dict['val_loss'] epochs = range(1, len(loss_values) + 1) plt.plot(epochs, loss_values, 'bo') plt.plot(epochs, val_loss_values, 'b+') plt.xlabel('Epochs') plt.ylabel('Loss') plt.show() # visualize accuracy in each epoch plt.clf() acc_values = history_dict['acc'] val_acc_values = history_dict['val_acc'] plt.plot(epochs, acc_values, 'bo') plt.plot(epochs, val_acc_values, 'b+') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.show()
# pip install boto3
#
# Provision an Oracle SE2 RDS instance in us-west-2.
import boto3

# --- Instance specifications ------------------------------------------------
license_model = 'license-included'  # Oracle
db_engine = 'oracle-se2'
db_engine_version = '12.1.0.2.v16'
db_instance_class = 'db.t2.medium'
availability_zone = 'us-west-2a'
multi_az = False
storage_type = 'GP2'
allocated_storage = 100     # GiB
storage_autoscaling = True
max_storage_size = 1000     # GiB ceiling for storage autoscaling
db_instance_identifier = 'AWOR-DVRDSORA04'  # fixed typo: was 'db_instance_idenfifier'
master_username = 'admin'
# SECURITY: hard-coded credential committed to source.  Move this into AWS
# Secrets Manager or an environment variable, and rotate this password.
master_password = 'B57M383J9RFlAuROmQ3H'

# --- Network & security -----------------------------------------------------
db_subnet_group_name = 'vpcnasharedservices-protected'
publicly_accessible = False  # fixed typo: was 'publicy_accessible'

# VPC Shared Services security groups
sg_1 = 'sg-85375fe3'
sg_2 = 'sg-f7262991'
sg_3 = 'sg-c00961a6'
sg_4 = 'sg-ed08608b'

# --- Database options -------------------------------------------------------
db_name = 'EUMCMPD'          # fixed: was defined as 'database_name' but used as 'db_name'
# Oracle
port = 1521
db_paramater_group = 'default.oracle-se2-12.1'
option_group = 'default:oracle-se2-12-1'
character_set_name = 'AL32UTF8'

rds_client = boto3.client('rds', region_name='us-west-2')

# Keyword arguments for create_db_instance.  Corrected boto3 parameter
# names: VpcSecurityGroupIds (not VPCSecurityGroupIds), and storage
# autoscaling is enabled via MaxAllocatedStorage —
# SupportsStorageAutoscaling / MaxStorageSize are not valid parameters.
# The original also passed Iops=0; Iops only applies to provisioned-IOPS
# storage and is omitted for GP2.
create_kwargs = dict(
    AvailabilityZone=availability_zone,
    AutoMinorVersionUpgrade=False,
    CopyTagsToSnapshot=True,
    DeletionProtection=False,
    MultiAZ=multi_az,
    PubliclyAccessible=publicly_accessible,
    StorageEncrypted=True,
    AllocatedStorage=allocated_storage,
    BackupRetentionPeriod=7,
    MonitoringInterval=0,
    Port=port,
    DBInstanceClass=db_instance_class,
    DBInstanceIdentifier=db_instance_identifier,
    DBName=db_name,
    DBParameterGroupName=db_paramater_group,
    DBSubnetGroupName=db_subnet_group_name,
    Engine=db_engine,
    EngineVersion=db_engine_version,
    LicenseModel=license_model,
    MasterUserPassword=master_password,
    MasterUsername=master_username,
    OptionGroupName=option_group,
    StorageType=storage_type,
    CharacterSetName=character_set_name,
    VpcSecurityGroupIds=[
        sg_1,
        sg_2,
        sg_3,
        sg_4
    ],
    Tags=[
        {"Key": "Application", "Value": "Oracle"},
        {"Key": "ApplicationTier", "Value": "Database"},
        {"Key": "ApplicationTierLevel", "Value": "No Tier"},
        {"Key": "Managed", "Value": "Yes"},
        {"Key": "Environment", "Value": "Development"},
        {"Key": "Name", "Value": "AWOR-DVRDSORA04"},
        {"Key": "CorpInfoMSP:TakeNightlySnapshot", "Value": "No"},
        {"Key": "FileLevelBackup", "Value": "No"},
        {"Key": "MonitoredServices", "Value": "No"},
        {"Key": "RequestNumber", "Value": "SNOW Ticket"},
        {"Key": "OperationalHours", "Value": "24x7"},
        {"Key": "ReviewDate", "Value": "6/20/2019"},
        {"Key": "CostCenter", "Value": "1001596013"},
        {"Key": "ServiceLocation", "Value": "Irvine"},
        {"Key": "ServiceOwner", "Value": "Amir Memaran/Mike Lockwood"},
        {"Key": "TechnicalOwner", "Value": "Alek Slavuk/Haihao Li"},
        {"Key": "ContactPreference", "Value": "Email"},
        {"Key": "PatchGroup", "Value": "PilotAutoReboot"},
        {"Key": "Schedule", "Value": "24x7"},
        {"Key": "Purpose", "Value": "Windchill 11.2 Sandbox System"},
        {"Key": "Validated", "Value": "No"},
    ],
)

# Enable storage autoscaling up to the configured ceiling.
if storage_autoscaling:
    create_kwargs['MaxAllocatedStorage'] = max_storage_size

rds_instance = rds_client.create_db_instance(**create_kwargs)
A relational database is a collection of data organised in tables. There are relations among the tables. The tables are formally described. They consist of rows and columns. SQL (Structured Query Language) is a database computer language designed for managing data in relational database management systems. A table is a set of values that is organised using a model of vertical columns and horizontal rows. The columns are identified by their names. A schema of a database system is its structure described in a formal language. It defines the tables, the fields, relationships, views, indexes, procedures, functions, queues, triggers, and other elements. A database row represents a single, implicitly structured data item in a table. It is also called a tuple or a record. sqlite> .help .backup ?DB? FILE Backup DB (default "main") to FILE .bail on|off Stop after hitting an error. Default OFF .binary on|off Turn binary output on or off. Default OFF .clone NEWDB Clone data into NEWDB from the existing database .databases List names and files of attached databases .dbinfo ?DB? Show status information about the database .dump ?TABLE? ... Dump the database in an SQL text format If TABLE specified, only dump tables matching LIKE pattern TABLE. .echo on|off Turn command echo on or off .eqp on|off Enable or disable automatic EXPLAIN QUERY PLAN .exit Exit this program .explain ?on|off? Turn output mode suitable for EXPLAIN on or off. With no args, it turns EXPLAIN on. .fullschema Show schema and the content of sqlite_stat tables .headers on|off Turn display of headers on or off .help Show this message .import FILE TABLE Import data from FILE into TABLE .indexes ?TABLE? Show names of all indexes If TABLE specified, only show indexes for tables matching LIKE pattern TABLE. .limit ?LIMIT? ?VAL? Display or change the value of an SQLITE_LIMIT .log FILE|off Turn logging on or off. FILE can be stderr/stdout .mode MODE ?TABLE? 
Set output mode where MODE is one of: ascii Columns/rows delimited by 0x1F and 0x1E csv Comma-separated values column Left-aligned columns. (See .width) html HTML <table> code insert SQL insert statements for TABLE line One value per line list Values delimited by .separator strings tabs Tab-separated values tcl TCL list elements .nullvalue STRING Use STRING in place of NULL values .once FILENAME Output for the next SQL command only to FILENAME .open ?FILENAME? Close existing database and reopen FILENAME .output ?FILENAME? Send output to FILENAME or stdout .print STRING... Print literal STRING .prompt MAIN CONTINUE Replace the standard prompts .quit Exit this program .read FILENAME Execute SQL in FILENAME .restore ?DB? FILE Restore content of DB (default "main") from FILE .save FILE Write in-memory database into FILE .scanstats on|off Turn sqlite3_stmt_scanstatus() metrics on or off .schema ?TABLE? Show the CREATE statements If TABLE specified, only show tables matching LIKE pattern TABLE. .separator COL ?ROW? Change the column separator and optionally the row separator for both the output mode and .import .shell CMD ARGS... Run CMD ARGS... in a system shell .show Show the current values for various settings .stats on|off Turn stats on or off .system CMD ARGS... Run CMD ARGS... in a system shell .tables ?TABLE? List names of tables If TABLE specified, only list tables matching LIKE pattern TABLE. .timeout MS Try opening locked tables for MS milliseconds .timer on|off Turn SQL timer on or off .trace FILE|off Output each SQL statement as it is run .vfsname ?AUX? Print the name of the VFS stack .width NUM1 NUM2 ... Set column widths for "column" mode Negative values right-justify sqlite> CREATE statement is used to create tables, indexes, views, and triggers ALTER TABLE statement changes the structure of a table. DROP statement removes tables, indexes, views, or triggers. 
DATA TYPES:
NULL — The value is a NULL value
INTEGER — a signed integer
REAL — a floating point value
TEXT — a text string
BLOB — a blob of data

With boolean operators we perform logical operations. SQLite has three boolean operators: AND, OR, and NOT. Boolean operators return true or false. In SQLite, 1 is true, 0 is false. The AND operator evaluates to true if both operands are true.

Relational operators are used to compare values.
Symbol    Meaning
<         strictly less than
<=        less than or equal to
>         greater than
>=        greater than or equal to
= or ==   equal to
!= or <>  not equal to
These operators always result in a boolean value.

On the foreign-key page: click on gym, click create. The foreign key goes on the MANY side; it links the gym to the members and will show the foreign-key association. In an ERD (which is on the final), rows represent column names. The foreign key should always be on the MANY side.

conn = sqlite3.connect('fitness.db')
cursor = conn.cursor()

Adding AUTOINCREMENT will auto-generate numbers for the column. The foreign-key column should reference a unique key. You will need to qualify column names with the identifying table if you are looking at more than two tables.

Topics: OOP, MVC, SQL, reversing strings.

Using LIKE '%email%' will give you all the rows containing that specific email address; LIKE 'V%' gives you all values that start with that letter, and LIKE '%V' gives you all values that end with that letter.
from re import compile, match

# Matches strings made up entirely of whitespace, including the empty string
# ('\s*' may consume nothing before the end anchor).
REGEX = compile(r'\s*$')


def whitespace(string):
    """Return True if *string* is empty or contains only whitespace."""
    return REGEX.match(string) is not None
import time
import traceback
from logging import Logger

from core import Data
from core.schema import S1
from providers.bitly_short import BitlyShorten
from providers.google_fetch import GoogleFetch, GoogleFetchRetry
from providers.google_rss import GoogleRSS


class GooglePollAgent(object):
    """Polls Google+ activity feeds for registered users, caches the results
    and maintains per-user activity statistics."""

    def __init__(self, logger, data, config_path):
        """
        @type logger: Logger
        @type data: Data
        """
        self.logger = logger
        self.data = data
        self.google_fetch = GoogleFetch(logger, config_path)
        self.shortener = BitlyShorten(logger, config_path)

    def validate_user_name(self, user_name):
        """Resolve a Google+ user name to a GID, storing account info for
        accounts not seen before.

        @return: the GID string, or None on failure.
        """
        # BUG FIX: the loop was range(0, 1) — a single iteration — so the
        # advertised "retry 1 time" never happened. Two attempts total now.
        for _attempt in range(2):
            try:
                person_doc = self.google_fetch.get_plus_user_info(user_name)
                if not person_doc or 'id' not in person_doc:
                    self.logger.error('Error: validate_user_name no result for {0}'.format(user_name))
                    return None

                # store account info if no info available
                gid_info = self.data.get_gid_info(person_doc['id'])
                # TODO: Update account info for existing account!
                if not gid_info:
                    self.logger.warning('New account info for {0}:{1}'.format(user_name, person_doc['id']))
                    self.data.set_gid_info(person_doc['id'], person_doc)

                # return GID
                return person_doc['id']

            except GoogleFetchRetry:
                self.logger.warning('RetryError in validate_user_name for {0}'.format(user_name))
                continue
            except Exception as e:
                msg = 'Exception while validate_user_name for {0}, [{1}], {2}'
                self.logger.error(msg.format(user_name, e, traceback.format_exc()))
                return None

        # All attempts ended in GoogleFetchRetry.
        return None

    def poll(self, gid):
        """
        requests list of activities for the GID
        @rtype : bool
        @return : True on success, False on system error
        """
        try:
            self.logger.info('Poll request for {0}...'.format(gid))
            # fetch data
            activities_doc = self.fetch(gid)
            if activities_doc:
                # process the dataset
                self.process_activities_doc(gid, activities_doc, False)
            else:
                self.logger.warning('Nothing to process for {0}'.format(gid))
            return True
        except GoogleFetchRetry:
            self.logger.warning('RetryError for {0}'.format(gid))
        except Exception as e:
            msg = 'Exception while fetching data for {0}, [{1}], {2}'
            self.logger.error(msg.format(gid, e, traceback.format_exc()))
        return False

    def fetch(self, gid):
        """Fetch activities from Google; returns the raw doc or None."""
        max_results = self.data.cache.get_gid_max_results(gid)
        activities_doc = self.google_fetch.get_activities(gid, max_results)

        # validate received data
        if not activities_doc:
            self.logger.warning('Nothing received for [{0}]'.format(gid))
            return None

        return activities_doc

    def process_activities_doc(self, gid, activities_doc, force=False):
        """Cache a freshly fetched activities doc and fan out updates.

        @param force: when True, process even if the etag is unchanged.
        """
        # validate received data
        updated = GoogleRSS.get_update_timestamp(activities_doc)
        if not updated:
            self.logger.warning('Received empty data set for [{0}]'.format(gid))
            return

        # set last successful poll timestamp
        # users with no posts in Google Plus feeds will not be able to connect
        # as FE monitors this timestamp before accepting new account link
        self.data.cache.set_poll_stamp(gid, time.time())

        # set cache-specific meta-data
        last_updated = self.data.get_destination_update(gid, 'cache', gid)
        self.logger.info('Received data for [{0}], updated [{1}], last_updated [{2}]'.format(gid, updated, last_updated))
        if updated < last_updated:
            # Incomplete data?
            self.logger.warning('Warning: Updated timestamp jumped to past!')
            return

        # check if new update is in
        last_etag = self.data.get_destination_param(gid, 'cache', gid, S1.etag_key())
        etag = GoogleRSS.get_item_etag(activities_doc)
        if not force and last_etag == etag:
            self.logger.debug('Same data for {0}, last_updated={1}'.format(gid, last_updated))
            return

        # save etag
        self.data.set_destination_param(gid, 'cache', gid, S1.etag_key(), etag)
        # set cache destination updated
        self.data.set_destination_update(gid, 'cache', gid, updated)

        # shorten reshared urls (and all urls when the user opted in).
        # The original comprehension reused the name `item` for both loop
        # levels; renamed for clarity — the result is the set of long urls.
        items = GoogleRSS.get_updated_since(activities_doc, last_updated)
        shorten = self.data.get_gid_shorten_urls(gid)
        urls = {url
                for item in items
                if shorten or GoogleRSS.get_item_is_share(item)
                for url in GoogleRSS.get_long_urls(item)}
        for url in urls:
            u = self.data.cache.get_short_url(url)
            if not u:
                u = self.shortener.get_short_url(url)
                self.data.cache.cache_short_url(url, u)

        # store the dataset
        self.data.cache.cache_activities_doc(gid, activities_doc)
        # notify publishers
        self.data.flush_updates(gid)

        # process stats data
        # new user ?
        if not last_updated:
            self.logger.warning('Building new user activity map for {0}'.format(gid))
            self._build_user_activity_map(gid, activities_doc)
            # fake an update now as user is likely online when this code is executed
            self.data.cache.incr_num_minute_updates(gid, time.time())
        elif last_updated < updated:
            # increment update count for this minute
            self.logger.debug('Updating user activity map for {0}, data updated={1}'.format(gid, updated))
            self._build_user_activity_map(gid, activities_doc, last_updated=last_updated)
        else:
            self.logger.debug('No activity map updates for {0}, data updated={1}'.format(gid, updated))

    def _build_user_activity_map(self, gid, activities_doc, last_updated=0):
        """
        Creates user daily activity map from activities doc
        @type activities_doc: dict
        """
        for item in activities_doc.get('items', []):
            updated = GoogleRSS.get_item_updated_stamp(item)
            if updated > last_updated:
                self.data.cache.incr_num_minute_updates(gid, updated)
""" 直方图的绘制: """ from matplotlib import pyplot as plt import numpy as np y1 = np.random.randn(100) y = y1 + 100 # edgecolor用于指定边框颜色 plt.hist(y, bins=10, edgecolor='r') plt.show()
# Generated by Django 3.0.2 on 2020-02-21 16:43 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('osmcal', '0015_auto_20200215_1726'), ] operations = [ migrations.RemoveField( model_name='participationquestion', name='choices', ), migrations.CreateModel( name='ParticipationQuestionChoice', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('text', models.CharField(max_length=200)), ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='choices', to='osmcal.ParticipationQuestion')), ], ), migrations.CreateModel( name='ParticipationAnswers', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('answer', models.CharField(max_length=200)), ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='osmcal.ParticipationQuestion')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)), ], ), ]
# O(n^2) recursive selection sort
def sel_sort_rec(seq, i):
    """Selection-sort seq[0..i] in place, recursively.

    :param seq: mutable sequence to sort
    :param i: index of the last element of the still-unsorted prefix
    """
    # Guard with <= so an empty sequence (called with i == -1) terminates;
    # the original `i == 0` check recursed forever on negative i.
    if i <= 0:
        return
    # Find the largest element in seq[0..i]...
    max_j = i
    for j in range(i):
        if seq[j] > seq[max_j]:
            max_j = j
    # ...move it to position i, then sort the remaining prefix.
    seq[i], seq[max_j] = seq[max_j], seq[i]
    sel_sort_rec(seq, i - 1)


def main():
    seq = [1, 5, 3, 4, 6, 2]
    sel_sort_rec(seq, len(seq) - 1)
    # '"".join(str(seq))' was a no-op round-trip; print the list directly.
    print(seq)


if __name__ == '__main__':
    main()
import math


class Circle:
    """A circular solid described by its base radius and height."""

    def __init__(self, radius, height):
        self.radius = radius
        self.height = height

    # Conceptually this "belongs" to the class rather than any instance
    # (a subtle point): the area depends only on the radius argument.
    @staticmethod
    def compute_area(radius):
        """Area of a circle with the given radius."""
        return math.pi * radius ** 2

    def compute_volume(self):
        """Volume = base area x height."""
        base_area = Circle.compute_area(self.radius)  # or self.compute_area
        return base_area * self.height


c = Circle(10, 1)
print(c.compute_volume())
print(Circle.compute_area(12))
from flask import Blueprint, request
from utils.decorators import ErrorHandler
from utils.errors import ConflictError
import datetime

DATE_FORMAT = '%Y-%m-%d'
# NOTE(review): NOW/HOURS_ADDED are frozen at import time, so to_date() adds
# the *process start* time-of-day, not the current one — confirm intent.
NOW = datetime.datetime.now()
HOURS_ADDED = datetime.timedelta(
    hours=NOW.hour, minutes=NOW.minute, seconds=NOW.second)


def from_date(date: str) -> str:
    """Parse a YYYY-MM-DD string into 'YYYY-MM-DD 00:00:00'."""
    return datetime.datetime.strptime(date, DATE_FORMAT).isoformat(sep=' ')


def to_date(date: str) -> str:
    """Parse a YYYY-MM-DD string into an ISO timestamp at HOURS_ADDED."""
    return (datetime.datetime.strptime(date, DATE_FORMAT) + HOURS_ADDED).isoformat()


def to_bool(deleted: str) -> bool:
    """Interpret a query-string flag; a bare flag ('') also counts as true."""
    return deleted in ('True', 'true', 't', '1', 'yes', 'y', '')


def to_list(q: str) -> list:
    """Split a space-separated query string into a list of filter tokens.

    BUG FIX: the original computed the split but returned the raw string
    (and leaked a debug print).
    """
    return q.split(' ')


def page_boundires(page_size: int) -> int:
    """Validate 0 <= page_size <= 100; raise ConflictError otherwise."""
    if int(page_size) > 100:
        raise ConflictError(user_err_msg='Superior Boundary Exceeded.')
    elif int(page_size) < 0:
        raise ConflictError(user_err_msg='Inferior Boundary Exceeded.')
    return page_size


def get_url_params(request, *args, **kwargs):
    """Collect paging/date-range/deleted filters from the query string.

    Extra keyword arguments are copied into the returned filter dict.
    :return: dict with 'page_size', 'page', 'deleted', 'to', 'from' keys.
    """
    filters = dict()
    page_size = request.args.get(
        'pageSize',
        default=10,
        type=int,
    )
    page_boundires(page_size)
    page = request.args.get(
        'page',
        default=1,
        type=int
    )
    date_from = request.args.get(
        'from',
        default=datetime.datetime.strptime(
            '1990-01-01', DATE_FORMAT).isoformat(),
        type=from_date
    )
    # CONSISTENCY FIX: the default was a datetime object while to_date()
    # returns a string — 'to' now has the same type either way.
    date_to = request.args.get(
        'to',
        default=datetime.datetime.now().isoformat(),
        type=to_date
    )
    deleted = request.args.get(
        'deleted',
        default=False,
        type=to_bool
    )
    for k in kwargs.keys():
        filters[str(k)] = kwargs[k]
    filters['page_size'] = page_size
    filters['page'] = page
    filters['deleted'] = deleted
    filters['to'] = date_to
    filters['from'] = date_from
    return filters
# calc_dos_fermi.py
from ..model import green
from ..model import periodize
import json


def calc_dos_fermi(fname):
    """Compute the density of states for the cluster Green's function in
    *fname*, reading the chemical potential from statsparams0.json and
    writing the result to 'dos_calcdos.dat'."""
    with open("statsparams0.json") as fin:
        params = json.load(fin)
    mu = params["mu"][0]

    w_vec, sEvec_cw = green.read_green_c(fname)
    model = periodize.Model(1.0, 0.4, mu, w_vec, sEvec_cw)
    model.calc_dos("dos_calcdos.dat")
    # model.fermi_surface(0.0, "fermi_surface.dat")
#Homework 5
#ElaineKooikerP2
#I pledge my honor that I have abided by the Stevens Honor Code.

#Write and test a Python program which has a function
#which accepts a list of numbers and returns the sum of the numbers in the list.

def sum(numbers):
    """Return the sum of the numbers in *numbers*.

    NOTE: the name shadows the builtin sum() — kept to match the assignment,
    but a name like total() would be safer in real code.
    """
    total = 0
    # Iterate the values directly instead of indexing via range(len(...)).
    for value in numbers:
        total = total + value
    return total


def main():
    # Renamed from 'list', which shadowed the builtin.
    numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    print(sum(numbers))


if __name__ == '__main__':
    main()
from django.shortcuts import render, redirect
from .forms import EmployeeForm, PassportForm, StatementForm
from .models import Employee, Passport, Statement


def get_employee_list(request):
    # Render the full list of employees.
    context = {'employee_list': Employee.objects.all()}
    return render(request, 'employee_register/employee_list.html', context)


def employee_form(request, id=0):
    # Create (id == 0) or edit (id != 0) an employee together with the
    # linked passport record.
    if request.method == "GET":
        # On a plain GET, show empty forms
        if id == 0:
            eForm = EmployeeForm
            pForm = PassportForm
        # When an existing record is requested,
        # pre-fill the forms with its data
        else:
            employee = Employee.objects.get(pk=id)
            pForm = PassportForm(instance=employee.passport)
            eForm = EmployeeForm(instance=employee)
        # Send the forms to the template
        return render(request, 'employee_register/employee_form.html', {'eForm': eForm, 'pForm': pForm})
    elif request.method == "POST":
        # A new submission binds fresh form instances
        if id == 0:
            pForm = PassportForm(request.POST)
            eForm = EmployeeForm(request.POST, files=request.FILES)
        # An edit re-binds the forms to the existing model instances
        else:
            employee = Employee.objects.get(pk=id)
            pForm = PassportForm(request.POST, instance=employee.passport)
            eForm = EmployeeForm(request.POST, instance=employee, files=request.FILES)
        # Validate and save both forms; the passport is saved first so the
        # employee row can reference it.
        # NOTE(review): invalid submissions are silently discarded and the
        # user is redirected anyway — confirm whether re-rendering the form
        # with errors was intended.
        if eForm.is_valid() and pForm.is_valid():
            pas = pForm.save()
            emp = eForm.save()
            emp.passport = pas
            emp.save()
        return redirect('/employee/list/')


def statement_form(request, id):
    # Attach a new statement to employee *id*.
    if request.method == "GET":
        sForm = StatementForm
        return render(request, 'employee_register/statement_form.html', {'sForm': sForm})
    elif request.method == "POST":
        sForm = StatementForm(request.POST)
        # NOTE(review): invalid statements are silently dropped (see above).
        if sForm.is_valid():
            employee = Employee.objects.get(pk=id)
            stat = sForm.save()
            stat.employee = employee
            stat.save()
        return redirect('/employee/list/')


def employee_delete(request, id):
    # Delete an employee and the passport row that belongs to it.
    employee = Employee.objects.get(pk=id)
    passport = employee.passport
    employee.delete()
    passport.delete()
    return redirect('/employee/list/')


def get_passport(request, id):
    # Show the passport attached to employee *id*.
    employee = Employee.objects.get(pk=id)
    passport = employee.passport
    return render(request, 'employee_register/employee_passport.html', {'passport': passport})


def get_statement_list(request, id):
    # Show every statement filed by employee *id*.
    employee = Employee.objects.get(pk=id)
    statement = employee.statement_set.all()
    return render(request, 'employee_register/employee_statement.html', {'statement': statement, 'employee': employee})
from datetime import datetime
from operator import or_
from uuid import uuid4

from flask import request
from flask_login import current_user
from sqlalchemy import func
from sqlalchemy.sql.functions import coalesce

from bitcoin_acks.database import session_scope
from bitcoin_acks.logging import log
from bitcoin_acks.models import Bounties, PullRequests
from bitcoin_acks.webapp.formatters import humanize_date_formatter, \
    pr_link_formatter, payable_satoshi_formatter, invoices_formatter
from bitcoin_acks.webapp.views.authenticated_model_view import \
    AuthenticatedModelView


class BountiesPayableModelView(AuthenticatedModelView):
    """Flask-Admin view over the bounties offered (payable) by the
    currently logged-in user."""

    def __init__(self, model, session, *args, **kwargs):
        super(BountiesPayableModelView, self).__init__(model, session, *args, **kwargs)
        self.static_folder = 'static'
        self.endpoint = 'bounties-payable'
        self.name = 'Bounties Payable'

    form_columns = ['amount', 'pull_request']

    def get_query(self):
        # Restrict the list view to bounties the current user is paying.
        return (
            self.session
            .query(self.model)
            .filter(self.model.payer_user_id == current_user.id)
        )

    def get_count_query(self):
        # Same restriction for the paginator's row count.
        return (
            self.session
            .query(func.count('*'))
            .filter(self.model.payer_user_id == current_user.id)
        )

    def create_form(self, **kwargs):
        # Pre-select the pull request when the URL carries
        # ?pull_request_number=N.
        form = super().create_form()
        if 'pull_request_number' in request.args.keys():
            pull_request = self.session.query(PullRequests).filter(PullRequests.number == request.args['pull_request_number']).one()
            form.pull_request.data = pull_request
            # form.amount.data = 1000
        return form

    def on_model_change(self, form, model: Bounties, is_created: bool):
        # Fill in server-side fields and refresh the pull request's
        # running bounty total before the row is committed.
        model.id = uuid4().hex
        model.published_at = datetime.utcnow()
        model.payer_user_id = current_user.id
        model.recipient_user_id = model.pull_request.author_id
        with session_scope() as session:
            total_bounty_amount = (
                session
                .query(coalesce(func.sum(Bounties.amount), 0))
                .filter(Bounties.pull_request_id == model.pull_request.id)
                .one()
            )[0]
        log.debug('total_satoshis', total_bounty_amount=total_bounty_amount)
        # The bounty being saved is not yet in the SUM above, so add it.
        model.pull_request.total_bounty_amount = total_bounty_amount + model.amount

    can_create = True
    named_filter_urls = True
    column_list = [
        'pull_request.number',
        'amount',
        'published_at',
        'invoices'
    ]
    column_labels = {
        'pull_request.number': 'Pull Request',
        'amount': 'satoshis'
    }
    column_formatters = {
        'pull_request.number': pr_link_formatter,
        'published_at': humanize_date_formatter,
        'amount': payable_satoshi_formatter,
        'invoices': invoices_formatter
    }
    form_ajax_refs = {
        'pull_request': {
            'fields': ['number', 'title'],
            'page_size': 10,
            'minimum_input_length': 0,
            # show suggestions, even before any user input
            'placeholder': 'Please select',
        }
    }
WEEK_LIST = ['月', '火', '水', '木', '金', '土', '日']
SUBJECT_LIST = ['Python', '数学', '機械学習', '深層学習', 'エンジニアプロジェクト']


def output_schedule(study_time_list):
    """Print this week's study plan, one weekday at a time."""
    number_of_subjects = len(SUBJECT_LIST)
    # Index of the next subject to schedule; starts at 1 and rotates through
    # SUBJECT_LIST across the whole week (matching the original rotation).
    subject = 1
    for day_count, day in enumerate(WEEK_LIST):
        study_times = study_time_list[day_count]
        if study_times == 0:
            # Zero hours planned: a day off.
            print("{}曜日は、お休みです。".format(day))
            continue
        print("{}曜日は、{}時間勉強する予定です。".format(day, study_times))
        # One subject per planned hour.
        for hour in range(study_times):
            print("{}限目 {}".format(hour + 1, SUBJECT_LIST[subject]))
            subject = (subject + 1) % number_of_subjects


def main():
    """Hand the weekly study plan to output_schedule."""
    # Hours of study per day, Monday through Sunday.
    study_time_list = [3, 1, 3, 0, 4, 2, 2]
    output_schedule(study_time_list)


if __name__ == '__main__':
    main()
import socket
import time
from select import select
from threading import Thread
from collections import deque


class EventLoop:
    """Toy generator-based event loop.

    Tasks are generators that yield a socket whenever they need to wait for
    it to become readable; StopIteration means the task finished.
    """

    def __init__(self):
        # Runnable tasks (generators), FIFO.
        self.tasks = deque()
        # Parked tasks keyed by the socket they are waiting on.
        self.stopped = {}

    def add_task(self, task):
        self.tasks.append(task)

    def add_future(self, future):
        # A future is scheduled through its monitor() generator, which
        # waits on the future's notification socket.
        self.tasks.append(future.monitor())

    def run_forever(self):
        # Keep going while anything is runnable or parked.
        while any([self.tasks, self.stopped]):
            # Nothing runnable: block (up to 1s per round) until a waited-on
            # socket is readable, then move its task back to the run queue.
            while not self.tasks:
                ready_to_read, _, _ = select(self.stopped.keys(), [], [], 1.0)
                for r in ready_to_read:
                    self.tasks.append(self.stopped.pop(r))
            # Drive each task until it parks on a socket or finishes.
            while self.tasks:
                task = self.tasks.popleft()
                try:
                    sock = next(task)
                    self.stopped[sock] = task
                except StopIteration:
                    pass


class AsyncSocket(socket.socket):
    def AsyncRead(self, capacity=100):
        """Yield self until readable, then receive.

        NOTE(review): 'capacity' is ignored — recv() is hard-coded to 100
        bytes; confirm whether recv(capacity) was intended.
        """
        yield self
        return self.recv(100)


class Future:
    """One-shot result holder that wakes the event loop via a socketpair."""

    def __init__(self, done_callback):
        self.notify, self.event = socket.socketpair()
        self.done_callback = done_callback
        self.result = None

    def set_done(self, result):
        # Store the result, wake any monitor() parked on self.event, and
        # run the completion callback.
        self.result = result
        self.notify.send(b'done')
        self.done_callback(self.result)

    def monitor(self):
        # Generator task: parks on the event socket until set_done() writes.
        yield self.event
        self.event.recv(100)


def make_request():
    # Task: issue a trivial HTTP request to localhost:8000 and time it.
    start_time = time.time()
    sock = AsyncSocket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(('localhost', 8000))
    sock.send(b'GET /\n\n')
    resp = yield from sock.AsyncRead(100)
    sock.close()
    end_time = time.time()
    print(time.strftime("%H:%M:%S"), end_time-start_time)


ev = EventLoop()


def future_producer():
    # Background thread: every second create a future whose completion
    # schedules a new HTTP request task on the loop.
    while True:
        f = Future(lambda x: ev.add_task(make_request()))
        ev.add_future(f)
        time.sleep(1.0)
        f.set_done(1.0)


t = Thread(target=future_producer)
t.start()
ev.run_forever()
#! /usr/local/bin/python
import subprocess
import json
import uuid
import pycollector
from datetime import datetime, timedelta

"""
Should be run only with media with autosupport capabilities involved.
Additional configuration needs to be done in
/etc/autosupport/collector/collector.conf
"""

# Fields that are used in the response
fields = ['JobID', 'JobType', 'JobState', 'JobStatus', 'JobPolicy',
          'JobScheduleName', 'ClientName', 'MediaServer', 'StartTime',
          'ElapsedTime', 'EndTime']

# Plugin Name which is given in /etc/autosupport/collector/collector.conf
PLUGIN_NAME = 'BackupPerformance'
COMPONENT_TYPE = 'BackupPerformance'

# Default Interval for the collector plugin to execute : 10 seconds
INTERVAL = 10

# Indices in `fields` holding time values (StartTime/ElapsedTime/EndTime)
# that must be converted to millisecond strings.
_TIME_FIELD_INDICES = (8, 9, 10)


# Create the default JSON structure in which the data has to be sent
def create_json_structure():
    """
    This will create a response with Plugin Name and ID which is a UUID
    Properties will contain the actual data
    Returns the dictionary structure
    """
    response = {}
    response['pluginName'] = PLUGIN_NAME
    response['componentList'] = [
        {'type': COMPONENT_TYPE, 'id': str(uuid.uuid4()), 'properties': {}}]
    return response


def get_backup_job():
    """Collect backup job records since yesterday and publish them via
    pycollector telemetry."""
    backup = create_json_structure()
    res = []
    # Date from which data has to be retrieved: 24 hours ago.
    # BUG FIX: the original decremented the day-of-month *textually*
    # (d[:3] + str(int(d[3:5]) - 1) + d[5:]), which yields day "0" on the
    # first of a month and drops the leading zero on days 02-10.
    date_ret = datetime.strftime(
        datetime.now() - timedelta(days=1), "%m/%d/%Y %H:%M:%S")
    cmd = 'bpdbjobs -all_columns -t ' + date_ret
    proc = subprocess.Popen(cmd, bufsize=0, shell=True,
                            stdout=subprocess.PIPE)
    for line in proc.stdout:
        response = {}
        # Strip escaped commas before splitting on the field separator.
        line = line.replace('\,', '')
        columns = line.split(',')
        for i in range(len(fields)):
            if i in _TIME_FIELD_INDICES:
                # Time fields: append '000' to convert seconds -> ms.
                response[fields[i]] = columns[i] + '000'
            else:
                response[fields[i]] = columns[i]
        res.append(response)

    # Get the IP of this host (first inet addr of eth1).
    cmd = 'ifconfig eth1 | grep "inet\ addr" | cut -d: -f2 | cut -d" " -f1'
    ifcnfg = subprocess.Popen(
        cmd, shell=True, stdout=subprocess.PIPE, bufsize=0)
    ip = ifcnfg.stdout.readlines()[0].rstrip()
    backup['componentList'][0]['properties'] = {'master': ip, 'backup': res}
    pycollector.collector_send_json(
        pycollector.COLLECT_TELEMETRY, json.dumps(backup))


def Init(conf):
    """
    Initialize function which the collector will call.
    """
    global INTERVAL
    pycollector.collector_log(
        pycollector.INFO,
        "%s: Initializing Backup Data Extraction!" % PLUGIN_NAME)
    configuration = json.loads(conf)
    interval = INTERVAL
    if "interval" in configuration:
        interval = configuration["interval"]
    pycollector.collector_register(
        PLUGIN_NAME, pycollector.COLLECT_TELEMETRY, get_backup_job, interval)
    pycollector.collector_log(pycollector.INFO,
                              "Collector Registered for getting backup data")
#!/usr/bin/env python
# encoding: utf-8
"""
@author: ShengGW
@time: 20/08/28 16:25
@file: FileManager.py
@version: ??
@software: PyCharm
@contact: shenggw95@gmail.com
"""
# BUG FIX: csvSpliting() uses math.ceil but `math` was never imported.
import math
import os


def getFileName(dirName, postfix, abspath=1):
    """Recursively collect files under *dirName* whose names end with one
    of the suffixes in *postfix*.

    :param dirName: directory to walk
    :param postfix: iterable of filename suffixes to keep (e.g. ['.tif'])
    :param abspath: 1 to return absolute paths (default), else bare names
    :return: list of matching paths/names
    """
    out_file_list = []
    for maindir, subdir, fileList in os.walk(dirName):
        for fileName in fileList:
            if fileName == '':
                print('文件路径存在空值!')
                continue
            outPath = os.path.join(maindir, fileName) if abspath == 1 else fileName
            # One entry is appended per matching suffix, mirroring the
            # original behavior (overlapping suffixes yield duplicates).
            for label in postfix:
                if fileName.endswith(label):
                    out_file_list.append(outPath)
    return out_file_list


def getFiles(dirName, abspath=1):
    """Recursively collect every file under *dirName*.

    :param dirName: directory to walk
    :param abspath: 1 to return absolute paths (default), else bare names
    :return: list of paths/names
    """
    out_file_list = []
    for maindir, subdir, fileList in os.walk(dirName):
        for fileName in fileList:
            if fileName == '':
                print('文件路径存在空值!')
                continue
            if abspath == 1:
                out_file_list.append(os.path.join(maindir, fileName))
            else:
                out_file_list.append(fileName)
    return out_file_list


def getFileName_withstart(dirName, start, end, abspath=1):
    """Collect files under *dirName* whose names start with one of *start*
    AND end with one of *end*.

    :param dirName: directory to walk
    :param start: iterable of allowed filename prefixes
    :param end: iterable of allowed filename suffixes
    :param abspath: 1 to return absolute paths (default), else bare names
    :return: list of matching paths/names
    """
    out_file_list = []
    for maindir, subdir, file_name_list in os.walk(dirName):
        for filename in file_name_list:
            if filename == '':
                continue
            outPath = os.path.join(maindir, filename) if abspath == 1 else filename
            for start_label in start:
                for end_label in end:
                    if filename.startswith(start_label) and filename.endswith(end_label):
                        out_file_list.append(outPath)
    return out_file_list


def csvSpliting(dirName, newDirName, csvSize):
    """Split every file under *dirName* into chunks of *csvSize* rows.

    Each source file gets its own sub-directory under *newDirName* holding
    the numbered chunk files (read and written as GBK, like the input).

    :param dirName: directory containing the csv files to split
    :param newDirName: directory receiving one sub-directory per input file
    :param csvSize: maximum number of rows per chunk file
    """
    filePaths = getFiles(dirName)
    fileNames = getFiles(dirName, abspath=0)

    for i in range(len(filePaths)):
        with open(filePaths[i], "r", encoding='gbk') as f:
            lines = f.readlines()
        # Number of chunk files needed for this source file.
        blockNum = math.ceil(len(lines) / csvSize)
        print(len(lines), blockNum)
        # One output directory per source file, named after it (sans '.csv').
        tempPath = os.path.join(newDirName, fileNames[i][:-4])
        if not os.path.exists(tempPath):
            os.mkdir(tempPath)
        # Write csvSize rows per chunk; the final slice is simply shorter.
        for j in range(blockNum):
            newFilePath = os.path.join(
                tempPath, fileNames[i][:-4] + "_" + str(j + 1) + ".csv")
            print(newFilePath)
            with open(newFilePath, "w", encoding='gbk') as new_f:
                new_f.writelines(lines[j * csvSize:(j + 1) * csvSize])
"""Treadmill REST APIs. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import logging import flask import flask_restplus as restplus import pkg_resources from treadmill import api as api_mod from treadmill import authz from treadmill import plugin_manager from treadmill import rest from treadmill import webutils from treadmill.rest import error_handlers _LOGGER = logging.getLogger(__name__) def base_api(title=None, cors_origin=None): """Create base_api object""" blueprint = flask.Blueprint('v1', __name__) api = restplus.Api(blueprint, version='1.0', title=title, description='Treadmill REST API Documentation') error_handlers.register(api) # load up any external error_handlers for module in plugin_manager.load_all('treadmill.rest.error_handlers'): module.init(api) @blueprint.route('/docs/', endpoint='docs') def _swagger_ui(): """Swagger documentation route""" return restplus.apidoc.ui_for(api) # Need to create our own Apidoc, as the restplus one uses relative path to # their module to serve up the content for the Swagger UI. tmpl_dir = pkg_resources.resource_filename('flask_restplus', 'templates') static_dir = pkg_resources.resource_filename('flask_restplus', 'static') # This is a hack that overrides all templates and static folders for our # Flask app, but as it stands, only flask-restplus is using # render_template, so this is fine for now. # The main problem is that restplus.Api() internally refers to it's # restplus.apidoc.apidoc and that is created on load time, which all kinds # of Flask rule and defering going on. Ideally restplus should allow # creating your own Apidoc and sending that in the above Api() constructor. 
rest.FLASK_APP.template_folder = tmpl_dir rest.FLASK_APP.static_folder = static_dir rest.FLASK_APP.register_blueprint(blueprint) rest.FLASK_APP.register_blueprint(restplus.apidoc.apidoc) cors = webutils.cors(origin=cors_origin, content_type='application/json', credentials=True) @rest.FLASK_APP.before_request def _before_request_user_handler(): user = flask.request.environ.get('REMOTE_USER') if user: flask.g.user = user @rest.FLASK_APP.after_request def _after_request_cors_handler(response): """Process all OPTIONS request, thus don't need to add to each app""" if flask.request.method != 'OPTIONS': return response _LOGGER.debug('This is an OPTIONS call') def _noop_options(): """No noop response handler for all OPTIONS""" pass headers = flask.request.headers.get('Access-Control-Request-Headers') options_cors = webutils.cors(origin=cors_origin, credentials=True, headers=headers) response = options_cors(_noop_options)() return response return (api, cors) def get_authorizer(authz_arg=None): """Get authozrizer by argujents""" def user_clbk(): """Get current user from the request.""" return flask.g.get('user') if authz_arg is None: authorizer = authz.NullAuthorizer() else: authorizer = authz.ClientAuthorizer(user_clbk, authz_arg) return authorizer def init(apis, title=None, cors_origin=None, authz_arg=None): """Module initialization.""" (api, cors) = base_api(title, cors_origin) authorizer = get_authorizer(authz_arg) ctx = api_mod.Context(authorizer=authorizer) endpoints = [] for apiname in apis: try: _LOGGER.info('Loading api: %s', apiname) api_cls = plugin_manager.load('treadmill.api', apiname).API api_impl = ctx.build_api(api_cls) endpoint = plugin_manager.load( 'treadmill.rest.api', apiname).init(api, cors, api_impl) if endpoint is None: endpoint = apiname.replace('_', '-').replace('.', '/') if not endpoint.startswith('/'): endpoint = '/' + endpoint endpoints.append(endpoint) except ImportError as err: _LOGGER.warning('Unable to load %s api: %s', apiname, err) return 
endpoints
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect, FileResponse
from django.urls import reverse
from .models import Account, paycheck
from reportlab.pdfgen import canvas
from reportlab.lib.units import inch
import io

# Identity of the currently logged-in user.
# NOTE(review): a module-level global is shared by every request and session;
# Django's session framework would be the safe replacement -- confirm before changing.
token = ''


def login(request):
    """Render the login page and clear any previously stored identity."""
    global token
    token = ''
    return render(request, 'paroll/login.html')


def transfer(request, trans_type):
    """Validate posted credentials; on success remember the identity and redirect to main."""
    global token
    if trans_type == 'login':
        try:
            account = Account.objects.get(identity=request.POST['identity'])
        except (KeyError, Account.DoesNotExist):
            return render(request, 'paroll/error.html',
                          {'message': 'ID or password is wrong!', 'type': 0})
        else:
            if account.password != request.POST['password']:
                return render(request, 'paroll/error.html',
                              {'message': 'ID or password is wrong!', 'type': 0})
            token = account.identity
    # NOTE(review): reached for any trans_type; confirm non-'login' values are expected here.
    return HttpResponseRedirect(reverse('main'))


def main(request):
    """Show the main page with a numeric permission level derived from the account type."""
    level = Account.objects.get(identity=token).account_type
    if level == 'employee':
        level = 1
    elif level == 'fiance':
        level = 2
    else:
        level = 3
    return render(request, "paroll/main.html", {'identity': token, 'level': level})


def input_data(request):
    """List every account so email / bank account / address can be edited."""
    account = Account.objects.all()
    # 3 spaces to input email, bank account and address
    return render(request, "paroll/changedata.html", {'accounts': account})


def show_paycheck(request):
    """Employees see only their own paychecks; other roles see all of them."""
    level = Account.objects.get(identity=token).account_type
    alls = []
    if level == 'employee':
        account = Account.objects.get(identity=token)
        alls = account.paycheck_set.all()
    else:
        alls = paycheck.objects.all()
    return render(request, "paroll/showpaycheck.html", {'temp': alls})


def report(request):
    # TODO: reporting view, not implemented yet
    pass


def approvement(request):
    """Render the approval mode selection page."""
    return render(request, "paroll/approvement.html")


def check(request):
    """Approve accounts, one-by-one or all at once, as chosen in POST['toke']."""
    temp = request.POST['toke']
    if temp == 'one':
        # one by one: show the per-account approval page
        account = Account.objects.all()
        return render(request, "paroll/approvement_one.html", {'accounts': account})
    else:
        # all at once: mark every account approved
        alls = Account.objects.all()
        for i in alls:
            i.approved = 1
            i.save()
        return render(request, "paroll/main.html")
    # (unreachable leftover statements after the returns were removed -- no behavior change)


def check_one(request):
    """Approve the single account named in POST['toke'] and re-render the list."""
    temp = request.POST['toke']
    account = Account.objects.get(identity=temp)
    account.approved = 1
    account.save()
    account = Account.objects.all()
    return render(request, "paroll/approvement_one.html", {'accounts': account})


def unapproved(request):
    """Reset the approved flag on every account."""
    alls = Account.objects.all()
    for i in alls:
        i.approved = 0
        i.save()
    return render(request, "paroll/main.html")


def change_data(request):
    """Update email / bank account / address of the account named in POST['toke'].

    Empty form fields leave the corresponding attribute untouched.
    """
    temp = request.POST['toke']
    account = Account.objects.get(identity=temp)
    temp = request.POST['mail']
    if temp != '':
        account.email = temp
        account.save()
    temp = request.POST['bank']
    if temp != '':
        account.bank_account = temp
        account.save()
    temp = request.POST['address']
    if temp != '':
        account.address = temp
        account.save()
    return input_data(request)


def export(request):
    """Render every paycheck into a PDF (4 paychecks per page) and return it as a download."""
    account = paycheck.objects.all()
    buffer = io.BytesIO()
    p = canvas.Canvas(buffer)
    j = 11      # vertical cursor, in inches from the page bottom
    count = 0   # paychecks already drawn on the current page
    for i in account:
        temp = 'ID : '
        temp += i.identity
        p.drawString(inch, j * inch, temp)
        j -= 0.5
        temp = 'work hour : '
        temp += str(i.work_hour)
        p.drawString(inch, j * inch, temp)
        j -= 0.5
        temp = 'deduction : '
        temp += str(i.deduction)
        p.drawString(inch, j * inch, temp)
        j -= 0.5
        temp = 'salary : '
        temp += str(i.salary)
        p.drawString(inch, j * inch, temp)
        j -= 0.5
        count += 1
        if count == 4:
            # page is full: start a new one
            # NOTE(review): first page starts at j=11, later pages at j=10 -- confirm intent
            j = 10
            count = 0
            p.showPage()
    p.save()
    buffer.seek(0)
    return FileResponse(buffer, as_attachment=True, filename='hello.pdf')

# Create your views here.
import collections import json import os import socket import sys PORT = os.environ.get('AP_PORT') HOST = os.environ.get('AP_HOST') HANDSHAKE = os.environ.get('AP_HANDSHAKE') Result = collections.namedtuple('Result', 'id,result,error') class Android(object): def __init__(self, addr=None): if addr is None: addr = HOST, PORT self.conn = socket.create_connection(addr) self.client = self.conn.makefile() self.id = 0 if HANDSHAKE is not None: self._authenticate(HANDSHAKE) def _rpc(self, method, *args): data = {'id': self.id, 'method': method, 'params': args} request = json.dumps(data) self.client.write(request+'\n') self.client.flush() response = self.client.readline() self.id += 1 result = json.loads(response) if result['error'] is not None: print result['error'] # namedtuple doesn't work with unicode keys. return Result(id=result['id'], result=result['result'], error=result['error'], ) def __getattr__(self, name): def rpc_call(*args): return self._rpc(name, *args) return rpc_call
# Defining the nested function only def print_msg(): def printer(): # This is the nested function print("I am nested function") print("i am outer function") printer() # calling the nested function # driver code print_msg()
import logging import mercurial.repo repo_logger = logging.getLogger("hg_cvc.cvc_repo") repo_logger.setLevel(logging.DEBUG) class cvcRemoteRepo(mercurial.repo.repository): def __init__(self, ui, path): repo_logger.debug("in cvcRepo.init, path=%s" % path) # declare this remote repo is a cvc repop self.capabilities = set(['cvc']) def local(self): # needed by hg return False def cancopy(self): # needed by hg return False def instance(ui, path, create): '''called when hg needs to create a repository for the cvc:// scheme ''' repo_logger.debug("in cvc_repo.instance, path=%s, create=%s" % (path, create)) return cvcRemoteRepo(ui, path) __all__ = ('instance')
from twitter_database import mysql_rds_database_authentication
import pandas as pd
import os
from twitter_api import twitter_authentication
from datetime import datetime, timedelta
import time
from dotenv import load_dotenv

load_dotenv()

inicio = datetime.now()
api = twitter_authentication()

# One accumulator list per column of the `tweets` table.
# NOTE(review): `id` shadows the builtin; kept to preserve the module's names.
id, name, arroba, retweets, likes, text, date, location, hashtags, links, language, search = \
    [], [], [], [], [], [], [], [], [], [], [], []

# Users to scrape.
mydb = mysql_rds_database_authentication(os.environ.get('MYSQL_TWITTER_DATABASE'))
users = set(pd.read_sql('SELECT arroba FROM gestao_usuarios_arrobamodel;',
                        con=mydb)['arroba'].values)
mydb.close()

count = 0
for userID in users:
    # Resume point: newest tweet already stored for this user.
    # NOTE(review): userID is interpolated straight into SQL -- parameterize
    # if user handles can ever come from an untrusted source.
    mydb = mysql_rds_database_authentication(os.environ.get('MYSQL_TWITTER_DATABASE'))
    try:
        newest_date = pd.read_sql(
            f"SELECT date FROM tweets where arroba = '{userID}' order by date desc limit 1;",
            con=mydb).date[0]
    except Exception:
        # BUG FIX: was a bare `except:`; keep the best-effort default but
        # let KeyboardInterrupt/SystemExit propagate.
        newest_date = datetime(2020, 1, 1)
    mydb.close()

    # Seed the pagination cursor with the user's latest tweet.
    tweets = api.user_timeline(screen_name=userID,
                               count=1,
                               include_rts=True,
                               # Necessary to keep full_text, otherwise only
                               # the first 140 words are extracted
                               tweet_mode='extended')
    oldest_id = tweets[-1].id

    while len(tweets) > 0:
        count += 1
        if count % 50 == 0:
            time.sleep(10)  # crude rate limiting
        mydb = mysql_rds_database_authentication(os.environ.get('MYSQL_TWITTER_DATABASE'))
        cursor = mydb.cursor()
        tweets = api.user_timeline(screen_name=userID,
                                   count=200,  # 200 is the maximum allowed count
                                   include_rts=True,
                                   max_id=int(oldest_id),
                                   tweet_mode='extended')
        if len(tweets) == 0:
            mydb.close()  # BUG FIX: connection leaked on this path before
            continue
        for tweet in tweets:
            if hasattr(tweet, "quoted_status"):
                full_text = "QT {} \n QUOTED: {}".format(tweet.full_text,
                                                         tweet.quoted_status.full_text)
            # NOTE(review): a quoted, non-retweeted tweet falls through to the
            # next branch and the QT text is overwritten -- confirm intent.
            if hasattr(tweet, "retweeted_status") is False:
                full_text = "{}".format(tweet.full_text)
            if hasattr(tweet, "retweeted_status") is True:
                full_text = "RT @{}: {}".format(tweet.retweeted_status.user.screen_name,
                                                tweet.retweeted_status.full_text)
            id.append(tweet.id_str)
            name.append(tweet.user.name)
            arroba.append(tweet.user.screen_name)
            retweets.append(tweet.retweet_count)
            likes.append(tweet.favorite_count)
            text.append(full_text)
            date.append(tweet.created_at - timedelta(hours=3))
            location.append(tweet.user.location)
            hashtags.append(str(tweet.entities.get("hashtags")))
            links.append(str(tweet.entities.get("urls")))
            language.append(tweet.lang)
            search.append(userID)
            sql = """
            INSERT IGNORE INTO `tweets`
            (id, name, arroba, retweets, likes, text, date, location, hashtags, links, language, search)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);
            """
            cursor.execute(
                sql,
                (tweet.id_str, tweet.user.name, tweet.user.screen_name,
                 tweet.retweet_count, tweet.favorite_count, full_text,
                 tweet.created_at - timedelta(hours=3), tweet.user.location,
                 str(tweet.entities.get("hashtags")), str(tweet.entities.get("urls")),
                 tweet.lang, userID)
            )
        mydb.commit()
        mydb.close()
        print(len(id))
        print(len(id), date[-1], date[0], oldest_id, id[-1], date[-1], newest_date)
        # Page backwards through the timeline.
        oldest_id = tweets[-1].id - 1
        if date[-1] < newest_date:
            # Reached tweets that are already stored; stop for this user.
            break

tweets_df = pd.DataFrame({
    'id': id,
    'name': name,
    'arroba': arroba,
    'retweets': retweets,
    'likes': likes,
    'text': text,
    'date': date,
    'location': location,
    'hashtags': hashtags,
    'links': links,
    'language': language,
    'search': search,
})

final = datetime.now()
print(final - inicio)
print(tweets_df.shape)
import math as m
import time
import cvrp.const as const
import cvrp.ReadWrite as rw
import cvrp.utile as utile
import cvrp.learning as learn
import cvrp.route as route
import cvrp.linKernighan as LK
import cvrp.ejectionChain as EC
import cvrp.crossExchange as CE
import cvrp.ClarkeWright as CW
import cvrp.optimisation as opt


def learning_heuristic(instance, demand, capacity, l):
    """Run the learning-guided CVRP heuristic and log the 10 best solutions found.

    instance/demand/capacity describe the CVRP instance; `l` is forwarded to
    opt.optimisation_heuristic. Results are appended to a fixed results file.
    """
    namefile = "resultats/Heuristic_results/Values/all/golden7.txt"
    all_sol = []
    tps_deb = time.time()

    # Precomputations used by the optimisation step.
    max_d = opt.max_depth(instance)
    v = utile.voisins(const.KNN, instance)

    # Initial solution (Clarke & Wright) plus the first learning pass.
    initial = CW.init_routes(instance, demand)
    edges, param = learn.learning_results(0.5, 2, 100, instance, demand, capacity, initial)
    initial_routes = learn.complete(learn.destruction(learn.ignore_0(edges)),
                                    instance, demand, capacity)
    tps_learn = time.time()
    rw.writef(namefile, 'Time = ' + str(tps_learn - tps_deb))

    base = []
    fixed_edges = []
    best_cost = route.cost_sol(initial_routes, instance, const.quality_cost)
    # BUG FIX: best_sol could be referenced in the "best learn" branch before
    # any improving solution was found (NameError); seed it with the initial routes.
    best_sol = initial_routes

    for i in range(40):
        print(i)
        (lam, mu, nu) = param[0]
        init, sol = opt.optimisation_heuristic(
            route.copy_sol(initial_routes), instance, demand, capacity,
            lam, mu, nu, l, max_d, v, fixed_edges)
        base.append(sol)
        c_sol = route.cost_sol(sol, instance, const.quality_cost)
        all_sol.append((c_sol, sol))
        if c_sol < best_cost:
            best_sol = sol
            best_cost = c_sol

        if i % 4 == 0 and i != 0:
            # Every 4th iteration: relearn the edge-quality matrix from scratch.
            print("learn")
            edges = []
            fixed_edges = []
            base = []
            # NOTE(review): `base` is cleared right before learn() consumes it,
            # so the quality matrix is learned from an empty pool -- confirm intent.
            mat_qual = learn.init_matrix(len(instance))
            mat_qual = learn.learn(mat_qual, base)
            e_qual = learn.mat_info_rg(int(len(demand) * 0.8), mat_qual)
            for e in e_qual:
                if not learn.is_edge_in(e, edges) and not learn.unfeasable_edge(e, edges):
                    edges.append(e)
            initial_routes = learn.complete(learn.destruction(learn.ignore_0(edges)),
                                            instance, demand, capacity)
            edges, param = learn.learning_results(0.8, 2, 100, instance,
                                                  demand, capacity, initial_routes)
            initial_routes = learn.complete(learn.destruction(learn.ignore_0(edges)),
                                            instance, demand, capacity)
        else:
            # Otherwise: restart from a perturbation of the best solution so far.
            print("best learn")
            edges = utile.fixed_alea(learn.all_edges(best_sol), 0.95)
            initial_routes = learn.complete(learn.destruction(learn.ignore_0(edges)),
                                            instance, demand, capacity)

    # Report the 10 cheapest solutions found.
    # NOTE(review): tuples with equal costs fall back to comparing solutions,
    # which may not be orderable -- confirm costs are distinct in practice.
    all_sol.sort()
    tps_fin = time.time()
    print(tps_fin - tps_deb)
    costs = 0
    for i in range(10):
        c_sol, sol = all_sol[i]
        costs += c_sol
        rw.writef(namefile, '')
        rw.writef(namefile, 'res = ' + str(round(c_sol, 3)))
        rw.writef(namefile, 'res_int = ' + str(round(route.cost_sol(sol, instance, "Int"))))
        rw.writef(namefile, 'solution = ' + str(sol))
    rw.writef(namefile, '')
    rw.writef(namefile, 'Mean = ' + str(costs / 10))
    rw.writef(namefile, 'Execution = ' + str(tps_fin - tps_deb))
    rw.writef(namefile, '')
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM
import numpy as np
import logging


class LSTM_model():
    """A single-layer LSTM regressor over fixed-length time-series windows."""

    def __init__(self):
        self.batch_size = 10       # batch size
        self.epochs = 1000         # number of training epochs
        self.n_in = 1              # neurons at the input layer
        self.n_mid = 20            # neurons at the hidden layer
        self.n_out = 1             # neurons at the output layer
        self.n_rnn = 50            # length of each time-series window
        self.model = Sequential()  # model container (layers added in config_model)

    def config_model(self):
        """Assemble and compile the LSTM -> Dense network."""
        self.model.add(LSTM(self.n_mid,
                            input_shape=(self.n_rnn, self.n_in),
                            return_sequences=True))
        self.model.add(Dense(self.n_out, activation="linear"))
        self.model.compile(loss="mean_squared_error",
                           metrics=['mean_squared_error'],
                           optimizer="sgd")

    def train_model(self, data):
        """Fit the model on `data`; returns (ok, loss-history-or-0)."""
        if len(data) < self.n_rnn:
            # Not enough samples for even one window.
            return False, 0
        logging.info('Training!')
        self.config_model()
        logging.debug('make dataset!')
        x, t = self.make_dataset(data)
        self.history = self.model.fit(x, t,
                                      epochs=self.epochs,
                                      batch_size=self.batch_size)
        return True, self.history.history['loss']

    def predict(self, data):
        """Predict on the most recent window of `data`; returns (ok, prediction-or-0)."""
        if len(data) < self.n_rnn:
            return False, 0
        predicted = self.model.predict(
            data[-self.n_rnn:].reshape(1, self.n_rnn, 1))
        print(data[-self.n_rnn:])
        return True, predicted

    def make_dataset(self, data):
        """Normalise `data` in place and slice it into (input, target) windows.

        Targets are the inputs shifted forward by one step; both use column 3.
        NOTE(review): assumes `data` is a 2-D array with at least 4 columns -- confirm.
        """
        logging.debug('Create dataset!')
        n_sample = len(data) - self.n_rnn  # number of windows
        # Column-wise normalisation (note: sign-flipped z-score, kept as-is).
        for col in range(len(data[0])):
            mean = np.average(data[:, col])
            sigma = np.sqrt(np.average((mean - data[:, col]) ** 2))
            data[:, col] = (mean - data[:, col]) / sigma
        x = np.zeros((n_sample, self.n_rnn))  # inputs
        t = np.zeros((n_sample, self.n_rnn))  # answers
        for idx in range(n_sample):
            x[idx] = data[idx: idx + self.n_rnn, 3]
            t[idx] = data[idx + 1: idx + self.n_rnn + 1, 3]
        # Shape: (samples, window length, input neurons)
        x = x.reshape(n_sample, self.n_rnn, self.n_in).astype(np.float32)
        t = t.reshape(n_sample, self.n_rnn, self.n_in).astype(np.float32)
        logging.debug('Succsess to create dataset!')
        return x, t
# Copyright 2018 Davide Spadini # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pydriller.repository import Repository import logging logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO) def test_between_revisions(): from_tag = 'tag1' to_tag = 'tag3' lc = list(Repository('test-repos/tags', from_tag=from_tag, to_tag=to_tag).traverse_commits()) assert len(lc) == 5 assert '6bb9e2c6a8080e6b5b34e6e316c894b2ddbf7fcd' == lc[0].hash assert 'f1a90b8d7b151ceefd3e3dfc0dc1d0e12b5f48d0' == lc[1].hash assert '4638730126d40716e230c2040751a13153fb1556' == lc[2].hash assert 'a26f1438bd85d6b22497c0e5dae003812becd0bc' == lc[3].hash assert '627e1ad917a188a861c9fedf6e5858b79edbe439' == lc[4].hash def test_multiple_repos_with_tags(): from_tag = 'tag2' to_tag = 'tag3' repos = [ 'test-repos/tags', 'test-repos/tags', 'test-repos/tags' ] lc = list(Repository(path_to_repo=repos, from_tag=from_tag, to_tag=to_tag).traverse_commits()) assert len(lc) == 9
import os import torchvision as tv import numpy as np from PIL import Image def get_dataset(args, transform_train, transform_test): cifar_train = Cifar100Train(args, train=True, transform=transform_train, download = args.download) testset = tv.datasets.CIFAR100(root='./data', train=False, download=False, transform=transform_test) return cifar_train, testset class Cifar100Train(tv.datasets.CIFAR100): def __init__(self, args, train=True, transform=None, target_transform=None, download=False): super(Cifar100Train, self).__init__(args.train_root, train=train, transform=transform, target_transform=target_transform, download=download) self.root = os.path.expanduser(args.train_root) self.transform = transform self.target_transform = target_transform self.args = args self.num_classes = self.args.num_classes self.data = self.train_data self.labels = np.asarray(self.train_labels, dtype=np.long) self.train_samples_idx = [] self.train_probs = np.ones(len(self.labels))*(-1) self.avg_probs = np.ones(len(self.labels))*(-1) self.times_seen = np.ones(len(self.labels))*1e-6 def __getitem__(self, index): img, labels = self.data[index], self.labels[index] img = Image.fromarray(img) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: labels = self.target_transform(labels) return img, labels, index
# Quiz questions (Open Trivia DB export); strings keep their original HTML entities.
question_data = [
    {
        "category": "Science: Computers",
        "type": "multiple",
        "difficulty": "medium",
        "question": "Nvidia&#039;s headquarters are based in which Silicon Valley city?",
        "correct_answer": "Santa Clara",
        "incorrect_answers": ["Palo Alto", "Cupertino", "Mountain View"],
    },
    {
        "category": "Science: Computers",
        "type": "multiple",
        "difficulty": "hard",
        "question": "Which of these names was an actual codename for a cancelled Microsoft project?",
        "correct_answer": "Neptune",
        "incorrect_answers": ["Enceladus", "Pollux", "Saturn"],
    },
    {
        "category": "Science: Computers",
        "type": "multiple",
        "difficulty": "hard",
        "question": "What port does HTTP run on?",
        "correct_answer": "80",
        "incorrect_answers": ["53", "443", "23"],
    },
    {
        "category": "Science: Computers",
        "type": "boolean",
        "difficulty": "easy",
        "question": "&quot;HTML&quot; stands for Hypertext Markup Language.",
        "correct_answer": "True",
        "incorrect_answers": ["False"],
    },
    {
        "category": "Science: Computers",
        "type": "multiple",
        "difficulty": "medium",
        "question": "In programming, the ternary operator is mostly defined with what symbol(s)?",
        "correct_answer": "?:",
        "incorrect_answers": ["??", "if then", "?"],
    },
    {
        "category": "Science: Computers",
        "type": "multiple",
        "difficulty": "hard",
        "question": "Which data structure does FILO apply to?",
        "correct_answer": "Stack",
        "incorrect_answers": ["Queue", "Heap", "Tree"],
    },
    {
        "category": "Science: Computers",
        "type": "multiple",
        "difficulty": "hard",
        "question": "Which of these is not a key value of Agile software development?",
        "correct_answer": "Comprehensive documentation",
        "incorrect_answers": ["Individuals and interactions", "Customer collaboration", "Responding to change"],
    },
    {
        "category": "Science: Computers",
        "type": "multiple",
        "difficulty": "medium",
        "question": "Laserjet and inkjet printers are both examples of what type of printer?",
        "correct_answer": "Non-impact printer",
        "incorrect_answers": ["Impact printer", "Daisywheel printer", "Dot matrix printer"],
    },
    {
        "category": "Science: Computers",
        "type": "multiple",
        "difficulty": "hard",
        "question": "What was the name of the first Bulgarian personal computer?",
        "correct_answer": "IMKO-1",
        "incorrect_answers": ["Pravetz 82", "Pravetz 8D", "IZOT 1030"],
    },
    {
        "category": "Science: Computers",
        "type": "multiple",
        "difficulty": "hard",
        "question": "Which of the following computer components can be built using only NAND gates?",
        "correct_answer": "ALU",
        "incorrect_answers": ["CPU", "RAM", "Register"],
    },
]
from scipy import linalg import numpy as np two_d_array = np.array([[4,5],[3,2]]) print(linalg.det( two_d_array )) print(linalg.inv( two_d_array )) eg_val, eg_vect = linalg.eig(two_d_array) #get eigenvalues print(eg_val) #get eigenvectors print(eg_vect)
# -*- coding: utf-8 -*- """ GPU CUDA 加速 cctpy 束流跟踪 2020年12月8日 12点15分 核心束流跟踪功能已经完成,对比成功 track cpu p=p=[7.347173281024637, -5.038232430353374, -0.008126589272623864],v=[157601.42662973067, -174317561.422342, -223027.84656550566],v0=174317774.94179922 track gpu32 p=p=[7.347180366516113, -5.038208484649658, -0.008126441389322281],v=[157731.0625, -174317456.0, -223028.46875],v0=174317776.0 track gpu64 p=p=[7.347173281024622, -5.03823243035337, -0.008126589272624135],v=[157601.42662950495, -174317561.42234194, -223027.8465655048],v0=174317774.94179922 利用 32 位计算,则误差约为 0.05 mm 和 0.01 mr 利用 64 位计算,误差约为 1e-10 mm """ import pycuda.autoinit import pycuda.driver as drv from pycuda.compiler import SourceModule import numpy import time import sys from cctpy import * class GPU_ACCELERATOR: FLOAT32: str = "FLOAT32" FLOAT64: str = "FLOAT64" def __init__(self, float_number_type: str = FLOAT32, block_dim_x: int = 1024, max_current_element_number: int = 2000*120) -> None: """ float_number_type 浮点数类型,取值为 FLOAT32 或 FLOAT64,即 32 位运行或 64 位,默认 32 位。 64 位浮点数精度更高,但是计算的速度可能比 32 位慢 2-10 倍 block_dim_x 块线程数目,默认 1024 个,必须是 2 的幂次。如果采用 64 位浮点数,取 1024 可能会报错,应取 512 或更低 不同大小的 block_dim_x,可能对计算效率有影响 在抽象上,GPU 分为若干线程块,每个块内有若干线程 块内线程,可以使用 __shared__ 使用共享内存(访问速度快),同时具有同步机制,因此可以方便的分工合作 块之间,没有同步机制,所以线程通讯无从谈起 max_current_element_number 最大电流元数目,在 GPU 加速中,CCT 数据以电流元的形式传入显存。 默认值 2000*120 (可以看作一共 2000 匝,每匝分 120 段) """ self.float_number_type = float_number_type self.max_current_element_number = max_current_element_number if block_dim_x > 1024 or block_dim_x < 0: raise ValueError( f"block_dim_x 应 >=1 and <=1024 内取,不能是{block_dim_x}") if block_dim_x & (block_dim_x-1) != 0: raise ValueError(f"block_dim_x 应该取 2 的幂次,不能为{block_dim_x}") self.block_dim_x: int = int(block_dim_x) cuda_code_00_include = """ #include <stdio.h> """ cuda_code_01_float_type_define: str = None if float_number_type == GPU_ACCELERATOR.FLOAT32: cuda_code_01_float_type_define = """ // 定义为 32 位浮点数模式 #define FLOAT32 """ self.numpy_dtype = numpy.float32 
elif float_number_type == GPU_ACCELERATOR.FLOAT64: cuda_code_01_float_type_define = """ // 定义为 64 位浮点数模式 #define FLOAT64 """ self.numpy_dtype = numpy.float64 if self.block_dim_x > 512: print(f"当前 GPU 设置为 64 位模式,块线程数({self.block_dim_x})可能过多,内核可能无法启动\n" + "典型异常为 pycuda._driver.LaunchError: cuLaunchKernel failed: too many resources requested for launch\n" + "遇到此情况,可酌情调小块线程数") else: raise ValueError( "float_number_type 必须是 GPU_ACCELERATOR.FLOAT32 或 GPU_ACCELERATOR.FLOAT64") # 头信息 # CUDA 代码和 C 语言几乎一模一样。只要有 C/C++ 基础,就能看懂 CUDA 代码 cuda_code_02_define = """ // 根据定义的浮点数模式,将 FLOAT 宏替换为 float 或 double #ifdef FLOAT32 #define FLOAT float #else #define FLOAT double #endif // 维度 三维 #define DIM (3) // 维度索引 0 1 2 表示 X Y Z,这样对一个数组取值,看起来清晰一些 #define X (0) #define Y (1) #define Z (2) // 粒子参数索引 (px0, py1, pz2, vx3, vy4, vz5, rm6 相对质量, e7 电荷量, speed8 速率, distance9 运动距离) #define PARTICLE_DIM (10) #define PX (0) #define PY (1) #define PZ (2) #define VX (3) #define VY (4) #define VZ (5) #define RM (6) #define E (7) #define SPEED (8) #define DISTANCE (9) // 块线程数目 #define BLOCK_DIM_X ({block_dim_x}) #define QS_DATA_LENGTH (16) #define MAX_CURRENT_ELEMENT_NUMBER ({max_current_element_number}) """.format(block_dim_x=self.block_dim_x, max_current_element_number=self.max_current_element_number) # 向量运算内联函数 cuda_code_03_vct_functions = """ // 向量叉乘 __device__ __forceinline__ void vct_cross(FLOAT *a, FLOAT *b, FLOAT *ret) { ret[X] = a[Y] * b[Z] - a[Z] * b[Y]; ret[Y] = -a[X] * b[Z] + a[Z] * b[X]; ret[Z] = a[X] * b[Y] - a[Y] * b[X]; } // 向量原地加法 __device__ __forceinline__ void vct_add_local(FLOAT *a_local, FLOAT *b) { a_local[X] += b[X]; a_local[Y] += b[Y]; a_local[Z] += b[Z]; } // 向量原地加法 __device__ __forceinline__ void vct6_add_local(FLOAT *a_local, FLOAT *b) { a_local[X] += b[X]; a_local[Y] += b[Y]; a_local[Z] += b[Z]; a_local[X+DIM] += b[X+DIM]; a_local[Y+DIM] += b[Y+DIM]; a_local[Z+DIM] += b[Z+DIM]; } // 向量加法 __device__ __forceinline__ void vct_add(FLOAT *a, FLOAT *b, FLOAT *ret) { ret[X] = a[X] + 
b[X]; ret[Y] = a[Y] + b[Y]; ret[Z] = a[Z] + b[Z]; } // 向量加法 __device__ __forceinline__ void vct6_add(FLOAT *a, FLOAT *b, FLOAT *ret) { ret[X] = a[X] + b[X]; ret[Y] = a[Y] + b[Y]; ret[Z] = a[Z] + b[Z]; ret[X+DIM] = a[X+DIM] + b[X+DIM]; ret[Y+DIM] = a[Y+DIM] + b[Y+DIM]; ret[Z+DIM] = a[Z+DIM] + b[Z+DIM]; } // 向量*常数,原地操作 __device__ __forceinline__ void vct_dot_a_v(FLOAT a, FLOAT *v) { v[X] *= a; v[Y] *= a; v[Z] *= a; } // 向量*常数,原地操作 __device__ __forceinline__ void vct6_dot_a_v(FLOAT a, FLOAT *v) { v[X] *= a; v[Y] *= a; v[Z] *= a; v[X+DIM] *= a; v[Y+DIM] *= a; v[Z+DIM] *= a; } // 向量*常数 __device__ __forceinline__ void vct_dot_a_v_ret(FLOAT a, FLOAT *v, FLOAT *ret) { ret[X] = v[X] * a; ret[Y] = v[Y] * a; ret[Z] = v[Z] * a; } // 向量*常数 __device__ __forceinline__ void vct6_dot_a_v_ret(FLOAT a, FLOAT *v, FLOAT *ret) { ret[X] = v[X] * a; ret[Y] = v[Y] * a; ret[Z] = v[Z] * a; ret[X+DIM] = v[X+DIM] * a; ret[Y+DIM] = v[Y+DIM] * a; ret[Z+DIM] = v[Z+DIM] * a; } __device__ __forceinline__ FLOAT vct_dot_v_v(FLOAT *v,FLOAT *w){ return v[X] * w[X] + v[Y] * w[Y] + v[Z] * w[Z]; } // 向量拷贝赋值 __device__ __forceinline__ void vct_copy(FLOAT *src, FLOAT *des) { des[X] = src[X]; des[Y] = src[Y]; des[Z] = src[Z]; } // 向量拷贝赋值 __device__ __forceinline__ void vct6_copy(FLOAT *src, FLOAT *des) { des[X] = src[X]; des[Y] = src[Y]; des[Z] = src[Z]; des[X+DIM] = src[X+DIM]; des[Y+DIM] = src[Y+DIM]; des[Z+DIM] = src[Z+DIM]; } // 求向量长度 __device__ __forceinline__ FLOAT vct_len(FLOAT *v) { #ifdef FLOAT32 return sqrtf(v[X] * v[X] + v[Y] * v[Y] + v[Z] * v[Z]); #else return sqrt(v[X] * v[X] + v[Y] * v[Y] + v[Z] * v[Z]); #endif } // 将矢量 v 置为 0 __device__ __forceinline__ void vct_zero(FLOAT *v) { v[X] = 0.0; v[Y] = 0.0; v[Z] = 0.0; } // 打印矢量,一般用于 debug __device__ __forceinline__ void vct_print(FLOAT *v) { #ifdef FLOAT32 printf("%.15f, %.15f, %.15f\\n", v[X], v[Y], v[Z]); #else printf("%.15lf, %.15lf, %.15lf\\n", v[X], v[Y], v[Z]); #endif } // 打印矢量,一般用于 debug __device__ __forceinline__ void vct6_print(FLOAT *v) { 
#ifdef FLOAT32 printf("%.15f, %.15f, %.15f, %.15f, %.15f, %.15f\\n", v[X], v[Y], v[Z], v[X+DIM], v[Y+DIM], v[Z+DIM]); #else printf("%.15lf, %.15lf, %.15lf, %.15lf, %.15lf, %.15lf\\n", v[X], v[Y], v[Z] ,v[X+DIM], v[Y+DIM], v[Z+DIM]); #endif } // 矢量减法 __device__ __forceinline__ void vct_sub(FLOAT *a, FLOAT *b, FLOAT *ret) { ret[X] = a[X] - b[X]; ret[Y] = a[Y] - b[Y]; ret[Z] = a[Z] - b[Z]; } """ cuda_code_04_dB = """ // 计算电流元在 p 点产生的磁场 // 其中 p0 表示电流元的位置 // kl 含义见下 // 返回值放在 ret 中 // // 原本电流元的计算公式如下: // dB = (miu0/4pi) * Idl × r / (r^3) // 其中 r = p - p0,p0 是电流元的位置 // // 如果考虑极小一段电流(起点s0,终点s1)则产生的磁场为 // ΔB = (miu0/4pi) * I * (s1-s2)*r / (r^3) // 同样的,r = p - p0,p0 = (s1+s2)/2 // // 因为 (miu0/4pi) * I * (s1-s2) 整体已知,所以提前计算为 kl // p0 提前已知,即 (s1+s2)/2,也提前给出 // 这样可以减少无意义的重复计算 // // 补充:坐标均是全局坐标 __device__ __forceinline__ void dB(FLOAT *kl, FLOAT *p0, FLOAT *p, FLOAT *ret){ FLOAT r[DIM]; FLOAT rr; vct_sub(p, p0, r); // r = p - p0 rr = vct_len(r); // rr = abs(r) rr = rr*rr*rr; // rr = rr^3 vct_cross(kl, r, ret); // ret = kl × r vct_dot_a_v(1.0/rr, ret); // ret = (kl × r)/(rr^3) } // 计算所有的电流元在 p 点产生的磁场 // number 表示电流元数目 // kls 每 DIM = 3 组表示一个 kl // p0s 每 DIM = 3 组表示一个 p0 // shared_ret 应该是一个 shared 量,保存返回值 // 调用该方法后,应该同步处理 __syncthreads(); __device__ void current_element_B(FLOAT *kls, FLOAT *p0s, int number, FLOAT *p, FLOAT *shared_ret){ int tid = threadIdx.x; // 0-1023 (decide by BLOCK_DIM_X) FLOAT db[DIM]; __shared__ FLOAT s_dbs[DIM*BLOCK_DIM_X]; vct_zero(s_dbs + tid*DIM); // 计算每个电流元产生的磁场 for(int i = tid*DIM; i < number*DIM; i += BLOCK_DIM_X*DIM){ dB(kls + i, p0s + i, p, db); vct_add_local(s_dbs + tid*DIM, db); } // 规约求和(from https://www.bilibili.com/video/BV15E411x7yT) for(int step = BLOCK_DIM_X>>1; step >= 1; step>>=1){ __syncthreads(); // 求和前同步 if(tid<step) vct_add_local(s_dbs + tid * DIM, s_dbs + (tid + step) * DIM); } if(tid == 0) vct_copy(s_dbs, shared_ret); } """ cuda_code_05_QS = """ // 计算 QS 在 p 点产生的磁场 // origin xi yi zi 分别是 QS 的局部坐标系 // 这个函数只需要单线程计算 __device__ 
__forceinline__ void magnet_at_qs(FLOAT *origin, FLOAT *xi, FLOAT *yi, FLOAT *zi, FLOAT length, FLOAT gradient, FLOAT second_gradient, FLOAT aper_r, FLOAT *p, FLOAT* ret){ FLOAT temp1[DIM]; FLOAT temp2[DIM]; vct_sub(p, origin, temp1); // temp1 = p - origin temp2[X] = vct_dot_v_v(xi, temp1); temp2[Y] = vct_dot_v_v(yi, temp1); temp2[Z] = vct_dot_v_v(zi, temp1); // 这时 temp2 就是全局坐标 p 点在 QS 局部坐标系中的坐标 vct_zero(ret); if(temp2[Z]<0 || temp2[Z]>length){ return; // 无磁场 }else{ if( temp2[X] > aper_r || temp2[X] < -aper_r || temp2[Y] > aper_r || temp2[Y] < -aper_r || #ifdef FLOAT32 sqrtf(temp2[X]*temp2[X]+temp2[Y]*temp2[Y]) > aper_r #else sqrt(temp2[X]*temp2[X]+temp2[Y]*temp2[Y]) > aper_r #endif ){ return; // 无磁场 }else{ temp1[X] = gradient * temp2[Y] + second_gradient * (temp2[X] * temp2[Y]); temp1[Y] = gradient * temp2[X] + 0.5 * second_gradient * (temp2[X] * temp2[X] - temp2[Y] * temp2[Y]); vct_dot_a_v_ret(temp1[X], xi, ret); vct_dot_a_v_ret(temp1[Y], yi, temp2); vct_add_local(ret, temp2); } } } """ cuda_code_06_magnet_at = """ // 整个束线在 p 点产生得磁场(只有一个 QS 磁铁!) 
// FLOAT *kls, FLOAT* p0s, int current_element_number 和 CCT 电流元相关 // FLOAT *qs_data 表示 QS 磁铁所有参数,分别是局部坐标系(原点origin,三个轴xi yi zi,长度 梯度 二阶梯度 孔径) // p 表示要求磁场得全局坐标点 // shared_ret 表示磁场返回值(应该是一个 __shared__) // 本方法已经完成同步了,不用而外调用 __syncthreads(); __device__ void magnet_with_single_qs(FLOAT *kls, FLOAT* p0s, int current_element_number, FLOAT *qs_data, FLOAT *p, FLOAT *shared_ret){ int tid = threadIdx.x; FLOAT qs_magnet[DIM]; current_element_B(kls, p0s, current_element_number, p, shared_ret); __syncthreads(); // 块内同步 if(tid == 0){ // 计算 QS 的磁场确实不能并行 // 也没有必要让每个线程都重复计算一次 // 虽然两次同步有点麻烦,但至少只有一个线程束参与运行 magnet_at_qs( qs_data, // origin qs_data + 3, //xi qs_data + 6, //yi qs_data + 9, //zi *(qs_data + 12), // len *(qs_data + 13), // g *(qs_data + 14), // sg *(qs_data + 15), // aper r p, qs_magnet ); vct_add_local(shared_ret, qs_magnet); } __syncthreads(); // 块内同步 } """ cuda_code_07_runge_kutta4 = """ // runge_kutta4 代码和 cctpy 中的 runge_kutta4 一模一样 // Y0 数组长度为 6 // Y0 会发生变化,既是输入也是输出 // 为了分析包络等,会出一个记录全部 YO 的函数 // 这个函数单线程运行 // void (*call)(FLOAT,FLOAT*,FLOAT*) 表示 tn Yn 到 Yn+1 的转义,实际使用中还会带更多参数(C 语言没有闭包) // 所以这个函数仅仅是原型 __device__ void runge_kutta4(FLOAT t0, FLOAT t_end, FLOAT *Y0, void (*call)(FLOAT,FLOAT*,FLOAT*), FLOAT dt){ #ifdef FLOAT32 int number = (int)(ceilf((t_end - t0) / dt)); #else int number = (int)(ceil((t_end - t0) / dt)); #endif dt = (t_end - t0) / ((FLOAT)(number)); FLOAT k1[DIM*2]; FLOAT k2[DIM*2]; FLOAT k3[DIM*2]; FLOAT k4[DIM*2]; FLOAT temp[DIM*2]; for(int ignore = 0; ignore < number; ignore++){ (*call)(t0, Y0, k1); vct6_dot_a_v_ret(dt / 2., k1, temp); // temp = dt / 2 * k1 vct6_add_local(temp, Y0); // temp = Y0 + temp (*call)(t0 + dt / 2., temp, k2); vct6_dot_a_v_ret(dt / 2., k2, temp); // temp = dt / 2 * k2 vct6_add_local(temp, Y0); // temp = Y0 + temp (*call)(t0 + dt / 2., temp, k3); vct6_dot_a_v_ret(dt, k3, temp); // temp = dt * k3 vct6_add_local(temp, Y0); // temp = Y0 + temp (*call)(t0 + dt, temp, k4); t0 += dt; vct6_add(k1, k4, temp); // temp = k1 + k4 
vct6_dot_a_v(2.0, k2); vct6_dot_a_v(2.0, k3); vct6_add(k2, k3, k1); // k1 已经没用了,所以装 k1 = k2 + k3 vct6_add_local(temp, k1); vct6_dot_a_v(dt / 6.0, temp); vct6_add_local(Y0, temp); // Y0 += (dt / 6) * (k1 + 2 * k2 + 2 * k3 + k4); } } """ cuda_code_08_run_only = """ // runge_kutta4_for_magnet_with_single_qs 函数用到的回调 // FLOAT t0, FLOAT* Y0, FLOAT* Y1 微分计算 // 其中 Y = [P, V] // FLOAT k = particle[E] / particle[RM]; // k: float = particle.e / particle.relativistic_mass // FLOAT *kls, FLOAT* p0s, int current_element_number, 表示所有电流元 // FLOAT *qs_data 表示一个 QS 磁铁 __device__ void callback_for_runge_kutta4_for_magnet_with_single_qs( FLOAT t0, FLOAT* Y0, FLOAT* Y1, FLOAT k, FLOAT *kls, FLOAT* p0s, int current_element_number, FLOAT *qs_data ) { int tid = threadIdx.x; __shared__ FLOAT m[DIM]; // 磁场 magnet_with_single_qs(kls, p0s, current_element_number, qs_data, Y0, m); //Y0 只使用前3项,表示位置。已同步 if(tid == 0){ // 单线程完成即可 // ------------ 以下两步计算加速度,写入 Y1 + 3 中 ---------- // Y0 + 3 是原速度 v // Y1 + 3 用于存加速度,即 v × m,还没有乘 k = e/rm vct_cross(Y0 + 3, m, Y1 + 3); vct_dot_a_v(k, Y1 + 3); // 即 (v × m) * a,并且把积存在 Y1 + 3 中 // ------------- 以下把原速度复制到 Y1 中 ------------ vct_copy(Y0 + 3, Y1); // Y0 中后三项,速度。复制到 Y1 的前3项 } __syncthreads(); // 块内同步 } // 单个粒子跟踪 // runge_kutta4 函数用于 magnet_with_single_qs 的版本,即粒子跟踪 // Y0 即是 [P, v] 粒子位置、粒子速度 // void (*call)(FLOAT,FLOAT*,FLOAT*,FLOAT,FLOAT*,FLOAT*,int,FLOAT*) 改为 callback_for_runge_kutta4_for_magnet_with_single_qs // 前 3 项 FLOAT,FLOAT*,FLOAT* 和函数原型 runge_kutta4 函数一样,即 t0 Y0 Y1 // 第 4 项,表示 k = particle[E] / particle[RM]; // k: float = particle.e / particle.relativistic_mass // 第 567 项,FLOAT*,FLOAT*,int 表示所有电流源,FLOAT *kls, FLOAT* p0s, int current_element_number // 最后一项,表示 qs_data // particle 表示粒子 (px0, py1, pz2, vx3, vy4, vz5, rm6, e7, speed8, distance9) len = 10 /*__global__*/ __device__ void track_for_magnet_with_single_qs(FLOAT *distance, FLOAT *footstep, FLOAT *kls, FLOAT* p0s, int *current_element_number, FLOAT *qs_data, FLOAT *particle) { int tid = threadIdx.x; 
FLOAT t0 = 0.0; // 开始时间为 0 FLOAT t_end = (*distance) / particle[SPEED]; // 用时 = 距离/速率 #ifdef FLOAT32 int number = (int)(ceilf( (*distance) / (*footstep) )); #else int number = (int)(ceil( (*distance) / (*footstep))); #endif FLOAT dt = (t_end - t0) / ((FLOAT)(number)); FLOAT k = particle[E] / particle[RM]; // k: float = particle.e / particle.relativistic_mass __shared__ FLOAT Y0[DIM*2]; // Y0 即是 [P, v] 粒子位置、粒子速度,就是 particle 前两项 __shared__ FLOAT k1[DIM*2]; __shared__ FLOAT k2[DIM*2]; __shared__ FLOAT k3[DIM*2]; __shared__ FLOAT k4[DIM*2]; __shared__ FLOAT temp[DIM*2]; if(tid == 0){ vct6_copy(particle, Y0); // 写 Y0 } for(int ignore = 0; ignore < number; ignore++){ __syncthreads(); // 循环前同步 callback_for_runge_kutta4_for_magnet_with_single_qs(t0, Y0, k1, k, kls, p0s, *current_element_number, qs_data); // 已同步 if(tid == 0){ vct6_dot_a_v_ret(dt / 2., k1, temp); // temp = dt / 2 * k1 vct6_add_local(temp, Y0); // temp = Y0 + temp } __syncthreads(); callback_for_runge_kutta4_for_magnet_with_single_qs(t0 + dt / 2., temp, k2, k, kls, p0s, *current_element_number, qs_data); if(tid == 0){ vct6_dot_a_v_ret(dt / 2., k2, temp); // temp = dt / 2 * k2 vct6_add_local(temp, Y0); // temp = Y0 + temp } __syncthreads(); callback_for_runge_kutta4_for_magnet_with_single_qs(t0 + dt / 2., temp, k3, k, kls, p0s, *current_element_number, qs_data); if(tid == 0){ vct6_dot_a_v_ret(dt, k3, temp); // temp = dt * k3 vct6_add_local(temp, Y0); // temp = Y0 + temp } __syncthreads(); callback_for_runge_kutta4_for_magnet_with_single_qs(t0 + dt, temp, k4, k, kls, p0s, *current_element_number, qs_data); t0 += dt; if(tid == 0){ vct6_add(k1, k4, temp); // temp = k1 + k4 vct6_dot_a_v(2.0, k2); vct6_dot_a_v(2.0, k3); vct6_add(k2, k3, k1); // k1 已经没用了,所以装 k1 = k2 + k3 vct6_add_local(temp, k1); vct6_dot_a_v(dt / 6.0, temp); vct6_add_local(Y0, temp); // Y0 += (dt / 6) * (k1 + 2 * k2 + 2 * k3 + k4); } } // 写回 particle if(tid == 0){ vct6_copy(Y0 ,particle); // 写 Y0 particle[DISTANCE] = *distance; } __syncthreads(); } 
// 上函数的 global 版本 __global__ void track_for_magnet_with_single_qs_g(FLOAT *distance, FLOAT *footstep, FLOAT *kls, FLOAT* p0s, int *current_element_number, FLOAT *qs_data, FLOAT *particle) { int tid = threadIdx.x; FLOAT t0 = 0.0; // 开始时间为 0 FLOAT t_end = (*distance) / particle[SPEED]; // 用时 = 距离/速率 #ifdef FLOAT32 int number = (int)(ceilf( (*distance) / (*footstep) )); #else int number = (int)(ceil( (*distance) / (*footstep))); #endif FLOAT dt = (t_end - t0) / ((FLOAT)(number)); FLOAT k = particle[E] / particle[RM]; // k: float = particle.e / particle.relativistic_mass __shared__ FLOAT Y0[DIM*2]; // Y0 即是 [P, v] 粒子位置、粒子速度,就是 particle 前两项 __shared__ FLOAT k1[DIM*2]; __shared__ FLOAT k2[DIM*2]; __shared__ FLOAT k3[DIM*2]; __shared__ FLOAT k4[DIM*2]; __shared__ FLOAT temp[DIM*2]; if(tid == 0){ vct6_copy(particle, Y0); // 写 Y0 } for(int ignore = 0; ignore < number; ignore++){ __syncthreads(); // 循环前同步 callback_for_runge_kutta4_for_magnet_with_single_qs(t0, Y0, k1, k, kls, p0s, *current_element_number, qs_data); // 已同步 if(tid == 0){ vct6_dot_a_v_ret(dt / 2., k1, temp); // temp = dt / 2 * k1 vct6_add_local(temp, Y0); // temp = Y0 + temp } __syncthreads(); callback_for_runge_kutta4_for_magnet_with_single_qs(t0 + dt / 2., temp, k2, k, kls, p0s, *current_element_number, qs_data); if(tid == 0){ vct6_dot_a_v_ret(dt / 2., k2, temp); // temp = dt / 2 * k2 vct6_add_local(temp, Y0); // temp = Y0 + temp } __syncthreads(); callback_for_runge_kutta4_for_magnet_with_single_qs(t0 + dt / 2., temp, k3, k, kls, p0s, *current_element_number, qs_data); if(tid == 0){ vct6_dot_a_v_ret(dt, k3, temp); // temp = dt * k3 vct6_add_local(temp, Y0); // temp = Y0 + temp } __syncthreads(); callback_for_runge_kutta4_for_magnet_with_single_qs(t0 + dt, temp, k4, k, kls, p0s, *current_element_number, qs_data); t0 += dt; if(tid == 0){ vct6_add(k1, k4, temp); // temp = k1 + k4 vct6_dot_a_v(2.0, k2); vct6_dot_a_v(2.0, k3); vct6_add(k2, k3, k1); // k1 已经没用了,所以装 k1 = k2 + k3 vct6_add_local(temp, k1); 
vct6_dot_a_v(dt / 6.0, temp); vct6_add_local(Y0, temp); // Y0 += (dt / 6) * (k1 + 2 * k2 + 2 * k3 + k4); } } // 写回 particle if(tid == 0){ vct6_copy(Y0 ,particle); // 写 Y0 particle[DISTANCE] = *distance; } __syncthreads(); } """ cuda_code_09_run_multi_particle = """ __device__ void track_multi_particle_for_magnet_with_single_qs(FLOAT *distance, FLOAT *footstep, FLOAT *kls, FLOAT *p0s, int *current_element_number, FLOAT *qs_data, FLOAT *particle, int *particle_number) { for(int i = 0; i< (*particle_number);i++){ track_for_magnet_with_single_qs(distance, footstep, kls, p0s, current_element_number, qs_data, particle + i * PARTICLE_DIM); } } __global__ void track_multi_particle_for_magnet_with_single_qs_g(FLOAT *distance, FLOAT *footstep, FLOAT *kls, FLOAT *p0s, int *current_element_number, FLOAT *qs_data, FLOAT *particle, int *particle_number) { for(int i = 0; i< (*particle_number);i++){ track_for_magnet_with_single_qs(distance, footstep, kls, p0s, current_element_number, qs_data, particle + i * PARTICLE_DIM); } } """ cuda_code_10_run_multi_particle_multi_beamline = """ __global__ void track_multi_particle_beamlime_for_magnet_with_single_qs(FLOAT *distance, FLOAT *footstep, FLOAT *kls, FLOAT *p0s, int *current_element_number, FLOAT *qs_data, FLOAT *particle, int *particle_number) { int bid = blockIdx.x; track_multi_particle_for_magnet_with_single_qs( distance, // 全局相同 footstep, // 全局相同 kls + MAX_CURRENT_ELEMENT_NUMBER * DIM * bid, p0s + MAX_CURRENT_ELEMENT_NUMBER * DIM * bid, // 当前组电流元参数 current_element_number + bid, // 当前组电流元数目 qs_data + QS_DATA_LENGTH * bid, // 当前组 QS 参数 particle + (*particle_number) * PARTICLE_DIM * bid, // 当前组粒子 particle_number // 全局相同 ); } """ self.cuda_code: str = ( cuda_code_00_include + cuda_code_01_float_type_define + cuda_code_02_define + cuda_code_03_vct_functions + cuda_code_04_dB + cuda_code_05_QS + cuda_code_06_magnet_at + cuda_code_07_runge_kutta4 + cuda_code_08_run_only + cuda_code_09_run_multi_particle + 
cuda_code_10_run_multi_particle_multi_beamline ) def vct_length(self, p3: P3): """ 测试用函数,计算矢量长度 示例: ga32 = GPU_ACCELERATOR(float_number_type=GPU_ACCELERATOR.FLOAT32) ga64 = GPU_ACCELERATOR(float_number_type=GPU_ACCELERATOR.FLOAT64) v = P3(1,1,1) print(f"diff={ga32.vct_length(v) - v.length()}") # diff=-3.1087248775207854e-08 print(f"diff={ga64.vct_length(v) - v.length()}") # diff=0.0 """ code = """ __global__ void vl(FLOAT* v, FLOAT* ret){ *ret = vct_len(v); } """ mod = SourceModule(self.cuda_code + code) vl = mod.get_function("vl") ret = numpy.empty((1,), dtype=self.numpy_dtype) vl(drv.In(p3.to_numpy_ndarry3(numpy_dtype=self.numpy_dtype)), drv.Out(ret), grid=(1, 1, 1), block=(1, 1, 1)) return float(ret[0]) def vct_print(self, p3: P3): """ 测试用函数,打印矢量 示例: ga32 = GPU_ACCELERATOR(float_number_type=GPU_ACCELERATOR.FLOAT32) ga64 = GPU_ACCELERATOR(float_number_type=GPU_ACCELERATOR.FLOAT64) v = P3(1/3, 1/6, 1/7) ga32.vct_print(v) ga64.vct_print(v) >>> 0.333333343267441, 0.166666671633720, 0.142857149243355 0.333333333333333, 0.166666666666667, 0.142857142857143 """ code = """ __global__ void vp(FLOAT* v){ vct_print(v); } """ mod = SourceModule(self.cuda_code + code) vp = mod.get_function("vp") vp(drv.In(p3.to_numpy_ndarry3(numpy_dtype=self.numpy_dtype)), grid=(1, 1, 1), block=(1, 1, 1)) def current_element_B(self, kls: numpy.ndarray, p0s: numpy.ndarray, number: int, p: P3): """ 计算电流元集合,在 p 点产生的磁场 对比代码如下: """ code = """ __global__ void ce(FLOAT *kls, FLOAT *p0s, int* number, FLOAT *p, FLOAT *ret){ __shared__ FLOAT s_ret[DIM]; int tid = threadIdx.x; current_element_B(kls,p0s,*number,p,s_ret); if(tid == 0) vct_copy(s_ret, ret); } """ mod = SourceModule(self.cuda_code + code) ce = mod.get_function("ce") ret = numpy.empty((3,), dtype=self.numpy_dtype) ce(drv.In(kls.astype(self.numpy_dtype)), drv.In(p0s.astype(self.numpy_dtype)), drv.In(numpy.array([number], dtype=numpy.int32)), drv.In(p.to_numpy_ndarry3(numpy_dtype=self.numpy_dtype)), drv.Out(ret), grid=(1, 1, 1), 
block=(self.block_dim_x, 1, 1)) return P3.from_numpy_ndarry(ret) def magnet_at_qs(self, qs_data, p3: P3): """ qs 磁铁在 p 点产生的磁场 p 点是 全局坐标点 """ code = """ __global__ void mq(FLOAT *qs_data, FLOAT *p, FLOAT *ret){ magnet_at_qs( qs_data, // origin qs_data + 3, //xi qs_data + 6, //yi qs_data + 9, //zi *(qs_data + 12), // len *(qs_data + 13), // g *(qs_data + 14), // sg *(qs_data + 15), // aper r p, ret ); } """ mod = SourceModule(self.cuda_code + code) mq = mod.get_function("mq") ret = numpy.empty((3,), dtype=self.numpy_dtype) mq(drv.In(qs_data.astype(self.numpy_dtype)), drv.In(p3.to_numpy_ndarry3(numpy_dtype=self.numpy_dtype)), drv.Out(ret), grid=(1, 1, 1), block=(1, 1, 1) ) return P3.from_numpy_ndarry(ret) def magnet_at(self, bl: Beamline, p: P3) -> P3: """ CCT 和 QS 合起来测试 """ code = """ __global__ void ma(FLOAT *kls, FLOAT* p0s, int* current_element_number, FLOAT *qs_data, FLOAT *p, FLOAT *ret){ int tid = threadIdx.x; __shared__ FLOAT shared_ret[DIM]; magnet_with_single_qs(kls, p0s, *current_element_number, qs_data, p, shared_ret); if(tid == 0) vct_copy(shared_ret, ret); } """ mod = SourceModule(self.cuda_code + code) ma = mod.get_function('ma') ret = numpy.empty((3,), dtype=self.numpy_dtype) kls_list: List[numpy.ndarray] = [] p0s_list: List[numpy.ndarray] = [] current_element_number = 0 qs_data = None for m in bl.magnets: if isinstance(m, CCT): cct = m kls, p0s = cct.global_current_elements_and_elementary_current_positions( numpy_dtype=self.numpy_dtype) current_element_number += cct.total_disperse_number kls_list.append(kls) p0s_list.append(p0s) elif isinstance(m, QS): qs = m qs_data = numpy.array( qs.local_coordinate_system.location.to_list( ) + qs.local_coordinate_system.XI.to_list() + qs.local_coordinate_system.YI.to_list() + qs.local_coordinate_system.ZI.to_list() + [qs.length, qs.gradient, qs.second_gradient, qs.aperture_radius], dtype=self.numpy_dtype) else: raise ValueError(f"{m} 无法用 GOU 加速") kls_all = numpy.concatenate(tuple(kls_list)) p0s_all = 
numpy.concatenate(tuple(p0s_list)) ma( drv.In(kls_all), drv.In(p0s_all), drv.In(numpy.array([current_element_number], dtype=numpy.int32)), drv.In(qs_data), drv.In(p.to_numpy_ndarry3(numpy_dtype=self.numpy_dtype)), drv.Out(ret), grid=(1, 1, 1), block=(self.block_dim_x, 1, 1) ) return P3.from_numpy_ndarry(ret) def track_one_particle_with_single_qs(self, bl: Beamline, p: RunningParticle, distance: float, footstep: float): """ 粒子跟踪,电流元 + 单个 QS """ mod = SourceModule(self.cuda_code) track = mod.get_function('track_for_magnet_with_single_qs_g') particle = p.to_numpy_array_data(numpy_dtype=self.numpy_dtype) kls_list: List[numpy.ndarray] = [] p0s_list: List[numpy.ndarray] = [] current_element_number = 0 qs_data = None for m in bl.magnets: if isinstance(m, CCT): cct = m kls, p0s = cct.global_current_elements_and_elementary_current_positions( numpy_dtype=self.numpy_dtype) current_element_number += cct.total_disperse_number kls_list.append(kls) p0s_list.append(p0s) elif isinstance(m, QS): qs = m qs_data = numpy.array( qs.local_coordinate_system.location.to_list( ) + qs.local_coordinate_system.XI.to_list() + qs.local_coordinate_system.YI.to_list() + qs.local_coordinate_system.ZI.to_list() + [qs.length, qs.gradient, qs.second_gradient, qs.aperture_radius], dtype=self.numpy_dtype) else: raise ValueError(f"{m} 无法用 GOU 加速") kls_all = numpy.concatenate(tuple(kls_list)) p0s_all = numpy.concatenate(tuple(p0s_list)) track( drv.In(numpy.array([distance], dtype=self.numpy_dtype)), drv.In(numpy.array([footstep], dtype=self.numpy_dtype)), drv.In(kls_all), drv.In(p0s_all), drv.In(numpy.array([current_element_number], dtype=numpy.int32)), drv.In(qs_data), drv.InOut(particle), grid=(1, 1, 1), block=(self.block_dim_x, 1, 1) ) p.populate(RunningParticle.from_numpy_array_data(particle)) def track_multi_particle_for_magnet_with_single_qs(self, bl: Beamline, ps: List[RunningParticle], distance: float, footstep: float): """ 多粒子跟踪,电流元 + 单个 QS """ mod = SourceModule(self.cuda_code) track = 
mod.get_function( 'track_multi_particle_for_magnet_with_single_qs_g') kls_list: List[numpy.ndarray] = [] p0s_list: List[numpy.ndarray] = [] particle_list: List[numpy.ndarray] = [ p.to_numpy_array_data(numpy_dtype=self.numpy_dtype) for p in ps] current_element_number = 0 qs_data = None for m in bl.magnets: if isinstance(m, CCT): cct = m kls, p0s = cct.global_current_elements_and_elementary_current_positions( numpy_dtype=self.numpy_dtype) current_element_number += cct.total_disperse_number kls_list.append(kls) p0s_list.append(p0s) elif isinstance(m, QS): qs = m qs_data = numpy.array( qs.local_coordinate_system.location.to_list( ) + qs.local_coordinate_system.XI.to_list() + qs.local_coordinate_system.YI.to_list() + qs.local_coordinate_system.ZI.to_list() + [qs.length, qs.gradient, qs.second_gradient, qs.aperture_radius], dtype=self.numpy_dtype) else: raise ValueError(f"{m} 无法用 GOU 加速") kls_all = numpy.concatenate(tuple(kls_list)) p0s_all = numpy.concatenate(tuple(p0s_list)) particles_all = numpy.concatenate(tuple(particle_list)) track( drv.In(numpy.array([distance], dtype=self.numpy_dtype)), drv.In(numpy.array([footstep], dtype=self.numpy_dtype)), drv.In(kls_all), drv.In(p0s_all), drv.In(numpy.array([current_element_number], dtype=numpy.int32)), drv.In(qs_data), drv.InOut(particles_all), drv.In(numpy.array([len(ps)], dtype=numpy.int32)), grid=(1, 1, 1), block=(self.block_dim_x, 1, 1) ) particles_all = particles_all.reshape((-1, 10)) for i in range(len(ps)): ps[i].populate( RunningParticle.from_numpy_array_data(particles_all[i])) def track_multi_particle_beamlime_for_magnet_with_single_qs( self, bls: List[Beamline], ps: List[RunningParticle], distance: float, footstep: float)->List[List[RunningParticle]]: """ 多粒子多束线跟踪,电流元 + 单个 QS """ mod = SourceModule(self.cuda_code) track = mod.get_function( 'track_multi_particle_beamlime_for_magnet_with_single_qs') kls_all_all_beamline: List[numpy.ndarray] = [] p0s_all_all_beamline: List[numpy.ndarray] = [] qs_data_all_beamline: 
List[numpy.ndarray] = [] particles_all_all_beamline: List[numpy.ndarray] = [] current_element_numbers: List[int] = [] for bl in bls: kls_list: List[numpy.ndarray] = [] p0s_list: List[numpy.ndarray] = [] particle_list: List[numpy.ndarray] = [ p.to_numpy_array_data(numpy_dtype=self.numpy_dtype) for p in ps] current_element_number = 0 qs_data = None for m in bl.magnets: if isinstance(m, CCT): cct = m kls, p0s = cct.global_current_elements_and_elementary_current_positions( numpy_dtype=self.numpy_dtype) current_element_number += cct.total_disperse_number kls_list.append(kls) p0s_list.append(p0s) elif isinstance(m, QS): qs = m qs_data = numpy.array( qs.local_coordinate_system.location.to_list( ) + qs.local_coordinate_system.XI.to_list() + qs.local_coordinate_system.YI.to_list() + qs.local_coordinate_system.ZI.to_list() + [qs.length, qs.gradient, qs.second_gradient, qs.aperture_radius], dtype=self.numpy_dtype) else: raise ValueError(f"{m} 无法用 GOU 加速") kls_all = numpy.concatenate(tuple(kls_list)) p0s_all = numpy.concatenate(tuple(p0s_list)) kls_all_pad = numpy.zeros( (self.max_current_element_number*3,), dtype=self.numpy_dtype) p0s_all_pad = numpy.zeros( (self.max_current_element_number*3,), dtype=self.numpy_dtype) kls_all_pad[0:len(kls_all)] = kls_all p0s_all_pad[0:len(p0s_all)] = p0s_all particles_all = numpy.concatenate(tuple(particle_list)) kls_all_all_beamline.append(kls_all_pad) p0s_all_all_beamline.append(p0s_all_pad) qs_data_all_beamline.append(qs_data) particles_all_all_beamline.append(particles_all) current_element_numbers.append(current_element_number) kls_all_all_beamline = numpy.concatenate(tuple(kls_all_all_beamline)) p0s_all_all_beamline = numpy.concatenate(tuple(p0s_all_all_beamline)) qs_data_all_beamline = numpy.concatenate(tuple(qs_data_all_beamline)) particles_all_all_beamline = numpy.concatenate( tuple(particles_all_all_beamline)) track( drv.In(numpy.array([distance], dtype=self.numpy_dtype)), # 运动路程 drv.In(numpy.array([footstep], 
dtype=self.numpy_dtype)), # 步长 drv.In(kls_all_all_beamline), drv.In(p0s_all_all_beamline), drv.In(numpy.array(current_element_numbers, dtype=numpy.int32)), drv.In(qs_data_all_beamline), drv.InOut(particles_all_all_beamline), drv.In(numpy.array([len(ps)], dtype=numpy.int32)), # 粒子数 grid=(len(bls), 1, 1), block=(self.block_dim_x, 1, 1) ) particles_all_all_beamline = particles_all_all_beamline.reshape( (len(bls), len(ps), 10)) ret: List[List[RunningParticle]] = [] for bid in range(len(bls)): ps_ran: List[RunningParticle] = [] for pid in range(len(ps)): ps_ran.append(RunningParticle.from_numpy_array_data( particles_all_all_beamline[bid][pid])) ret.append(ps_ran) return ret if __name__ == "__main__": # 测试 import unittest gantry = HUST_SC_GANTRY() bl = gantry.create_beamline() ga64 = GPU_ACCELERATOR(float_number_type=GPU_ACCELERATOR.FLOAT64) ga32 = GPU_ACCELERATOR(float_number_type=GPU_ACCELERATOR.FLOAT32) ga64_b128 = GPU_ACCELERATOR( float_number_type=GPU_ACCELERATOR.FLOAT64, block_dim_x=128) ga32_b128 = GPU_ACCELERATOR( float_number_type=GPU_ACCELERATOR.FLOAT32, block_dim_x=128) ga64_b256 = GPU_ACCELERATOR( float_number_type=GPU_ACCELERATOR.FLOAT64, block_dim_x=256) ga32_b256 = GPU_ACCELERATOR( float_number_type=GPU_ACCELERATOR.FLOAT32, block_dim_x=256) ga64_b512 = GPU_ACCELERATOR( float_number_type=GPU_ACCELERATOR.FLOAT64, block_dim_x=512) ga32_b512 = GPU_ACCELERATOR( float_number_type=GPU_ACCELERATOR.FLOAT32, block_dim_x=512) class Test(unittest.TestCase): def test_vct_length(self): v = P3(1, 1, 1) length_cpu = v.length() length_gpu_32 = ga32.vct_length(v) length_gpu_64 = ga64.vct_length(v) print(f"test_vct_length 32 ={length_gpu_32 - length_cpu}") print(f"test_vct_length 64 ={length_gpu_64 - length_cpu}") self.assertTrue((length_gpu_32 - length_cpu) < 1e-6) self.assertTrue((length_gpu_64 - length_cpu) < 1e-14) def test_print(self): v = P3(1/3, 1/6, 1/7) ga32.vct_print(v) ga64.vct_print(v) self.assertTrue(True) def test_cct(self): cct: CCT = bl.magnets[15] p_cct = 
bl.trajectory.point_at( gantry.first_bending_part_length()+gantry.DL2+0.5).to_p3() + P3(1E-3, 1E-4, 1E-5) magnet_cpu = cct.magnetic_field_at(p_cct) kls, p0s = cct.global_current_elements_and_elementary_current_positions( numpy_dtype=numpy.float64) magnet_gpu_32 = ga32.current_element_B( kls.flatten(), p0s.flatten(), cct.total_disperse_number, p_cct, ) magnet_gpu_64 = ga64.current_element_B( kls.flatten(), p0s.flatten(), cct.total_disperse_number, p_cct, ) print(f"test_cct, diff_32={magnet_cpu-magnet_gpu_32}") print(f"test_cct, diff_64={magnet_cpu-magnet_gpu_64}") self.assertTrue((magnet_cpu-magnet_gpu_32).length() < 1e-6) self.assertTrue((magnet_cpu-magnet_gpu_64).length() < 1e-14) def test_qs(self): qs: QS = bl.magnets[23] p_qs = (bl.trajectory.point_at(gantry.first_bending_part_length()+gantry.DL2 + 1.19+gantry.GAP1+gantry.qs3_length/2).to_p3() + P3(10*MM, 10*MM, 10*MM)) magnet_cpu = qs.magnetic_field_at(p_qs) qs_data = numpy.array( qs.local_coordinate_system.location.to_list( ) + qs.local_coordinate_system.XI.to_list() + qs.local_coordinate_system.YI.to_list() + qs.local_coordinate_system.ZI.to_list() + [qs.length, qs.gradient, qs.second_gradient, qs.aperture_radius], dtype=numpy.float64) magnet_gpu_32 = ga32.magnet_at_qs( qs_data=qs_data, p3=p_qs ) magnet_gpu_64 = ga64.magnet_at_qs( qs_data=qs_data, p3=p_qs ) print(f"test_qs, diff_32={magnet_cpu-magnet_gpu_32}") print(f"test_qs, diff_64={magnet_cpu-magnet_gpu_64}") self.assertTrue((magnet_cpu-magnet_gpu_32).length() < 1e-6) self.assertTrue((magnet_cpu-magnet_gpu_64).length() < 1e-14) def test_magnet_at0(self): p_cct = bl.trajectory.point_at( gantry.first_bending_part_length()+gantry.DL2+0.5).to_p3() + P3(1E-3, 1E-4, 1E-5) magnet_cpu = bl.magnetic_field_at(p_cct) magnet_gpu_64 = ga64.magnet_at(bl, p_cct) magnet_gpu_32 = ga32.magnet_at(bl, p_cct) print(f"test_magnet_at0 diff32 = {magnet_cpu - magnet_gpu_32}") print(f"test_magnet_at0 diff64 = {magnet_cpu - magnet_gpu_64}") print(f"-- test_magnet_at0 all 
beamline--") print(f"magnet_cpu = {magnet_cpu}") print(f"magnet_gpu_32 = {magnet_gpu_32}") print(f"magnet_gpu_64 = {magnet_gpu_64}") # test_magnet_at0 diff32 = [-9.995611723045972e-08, -2.9023106392321585e-07, -2.0517209438075668e-06] # test_magnet_at0 diff64 = [-1.5404344466674047e-15, -2.1805474093028465e-15, 0.0] self.assertTrue((magnet_cpu-magnet_gpu_32).length() < 1e-5) self.assertTrue((magnet_cpu-magnet_gpu_64).length() < 1e-14) def test_magnet_at1(self): p_cct = bl.trajectory.point_at( gantry.first_bending_part_length()+gantry.DL2+0.5).to_p3() + P3(1E-3, 1E-4, 1E-5) magnet_cpu = bl.magnetic_field_at(p_cct) magnet_gpu_64 = ga64_b128.magnet_at(bl, p_cct) magnet_gpu_32 = ga32_b128.magnet_at(bl, p_cct) print(f"test_magnet_at1 diff32 = {magnet_cpu - magnet_gpu_32}") print(f"test_magnet_at1 diff64 = {magnet_cpu - magnet_gpu_64}") print(f"-- test_magnet_at0 all beamline--") print(f"magnet_cpu = {magnet_cpu}") print(f"magnet_gpu_32 = {magnet_gpu_32}") print(f"magnet_gpu_64 = {magnet_gpu_64}") # test_cct, diff_32=[2.5088516841798025e-07, -2.2562693963168456e-07, -4.375363960029688e-08] # test_cct, diff_64=[2.4424906541753444e-15, 9.43689570931383e-16, 8.881784197001252e-16] self.assertTrue((magnet_cpu-magnet_gpu_32).length() < 1e-5) self.assertTrue((magnet_cpu-magnet_gpu_64).length() < 1e-14) def test_magnet_at2(self): p_cct = bl.trajectory.point_at( gantry.first_bending_part_length()+gantry.DL2+0.5).to_p3() + P3(1E-3, 1E-4, 1E-5) magnet_cpu = bl.magnetic_field_at(p_cct) magnet_gpu_64 = ga64_b256.magnet_at(bl, p_cct) magnet_gpu_32 = ga32_b256.magnet_at(bl, p_cct) print(f"test_magnet_at2 diff32 = {magnet_cpu - magnet_gpu_32}") print(f"test_magnet_at2 diff64 = {magnet_cpu - magnet_gpu_64}") print(f"-- test_magnet_at0 all beamline--") print(f"magnet_cpu = {magnet_cpu}") print(f"magnet_gpu_32 = {magnet_gpu_32}") print(f"magnet_gpu_64 = {magnet_gpu_64}") # test_cct, diff_32=[2.5088516841798025e-07, -2.2562693963168456e-07, -4.375363960029688e-08] # test_cct, 
diff_64=[2.4424906541753444e-15, 9.43689570931383e-16, 8.881784197001252e-16] self.assertTrue((magnet_cpu-magnet_gpu_32).length() < 1e-5) self.assertTrue((magnet_cpu-magnet_gpu_64).length() < 1e-14) def test_magnet_at3(self): p_qs = (bl.trajectory.point_at(gantry.first_bending_part_length()+gantry.DL2 + 1.19+gantry.GAP1+gantry.qs3_length/2).to_p3() + P3(10*MM, 10*MM, 10*MM)) magnet_cpu = bl.magnetic_field_at(p_qs) magnet_gpu_64 = ga64.magnet_at(bl, p_qs) magnet_gpu_32 = ga32.magnet_at(bl, p_qs) print(f"test_magnet_at3 diff32 = {magnet_cpu - magnet_gpu_32}") print(f"test_magnet_at3 diff64 = {magnet_cpu - magnet_gpu_64}") print(f"-- test_magnet_at0 all beamline--") print(f"magnet_cpu = {magnet_cpu}") print(f"magnet_gpu_32 = {magnet_gpu_32}") print(f"magnet_gpu_64 = {magnet_gpu_64}") # test_magnet_at0 diff32 = [-2.2375529054596832e-08, -6.045702764800875e-08, -4.853957882300364e-07] # test_magnet_at0 diff64 = [4.0245584642661925e-16, -1.5959455978986625e-16, -3.608224830031759e-16] self.assertTrue((magnet_cpu-magnet_gpu_32).length() < 1e-5) self.assertTrue((magnet_cpu-magnet_gpu_64).length() < 1e-14) def test_magnet_at4(self): p_qs = (bl.trajectory.point_at(gantry.first_bending_part_length()+gantry.DL2 + 1.19+gantry.GAP1+gantry.qs3_length/2).to_p3() + P3(10*MM, 10*MM, 10*MM)) magnet_cpu = bl.magnetic_field_at(p_qs) magnet_gpu_64 = ga64_b128.magnet_at(bl, p_qs) magnet_gpu_32 = ga32_b128.magnet_at(bl, p_qs) print(f"test_magnet_at4 diff32 = {magnet_cpu - magnet_gpu_32}") print(f"test_magnet_at4 diff64 = {magnet_cpu - magnet_gpu_64}") print(f"-- test_magnet_at0 all beamline--") print(f"magnet_cpu = {magnet_cpu}") print(f"magnet_gpu_32 = {magnet_gpu_32}") print(f"magnet_gpu_64 = {magnet_gpu_64}") # test_magnet_at0 diff32 = [-2.2375529054596832e-08, -5.673173734954684e-08, -4.704946270361887e-07] # test_magnet_at0 diff64 = [4.0245584642661925e-16, -1.6653345369377348e-16, -3.608224830031759e-16] self.assertTrue((magnet_cpu-magnet_gpu_32).length() < 1e-5) 
self.assertTrue((magnet_cpu-magnet_gpu_64).length() < 1e-14) def test_magnet_at5(self): p_qs = (bl.trajectory.point_at(gantry.first_bending_part_length()+gantry.DL2 + 1.19+gantry.GAP1+gantry.qs3_length/2).to_p3() + P3(10*MM, 10*MM, 10*MM)) magnet_cpu = bl.magnetic_field_at(p_qs) magnet_gpu_64 = ga64_b256.magnet_at(bl, p_qs) magnet_gpu_32 = ga32_b256.magnet_at(bl, p_qs) print(f"test_magnet_at5 diff32 = {magnet_cpu - magnet_gpu_32}") print(f"test_magnet_at5 diff64 = {magnet_cpu - magnet_gpu_64}") print(f"-- test_magnet_at0 all beamline--") print(f"magnet_cpu = {magnet_cpu}") print(f"magnet_gpu_32 = {magnet_gpu_32}") print(f"magnet_gpu_64 = {magnet_gpu_64}") # test_magnet_at0 diff32 = [-2.2375529054596832e-08, -6.045702764800875e-08, -4.853957882300364e-07] # test_magnet_at0 diff64 = [3.885780586188048e-16, -1.5959455978986625e-16, -3.608224830031759e-16] self.assertTrue((magnet_cpu-magnet_gpu_32).length() < 1e-5) self.assertTrue((magnet_cpu-magnet_gpu_64).length() < 1e-14) def test_track(self): p = ParticleFactory.create_proton_along( bl.trajectory, gantry.first_bending_part_length() + gantry.DL2, 215 ) print(f"init p={p}") ParticleRunner.run_only(p, bl, 2, 10*MM) print(f"track cpu p={p}") p = ParticleFactory.create_proton_along( bl.trajectory, gantry.first_bending_part_length() + gantry.DL2, 215 ) ga32.track_one_particle_with_single_qs(bl, p, 2, 10*MM) print(f"track gpu32 p={p}") p = ParticleFactory.create_proton_along( bl.trajectory, gantry.first_bending_part_length() + gantry.DL2, 215 ) ga64_b512.track_one_particle_with_single_qs(bl, p, 2, 10*MM) print(f"track gpu64 p={p}") def test_track_particles(self): """ CPU finish,time = 8.32676386833191 s p=[6.9952707893127615, 2.8450465238850184, 0.0007167060845681786],v=[137902813.48249766, -106627503.84776628, 276209.6460511479],v0=174317774.94179922 p=[6.991651980302911, 2.84687609876132, 0.0007879913873482307],v=[137570793.41293398, -107061906.14231186, 295395.40762040997],v0=174317774.94179922 p=[7.000264413031172, 
2.8482101363154277, 0.000664009633475038],v=[137484972.58009422, -107167541.8711567, 259612.51092981483],v0=174317774.94179922 p=[7.006462737769314, 2.8471267081000597, 0.0006521308980659168],v=[137727592.68641925, -106855557.66272765, 261035.38507345604],v0=174317774.94179922 p=[7.00360052384275, 2.844813163521684, 0.000672045260457023],v=[138068550.86893737, -106419283.32886522, 267163.18213671655],v0=174317774.94179922 GPU32 finish,time = 2.0207936763763428 s diff=p=[-1.8471008127463051e-06, 7.189076014491036e-07, -8.941999065281616e-08],v=[349.48249766230583, 368.15223371982574, -27.260198852105532],v0=-1.0582007765769958 diff=p=[-9.85242743389847e-07, -7.609035970190803e-07, -1.367907528022948e-07],v=[121.41293397545815, 125.85768814384937, -44.40487959003076],v0=-1.0582007765769958 diff=p=[1.1989198442918791e-06, -1.6289738788977104e-06, -1.3034790655717422e-07],v=[-131.4199057817459, -253.87115670740604, -41.06719518516911],v0=-1.0582007765769958 diff=p=[-1.7435844457125427e-06, -4.910729138885017e-07, -7.633820543479896e-08],v=[376.68641924858093, 378.33727234601974, -24.536801543959882],v0=-1.0582007765769958 diff=p=[-1.0272131580890687e-06, 1.485588944749594e-06, -1.6725037835631316e-07],v=[118.86893737316132, 308.6711347848177, -52.81786328344606],v0=-1.0582007765769958 GPU64 finish,time = 3.7295095920562744 s diff=p=[0.0, 0.0, -3.0249240612345574e-17],v=[-2.9802322387695312e-08, 0.0, -7.275957614183426e-09],v0=0.0 diff=p=[0.0, 0.0, 9.443400922348744e-17],v=[0.0, 1.4901161193847656e-08, 3.317836672067642e-08],v0=0.0 diff=p=[0.0, -4.440892098500626e-16, 1.7661653389788867e-16],v=[1.1920928955078125e-07, -1.4901161193847656e-08, 5.954643711447716e-08],v0=0.0 diff=p=[0.0, 0.0, 3.5344990823027445e-16],v=[2.9802322387695312e-08, -2.9802322387695312e-08, 1.1886004358530045e-07],v0=0.0 diff=p=[0.0, 4.440892098500626e-16, 8.716985466783456e-17],v=[1.7881393432617188e-07, 2.384185791015625e-07, 2.9802322387695312e-08],v0=0.0 """ 
BaseUtils.i_am_sure_my_code_closed_in_if_name_equal_main() ip = ParticleFactory.create_proton_along( bl.trajectory, gantry.first_bending_part_length() + gantry.DL2, 215 ) number = 5 length = 2 pp = PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_xxp_plane( 3.5*MM, 7.5*MM, 0.0, number ) ps = ParticleFactory.create_from_phase_space_particles( ip, ip.get_natural_coordinate_system(), pp ) ps_copy_for_cpu = [p.copy() for p in ps] s = time.time() ParticleRunner.run_only(ps_copy_for_cpu, bl, length, 10*MM, number) print(f"CPU finish,time = {time.time()-s} s") for p in ps_copy_for_cpu: print(p) ps_copy_for_gpu32 = [p.copy() for p in ps] s = time.time() ga32.track_multi_particle_for_magnet_with_single_qs( bl, ps_copy_for_gpu32, length, 10*MM) print(f"GPU32 finish,time = {time.time()-s} s") for i in range(len(ps_copy_for_gpu32)): print(f"diff={ps_copy_for_cpu[i]-ps_copy_for_gpu32[i]}") ps_copy_for_gpu64 = [p.copy() for p in ps] s = time.time() ga64_b512.track_multi_particle_for_magnet_with_single_qs( bl, ps_copy_for_gpu64, length, 10*MM) print(f"GPU64 finish,time = {time.time()-s} s") for i in range(len(ps_copy_for_gpu64)): print(f"diff={ps_copy_for_cpu[i]-ps_copy_for_gpu64[i]}") def test_track_particles_multi_beamline(self): """ CPU time = 46.470869064331055 p=[7.347045605483152, -5.038190852364211, -0.008126687951838598],v=[153429.15883334717, -174317566.6083884, -222984.23247565638],v0=174317774.94179922 p=[7.334089667832231, -5.030501484758217, -0.008305356296718356],v=[-171544.4749936812, -175781388.6674571, -229869.1911368924],v0=175781619.95982552 p=[7.359979027555547, -5.0455122541805535, -0.007776454452929961],v=[457482.144593381, -172821389.29986903, -212754.63869286922],v0=172822122.75297824 p=[7.347965549877004, -5.036742197173884, -0.00680266146120651],v=[121395.76132774584, -174317594.97821772, -220906.61484601715],v0=174317774.94179922 p=[7.348650531739526, -5.0330294642527775, -0.00660374692590846],v=[227297.55545838206, -175781340.8954671, 
-217122.2981601591],v0=175781619.95982552 p=[7.34475355176842, -5.03989404652068, -0.007153724839178421],v=[-58106.42448355021, -172821965.44718364, -228721.36663409878],v0=172822122.75297824 p=[7.347558101890118, -5.0374728307509296, -0.007485100785398123],v=[138628.18010870926, -174317578.3359581, -222383.28587266372],v0=174317774.94179922 p=[7.341713322047886, -5.031883095258832, -0.007453546571116006],v=[39058.28246667987, -175781473.9719213, -223465.34914476663],v0=175781619.95982552 p=[7.3519134653687255, -5.042608451774057, -0.007566444532410116],v=[185849.5993363538, -172821879.26466498, -223066.98384745672],v0=172822122.75297824 GPU64 GPU64 time = 5.543462753295898 p=[7.3470456054831565, -5.038190852364211, -0.008126687951838529],v=[153429.1588334337, -174317566.6083884, -222984.232475657],v0=174317774.94179922 p=[7.334089667832242, -5.030501484758215, -0.008305356296717829],v=[-171544.4749934145, -175781388.6674571, -229869.19113686983],v0=175781619.95982552 p=[7.359979027555571, -5.045512254180555, -0.007776454452929699],v=[457482.14459405426, -172821389.29986906, -212754.6386928537],v0=172822122.75297824 p=[7.347965549877003, -5.03674219717388, -0.006802661461206674],v=[121395.76132773953, -174317594.97821763, -220906.61484602414],v0=174317774.94179922 p=[7.348650531739527, -5.033029464252767, -0.006603746925910345],v=[227297.55545837895, -175781340.89546695, -217122.2981601885],v0=175781619.95982552 p=[7.344753551768423, -5.039894046520685, -0.007153724839179523],v=[-58106.424483512936, -172821965.44718373, -228721.36663410708],v0=172822122.75297824 p=[7.347558101890107, -5.0374728307509296, -0.0074851007853986564],v=[138628.18010849392, -174317578.33595803, -222383.2858726776],v0=174317774.94179922 p=[7.341713322047882, -5.031883095258819, -0.007453546571116809],v=[39058.28246653917, -175781473.97192112, -223465.34914477202],v0=175781619.95982552 p=[7.3519134653687255, -5.042608451774063, -0.007566444532410439],v=[185849.59933636745, 
-172821879.26466507, -223066.98384745035],v0=172822122.75297824 diff=p=[4.440892098500626e-15, 0.0, 6.938893903907228e-17],v=[8.65256879478693e-08, 0.0, -6.111804395914078e-10],v0=0.0 diff=p=[1.0658141036401503e-14, 1.7763568394002505e-15, 5.273559366969494e-16],v=[2.666783984750509e-07, 0.0, 2.255546860396862e-08],v0=0.0 diff=p=[2.398081733190338e-14, -1.7763568394002505e-15, 2.6194324487249787e-16],v=[6.732880137860775e-07, -2.9802322387695312e-08, 1.5512341633439064e-08],v0=0.0 diff=p=[-8.881784197001252e-16, 3.552713678800501e-15, -1.6393136847980827e-16],v=[-6.315531209111214e-09, 8.940696716308594e-08, -6.984919309616089e-09],v0=0.0 diff=p=[8.881784197001252e-16, 1.0658141036401503e-14, -1.884777056648801e-15],v=[-3.1141098588705063e-09, 1.4901161193847656e-07, -2.9423972591757774e-08],v0=0.0diff=p=[2.6645352591003757e-15, -5.329070518200751e-15, -1.1015494072452725e-15],v=[3.727473085746169e-08, -8.940696716308594e-08, -8.294591680169106e-09],v0=0.0 diff=p=[-1.0658141036401503e-14, 0.0, -5.334274688628682e-16],v=[-2.1533924154937267e-07, 5.960464477539063e-08, -1.3882527127861977e-08],v0=0.0 diff=p=[-3.552713678800501e-15, 1.2434497875801753e-14, -8.031769693772617e-16],v=[-1.407024683430791e-07, 1.7881393432617188e-07, -5.384208634495735e-09],v0=0.0 diff=p=[0.0, -6.217248937900877e-15, -3.226585665316861e-16],v=[1.3649696484208107e-08, -8.940696716308594e-08, 6.373738870024681e-09],v0=0.0 """ BaseUtils.i_am_sure_my_code_closed_in_if_name_equal_main() bl1 = HUST_SC_GANTRY().create_beamline() bl2 = HUST_SC_GANTRY(qs3_gradient=7).create_beamline() bl3 = HUST_SC_GANTRY(qs3_gradient=0).create_beamline() p1 = ParticleFactory.create_proton_along( bl.trajectory, gantry.first_bending_part_length() + gantry.DL2, 215 ) p2 = ParticleFactory.create_proton_along( bl.trajectory, gantry.first_bending_part_length() + gantry.DL2, 220 ) p3 = ParticleFactory.create_proton_along( bl.trajectory, gantry.first_bending_part_length() + gantry.DL2, 210 ) ps_cpu1 = [p1.copy(), 
p2.copy(), p3.copy()] ps_cpu2 = [p1.copy(), p2.copy(), p3.copy()] ps_cpu3 = [p1.copy(), p2.copy(), p3.copy()] ps_gpu32 = [p1.copy(), p2.copy(), p3.copy()] ps_gpu64 = [p1.copy(), p2.copy(), p3.copy()] print("CPU") s = time.time() ParticleRunner.run_only(ps_cpu1, bl1, 10, 20*MM, 6) ParticleRunner.run_only(ps_cpu2, bl2, 10, 20*MM, 6) ParticleRunner.run_only(ps_cpu3, bl3, 10, 20*MM, 6) print(f"CPU time = {time.time()-s}") for p in ps_cpu1+ps_cpu2 + ps_cpu3: print(p) print("GPU64") s = time.time() ps_end = ga64_b512.track_multi_particle_beamlime_for_magnet_with_single_qs( [bl1, bl2, bl3], ps_gpu64, 10, 20*MM ) print(f"GPU64 time = {time.time()-s}") for ps in ps_end: for p in ps: print(p) for gid in range(3): for pid in range(3): print(f"diff={ps_end[gid][pid]-(ps_cpu1+ps_cpu2 + ps_cpu3)[gid*3+pid]}") # Test().test_track_particles_multi_beamline() unittest.main(verbosity=1)
import mysvg
import os
import mine


def generate_codes(svg_file = None, output_dir = "/home/peter/ink"):
    # Build a {code_id: verb_code} mapping from the inkscape verb reference
    # file, skipping ids that already have a rendered PNG in ink_similar.
    # NOTE(review): svg_file and output_dir are unused here — presumably kept
    # so the signature matches the mine.getp() factory callback; confirm.
    filename = "/home/peter/setup/ref/verbs"
    with open(filename, "r") as f:
        datainput = f.readlines()
    # Existing PNG basenames double as "already done" markers.
    skip_list = mine.get_files("/home/peter/ink_similar", "png")
    skip_list = [os.path.splitext(afile)[0] for afile in skip_list]
    ret = {}
    for aline in datainput:
        # Keep only lines mentioning all three keywords.
        if "nopref" not in aline or "filter" not in aline or "inkscape" not in aline:
            continue
        code = aline.split(":")[0]
        # The id is the second-to-last dot-separated component of the code.
        code_id = code.split(".")[-2]
        if code_id in skip_list:
            continue
        ret[code_id] = code
    return ret


def generate(svg_file, output_dir = "/home/peter/ink"):
    # Render one PNG per verb code by driving inkscape in batch mode.
    os.makedirs(output_dir, exist_ok=True)
    if not svg_file:
        svg_file = mysvg.get_template()
    codes = mine.getp("verb_codes", generate_codes)
    for code_id, code in codes.items():
        # Currently restricted to a single code while debugging.
        if code_id != "f008":
            continue
        print(f"code : {code }")
        # NOTE(review): the `continue` below makes the export unreachable —
        # looks like a temporary dry-run switch; confirm before removing.
        continue
        output=f"{output_dir}/{code_id}.png"
        # os.system(f"inkscape \
        os.system(f"/home/peter/gits/inkscape/build/install_dir/bin/inkscape \
--with-gui\
--batch-process\
--verb=\"EditSelectAll;EditSelectAll;{code}\"\
--export-type=png \
--export-filename={output} {svg_file}")


if __name__ == '__main__':
    # codes = mine.getp("verb_codes")
    # print(f"codes : {codes }")
    generate("/mnt/c/Home/clean_lower_a.svg", "/home/peter/ink_letter_a_f008")
import re
from django import forms
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _  # NOTE(review): ugettext is removed in Django 4 — gettext is the modern name
from datetime import timedelta
from django.utils import timezone
from django.utils.crypto import get_random_string
from apps.cprofile.models import CProfile


class SignupForm(forms.Form):
    """Self-service signup form.

    Validates username/email uniqueness, then creates an inactive User and
    a CProfile carrying a 24-hour activation key, and emails that key.
    """

    username = forms.CharField()
    email = forms.EmailField()
    password = forms.CharField(widget=forms.PasswordInput())
    fullname = forms.CharField(max_length=128)
    phone = forms.CharField()
    accept_terms = forms.BooleanField(label=_("I agree to the terms and conditions"))

    def clean_username(self):
        """Reject usernames that already exist (EAFP: lookup, pass on miss)."""
        data = self.cleaned_data
        try:
            User.objects.get(username=data['username'])
        except User.DoesNotExist:
            return data['username']
        raise forms.ValidationError(_("This username already exists."))

    def clean_email(self):
        """Reject emails that already belong to a user."""
        data = self.cleaned_data
        try:
            User.objects.get(email=data['email'])
        except User.DoesNotExist:
            return data['email']
        raise forms.ValidationError(_("This email already exists."))

    def clean_phone(self):
        # No validation currently — returned as-is.
        data = self.cleaned_data
        return data['phone']

    def save(self, request):
        """Create the inactive user + profile and send the activation email.

        NOTE(review): fullname is stored in first_name and the phone number
        in last_name — looks deliberate but worth confirming.
        """
        data = self.cleaned_data
        user = User.objects.create_user(
            first_name=data['fullname'],
            last_name=data['phone'],
            username=data['username'],
            email=data['email'],
            password=data['password'],
            is_active=False,
        )
        # Random 64-char key, valid for one day.
        activation_key = get_random_string(64).lower()
        key_expires = timezone.now() + timedelta(days=1)
        profile = CProfile.objects.create(
            user=user,
            activation_key=activation_key,
            key_expires=key_expires,
        )
        profile.send_email_verified(request)
        return True
'''
Video filters with support for motion-compensation
'''
import cv2  # type: ignore # Tell MyPy to ignore missing type hints
import numpy as np
from multiprocessing import Pool
from typing import Iterable

from mcvf import motion_estimation


class Filter:
    '''Base class for video filters'''

    def __init__(self):
        pass

    def filter_frames(self, frames: list[np.ndarray]) -> Iterable[np.ndarray]:
        '''
        Parse the given list of frames and return a new list of filtered ones

        Parameters
        ----------
        frames : list[np.ndarray]
            A list of frames (as NumPy arrays) to filter

        Returns
        -------
        frames : list[np.ndarray]
            A list of filtered frames
        '''
        pass


class MCFilter(Filter):
    '''Base class for motion-compensated video filters'''

    def __init__(self, block_size: int):
        '''
        Parameters
        ----------
        block_size : int
            The size in pixel of the blocks in which the frames will be subdivided
        '''
        # BUG FIX: the original wrote bare `super()`, which builds a proxy
        # object and discards it — the parent constructor was never invoked.
        super().__init__()
        self.block_size = block_size

    def filter_frames(self, frames: list[np.ndarray]) -> Iterable[np.ndarray]:
        '''
        Parse the given list of frames contextually with a Motion Field and
        return a new list of filtered ones

        Parameters
        ----------
        frames : list[np.ndarray]
            A list of frames (as NumPy arrays) to filter

        Returns
        -------
        frames : list[np.ndarray]
            A list of filtered frames
        '''
        pass


class GaussianFilter(Filter):
    '''Low-Pass Gaussian blur'''

    def __init__(self):
        # 5x5 integer Gaussian kernel, normalized by its sum (273); used
        # only by the slow reference path in _filter_frame.
        self.kernel = np.array([
            [1,  4,  7,  4, 1],
            [4, 16, 26, 16, 4],
            [7, 26, 41, 26, 7],
            [4, 16, 26, 16, 4],
            [1,  4,  7,  4, 1]
        ])/273

    def filter_frames(self, frames: list[np.ndarray]) -> Iterable[np.ndarray]:
        '''
        Apply a gaussian blur with 5x5 kernel

        Parameters
        ----------
        frames : list[np.ndarray]
            A list of frames (as NumPy arrays) to filter

        Returns
        -------
        frames : list[np.ndarray]
            A list of filtered frames
        '''
        if len(frames) == 0:
            return []
        if frames[0].ndim != 3:
            raise ValueError("Frame arrays are expected to have 3 dimensions")

        # Frames are independent, so blur them in parallel.
        with Pool() as p:
            return p.map(self._filter_frame, frames)

    def _filter_frame(self, frame: np.ndarray):
        '''Blur a single frame. One frame per worker process.'''
        new_frame = np.ndarray(shape=frame.shape, dtype=frame.dtype)

        if True:
            # Faster OpenCV implementation
            return cv2.GaussianBlur(frame, (5, 5), 1)
        else:
            # Slower self-implementation, kept as a reference; border pixels
            # (outside the kernel radius) are left uninitialized.
            height, width, channels = frame.shape
            new_frame = np.ndarray(shape=frame.shape, dtype=frame.dtype)
            kh, kw = self.kernel.shape[0]//2, self.kernel.shape[1]//2

            for x in range(kw, width-kw):
                for y in range(kh, height-kh):
                    pixel = [0, 0, 0]
                    for kern_x in range(-kw, kw):
                        for kern_y in range(-kh, kh):
                            pixel += frame[y+kern_y, x+kern_x]*self.kernel[kh+kern_y][kw+kern_x]
                    new_frame[y, x] = pixel

            return new_frame


class MCGaussianFilter(MCFilter):
    '''Motion-Compensated Low-Pass gaussian blur'''

    def __init__(self, block_size: int):
        super().__init__(block_size)

    def filter_frames(self, frames: list[np.ndarray]) -> Iterable[np.ndarray]:
        '''
        Apply a 5x5 gaussian blur to the frames where motion is not present

        Parameters
        ----------
        frames : list[np.ndarray]
            A list of frames (as NumPy arrays) to filter

        Returns
        -------
        frames : list[np.ndarray]
            A list of filtered frames
        '''
        BBME = motion_estimation.BBME(frames, block_size=self.block_size)
        MF = BBME.calculate_motion_field()

        # One motion field per frame transition: len(MF) == len(frames) - 1.
        if len(frames) - 1 != len(MF):
            raise ValueError("Size mismatch: %d (-1) frames / %d MFs" % (len(frames), len(MF)))

        # The first frame has no predecessor, hence no motion field.
        yield frames[0]
        for frame, mf in zip(frames[1:], MF):
            yield self._filter_frame(frame, mf)

    def _filter_frame(self, frame: np.ndarray, mf: np.ndarray):
        '''Blur (in place) only the blocks whose motion vector is zero.'''
        bh, bw = mf.shape
        bs = self.block_size

        for bx in range(bw):
            for by in range(bh):
                v = mf[by, bx]
                # A vector pointing at its own origin means "no motion".
                if v.origin_x == v.target_x and v.origin_y == v.target_y:
                    x, y = bx*bs, by*bs
                    frame[y:y+bs, x:x+bs] = cv2.GaussianBlur(frame[y:y+bs, x:x+bs], (5, 5), 1)

        return frame


class MCDarkenFilter(MCFilter):
    '''Motion-Compensated darkening filter'''

    def __init__(self, block_size: int):
        super().__init__(block_size)

    def filter_frames(self, frames: list[np.ndarray]) -> Iterable[np.ndarray]:
        '''
        Darken the frame areas where motion is not present

        Parameters
        ----------
        frames : list[np.ndarray]
            A list of frames (as NumPy arrays) to filter

        Returns
        -------
        frames : list[np.ndarray]
            A list of filtered frames
        '''
        BBME = motion_estimation.BBME(
            frames,
            block_size=self.block_size,
            window_size=3,
            algorithm='EBBME'
        )
        MF = BBME.calculate_motion_field()

        if len(frames) - 1 != len(MF):
            raise ValueError("Size mismatch: %d (-1) frames / %d MFs" % (len(frames), len(MF)))

        yield frames[0]
        for frame, mf in zip(frames[1:], MF):
            yield self._filter_frame(frame, mf)

    def _filter_frame(self, frame: np.ndarray, mf: np.ndarray):
        '''Darken (integer-divide by 3, in place) the motionless blocks.'''
        bh, bw = mf.shape
        bs = self.block_size

        for bx in range(bw):
            for by in range(bh):
                v = mf[by, bx]
                if v.origin_x == v.target_x and v.origin_y == v.target_y:
                    x, y = bx*bs, by*bs
                    frame[y:y+bs, x:x+bs] = frame[y:y+bs, x:x+bs]//3

        return frame


class MFDrawerFilter(Filter):
    '''
    A drawer filter to render motion vectors onto each frame

    Attributes
    ----------
    block_size : int
        The size in pixel of the blocks in which the frames will be subdivided
    '''

    def __init__(self, block_size: int):
        '''
        Parameters
        ----------
        block_size : int
            The size in pixel of the blocks in which the frames will be subdivided
        '''
        self.block_size = block_size

    def filter_frames(self, frames: list[np.ndarray]) -> Iterable[np.ndarray]:
        '''
        Overlay a needle diagram to each frame showing its motion field

        Parameters
        ----------
        frames : list[np.ndarray]
            A list of frames (as NumPy arrays) to filter

        Returns
        -------
        frames : list[np.ndarray]
            A list of filtered frames
        '''
        BBME = motion_estimation.BBME(
            frames,
            block_size=self.block_size,
            window_size=15,
            algorithm='2DLS'
        )

        yield frames[0]
        for frame, mf in zip(frames[1:], BBME.calculate_motion_field()):
            new_f = frame
            for row in mf:
                for vector in row:
                    new_f = cv2.arrowedLine(
                        new_f,
                        (vector.origin_x, vector.origin_y),
                        (vector.target_x, vector.target_y),
                        (0, 0, 200),
                        thickness=1,
                        tipLength=0.2
                    )
            yield new_f


class MCMovingAvergeFilter(MCFilter):
    '''Motion-compensated temporal moving average over up to 4 past frames.'''

    def __init__(self, block_size: int):
        super().__init__(block_size)

    def filter_frames(self, frames: list[np.ndarray]) -> Iterable[np.ndarray]:
        BBME = motion_estimation.BBME(
            frames,
            block_size=self.block_size,
            window_size=15,
            algorithm='2DLS'
        )

        self.frames: list[np.ndarray] = list(frames)
        MF = BBME.calculate_motion_field()
        # mf_map[i] maps a block coordinate in frame i back to its source
        # block in frame i-1; frame 0 has no predecessor, hence the empty {}.
        self.mf_map: list[dict] = list([{}] + [self._map_MF(mf) for mf in MF])

        with Pool() as p:
            return [self.frames[0]] + p.map(
                self._filter_frame,
                [i for i in range(1, len(self.frames))]
            )

        # for i, f in enumerate(frames):
        #     print("%.2f%%" % (100*i/len(self.frames)), end='\r')
        #     if i == 0:
        #         continue
        #     yield self._filter_frame(i)
        # print()

    def _map_MF(self, mf: list[np.ndarray]) -> dict:
        '''Turn a motion field into a {target_block: origin_block} dict.'''
        mf_map = {}
        bs = self.block_size
        for row in mf:
            for v in row:
                A = (v.origin_x//bs, v.origin_y//bs)
                B = (v.target_x//bs, v.target_y//bs)
                # Only record blocks that actually moved.
                if A[0] != B[0] or A[1] != B[1]:
                    mf_map[B] = A
        return mf_map

    def _filter_frame(self, f_idx: int) -> np.ndarray:
        '''Average every pixel of frame f_idx with its motion-tracked past.'''
        h, w, c = self.frames[0].shape
        new_f = np.ndarray(shape=(h, w, c), dtype=self.frames[0].dtype)
        for y in range(h):
            for x in range(w):
                new_f[y, x] = self._filter_pixel(f_idx, x, y)
        return new_f

    def _filter_pixel(self, f_idx: int, x: int, y: int) -> np.ndarray:
        '''Blend the current pixel (weight 1-alpha) with the mean of its
        motion-compensated positions in up to 4 previous frames (alpha).
        NOTE(review): arithmetic happens in the frame dtype — assumes it
        does not overflow for typical uint8 input; confirm upstream dtype.'''
        alpha = 0.2
        N = min(4, f_idx)
        res = self.frames[f_idx][y, x] * (1 - alpha)
        tmp = np.array([0, 0, 0])
        target = (x, y)

        for n in range(N):
            # Follow the motion chain backwards frame by frame.
            if target in self.mf_map[f_idx - n]:
                target = self.mf_map[f_idx - n][target]
            tx, ty = target
            for ch in range(3):
                tmp[ch] += self.frames[f_idx - n - 1][ty, tx][ch] * 1/N

        res += tmp * alpha
        return res
import numpy as np
import torch
import pandas as pd
from matplotlib import pyplot as plt
import sys
import dataloader

# TODO figsize argument
# Half the image side length in pixels; predictions are normalized to
# [-1, 1], so pt * HALF_FIGSIZE + HALF_FIGSIZE maps back to pixel space.
HALF_FIGSIZE = 113


def save_figures(X, y, savename):
    """Plot the first 16 images of batch X with their predicted keypoints
    overlaid, and save the 4x4 grid to `savename`.

    X is assumed to be a (batch, 1, H, W) tensor and y a flat
    (batch, 30) keypoint tensor — TODO confirm against the caller.
    """
    X_, y_ = X.squeeze(dim=1).detach().cpu().numpy(), y.detach().cpu().numpy()
    fig = plt.figure(figsize=(12, 12))
    # Remove all margins so the images tile edge to edge.
    fig.subplots_adjust(
        left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05
    )
    for i in range(16):
        axis = fig.add_subplot(4, 4, i + 1, xticks=[], yticks=[])
        axis.imshow(X_[i])
        # 15 (x, y) pairs, denormalized from [-1, 1] to pixel coordinates.
        points = np.vstack(np.split(y_[i], 15)).T * HALF_FIGSIZE + HALF_FIGSIZE
        axis.plot(points[0], points[1], 'o', color='red')
    fig.savefig(savename)


def generate_csv(output, filepath='data/IdLookupTable.csv'):
    """Build a Kaggle `submission.csv` from model output.

    `output` is the model's prediction tensor (one row per image);
    `filepath` is the competition's RowId/ImageId/FeatureName lookup table.
    """
    lookid_data = pd.read_csv(filepath)
    lookid_list = list(lookid_data['FeatureName'])
    imageID = list(lookid_data['ImageId'] - 1)  # ImageId is 1-based
    pre_list = list(output.cpu().numpy())
    rowid = list(lookid_data['RowId'])
    feature = []
    # Map each feature name to its (first-occurrence) column index.
    for f in list(lookid_data['FeatureName']):
        feature.append(lookid_list.index(f))
    preded = []
    for x, y in zip(imageID, feature):
        preded.append(pre_list[x][y])
    rowid = pd.Series(rowid, name='RowId')
    loc = pd.Series(preded, name='Location')
    # TODO debug here, is it okay to keep 48 to submit ?
    # (denormalizes predictions from [-1, 1] to the 96-px competition scale)
    submission = pd.concat([rowid, loc * 48 + 48], axis=1)
    submission.to_csv('submission.csv', index=False)
class configuration(object):
    """Connection settings for the Twitch IRC chat bot."""

    def __init__(self):
        # IRC endpoint.
        self.server = 'irc.twitch.tv'
        self.port = 6667
        # Buffer for data read off the socket.
        self.readbuffer = ""
        # Edit according to your details.
        self.channel = "#enter Chat Channel Here"
        self.user = "Your Bots User Name"
        self.passw = "oauth:The oath key"
# -*-coding=utf-8-*-
# Python 2 scratchpad of small list/dict/iterator experiments.
import time

__author__ = 'Rocky'
import json
from Queue import Queue
# from multiprocessing import Queue


def base_usage():
    # List concatenation, de-duplication and index lookups.
    a = [1, 2, 3, 4]
    b = [54, 3, 2, 1, 2]
    c = a + b
    print(c)
    print(set(c))
    d = list(set(c))
    print(d)
    print({}.fromkeys(c).keys())
    x= d.index(54)
    print(x)
    # for i in x:
    #     print(i)
    urlss = ['http://bj.xiaozhu.com/search-duanzufang-p{}-0/'.format(str(i)) for i in range(1, 6, 1)]
    print(urlss)
    for i in urlss:
        print(i)
    # One JSON document per line in json.txt.
    record = [json.loads(line) for line in open('json.txt')]
    print(record)
    print('\n')
    for line in open('json.txt'):
        print(line)
        print("\n")


def getCount(strings):
    # Manual character frequency count (what collections.Counter does).
    counts = {}
    for x in strings:
        if x in counts:
            counts[x] += 1
        else:
            counts[x] = 1
    return counts


def empty_test():
    my_list = [0, 0, 0, 0, 0, 0]
    if 0 in my_list:
        print('empty')
    my_list = []
    print(type(my_list))
    if len(my_list) == 0:
        print("it was None")


def modify_list():
    lst = list("IamRocky")
    print(lst)
    for i, strs in enumerate(lst):
        print("Index", i, "content", strs)


def in_test():
    a = ['sam', 'disk', 'jacky', 'homeless']
    b = 'jacky'
    if b in a:
        print(b)
    else:
        print("not there")


def delete_item_list():
    # NOTE(review): removing from a list while iterating it — classic
    # pitfall; works here only by accident of the data layout.
    x = [[1, 2], [3, 4], [5, 6], [7, 8]]
    for i in x:
        # print(i)
        if 5 in i:
            x.remove(i)
    print(x)


def generator_list():
    # Generator expression vs list comprehension.
    g = (sum(i) for i in [(1, 2, 3), (4, 5, 6), (7, 8, 9)])
    h = [sum(i) for i in [(1, 2, 3), (4, 5, 6), (7, 8, 9)]]
    print(type(g))
    print(type(h))
    print(g)
    #print(g.get(1))
    for i in g:
        print(i)
    print(h)


def iter_test():
    a = [1, 2, 3, 4, 5, 6]
    i = iter(a)
    '''
    for x in i:
        print(x)
    print(i)
    '''
    while 1:
        try:
            print(next(i))
            print('while')
        except Exception as e:
            print(e)
            break
    print(i)
    while 1:
        try:
            # No output here: the iterator was exhausted by the previous
            # loop and already points past the last element.
            print(next(i))
        except Exception as e:
            print(e)
            break


def rang_test():
    '''
    for v in range(1000000000000): #possible Memory Error
        if v == 2:
            break
    '''
    for v in xrange(100000): #fine
        if v == 2:
            break


def generator_test(a):
    # Yields the squares 0, 1, 4, ... up to (a-1)^2.
    #a=0
    i = 0
    while i < a:
        yield i * i
        i = i + 1


def use_generator():
    '''
    for i in generator_list(10):
        print(i)
    '''
    #x=generator_test(10)
    #print(x)
    for i in generator_test(10):
        print(i)


def cut_case():
    #y=[1,2,3,4,5,6]
    y = range(0, 200)
    print(y[2:10])
    print(y[190:])


def iter_case2():
    # iter(callable, sentinel): drains the queue until 'd' is returned.
    q = Queue()
    org = ['a', 'b', 'c', 'd', 'e', 'f', 'not see this']
    for i in org:
        q.put(i)
    for j in iter(q.get, 'd'):
        print(j)


def mutebale():
    # Rebinding an int changes the object's identity; str.replace returns
    # a new string and leaves the original untouched.
    a = 2
    print(id(a))
    a = 5
    print(id(a))
    x = 'abc'
    y = x.replace('a', 'A')
    print(x)
    print(y)


def in_usage():
    l = range(100000)
    if 100000 in l:
        print("In")
    else:
        print("Not in")
    city='qd'
    if city in ['bj','qd']:
        print('city in ')
    else:
        print('not in')


def reversed_usage():
    s = 'Python'
    q = reversed(s)
    print(q)
    print(type(q))
    print(list(q))


def remove_list():
    # remove() returns None; pop() returns the removed element.
    l = [1, 2, 3, 4, 5]
    x = l.remove(1)
    print(x)
    print(l)
    x = l.pop(0)
    print(x)
    print(l)


def extendList(val, list=[]):
    # Deliberate mutable-default-argument demo: the default list is shared
    # across calls, so list1 and list3 end up aliasing the same object.
    list.append(val)
    return list


def extend_case():
    l1=[]
    l2=[1,2,3]
    l1.extend(l2)
    print(l1)
    time.sleep(100)
    list1 = extendList(10)
    list2 = extendList(123, [])
    list3 = extendList('a')
    print("list1 = %s" % list1)
    print("list2 = %s" % list2)
    print("list3 = %s" % list3)
    # [[]] * 5 repeats the SAME inner list five times.
    list = [[]] * 5
    print(list)
    print(len(list))
    list[0].append(10)
    print(list)
    list[1].append(20)
    print(list)
    list.append(30)
    print(list)


def list_filter():
    # A slice is a new list object (different id).
    l = [12, 22, 43, 23, 65, 34, 22, 33, 55, 22, 11, 2, 3, 5, 7]
    l1 = l[2:8:2]
    print(l1)
    print(id(l))
    print(id(l1))


def list_change():
    # Lower-case the ticker list, then round-trip a name table through JSON.
    coin_list = ['IFC', 'DOGE', 'EAC', 'DNC', 'MET', 'ZET', 'SKT', 'YTC', 'PLC', 'LKC', 'JBC', 'MRYC', 'GOOC', 'QEC', 'PEB', 'XRP', 'NXT', 'WDC', 'MAX', 'ZCC', 'HLB', 'RSS', 'PGC', 'RIO', 'XAS', 'TFC', 'BLK', 'FZ', 'ANS', 'XPM', 'VTC', 'KTC', 'VRC', 'XSGS', 'LSK', 'PPC', 'ETC', 'GAME', 'LTC', 'ETH', 'BTC']
    l1 = map(lambda x: x.lower(), coin_list)
    print(coin_list)
    print(l1)
    with open('coin_list.cfg', 'w') as f:
        for i in l1:
            f.write(i)
    coin_name = {'zet': u'泽塔币', 'doge': u'狗狗币', 'eac': u'地球币', 'dnc': u'暗网币', 'rio': u'里约币', 'blk': u'黑币', 'ifc': u'无限币', 'met': u'美通币', 'gooc': u'谷壳币', 'jbc': u'聚宝币', 'pgc': u'乐通币', 'lsk': u'LISK', 'tfc': u'传送币', 'xpm': u'质数币', 'nxt': u'未来币', 'ppc': u'点点币', 'ktc': u'肯特币', 'mtc': u'猴宝币', 'skt': u'鲨之信', 'btc': u'比特币', 'peb': u'普银币', 'ltc': u'莱特币', 'xsgs': u'雪山古树', 'eth': u'以太坊', 'vtc': u'绿币', 'bts': u'比特股', 'hlb': u'活力币', 'zcc': u'招财币', 'etc': u'以太经典', 'qec': u'企鹅币', 'fz': u'冰河币', 'plc': u'保罗币', 'max': u'最大币', 'ytc': u'一号币', 'xrp': u'瑞波币', 'lkc': u'幸运币', 'wdc': u'世界币', 'vrc': u'维理币', 'rss': u'红贝壳', 'ans': u'小蚁股', 'xas': u'阿希比', 'game': u'游戏点', 'mryc': u'美人鱼币', 'ugt': u'UG Token', 'ico': u'ICO币', 'tic': u'钛币', 'mcc': u'行云币', 'eos': u'EOS'
                 }
    cn = json.dumps(coin_name)
    print(type(cn))
    with open('coin_list.cfg', 'w') as f:
        f.write(cn)
    with open('coin_list.cfg', 'r') as rf:
        s = rf.read()
    dic = json.loads(s)
    print(type(dic))
    print(dic)
    print(coin_name)
    for k, v in dic.items():
        print(k, v)


def index_usage():
    l1=['R','O','C','K','Y']
    # wrong usage
    print(l1.index)
    for i in l1.index:
        print(i)


def change_value():
    a=[1,2,3,4,5,6,7,8]
    print(a)
    for i in a:
        if i==4:
            i=0
    # a doesn't change
    print(a)


def sort_case():
    # sort() is in place and returns None.
    x=[2,5,3,11,43,33,99,100,66,44]
    print(x.sort())
    print(x)
    y=[10,11,12,13,11,14,10,9]
    y.reverse()
    print(y)
    print(x[::-1])
    print(x)


def mutable():
    # A tuple holding a list: the list inside can still be mutated.
    b=[1,2,3]
    a=(1,2,3,4,b)
    print(a[4])
    b[1]=99
    print(a)
    print(a[4])


# reverse
def slice_case():
    x= range(10)
    print(x)
    y=x[::-1]
    print(y)


def list_generator():
    string = ['uk','hello world','china','jp','usa','canada']
    country =[ x.upper() for x in string if len(x) > 2]
    print(country)
    t1=range(1,11)
    t2=range(11,21)
    t=[t1,t2]
    s = [n2 for n1 in t for n2 in n1 if n2%2==0]
    print(s)


def set_case():
    x={1,2,3,4}
    print(x)
    x.add(4)
    print(x)


def dict_map():
    string=['China','USA','Japan','Hollan']
    dict_index = {value:index for index,value in enumerate(string)}
    print(dict_index)


# in_test()
#delete_item_list()
#generator_list()
#iter_test()
#rang_test()
#use_generator()
#cut_case()
#iter_case2()
#mutebale()
#in_usage()
# base_usage()
#reversed_usage()
#remove_list()
# list_filter()
#list_change()
#extend_case()
# index_usage()
# change_value()
#sort_case()
# mutable()
#slice_case()
# recordx=['a','b','c','a','b']
# r = 'hello'
# count=getCount(r)
# print(count)
# print(count.get('a'))
# modify_list()
# empty_test()
# list_generator()
# set_case()
dict_map()
# -*-coding:utf8-*-
__author__ = 'user'
import threading
import time
import Queue
import socket

# Two queues model a device-listening channel and a network-listening
# channel. Producer produces into the device queue and consumes the
# network queue; Consumer does the opposite. (Python 2 code.)
net_queue = Queue.Queue()
dev_queue = Queue.Queue()
# mylock = threading.RLock()
import logging

# Configure the log-file format.
logging.basicConfig(level=logging.DEBUG,
                    format = '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt= '%a, %d %b %Y %H:%M:%S',
                    filename= 'myproject.log',
                    filemode='a')

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    s.connect(('127.0.0.1', 9999))
except BaseException:
    print 'can not connect to the local host'


class Producer(threading.Thread):
    # Reads socket data into dev_queue and sends commands from net_queue.
    def __init__(self,net_que,dev_que,sock):
        threading.Thread.__init__(self)
        self._net_que = net_que
        self._dev_que = dev_que
        self.sock = sock

    def run(self):
        while True:
            str_rec = self.sock.recv(1024)
            if str_rec != '':
                if self._dev_que.qsize() > 100:
                    # Drop input when the queue is considered full.
                    pass
                else:
                    self._dev_que.put({
                        'from':self.name,
                        'data':str_rec
                    })
                    print '存入 %s 设备监听队列 queue 还有%d' % (str_rec, self._dev_que.qsize())
                    # self._dev_que.task_done()
            if self._net_que.qsize():
                net_order = self._net_que.get(timeout=1)
                try:
                    self.sock.send(net_order)
                    print '发送命令 %s,网络监听队列还剩 %d' % (net_order, self._net_que.qsize())
                except BaseException:
                    print '命令 %s 未发送,网络监听队列还剩 %d' % (net_order, self._net_que.qsize())
            time.sleep(0.1)
            print 'the net thread is still working'
            logging.info('the net thread is still working')


class Consumer(threading.Thread):
    # Periodically produces into net_queue and executes dev_queue commands.
    def __init__(self,net_que,dev_que):
        threading.Thread.__init__(self)
        self._net_que = net_que
        self._dev_que = dev_que
        self._time = time.time()

    def run(self):
        while True:
            if self._net_que.qsize() > 100:
                pass
            else:
                # Enqueue a heartbeat command every 5 seconds.
                if time.time() - self._time > 5:
                    self._net_que.put('laoxu')
                    print '存入 laoxu 到网络监听队列'+" queue 还剩%d" % self._net_que.qsize()
                    self._time = time.time()
            if self._dev_que.qsize():
                print '执行命令'+self._dev_que.get(timeout=1).get('data')+' 设备监听队列还剩 %d' % self._dev_que.qsize()
            time.sleep(0.1)
            print 'the device queue is still working'
            logging.info('the device queue is still working')


def test():
    # Pre-load both queues, then start one producer and one consumer.
    for i in range(5):
        dev_queue.put({'from':threading.current_thread().name,
                       'data':str(i)
                       })
        net_queue.put('xuxubin'+str(i))
        print '%s=>队列' % threading.current_thread().name
    for i in range(1):
        p = Producer(net_queue,dev_queue,s)
        p.start()
    for i in range(1):
        c = Consumer(net_queue,dev_queue)
        c.start()


if __name__ == '__main__':
    test()
#!/usr/bin/env python3 # Required packages # - paho-mqtt # - flask # - flask-Restful # - flask-Cors # - requests # - pymongo # - jsonpatch # - flask-HTTPAuth VERSION = "1.10" import time from threading import Thread from mqtt.CommandIO import CommandIO from rest.RestIO import RestIO from control.Injector import Injector from control.WorkerControl import WorkerControl from data.StorageIO import StorageIO from data.ArchitectureIO import ArchitecureIO print("Starting... (v" + VERSION + ")") # Storage storage = StorageIO() architectureIO = ArchitecureIO() # Worker workerControl = WorkerControl(CommandIO(), storage) t = Thread(target=workerControl.run) t.start() Injector.inject(workerControl, storage, architectureIO) restIO = RestIO() # Never reached print("Bye")
# Relative path of directory for uploaded files
UPLOAD_DIR = 'uploads/'

app.config['UPLOAD_FOLDER'] = UPLOAD_DIR
app.secret_key = 'MySecretKey'

# Create the upload directory on first run.
if not os.path.isdir(UPLOAD_DIR):
    os.mkdir(UPLOAD_DIR)

# Allowed file types for file upload
ALLOWED_EXTENSIONS = set(['txt', 'csv', 'dat', 'npy'])


def allowed_file(filename):
    """Does filename have the right extension?

    The comparison is case-insensitive, so "DATA.CSV" is accepted too
    (the original rejected upper-case extensions).
    """
    # rsplit with maxsplit=1 isolates only the final extension, even for
    # names like "archive.backup.npy".
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
import requests

# Demo: send query-string parameters with a GET request, then show the
# final URL requests built and the echoed response body from httpbin.
data = {
    'name':'zjx',
    'age':23
}
response = requests.get('http://httpbin.org/get',params=data)
print(response.url)
print(response.text)
from ._harfbuzz import *
"""
// Time Complexity : As mentioned in each function
// Space Complexity : O(n)
// Did this code successfully run on Leetcode : problem not there on Leetcode
// Any problem you faced while coding this : No
"""


class ListNode:
    """
    A node in a singly-linked list.
    """
    def __init__(self, data=None, next=None):
        self.data = data
        self.next = next


class SinglyLinkedList:
    def __init__(self):
        """
        Create a new empty singly-linked list.
        Takes O(1) time.
        """
        self.head = None

    def append(self, data):
        """
        Insert a new element at the end of the list.
        Takes O(n) time.
        """
        node = ListNode(data)
        if self.head is None:
            # Empty list: the new node becomes the head.
            self.head = node
            return
        # Otherwise walk to the tail and attach the node there.
        ptr = self.head
        while ptr.next:
            ptr = ptr.next
        ptr.next = node

    def find(self, key):
        """
        Search for the first element with `data` matching `key`.
        Return the element or `None` if not found. Takes O(n) time.

        BUG FIX: the original looped `while ptr.next`, so it never tested
        the last node (and never tested the head of a one-element list).
        """
        ptr = self.head
        while ptr is not None:
            if ptr.data == key:
                return ptr
            ptr = ptr.next
        return None

    def remove(self, key):
        """
        Remove the first occurrence of `key` in the list; no-op when the
        key is absent. Takes O(n) time.

        BUG FIX: the original crashed on an empty list and, by looping on
        `ptr.next.next`, could never remove the tail node.
        """
        if self.head is None:
            return
        if self.head.data == key:
            # Removing the head: just advance it.
            self.head = self.head.next
            return
        ptr = self.head
        while ptr.next:
            if ptr.next.data == key:
                # Unlink the matching node.
                ptr.next = ptr.next.next
                return
            ptr = ptr.next
# -*- coding: utf-8 -*-
import config
import logging
import json
import requests

# Base URL of the IM RPC service; every helper below POSTs/GETs against it.
im_url=config.IM_RPC_URL


def post_peer_message(appid, sender, receiver, content):
    """Deliver a one-to-one message; `content` is sent as the UTF-8 body."""
    params = {
        "appid":appid,
        "receiver":receiver,
        "sender":sender
    }
    url = im_url + "/post_peer_message"
    res = requests.post(url, data=content.encode("utf-8"), params=params)
    return res


def post_group_message(appid, sender, receiver, content):
    """Deliver a group message; `receiver` is the group id."""
    params = {
        "appid":appid,
        "sender":sender,
        "receiver":receiver,
    }
    url = im_url + "/post_group_message"
    res = requests.post(url, data=content.encode("utf-8"), params=params)
    return res


def post_group_notification_s(appid, gid, notification, members):
    """Send a group notification as JSON; may raise on network errors.

    `members` is optional — when falsy, the key is omitted entirely.
    """
    url = im_url + "/post_group_notification"

    obj = {
        "appid": appid,
        "group_id": gid,
        "notification":notification
    }
    if members:
        obj["members"] = members

    headers = {"Content-Type":"application/json"}
    data = json.dumps(obj)
    resp = requests.post(url, data=data, headers=headers)
    if resp.status_code != 200:
        logging.warning("send group notification error:%s", resp.content)
    else:
        logging.debug("send group notification success:%s", data)
    return resp


def post_group_notification(appid, gid, op, members):
    """Best-effort wrapper: serializes `op` and swallows network errors,
    returning None on failure instead of raising."""
    try:
        return post_group_notification_s(appid, gid, json.dumps(op), members)
    except Exception as e:
        logging.warning("send group notification err:%s", e)
        return None


def send_group_notification(appid, gid, op, members):
    """Alias kept for backwards compatibility."""
    return post_group_notification(appid, gid, op, members)


def post_peer_notification(appid, uid, content):
    """Send a plain-text notification to a single user."""
    params = {
        "appid":appid,
        "uid":uid
    }
    url = im_url + "/post_notification"
    headers = {"Content-Type":"text/plain; charset=UTF-8"}
    resp = requests.post(url, data=content.encode("utf8"), headers=headers, params=params)
    return resp


def post_system_message(appid, uid, content):
    """Send a system message (plain text) to a single user."""
    params = {
        "appid":appid,
        "uid":uid
    }
    url = im_url + "/post_system_message"
    headers = {"Content-Type":"text/plain; charset=UTF-8"}
    resp = requests.post(url, data=content.encode("utf8"), headers=headers, params=params)
    return resp


def post_room_message(appid, uid, room_id, content):
    """Send a chat-room message (plain text) on behalf of `uid`."""
    params = {
        "appid":appid,
        "uid":uid,
        "room":room_id
    }
    url = im_url + "/post_room_message"
    headers = {"Content-Type":"text/plain; charset=UTF-8"}
    resp = requests.post(url, data=content.encode("utf8"), headers=headers, params=params)
    return resp


def get_offline_count(appid, uid, platform_id, device_id):
    """Return the user's offline message count, or 0 on a non-200 reply."""
    obj = {
        "appid":appid,
        "uid":uid,
        "device_id":device_id,
        "platform_id":platform_id
    }
    url = im_url + "/get_offline_count"
    logging.debug("url:%s", url)
    headers = {"Content-Type":"application/json"}
    res = requests.get(url, params=obj, headers=headers)
    if res.status_code != 200:
        return 0
    else:
        r = json.loads(res.content)
        return r["data"]["count"]
#!/usr/bin/env python import sys import re # input comes from STDIN (standard input) for line in sys.stdin: try: #sometimes bad data can cause errors use this how you like to deal with lint and bad data FiveDigitCode = "Z" #default sorted as first countyname = "Z" #default sorted as first State2digit = "Z" #default sorted as first Population = "Z" #default sorted as first # remove leading and trailing whitespace line = line.strip() splits = line.split(",") # finds lines from DataSet.txt, outputs first 2 items as array if line.count(",")>5: splits = line.split(",", 2)[0:2] FiveDigitCode = splits[0] Population = splits[1] # removes the country and the state lines from DataSet if re.match('..000',splits[0]): splits= "" else: #do Fips # finds lines from Fips_CountyName.txt, cleans them, outputs 3 items as array # replaces a comma followed by space (state) w/comma splits = line.replace(', ',',') # replaces all remaining spaces w/comma splits = splits.replace(" ",",",1) splits = splits.split(",") FiveDigitCode = splits[0] countyname = splits[1] State2digit = splits[2] print '%s^%s^%s^%s' % (FiveDigitCode,countyname,State2digit,Population) except: #errors are going to make your job fail which you may or may not want pass
#!/usr/bin/env python3
# Conversational SPADE agent: a 3-state FSM that answers the user with
# chatterbot and delegates low-confidence questions to a twitter agent.
import time
from chatterbot import filters
from chatterbot import ChatBot
from chatterbot import response_selection
from chatterbot import comparisons
from spade.agent import Agent
from spade.behaviour import FSMBehaviour, State
from spade.message import Message
from spade.template import Template
import logging

# FSM state names. NOTE: "RECIEVE" misspelling kept — it is part of the
# agent's wire-visible state names.
RECIEVE_STATE = "RECIEVE_STATE"
SEND_STATE = "SEND_STATE"
END_STATE = "END_STATE"

logging.basicConfig(level=logging.CRITICAL)

# Last question typed by the user; shared between the two states.
user_question = ""

#sentiment_comparison, jaccard_similarity, synset_distance, levenshtein_distance
bot = ChatBot('ChatAgent',
              storage_adapter='chatterbot.storage.SQLStorageAdapter',
              logic_adapters=[
                  {'import_path':'chatterbot.logic.BestMatch',
                   "statement_comparison_function": comparisons.jaccard_similarity,
                   "response_selection_method": response_selection.get_random_response,
                   'default_response':'I am sorry but I do not know what to say, ask me later'}],
              database_uri='sqlite:///db.sqlite3',
              read_only=True,
              filters=[filters.get_recent_repeated_responses]
              )


class BotBehaviour(FSMBehaviour):
    """FSM wrapper that logs the lifecycle of the conversation."""
    async def on_start(self):
        print(f"Agent starting at the initial state {self.current_state}")

    async def on_end(self):
        print(f"Agent finished at state {self.current_state}")


class EndState(State):
    """Terminal state: stops the behaviour."""
    async def run(self):
        print("Agent is shutting down.")
        self.kill()


class RecieveState(State):
    """Wait for the twitter agent's reply, then answer the stored question."""
    async def run(self):
        global user_question
        print("Still thinking!")
        self.msg = await self.receive(timeout=20)
        if(self.msg):
            #if message isn't empty, and has passed the template it should be the message we need
            if self.msg.body != "" and self.msg.body!="failure":
                response = bot.get_response(user_question)
                print (response)
                self.set_next_state(SEND_STATE)
            else:
                self.set_next_state(RECIEVE_STATE)
        else:
            # Timed out waiting for help — give up and go back to the user.
            print("Sorry, can't think of anything,really")
            self.set_next_state(SEND_STATE)


class SendState(State):
    """Read user input; delegate low-confidence answers, otherwise reply."""
    async def run(self):
        global user_question
        minimal_confidence = 0.2
        user_input = input("What would you like to talk about? ")
        if (user_input != "") and (user_input.lower()!="exit"):
            response = bot.get_response(user_input)
            user_question = user_input
            #if response score is low don't show it but instead ask bot to learn something new
            if response.confidence <= minimal_confidence:
                msg = Message(to = "twitterAgent@localhost")
                msg.sender = "chattingAgent@localhost"
                msg.body = user_input
                msg.set_metadata("performative","inform")
                msg.set_metadata("ontology","research-theme")
                await self.send(msg)
                print("One moment please, I'm trying to think of an anwser")
                self.set_next_state(RECIEVE_STATE)
            else:
                print(response)
                # it should again enter this same state because the talk should continue without using other agent
                self.set_next_state(SEND_STATE)
        elif user_input.lower()=="exit":
            self.set_next_state(END_STATE)
        else:
            self.set_next_state(RECIEVE_STATE)


class ChattingAgent(Agent):
    """Assembles the FSM, its transitions, and the message template."""
    def setup(self):
        print("Chatting agent created")
        behaviour = BotBehaviour()
        behaviour.add_state(name=SEND_STATE, state=SendState(), initial = True)
        behaviour.add_state(name=RECIEVE_STATE, state=RecieveState())
        behaviour.add_state(name=END_STATE, state=EndState())
        behaviour.add_transition(source=SEND_STATE, dest=RECIEVE_STATE)
        behaviour.add_transition(source=SEND_STATE, dest=SEND_STATE)
        behaviour.add_transition(source=SEND_STATE, dest=END_STATE)
        behaviour.add_transition(source=RECIEVE_STATE, dest=SEND_STATE)
        behaviour.add_transition(source=RECIEVE_STATE, dest=RECIEVE_STATE)
        # Only messages matching this template reach RecieveState.
        template = Template()
        template.to="chattingAgent@localhost"
        template.set_metadata("performative","inform")
        template.set_metadata("ontology","research-theme")
        self.add_behaviour(behaviour, template)


if __name__ == "__main__":
    chatAgent = ChattingAgent('chattingAgent@localhost','chitchat')
    chatAgent.start()
    # Keep the main thread alive until Ctrl-C or agent shutdown.
    while chatAgent.is_alive():
        try:
            time.sleep(1)
        except KeyboardInterrupt:
            chatAgent.stop()
            break
from rest_framework.test import APITestCase
from rest_framework.status import HTTP_200_OK, HTTP_401_UNAUTHORIZED


class AccountTests(APITestCase):
    """Integration tests for the /accounts/ list endpoint."""

    fixtures = ['users', 'accounts']

    def test_should_be_authenticated(self):
        # Anonymous requests must be rejected.
        resp = self.client.get('/accounts/')
        self.assertEqual(resp.status_code, HTTP_401_UNAUTHORIZED)

    def test_should_only_retrieve_his(self):
        # A logged-in user sees exactly their own accounts from the fixtures.
        self.client.login(username='sieira', password='Pa$$word1234')
        resp = self.client.get('/accounts/')
        expected = [
            {'id': 1, 'name': 'Million dollars account'},
            {'id': 2, 'name': 'Poor guys account'},
        ]
        self.assertEqual(resp.status_code, HTTP_200_OK)
        self.assertEqual(resp.json(), expected)
from selenium.webdriver import Chrome import json class BDriver(Chrome): with open('../Resources/DriverConfig.json', 'r') as fp: data = json.load(fp) exe_path = data['exe_path'] def __init__(self, url): Chrome.__init__(self, executable_path=BDriver.exe_path) self.url = url self.maximize_window() self.get(self.url) def driver_close(self): self.close()
#!usr/bin/python import os import urllib2 def main(): fqn = os.uname()[1] ext_ip = urllib2.urlopen('http://whatismyip.org').read() print ("Asset: %s " % fqn, "Checking in from IP#: %s " % ext_ip) print 'hi this is a update to check jenkin call' if __name__ == '__main__': main ()
""" Here, we create a custom dataset """ import torch import time import pickle import json from utils.types import PathT from torch.utils.data import Dataset from typing import Any, Tuple, Dict, List from torchvision import transforms from PIL import Image from collections import defaultdict from compute_softscore import preprocess_answer UNKNOWN_TOKEN = "<unk>" PAD_TOKEN = "<pad>" # Optional: this is used to pad a batch of sentences in different lengths. SPECIAL_TOKENS = [PAD_TOKEN, UNKNOWN_TOKEN] MAX_QUESTION_LEN = 20 class VQADataset(Dataset): """ VQA dataset """ def __init__(self, path_answers: PathT, path_image: PathT, path_questions: PathT, word_dict=None) -> None: # Set variables self.path_answers = path_answers self.path_image = path_image self.path_questions = path_questions # if word_dict is already built, it's val dataset self.is_val = False if word_dict is None else True # Load Q&A self.questionsAnswers = self._get_questions_answers() # Set picture size self.pic_size = 224 if word_dict is None: self.word_dict = defaultdict(int) # Create vocabs of entries self.get_vocabs() else: self.word_dict = word_dict self.word_idx_mappings, self.idx_word_mappings = self.init_word_vocab(self.word_dict) self.vocab_size = len(self.idx_word_mappings) # Create list of entries self.entries = self._get_entries() def __getitem__(self, index: int) -> Tuple: """ :param index: :return: item's image, question, question len, labels """ path = self.path_image +str(self.entries[index]['image_id']).zfill(12)+'.jpg' image = self._get_images(path) return image, self.entries[index]['question'],self.entries[index]['question_len'], self.entries[index]['labels'].to_dense() def __len__(self) -> int: """ :return: the length of the dataset (number of sample). 
""" return len(self.entries) def _get_questions_answers(self) -> Any: """ Load all features into a structure :return: :rtype: """ with open(self.path_answers, "rb") as features_file: features = pickle.load(features_file) questions_answers = {item['question_id']: item for item in features} with open(self.path_questions, "rb") as f: features_questions = json.load(f)['questions'] for item in features_questions: questions_answers[item['question_id']]['question'] = preprocess_answer(item['question']) return questions_answers def init_word_vocab(self, word_dict): """ Creating word vocabulary :return: list of mapping from idx to word """ idx_word_mappings = sorted([token for token in SPECIAL_TOKENS]) word_idx_mappings = {token: idx_word_mappings.index(token) for token in idx_word_mappings} for i, word in enumerate(sorted(word_dict.keys())): word_idx_mappings[str(word)] = int(i + len(SPECIAL_TOKENS)) idx_word_mappings.append(str(word)) return word_idx_mappings, idx_word_mappings def _get_entries(self) -> List: """ This function create a list of all the entries. We will use it later in __getitem__ :return: list of samples """ entries = [] for item in self.questionsAnswers.values(): entries.append(self._get_entry(item)) return entries def get_vocabs(self): """ return frequencies dict """ for item in self.questionsAnswers.values(): for word in item['question'].split(): self.word_dict[word] += 1 def _get_entry(self, item: Dict) -> Dict: """ :item: item from the data. 
""" labels_tensor = torch.tensor([item['labels']], requires_grad=False, dtype=torch.int64) scores_tensor = torch.tensor(item['scores'], requires_grad=False, dtype=torch.float32) labels = torch.sparse.FloatTensor(labels_tensor, scores_tensor, torch.Size([2410])) # 2410 - num of labels words_idx_list = [] for idx, word in enumerate(item['question'].split()): # going over the comment content if idx >= MAX_QUESTION_LEN: break # map word to index words_idx_list.append(self.word_idx_mappings.get(word, self.word_idx_mappings.get(UNKNOWN_TOKEN))) # pad the end of the tensor for i in range(len(item['question'].split()), MAX_QUESTION_LEN): words_idx_list.append(self.word_idx_mappings.get(PAD_TOKEN)) question_tensor = torch.tensor(words_idx_list, dtype=torch.long, requires_grad=False) question_len = len(item['question'].split()) return {'image_id': item['image_id'], 'question': question_tensor,'question_len':question_len, 'labels': labels} def _get_images(self, path): try: image = Image.open(path) normalize = transforms.Normalize(mean=[0.4783, 0.4493, 0.4075], std=[0.1214, 0.1191, 0.1429]) if self.is_val: transform = transforms.Compose([ transforms.Resize(255), transforms.CenterCrop(self.pic_size), transforms.ToTensor(), ]) else: transform = transforms.Compose([ transforms.Resize(255), transforms.CenterCrop(self.pic_size), #transforms.RandomHorizontalFlip(p=0.1), #transforms.RandomVerticalFlip(p=0.1), transforms.ToTensor(), ]) tensor_image = transform(image) # if the image is greyscale change it to rgb representation if tensor_image.size(0) == 1: tensor_image = tensor_image.repeat(3, 1, 1) return tensor_image #normalize(tensor_image) # sometimes an image is locked by other students, so catch the exception, wait a second and repeat except: print("entered into the except :(") time.sleep(1) return self._get_images(path)
import random def busqueda_binaria(lista,comienzo,final,objetivo): #se recibe una lista comienzo final y objetivo print(f'buscando {objetivo} entre {lista[comienzo]} y {lista[final - 1]}') if comienzo > final:#si el comienzo es mas grande que el final termina return False #si no es cierto se divide a la mitad medio = (comienzo + final) // 2#se divide la lista en dos if lista[medio] == objetivo:#comparamos si esta el onbjetivo return True elif lista[medio] < objetivo:#si es mas pequeño return busqueda_binaria(lista,medio+1,final,objetivo) else: return busqueda_binaria(lista,comienzo,medio-1,objetivo) if __name__ == '__main__': tamano_de_lista = int(input('de que tamaño sera la lista')) objetivo = int(input('que numero quieres encontrar?')) lista =sorted([random.randint(0,100) for i in range(tamano_de_lista)]) #sorted para ordenar la lista para la busqueda binaria encontrado = busqueda_binaria(lista,0,len(lista),objetivo)#se empieza en 0 y se termin en l longitud de la lista #utilizar indices para movernos en la lista print(lista) print(f'el elemento{objetivo} {" si esta" if encontrado else "no esta"} en la lista"')