from sklearn.datasets import load_breast_cancer
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split

cancer = load_breast_cancer()
x_train, x_test, y_train, y_test = train_test_split(cancer.data, cancer.target, stratify=cancer.target, random_state=42)

# tree = DecisionTreeClassifier(random_state=0)
# tree.fit(x_train, y_train)
# print("Training set accuracy: {:.2f}".format(tree.score(x_train, y_train)))
# print("Test set accuracy: {:.2f}".format(tree.score(x_test, y_test)))

tree = DecisionTreeClassifier(max_depth=1, random_state=0)
tree.fit(x_train, y_train)
print("Training set accuracy: {:.2f}".format(tree.score(x_train, y_train)))
print("Test set accuracy: {:.2f}".format(tree.score(x_test, y_test)))
print("Feature importances:\n", tree.feature_importances_)

import matplotlib.pyplot as plt
import numpy as np


def plot_feature_importances_cancer(model):
    n_features = cancer.data.shape[1]
    plt.barh(np.arange(n_features), model.feature_importances_, align='center')
    plt.yticks(np.arange(n_features), cancer.feature_names)
    plt.xlabel("Feature importance")
    plt.ylabel("Feature")
    plt.ylim(-1, n_features)


plot_feature_importances_cancer(tree)
plt.show()
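KFold is imported above but never used; a minimal sketch of one way it could be applied to the same classifier (an assumption about the intended use, not part of the original script):

from sklearn.model_selection import cross_val_score

# 5-fold cross-validation of the same depth-1 tree on the full cancer dataset.
cv = KFold(n_splits=5, shuffle=True, random_state=0)
scores = cross_val_score(DecisionTreeClassifier(max_depth=1, random_state=0),
                         cancer.data, cancer.target, cv=cv)
print("Cross-validation accuracy: {:.2f} +/- {:.2f}".format(scores.mean(), scores.std()))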
from typing import List


class Solution:
    def isBoomerang(self, points: List[List[int]]) -> bool:
        x1, y1 = points[0][0], points[0][1]
        x2, y2 = points[1][0], points[1][1]
        x3, y3 = points[2][0], points[2][1]
        # Any two coincident points cannot form a boomerang.
        if (x1 == x2 and y1 == y2) or (x1 == x3 and y1 == y3) or (x2 == x3 and y2 == y3):
            return False
        # Exactly two equal x-coordinates (points distinct): cannot be collinear.
        if x1 == x2 != x3 or x1 == x3 != x2 or x2 == x3 != x1:
            return True
        # All three on the same vertical line: collinear.
        if x1 == x2 == x3:
            return False
        # Otherwise check whether the third point lies on the line through the first two.
        return abs(y1 + (x3 - x1) * (y2 - y1) / (x2 - x1) - y3) >= 1e-6
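The same test can be written without floating-point division by using the 2D cross product of the two edge vectors; a minimal sketch of that approach (the function name is illustrative, not part of the original solution):

def is_boomerang_cross(points):
    # Three points form a boomerang iff they are not collinear; the cross product of
    # (p2 - p1) and (p3 - p1) is zero exactly when they are collinear, which also
    # covers coincident points.
    (x1, y1), (x2, y2), (x3, y3) = points
    return (x2 - x1) * (y3 - y1) - (y2 - y1) * (x3 - x1) != 0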
'''
Created on Jul 20, 2012

@author: petrbouchal
'''
from BusinessPlans import *

#===============================================================================
# #===============================================================================
# # ADVANCED ANALYTICS 3: TIME SERIES
# #===============================================================================
#===============================================================================

#TODO: build time-series analytics
# the snapshot data for this is in alldata and in the output of the previous section
# other data will need to be taken from previous reports - need a system for iteration
# need to adapt if 'static' things actually change over time [scheduled dates, ids, actions coming in and out]
# first step will be to build time series of aggregates and see if changes are real or noise from changes in units
# checks should include (a pandas sketch of these checks follows below):
#   - all subactions present in the previous period are present in this one
#   - subactions with the same ID have the same titles
#   - subactions belong to the same actions, actions to the same priorities, priorities to the same departments
# in other words, the hypothetical linking indices haven't been broken by a change in the underlying data structure
# this would ideally be done by querying things from a relational database
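The checks listed above reduce to simple set and merge operations; a minimal sketch assuming the two period snapshots are available as pandas DataFrames (the column names 'subaction_id', 'title' and 'action_id' are illustrative, not from the original data):

import pandas as pd


def check_period_consistency(prev, curr):
    """Return the subactions missing from the current period, those whose title
    changed, and those that moved to a different parent action."""
    # 1. Every subaction present in the previous period is present in this one.
    missing = set(prev['subaction_id']) - set(curr['subaction_id'])
    # 2. Subactions with the same ID keep the same title.
    merged = prev.merge(curr, on='subaction_id', suffixes=('_prev', '_curr'))
    renamed = merged[merged['title_prev'] != merged['title_curr']]
    # 3. Subactions stay attached to the same parent action.
    moved = merged[merged['action_id_prev'] != merged['action_id_curr']]
    return missing, renamed, moved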
def backtracking(W, wt, val, n):
    return 0
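The stub above has the classic 0/1-knapsack signature (capacity W, weight list wt, value list val, item count n). A minimal backtracking sketch under that assumption, not the original author's implementation:

def knapsack_backtracking(W, wt, val, n):
    # Consider items from index n-1 downwards; at each step either skip the item
    # or, if it fits, take it and recurse on the reduced capacity.
    if n == 0 or W == 0:
        return 0
    skip = knapsack_backtracking(W, wt, val, n - 1)
    if wt[n - 1] <= W:
        take = val[n - 1] + knapsack_backtracking(W - wt[n - 1], wt, val, n - 1)
        return max(skip, take)
    return skip

# Example: knapsack_backtracking(50, [10, 20, 30], [60, 100, 120], 3) == 220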
# Vehicles Pattern1(from W to S) for i, veh in enumerate(self.vehicles_W_S): # Check if there are vehicles ahead. If true, stop if (veh.getPosition().x + veh.getSpeed().x, veh.getPosition().y + veh.getSpeed().y) in self.collision_check_W: self.calculate_vehnum(i, veh.getPosition().x, veh.getPosition().y, veh.getPosition().x, veh.getPosition().y, self.sendData_1) qp.drawRect(veh.getPosition().x, veh.getPosition().y, veh.getSize().x, veh.getSize().y) # Make the room not available for other vehicles for j in range(11): self.collision_check_W.append((veh.getPosition().x - j, veh.getPosition().y)) # Move forward else: # Just before the intersection if veh.getPosition().x == 260: # Try to make a reservation if self.propose((veh.getPosition().x, veh.getPosition().y), self.t_t, self.sendData_1["vehicle"][i], 0): veh.getPosition().x += veh.getSpeed().x # Influential veh_num calculation old_x = veh.getPosition().x - veh.getSpeed().x old_y = veh.getPosition().y - veh.getSpeed().y self.calculate_vehnum(i, old_x, old_y, veh.getPosition().x, veh.getPosition().y, self.sendData_1) qp.drawRect(veh.getPosition().x, veh.getPosition().y, 10, 5) for j in range(11): self.collision_check_W.append((veh.getPosition().x - j, veh.getPosition().y)) # Enter intersection else: self.calculate_vehnum(i, veh.getPosition().x, veh.getPosition().y, veh.getPosition().x, veh.getPosition().y, self.sendData_1) qp.drawRect(veh.getPosition().x, veh.getPosition().y, 10, 5) for j in range(11): self.collision_check_W.append((veh.getPosition().x - j, veh.getPosition().y)) else: # Already in the intersection if 270 < veh.getPosition().x < 318 and veh.getPosition().y < 330: # Calculate trajectory by using Bezier Curve x = pow(1 - (self.beze_t[i] / (318 - 270)), 2) * 270 + 2 * (self.beze_t[i] / (318 - 270)) * ( 1 - self.beze_t[i] / (318 - 270)) * 318 + pow( self.beze_t[i] / (318 - 270), 2) * 318 y = pow(1 - (self.beze_t[i] / (318 - 270)), 2) * 283 + 2 * (self.beze_t[i] / (318 - 270)) * ( 1 - self.beze_t[i] / (318 - 270)) * 283 + pow( self.beze_t[i] / (318 - 270), 2) * 320 veh.setPosition(Position(x, y)) self.beze_t[i] += 2 # Calculate rotation angle qp.save() qp.translate(veh.getPosition().x, veh.getPosition().y) if ((veh.getPosition().x - 270 + veh.getSpeed().x) / (318 - 270)) * 90 > 15: self.r[i] = ((veh.getPosition().x - 270 + veh.getSpeed().x) / (318 - 270)) * 90 qp.rotate(self.r[i]) elif ((veh.getPosition().x - 270 + veh.getSpeed().x) / (318 - 270)) * 90 > 90: self.r[i] = 90 qp.rotate(self.r[i]) else: self.r[i] = 0 qp.rotate(self.r[i]) qp.translate(-veh.getPosition().x, -veh.getPosition().y) print('***************************************************') print(i, veh.getPosition().x, veh.getPosition().y) # Influential veh_num calculation self.calculate_vehnum_inside(i, self.sendData_1) qp.drawRect(veh.getPosition().x, veh.getPosition().y, 10, 5) # for j in range(11): # self.collision_check_W.append((veh.getPosition().x - j, veh.getPosition().y)) qp.restore() # Already left intersection elif 318 <= veh.getPosition().x and veh.getPosition().y < 600: veh.getPosition().y += veh.getSpeed().x qp.save() qp.translate(veh.getPosition().x, veh.getPosition().y) qp.rotate(90) qp.translate(-veh.getPosition().x, -veh.getPosition().y) # Influential veh_num calculation old_x = veh.getPosition().x old_y = veh.getPosition().y - veh.getSpeed().x self.calculate_vehnum(i, old_x, old_y, veh.getPosition().x, veh.getPosition().y, sendData_1) # print('***************************************************') # print(veh.getPosition().x, veh.getPosition().y) 
qp.drawRect(veh.getPosition().x, veh.getPosition().y, 10, 5) for j in range(11): self.collision_check_S.append((veh.getPosition().x, veh.getPosition().y - j)) qp.restore() # Already left screen elif veh.getPosition().y >= 600: veh.getPosition().x = 0 veh.getPosition().y = 283 self.beze_t[i] = 2 # Influential veh_num calculation old_x = veh.getPosition().x - veh.getSpeed().x old_y = veh.getPosition().y - veh.getSpeed().y self.calculate_vehnum(i, old_x, old_y, veh.getPosition().x, veh.getPosition().y, self.sendData_1) qp.drawRect(veh.getPosition().x, veh.getPosition().y, 10, 5) for j in range(11): self.collision_check_W.append((veh.getPosition().x, veh.getPosition().y - j)) # Move horizontal direction(across X_axis) else: veh.getPosition().x += veh.getSpeed().x # Influential veh_num calculation old_x = veh.getPosition().x - veh.getSpeed().x old_y = veh.getPosition().y - veh.getSpeed().y self.calculate_vehnum(i, old_x, old_y, veh.getPosition().x, veh.getPosition().y, self.sendData_1) qp.drawRect(veh.getPosition().x, veh.getPosition().y, 10, 5) for j in range(11): self.collision_check_W.append((veh.getPosition().x - j, veh.getPosition().y)) # Vehicles Pattern2(from N to W) for i, veh in enumerate(self.vehicles_N_W): # Check if there are vehicles ahead. If true, stop if (veh.getPosition().x + veh.getSpeed().x, veh.getPosition().y + veh.getSpeed().y) in self.collision_check_W: self.calculate_vehnum(i, veh.getPosition().x, veh.getPosition().y, veh.getPosition().x, veh.getPosition().y, self.sendData_2) qp.drawRect(veh.getPosition().x, veh.getPosition().y, veh.getSize().x, veh.getSize().y) # Make the room not available for other vehicles for j in range(11): self.collision_check_N.append((veh.getPosition().x - j, veh.getPosition().y)) # Move forward else: # Just before the intersection if veh.getPosition().y == 260: # Try to make a reservation if self.propose((veh.getPosition().x, veh.getPosition().y), self.t_t, self.sendData_2["vehicle"][i], 0): veh.getPosition().y += veh.getSpeed().y # Influential veh_num calculation old_x = veh.getPosition().x - veh.getSpeed().x old_y = veh.getPosition().y - veh.getSpeed().y self.calculate_vehnum(i, old_x, old_y, veh.getPosition().x, veh.getPosition().y, self.sendData_2) qp.drawRect(veh.getPosition().x, veh.getPosition().y, 5, 10) for j in range(11): self.collision_check_N.append((veh.getPosition().x - j, veh.getPosition().y)) # Enter intersection else: self.calculate_vehnum(i, veh.getPosition().x, veh.getPosition().y, veh.getPosition().x, veh.getPosition().y, self.sendData_2) qp.drawRect(veh.getPosition().x, veh.getPosition().y, 5, 10) for j in range(11): self.collision_check_N.append((veh.getPosition().x - j, veh.getPosition().y)) else: # Already in the intersection if 270 < veh.getPosition().x < 318 and veh.getPosition().y < 330: # Calculate trajectory by using Bezier Curve x = pow(1 - (self.beze_t[i] / (318 - 270)), 2) * 270 + 2 * (self.beze_t[i] / (318 - 270)) * ( 1 - self.beze_t[i] / (318 - 270)) * 318 + pow( self.beze_t[i] / (318 - 270), 2) * 318 y = pow(1 - (self.beze_t[i] / (318 - 270)), 2) * 283 + 2 * (self.beze_t[i] / (318 - 270)) * ( 1 - self.beze_t[i] / (318 - 270)) * 283 + pow( self.beze_t[i] / (318 - 270), 2) * 320 veh.setPosition(Position(x, y)) self.beze_t[i] += 2 # Calculate rotation angle qp.save() qp.translate(veh.getPosition().x, veh.getPosition().y) if ((veh.getPosition().x - 270 + veh.getSpeed().x) / (318 - 270)) * 90 > 15: self.r[i] = ((veh.getPosition().x - 270 + veh.getSpeed().x) / (318 - 270)) * 90 qp.rotate(self.r[i]) elif 
((veh.getPosition().x - 270 + veh.getSpeed().x) / (318 - 270)) * 90 > 90: self.r[i] = 90 qp.rotate(self.r[i]) else: self.r[i] = 0 qp.rotate(self.r[i]) qp.translate(-veh.getPosition().x, -veh.getPosition().y) print('***************************************************') print(i, veh.getPosition().x, veh.getPosition().y) # Influential veh_num calculation self.calculate_vehnum_inside(i, self.sendData_2) qp.drawRect(veh.getPosition().x, veh.getPosition().y, 5, 10) # for j in range(11): # self.collision_check_W.append((veh.getPosition().x - j, veh.getPosition().y)) qp.restore() # Already left intersection elif 318 <= veh.getPosition().x and veh.getPosition().y < 600: veh.getPosition().y += veh.getSpeed().x qp.save() qp.translate(veh.getPosition().x, veh.getPosition().y) qp.rotate(90) qp.translate(-veh.getPosition().x, -veh.getPosition().y) # Influential veh_num calculation old_x = veh.getPosition().x old_y = veh.getPosition().y - veh.getSpeed().x self.calculate_vehnum(i, old_x, old_y, veh.getPosition().x, veh.getPosition().y, sendData_2) # print('***************************************************') # print(veh.getPosition().x, veh.getPosition().y) qp.drawRect(veh.getPosition().x, veh.getPosition().y, 5, 10) for j in range(11): self.collision_check_W.append((veh.getPosition().x, veh.getPosition().y - j)) qp.restore() # Already left screen elif veh.getPosition().y >= 600: veh.getPosition().x = 0 veh.getPosition().y = 283 self.beze_t[i] = 2 # Influential veh_num calculation old_x = veh.getPosition().x - veh.getSpeed().x old_y = veh.getPosition().y - veh.getSpeed().y self.calculate_vehnum(i, old_x, old_y, veh.getPosition().x, veh.getPosition().y, self.sendData_2) qp.drawRect(veh.getPosition().x, veh.getPosition().y, 5, 10) for j in range(11): self.collision_check_N.append((veh.getPosition().x, veh.getPosition().y - j)) # Move horizontal direction(across X_axis) else: veh.getPosition().x += veh.getSpeed().x # Influential veh_num calculation old_x = veh.getPosition().x - veh.getSpeed().x old_y = veh.getPosition().y - veh.getSpeed().y self.calculate_vehnum(i, old_x, old_y, veh.getPosition().x, veh.getPosition().y, self.sendData_2) qp.drawRect(veh.getPosition().x, veh.getPosition().y, 5, 10) for j in range(11): self.collision_check_N.append((veh.getPosition().x - j, veh.getPosition().y))
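The inline trajectory computation repeated above is a quadratic Bezier curve evaluated at a parameter t; a small stand-alone helper capturing the same formula (the function and point names are illustrative, not from the original class):

def quadratic_bezier(p0, p1, p2, t):
    # B(t) = (1 - t)^2 * p0 + 2 t (1 - t) * p1 + t^2 * p2, evaluated per coordinate,
    # with t in [0, 1] running from the start point p0 to the end point p2.
    return tuple((1 - t) ** 2 * a + 2 * t * (1 - t) * b + t ** 2 * c
                 for a, b, c in zip(p0, p1, p2))

# Example matching the inline expressions: start (270, 283), control (318, 283), end (318, 320).
# quadratic_bezier((270, 283), (318, 283), (318, 320), 0.5) -> (306.0, 292.25)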
#!/home/walker/anaconda3/bin/python3 #coding=utf-8 ###################################################### # > File Name: train.py # > Author: Yanming Ji # > Mail: 1225401399@qq.com # > Created Time: 2019年09月06日 星期五 14时56分55秒 # > Description: 训练模型 ###################################################### from data_generator import id2alpha from model import Lenet import torch as t import torch.nn as nn from torch import optim from torch.optim import lr_scheduler from torch.autograd import Variable from torch.utils.data import DataLoader from torch.utils.data import Dataset import torchvision.transforms as T from PIL import Image import os import time import cv2 class Config: height = 32 width = 16 epochs = 20 batch_size = 64 classes = len(id2alpha) stepsize = [5,10] lr = 0.001 gamma = 0.1 weight_decay = 0.00005 seed = 1 train_dir = "./train_data" val_dir = "./val_data" opt = Config() t.manual_seed(opt.seed) if t.cuda.is_available: print("use cuda!!!") transform = T.Compose([ T.Resize((opt.height, opt.width)), T.ToTensor(), T.Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5]) ]) class ImageDataset(Dataset): def __init__(self, dataset,transform=None): self.dataset = dataset self.transform=transform def __len__(self): return len(self.dataset) def __getitem__(self, index): img, pid = self.dataset[index] # img = Image.open(img) img = cv2.imread(img) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = Image.fromarray(img) if self.transform: img = self.transform(img) return img, pid traindata = list() valdata = list() for subpath in os.listdir(opt.train_dir): pid = subpath.split("_")[0] pid = int(pid) fullpath = os.path.join(opt.train_dir, subpath) traindata.append([fullpath, pid]) # img = Image.open(fullpath) # traindata.append([img, pid]) for subpath in os.listdir(opt.val_dir): pid = subpath.split("_")[0] pid = int(pid) fullpath = os.path.join(opt.val_dir, subpath) valdata.append([fullpath, pid]) # img = Image.open(fullpath) # valdata.append([img, pid]) trainloader = DataLoader(ImageDataset(dataset=traindata, transform=transform), batch_size = opt.batch_size, shuffle=True, drop_last=True) valloader = DataLoader(ImageDataset(dataset=valdata, transform=transform), batch_size = opt.batch_size, shuffle=True, drop_last=True) if __name__ == "__main__": model = Lenet(classes = opt.classes) if t.cuda.is_available: model = model.cuda() criterion = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters(), lr=opt.lr,betas=(0.9,0.99)) # optimizer = optim.SGD(model.parameters(), lr=opt.lr) scheduler = lr_scheduler.StepLR(optimizer,step_size=5,gamma=opt.gamma) star = time.time() best_loss = 10. for epoch in range(1,opt.epochs): running_loss = 0. scheduler.step() for i, (imgs, labels) in enumerate(trainloader): optimizer.zero_grad() imgs, labels = Variable(imgs),Variable(labels) if t.cuda.is_available: imgs, labels = imgs.cuda(), labels.cuda() outputs = model(imgs) loss = criterion(outputs, labels) loss.backward() optimizer.step() running_loss+=loss if (i+1)%10==0: loss = running_loss/10 print("[%d, %5d], lr:%.5f, loss:%.3f" %(epoch, i+1, scheduler.get_lr()[0],loss)) if loss < best_loss: best_loss = loss running_loss=0 t.save(model.state_dict(),"./checkpoints/net_epoch_%d.pth" % epoch)
import unittest

from katas.kyu_6.weird_string_case import to_weird_case


class WeirdStringCaseTestCase(unittest.TestCase):
    def test_equals(self):
        self.assertEqual(to_weird_case('This'), 'ThIs')

    def test_equals_2(self):
        self.assertEqual(to_weird_case('is'), 'Is')

    def test_equals_3(self):
        self.assertEqual(to_weird_case('This is a test'), 'ThIs Is A TeSt')
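The tests above fully specify the kata's behaviour: alternate upper/lower case, restarting at the beginning of each word. A minimal implementation sketch consistent with them (the actual katas.kyu_6 module may differ):

def to_weird_case(string):
    # Upper-case every even-indexed character of each word, lower-case the rest.
    def weird_word(word):
        return ''.join(c.upper() if i % 2 == 0 else c.lower()
                       for i, c in enumerate(word))
    return ' '.join(weird_word(word) for word in string.split(' '))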
"""cleanup Revision ID: 9ca5901af374 Revises: a477f34dbaa4 Create Date: 2020-01-28 20:44:00.184324 """ from alembic import op import sqlalchemy as sa import app.model_types # revision identifiers, used by Alembic. revision = '9ca5901af374' down_revision = 'a477f34dbaa4' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### with op.batch_alter_table('note', schema=None) as batch_op: batch_op.drop_column('projects') batch_op.drop_column('tags') # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### with op.batch_alter_table('note', schema=None) as batch_op: batch_op.add_column(sa.Column('tags', sa.VARCHAR(), nullable=True)) batch_op.add_column(sa.Column('projects', sa.VARCHAR(), nullable=True)) # ### end Alembic commands ###
NAMES = set() TRANSFORMATIONS = set() A = "transformations.txt" B = "code_and_first_name_only.txt" t = open(A, 'w') n = open(B, 'w') class Inside(): pass class Outside(): pass def switch(x): if isinstance(x,Inside): return Outside() elif isinstance(x, Outside): return Inside() else: exit("error") f = open("code_and_name.txt").read() lines = [i for i in f.split('\n')] for line in lines: line = line.split(':', 1) if len(line) != 2: continue code, whole_name = (line[0], line[1]) #print(code) #print(whole_name) state = Outside() i = [] aux = "" for c in whole_name: if c == '(' or c == ')': state = switch(state) elif c == ',': if isinstance(state, Inside): aux += ',' elif isinstance(state, Outside): i.append(aux) aux = "" else: exit("error") else: aux += c i.append(aux) print(i) i = [f.strip().lower() for f in i] name = set((i[0], )) print(name) transformations = list(set(i[1:])) code = int(code.strip()) print(code, name, transformations) TRANSFORMATIONS = TRANSFORMATIONS.union(transformations) NAMES = NAMES.union(name) TRANSFORMATIONS = list(TRANSFORMATIONS) NAMES = list(NAMES) #print(TRANSFORMATIONS) #print(NAMES) for i in TRANSFORMATIONS: t.write(i + '\n') for i in NAMES: n.write(i + '\n') t.close() n.close() import shutil shutil.copy2(A, "transformation_to_edit.txt") shutil.copy2(B, "code_and_first_name_to_edit.txt")
#!/usr/bin/env python # -*- coding: utf-8 -*- # from __future__ import unicode_literals import os AUTHOR = "Christopher D'Cunha" SITENAME = "D'Cunha Matata" SITEURL = "" THEME = os.path.abspath("modules/theme") DISPLAY_PAGES_ON_MENU = False DEFAULT_PAGINATION = 10 TIMEZONE = "Europe/London" DEFAULT_LANG = 'en' FEED_ALL_ATOM = None CATEGORY_FEED_ATOM = None TRANSLATION_FEED_ATOM = None GITHUB_URL = 'https://github.com/christopherdcunha' TWITTER_URL = 'https://twitter.com/dcunhamatata' DISQUS_SITENAME = "christopherdcunha" DEFAULT_DATE_FORMAT = '%d %b %Y' DATE_FORMATS = { 'en': DEFAULT_DATE_FORMAT } MENUITEMS = () ARTICLE_URL = '{date:%Y}/{date:%m}/{date:%d}/{slug}/' ARTICLE_SAVE_AS = '{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html' CATEGORY_URL = 'c/{slug}' CATEGORY_SAVE_AS = 'c/{slug}/index.html' PAGE_URL = '{slug}/' PAGE_SAVE_AS = '{slug}/index.html' TAG_URL = 't/{slug}' TAG_SAVE_AS = 't/{slug}/index.html' MONTH_ARCHIVE_SAVE_AS = '{date:%Y}/{date:%m}/index.html' SUMMARY_MAX_LENGTH = 75 # Blogroll LINKS = () # LINKS = (('Pelican', 'http://getpelican.com/'), # ('Python.org', 'http://python.org/'), # ('Jinja2', 'http://jinja.pocoo.org/'), # ('You can modify those links in your config file', '#'),) # Social widget SOCIAL = () # SOCIAL = (('You can add links in your config file', '#'), # ('Another social link', '#'),) DEFAULT_PAGINATION = 20 PLUGIN_PATHS = ['modules/plugins',] PLUGINS = [ 'html_rst_directive', 'assets', 'extract_toc', 'pelican_fontawesome', ] ASSET_SOURCE_PATHS = ['static',]
import numpy as np import matplotlib.pyplot as plt import sys import os from mpl_toolkits.mplot3d import Axes3D modes = [0, 1, 4] figdir = "media/" fig_filetype = "pdf" if not sys.argv[1]: print("Usage: python plot_eigenmode.py <path_to_data_dir>") datadir = sys.argv[1] figdir = os.path.join(datadir, figdir) print("Reading files...", end='') inner_list = np.loadtxt(datadir+"inner_list.txt", dtype=int) eigenmodes = np.loadtxt(datadir+"eigenmodes.txt") eigenvalues = np.loadtxt(datadir+"eigenvalues.txt") fractal_x, fractal_y = np.loadtxt(datadir+"fractal.txt", unpack=True) print("done!") index_max, index_min = np.max(inner_list), np.min(inner_list) size = index_max + 1 grid = np.ones((size, size)) # Move and resice fractal fractal_x -= np.min(fractal_x) fractal_y -= np.min(fractal_y) fractal_x /= np.max(fractal_x) fractal_y /= np.max(fractal_y) fractal_y = 1 - fractal_y def plot_colormesh(grid): plt.pcolormesh(grid_to_plot) plt.xticks([]) plt.yticks([]) plt.gca().set_aspect("equal") def plot_wireframe(grid, fractal_x, fractal_y): fig = plt.figure() ax = fig.add_subplot(111, projection='3d') x = np.linspace(0, 1, grid.shape[0]) y = np.linspace(0, 1, grid.shape[1]) xx, yy = np.meshgrid(x, y) ax.plot_wireframe(xx, yy, np.where(grid == 1, np.nan, grid), color="gray") ax.plot(fractal_x, fractal_y, linewidth=0.8, color="darkred") ax.grid(False) plt.axis("off") ax.set_zlim(-0.01, 0.01) def plot_surface(grid, fractal_x, fractal_y): fig = plt.figure() ax = fig.add_subplot(111, projection='3d') x = np.linspace(0, 1, grid.shape[0]) y = np.linspace(0, 1, grid.shape[1]) xx, yy = np.meshgrid(x, y) my_map = plt.get_cmap("coolwarm") my_map.set_over((0,0,0,0)) ax.plot_surface(xx, yy, grid, cmap=my_map, vmin=-0.006, vmax=0.006, zorder=8) ax.plot(fractal_x, fractal_y, linewidth=0.8, color="darkred") ax.grid(False) plt.axis("off") ax.set_zlim(-0.01, 0.01) for mode in modes: print("Mode", mode) print(" .Populating grid") for i, index in enumerate(inner_list): grid[tuple(index)] = eigenmodes[i, mode] print(" .Plotting") grid_to_plot = grid plot_surface(grid_to_plot, fractal_x, fractal_y) plt.show() #plt.savefig(figdir + "mode_" + str(mode) +"." + fig_filetype)
# -*- coding: utf-8 -*- """ Created on Tue Oct 27 06:31:12 2020 @author: Siddhi """ from random import seed import numpy as np import pandas as pd import matplotlib.pyplot as plt import math #Split dataset into training and testing set def train_test_split(dataframe,split=0.70): train_size = int(split * len(dataframe)) #print(train_size) test_size = len(dataframe) - train_size #print(test_size) dataframe = dataframe.sample(frac=1) #shuffle rows of the dataframe train = dataframe[:train_size] #copy first 70% elements into train test = dataframe[-test_size:] #copy last 30% elements into test return train,test def hypothesis(weight,X): return weight*X def Compute_Cost(X,y,weight): m = X.shape[0] y1 = hypothesis(weight,X) y1 = np.sum(y1,axis=1) return sum((y1-y)**2)/(2*m) def SGD(X,Y,weights,alpha,epoch,precision): m = X.shape[0] cost_history = np.zeros(epoch) for i in range(epoch): cost = 0.0 df = pd.concat([X,Y],axis=1) df = df.sample(frac=1) X = df.loc[:,'bias':'children'] Y = df.loc[:,'charges'] for j in range(0,m): X_j = X.iloc[j] y_prediction = X_j.dot(weights) X_T = X_j.transpose() diff = y_prediction - Y.iloc[j] weights = weights - alpha*(X_T * diff) cost += Compute_Cost(X,Y,weights) if(cost<=precision): break cost_history[i]=cost plt.plot(cost_history) plt.title("learning rate = 0.005") plt.xlabel('number of epoch') plt.ylabel('loss') return cost_history,weights def RMSE(df,wt): N = len(df) X = df.loc[:,'bias':'children'] y_obs = df.loc[:,'charges'] yi_hat = X.dot(wt) diff = (y_obs - yi_hat)**2 rmse = math.sqrt(np.sum(diff)/N) return rmse def Solve(dataframe): #Splitting the data into training and testing data train, test = train_test_split(dataframe) #print(train.shape) X = train.loc[:,'bias':'children'] Y = train.loc[:,'charges'] weight = np.array([0.0]*len(X.columns)) learning_rate = 0.005 #0.005 epoch = 151 precision = 0.000001 Cost,weight = SGD(X,Y,weight,learning_rate,epoch,precision) print("Cost Values after every 50 epochs: ") for i in range(0,len(Cost)): if(i%50==0): print(Cost[i]) train_loss = RMSE(train,weight) test_loss = RMSE(test,weight) return weight,train_loss, test_loss if __name__ == '__main__': seed(1) dataframe = pd.read_csv('D:/Study/4-1/Fundamentals of Data Science/Assignments/A2/A2_2017B3A70907H_2017B3A70972H_2017B3A71433H/insurance.txt', sep=",",header=None) dataframe.columns = ["age","bmi","children","charges"] dataframe = pd.concat([pd.Series(1,index=dataframe.index,name="bias"),dataframe],axis=1) #Normalize the input variables by dividing each column by the maximum values of that column for column in dataframe: max_val = np.max(dataframe[column]) dataframe[column]=dataframe[column]/max_val #print(dataframe[:5]) #initializing the variables num_samples = 20 rows,cols = (num_samples,3) weights = np.zeros((num_samples,4)) b_val = np.zeros(num_samples) train_data_error = np.zeros(num_samples) test_data_error = np.zeros(num_samples) w0_sum,w1_sum,w2_sum,w3_sum = (0,0,0,0) err_train_sum,err_test_sum = (0,0) #Sampling the data 20 times to get more accurate results for i in range(0,num_samples): print("\nSample ",i+1) weights[i],train_data_error[i],test_data_error[i] = Solve(dataframe) w0_sum += weights[i][0] w1_sum += weights[i][1] w2_sum += weights[i][2] w3_sum += weights[i][3] err_train_sum += train_data_error[i] err_test_sum += test_data_error[i] train_mean_err = err_train_sum/num_samples test_mean_err = err_test_sum/num_samples var_train_error = np.var(train_data_error) var_test_error = np.var(test_data_error) min_train_error = np.amin(train_data_error) 
min_test_error = np.amin(test_data_error) print("\nThe regression line is of the form:\n insurance = w0 + w1*age + w2*bmi + w3*children") result = [w0_sum/num_samples, w1_sum/num_samples, w2_sum/num_samples, w3_sum/num_samples] print("\nWeights of the independent variables are: ") print("w0 = ",result[0]) print("w1 = ",result[1]) print("w2 = ",result[2]) print("w3 = ",result[3]) print("\nRMSE Mean of accuracy prediction of training data: ",train_mean_err) print("RMSE Variance of accuracy prediction of training data: ",var_train_error) #print("Minimum RMSE of training data: ",min_train_error) print("\nRMSE Mean of accuracy prediction of testing data: ",test_mean_err) print("RMSE Variance of accuracy prediction of testing data: ",var_test_error) #print("Minimum RMSE of testing data: ",min_test_error) print("\nMinimum RMSE of regression model using normal equation",min(min_train_error,min_test_error))
#!/usr/bin/python
import numpy as np

# Input parameters:
ofile = '../data/LISA/LISA'  # Output file for the data.
# -----------------------------------------------------------------
ifile1 = '../data/LISA/lisa.out'  # I think it comes from: http://www.srl.caltech.edu/~shane/sensitivity/MakeCurve.html

ul1 = np.array(np.loadtxt(ifile1, usecols=(0, 1)))
# fvec, svec = 10**(ul1[:, 0]), 10**(ul1[:, 1])
fvec, svec = ul1[:, 0], ul1[:, 1]

lisamat = np.vstack((fvec, svec)).T

# Save as npy file.
dicti = {'LISA': lisamat}
np.save(ofile, dicti)
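For reference, a dictionary saved this way has to be unpacked with allow_pickle when read back; a usage sketch, not part of the original script:

# np.save appends '.npy' to ofile; the stored dict comes back wrapped in a 0-d object array.
loaded = np.load(ofile + '.npy', allow_pickle=True).item()
freq, strain = loaded['LISA'][:, 0], loaded['LISA'][:, 1]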
from rest_framework import serializers

from app.models import Image


class ImageSerializer(serializers.ModelSerializer):
    image = serializers.SerializerMethodField('serialize_image')

    class Meta:
        model = Image
        fields = ('id', 'name', 'desc', 'image', 'created_at', 'updated_at')

    def serialize_image(self, obj):
        # obj is the Image instance; guard against a missing file on the FileField.
        return obj.image.url if obj.image else None
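A typical read-only endpoint using this serializer might look like the following sketch (the view name and queryset are illustrative, not from the original app):

from rest_framework import generics


class ImageListView(generics.ListAPIView):
    # Returns every Image serialized with the field set defined above.
    queryset = Image.objects.all()
    serializer_class = ImageSerializer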
import numpy as np def __discrete_unif_pdf(x, start, n_numbers): # TODO ensure that only ints are passed here. # TODO this should return 0 for any non integer number. if x >= start and x <= start +n_numbers: return 1/n_numbers else: return 0 _discrete_unif_pdf = np.vectorize(__discrete_unif_pdf) class AbstractPriorDistribution(object): support = None def __init__(self, seed): self.rng = np.random.RandomState(seed) def rvs(self): """Draw a random variable""" raise NotImplementedError def draw(self): return self.rvs() def pdf(self, x): """The probability of observing x""" raise NotImplementedError class DiscreteUniform(AbstractPriorDistribution): """ A discrete uniform distribution mimic-ing some of scipy.stats.uniform methods. """ def __init__(self, dimensions, start, n_numbers, seed=2017): """ Creates a distribution :param dimensions: The number of dimensions/state space :param start: The start point of the range of allowable numbers :param n_numbers: The number of allowable numbers """ super().__init__(seed) self.dimensions = dimensions self.start = start self.n_numbers = n_numbers self.support = np.arange(start, start+n_numbers+1) def rvs(self): return np.array([self.rng.choice(self.support) for i in range(self.dimensions)]) def draw(self): return self.rvs() def pdf(self, x): probs = _discrete_unif_pdf(x, self.start, self.n_numbers) if len(x.shape) == 1: return probs.prod() else: return probs.prod(axis=1) class MultiWindowDiscreteUniform(AbstractPriorDistribution): def __init__(self, dimensions, window_ranges=[(-5, 5)], seed=2017): super().__init__(seed) self.dimensions = dimensions self.window_ranges = window_ranges self.support = [] for (l,r) in self.window_ranges: self.support.extend(range(l, r+1)) # for backward compatability # TODO: find a way around this. self.start = np.min(self.support) self.n_numbers = len(self.support) def rvs(self): return np.array([self.rng.choice(self.support) for _ in range(self.dimensions)]) def draw(self): return self.rvs() def pdf(self, x): probs = np.isin(x, self.support)/len(self.support) if len(x.shape) == 1: return probs.prod() else: return probs.prod(axis=1)
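A short usage sketch of the two priors defined above (the printed values are only what the shapes imply, not recorded output):

if __name__ == "__main__":
    prior = DiscreteUniform(dimensions=2, start=0, n_numbers=10, seed=2017)
    sample = prior.rvs()              # e.g. array([3, 7]) - one draw per dimension
    print(sample, prior.pdf(sample))  # joint pdf is the product over dimensions

    windowed = MultiWindowDiscreteUniform(dimensions=2, window_ranges=[(-5, -1), (1, 5)])
    print(windowed.rvs(), windowed.support)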
from tensorflow.keras.layers import BatchNormalization, Conv2D, Activation, MaxPooling2D, ZeroPadding2D

from model.modules import conv_block, identity_block


def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
    """Build a ResNet graph.
        architecture: Can be resnet50 or resnet101
        stage5: Boolean. If False, stage5 of the network is not created
        train_bn: Boolean. Train or freeze Batch Norm layers
    """
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1
    x = ZeroPadding2D((3, 3))(input_image)
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNormalization(name='bn_conv1')(x, training=train_bn)
    x = Activation('relu')(x)
    C1 = x = MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
    C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
    C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
    # Stage 4
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
    C4 = x
    # Stage 5
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]
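A minimal sketch of calling the builder above (it assumes the conv_block and identity_block helpers in model.modules are importable; the input size is illustrative):

from tensorflow.keras.layers import Input

# Build the backbone on a 512x512 RGB input and inspect the feature-map stages C1-C5.
image = Input(shape=(512, 512, 3))
C1, C2, C3, C4, C5 = resnet_graph(image, "resnet50", stage5=True, train_bn=False)
print([c.shape for c in (C1, C2, C3, C4, C5)])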
import pandas as pd from sklearn.cross_validation import train_test_split from sklearn.tree import DecisionTreeClassifier from sklearn.externals import joblib import gc file_name_str = 'dt_mod_{}_{}_{}_{}.pkl' gc.enable() # df_train = pd.read_csv('Kaggle_Datasets/Facebook/train.csv') # df_test = pd.read_csv('https://s3-us-west-2.amazonaws.com/fbdataset/test.csv') class MultiPredictionModel(object): def __init__(self, df, xsize=0.5, ysize=0.5, xslide=0.25, yslide=0.25, xcol='x', ycol='y'): self.df = df self.xsize = xsize self.ysize = ysize self.xslide = xslide self.yslide = yslide self.xcol = xcol self.ycol = ycol self.xmax = self.df.x.max() self.ymax = self.df.y.max() self.features = ['x', 'y', 'accuracy', 'hour', 'day', 'week', 'month', 'year'] self.mod_df(self.df) self.windows = self.generate_windows() self.expected = None self.actual = None self.result_set = {} def mod_df(self, df): df.loc[:, 'hours'] = df.time / float(60) df.loc[:, 'hour'] = df.hours % 24 + 1 df.loc[:, 'days'] = df.time / float(60*24) df.loc[:, 'day'] = df.days % 7 + 1 df.loc[:, 'weeks'] = df.time / float(60*24*7) df.loc[:, 'week'] = df.weeks % 52 + 1 df.loc[:, 'months'] = df.time / float(60*24*30) df.loc[:, 'month'] = df.months % 12 + 1 df.loc[:, 'year'] = df.time / float(60*24*365) + 1 def frange(self, x, y, jump): while x < y: yield x x += jump yield y def generate_windows(self): ranges = [] result = [] xmin, xmax = self.df.x.min(), self.df.x.max() ymin, ymax = self.df.y.min(), self.df.y.max() xranges = list(self.frange(xmin, xmax-self.xsize, self.xslide)) yranges = list(self.frange(ymin, ymax-self.ysize, self.yslide)) ylen = len(yranges) for x in xranges: subrange = [x] * ylen ranges.extend(zip(subrange, yranges)) for x1, y1 in ranges: x2, y2 = x1 + self.xsize, y1 + self.ysize result.append(((x1, y1), (x2, y2))) return result def find_x_window(self, x): xs = max(0, x - (self.xsize/2.0)) x0 = 0 while x0 < xs: x0 += self.xslide if x0 >= self.xmax - self.xsize: x0 = self.xmax - self.xsize return x0 def find_y_window(self, y): ys = max(0, y - (self.ysize/2.0)) y0 = 0 while y0 < ys: y0 += self.yslide if y0 >= self.ymax - self.ysize: y0 = self.ymax - self.ysize return y0 def train(self): for i, window in enumerate(self.windows): print 'Training Model: {} of {}'.format(i, len(self.windows)) import os (x1, y1), (x2, y2) = window file_name = file_name_str.format(x1, y1, x2, y2) if os.path.isfile(file_name): print 'Already trained' continue model = DecisionTreeClassifier() print 'Training Model: {}'.format(model) (x1, y1), (x2, y2) = window model_df = self.df[(self.df[self.xcol] >= x1) & (self.df[self.xcol] <= x2) & (self.df[self.ycol] >= y1) & (self.df[self.ycol] <= y2)] model_df = model_df.sort_values('row_id').set_index('row_id') values = model_df['place_id'] model_df = model_df[self.features] model.fit(model_df, values) file_name = file_name_str.format(x1, y1, x2, y2) joblib.dump(model, file_name) del model_df del model def load_model(self, window): (x1, y1), (x2, y2) = window file_name = file_name_str.format(x1, y1, x2, y2) model = joblib.load(file_name) return model def predict(self, df): df = df.sort_values('row_id') self.expected = df.place_id self.mod_df(df) df.loc[:, 'x1'] = df.x.apply(self.find_x_window) df.loc[:, 'x2'] = df.x1 + self.xsize df.loc[:, 'y1'] = df.y.apply(self.find_y_window) df.loc[:, 'y2'] = df.y1 + self.ysize out_range = df[(df.x < df.x1) | (df.x > df.x2) | (df.y < df.y1) | (df.y > df.y2)] if len(out_range): print 'Error in windows'; import pdb; pdb.set_trace() for i, window in 
enumerate(self.windows): print 'Predicting Model: {} of {}'.format(i, len(self.windows)) model = self.load_model(window) (x1, y1), (x2, y2) = window wdf = df[(df.x1 == x1) & (df.x2 == x2) & (df.y1 == y1) & (df.y2 == y2)] wdf = wdf.sort_values('row_id').set_index('row_id') wdf = wdf[self.features] predictions = model.predict(wdf) del model; model = None gc.collect() res = dict(zip(wdf.index, predictions)) self.result_set.update(res) del res; res = None gc.collect() self.actual = [self.result_set[x] for x in sorted(self.result_set.keys())] return self.result_set def score(self): expect = pd.Series(self.expected) actual = pd.Series(self.actual) return (sum(expect == actual) / float(len(self.expected))) * 100 def run(): print 'Loading DataFrame' df_train = pd.read_csv('Kaggle_Datasets/Facebook/train.csv') df_train = df_train.loc[(df_train.x <= 2) & (df_train.y <= 2), :] print 'Splitting train and test data' train, test = train_test_split(df_train, test_size=0.2) del df_train print 'Initializing PredictionModel class' pred_model = MultiPredictionModel(df=train) print 'Init done' print pred_model.windows print 'Training Model' pred_model.train() print 'Done Training' print 'Predicting on test data' print pred_model.predict(test) print 'Done predicting' score = pred_model.score() print 'Score: {}'.format(score) return score run()
from django.shortcuts import render from django.http import HttpResponse, Http404, HttpResponseRedirect from .models import Lecture, Question, Tag from django.urls import reverse from django.db import DatabaseError from django.contrib import messages from . import profanity import re # PEP8 OK # 1 View for index page def index(request): return render(request, 'question/index.html', {}) # 2 View for about page def about(request): return render(request, 'question/about.html', {}) # 3 View for make lecture page def make_lecture(request): return render(request, 'question/make_lecture.html', {}) # 4 Action-view for the action create lecture (lecturer) | POST-form in make_lecture.html def submit_lecture(request): lecture_name = request.POST['lecture_name'] if lecture_name == "": return render(request, 'question/make_lecture.html', {'error_message': "Lecture name can not be empty. "}) lecture_owner = request.POST['lecture_owner'] if lecture_owner == "": return render(request, 'question/make_lecture.html', {'error_message': "Your name can not be empty. "}) if not re.fullmatch('[A-Z][A-Za-z. ]+', lecture_owner): return render(request, 'question/make_lecture.html', {'error_message': "Your name can only contain letters, dots and spaces. "}) num_questions = request.POST['num_questions'] if not (num_questions.isdigit() or num_questions == ""): return render(request, 'question/make_lecture.html', {'error_message': "Number of questions must be an integer or empty (default=10). "}) try: if num_questions == "": # Default number of questions to display: if not request.FILES: # Checks if the lecturer uploaded a presentation file (No file:) lecture = Lecture(lecture_name=lecture_name, lecture_owner=lecture_owner) lecture.save() else: # With file: lecture = Lecture(lecture_name=lecture_name, lecture_owner=lecture_owner, presentation=request.FILES['lecture_ppt']) lecture.save() else: # Custom number of questions to display: if not request.FILES: # No file: lecture = Lecture(lecture_name=lecture_name, lecture_owner=lecture_owner, num_questions=num_questions) lecture.save() else: # With file: lecture = Lecture(lecture_name=lecture_name, lecture_owner=lecture_owner, num_questions=num_questions, presentation=request.FILES['lecture_ppt']) lecture.save() tags = request.POST['lecture_tags'] if not tags == "": # With custom tags: tags = tags.strip() tags = tags.split(' ') existing_tags = [] # Checks for duplicate tags for tag in tags: tag = tag.strip(',') # Enables both ', ' and ' ' as tag-splits tag = tag.strip() # Checks for empty tags if not (tag == "" or tag in existing_tags): lecture.tag_set.create(tag_text=tag) existing_tags.append(tag) # Extra tag made for possible questions without a tag (also made if tags=""): lecture.tag_set.create(tag_text='no_tag') except (KeyError, ValueError, DatabaseError): return render(request, 'question/make_lecture.html', {'error_message': "Error: Something went wrong. 
"}) else: return HttpResponseRedirect(reverse('lecturer_questions', args=(lecture.id,))) # 5 View for lecturer questions page def lecturer_questions(request, lecture_id): try: lecture = Lecture.objects.get(pk=lecture_id) except Lecture.DoesNotExist: return page_not_found(request) # Context-variables sent to lecturer_questions.html: tag_list = lecture.tag_set.exclude(tag_text='no_tag') no_tag = lecture.tag_set.get(tag_text='no_tag') tag_texts = [] tag_votes = [] for tag in tag_list: tag_texts.append(tag.tag_text) tag_votes.append(tag.votes) tag_texts.append("Other") tag_votes.append(no_tag.votes) return render(request, 'question/lecturer_questions.html', {'lecture': lecture, 'tag_texts': tag_texts, 'tag_votes': tag_votes}) # 6 Action-view for the action delete lecture (lecturer) | POST-form in lecturer_questions.html def delete_lecture(request, lecture_id): try: lecture = Lecture.objects.get(pk=lecture_id) except Lecture.DoesNotExist: return render(request, 'question/index.html', {'error_message': "Lecture was already deleted. "}) else: # Deletes the presentations-file if it exists: if lecture.presentation: file = lecture.presentation file.delete() lecture.delete() messages.success(request, "Lecture %s was deleted. " % lecture_id) return HttpResponseRedirect(reverse('index')) # 7 Action-view for the action access lecture (student) | GET-form in index.html def access_lecture(request): lecture_id = request.GET['lecture'] try: lecture = Lecture.objects.get(pk=lecture_id) except (KeyError, Lecture.DoesNotExist, ValueError): return render(request, 'question/index.html', {'error_message': "Lecture %s does not exist. " % lecture_id}) else: return HttpResponseRedirect(reverse('user_questions', args=(lecture.id,))) # 8 View for user question page (student) def user_questions(request, lecture_id): try: lecture = Lecture.objects.get(pk=lecture_id) except Lecture.DoesNotExist: return page_not_found(request) # Context-variables sent to user_questions.html: tag_exists = len(lecture.tag_set.all()) > 1 # Boolean - True if lecturer made custom tags no_tag = lecture.tag_set.get(tag_text='no_tag') return render(request, 'question/user_questions.html', {'lecture': lecture, 'tags': tag_exists, 'no_tag': no_tag}) # 9 Action-view for the action ask question (student) | POST-form in user_questions.html def ask_question(request, lecture_id): try: lecture = Lecture.objects.get(pk=lecture_id) except Lecture.DoesNotExist: return page_not_found(request) question_text = request.POST['question_text'] # Boolean - True if a curse word was written: curse_word = profanity.check(question_text) # Context-variables sent to user_questions.html: tag_exists = len(lecture.tag_set.all()) > 1 # Boolean - True if lecturer made custom tags no_tag = lecture.tag_set.get(tag_text='no_tag') if curse_word is True: return render(request, 'question/user_questions.html', {'lecture': lecture, 'error_message': "Curse words are not allowed! ", 'tags': tag_exists, 'no_tag': no_tag}) if question_text == "" or len(question_text) > 200: return render(request, 'question/user_questions.html', {'lecture': lecture, 'error_message': "Question is too short or too long. 
(1-200 char) ", 'tags': tag_exists, 'no_tag': no_tag}) try: if 'question_tag' not in request.POST: # No tag chosen by the student selected_tag = lecture.tag_set.get(tag_text='no_tag') else: selected_tag = lecture.tag_set.get(pk=request.POST['question_tag']) # Statistic over tags: selected_tag.votes += 1 question = lecture.question_set.create(question_text=question_text, tag=selected_tag) except (KeyError, ValueError, Tag.DoesNotExist, DatabaseError): return render(request, 'question/user_questions.html', {'lecture': lecture, 'error_message': "Error: Something went wrong. "}) else: selected_tag.save() question.save() return HttpResponseRedirect(reverse('user_questions', args=(lecture.id,))) # 10 Action-view for the action vote question (student) | POST-form in user_submitted.html def vote_question(request, lecture_id, question_id): try: lecture = Lecture.objects.get(pk=lecture_id) question = lecture.question_set.get(pk=question_id) except (Lecture.DoesNotExist, Question.DoesNotExist): return page_not_found(request) question.votes += 1 question.save() # Statistic over tags: question_tag = question.tag question_tag.votes += 1 question_tag.save() return HttpResponseRedirect(reverse('user_questions', args=(lecture.id,))) # 11 Action-view for the action flag question (student) | POST-form in user_submitted.html def flag_question(request, lecture_id, question_id): try: lecture = Lecture.objects.get(pk=lecture_id) question = lecture.question_set.get(pk=question_id) except (Lecture.DoesNotExist, Question.DoesNotExist): return page_not_found(request) question.flags += 1 question.save() question_text = question.question_text if question.flags > 4: question_votes = question.votes question_tag = question.tag question.delete() # Statistic over tags: question_tag.votes -= (question_votes + 1) question_tag.save() messages.success(request, "Flagged", extra_tags=question_text) return HttpResponseRedirect(reverse('user_questions', args=(lecture.id,))) # 12 Action-view for the action delete question (lecturer) | POST-form in lecturer_submitted.html def delete_question(request, lecture_id, question_id): try: lecture = Lecture.objects.get(pk=lecture_id) except Lecture.DoesNotExist: return page_not_found(request) # Context-variables sent to lecturer_questions.html: tag_list = lecture.tag_set.exclude(tag_text='no_tag') no_tag = lecture.tag_set.get(tag_text='no_tag') tag_texts = [] tag_votes = [] for tag in tag_list: tag_texts.append(tag.tag_text) tag_votes.append(tag.votes) tag_texts.append("Other") tag_votes.append(no_tag.votes) try: question = lecture.question_set.get(pk=question_id) except Question.DoesNotExist: return render(request, 'question/lecturer_questions.html', {'lecture': lecture, 'error_message': "Question was already deleted. ", 'tag_texts': tag_texts, 'tag_votes': tag_votes}) else: question_text = question.question_text question_votes = question.votes question_tag = question.tag question.delete() # Statistic over tags: question_tag.votes -= (question_votes + 1) question_tag.save() messages.success(request, "Question '%s' was deleted. 
" % question_text) return HttpResponseRedirect(reverse('lecturer_questions', args=(lecture.id,))) # SPECIAL VIEWS: # Update 1 | Updates the lecturers question-list | JavaScript in lecturer_questions.html updates lecturer_submitted.html def lecturer_submitted(request, lecture_id): try: lecture = Lecture.objects.get(pk=lecture_id) except Lecture.DoesNotExist: return page_not_found(request) # Context-variables sent to lecturer_submitted.html: # Q-list sorted on most votes, with length num_questions: mostvoted_question_list = \ (lecture.question_set.order_by('-votes', '-pub_date')[:lecture.num_questions]) return render(request, 'question/lecturer_submitted.html', {'lecture': lecture, 'question_list': mostvoted_question_list}) # Update 2 | Updates the students question-list | JavaScript in user_questions.html updates user_submitted.html def user_submitted(request, lecture_id): try: lecture = Lecture.objects.get(pk=lecture_id) except Lecture.DoesNotExist: return page_not_found(request) # Context-variables sent to user_submitted.html: latest_question_list = (lecture.question_set.order_by('-pub_date')) return render(request, 'question/user_submitted.html', {'lecture': lecture, 'question_list': latest_question_list}) # Update 3 | Updates the lecturers tag-statistic | JavaScript in lecturer_questions.html uptades lecturer_statistic.html def lecturer_statistic(request, lecture_id): try: lecture = Lecture.objects.get(pk=lecture_id) except Lecture.DoesNotExist: return page_not_found(request) # Context-variables sent to lecturer_statistic.html: tag_list = lecture.tag_set.exclude(tag_text='no_tag') no_tag = lecture.tag_set.get(tag_text='no_tag') tag_texts = [] tag_votes = [] for tag in tag_list: tag_texts.append(tag.tag_text) tag_votes.append(tag.votes) tag_texts.append("Other") tag_votes.append(no_tag.votes) return render(request, 'question/lecturer_statistic.html', {'lecture': lecture, 'tag_texts': tag_texts, 'tag_votes': tag_votes}) # View for page not found | Handles all wrong urls (error 404) def page_not_found(request): response = render(request, 'question/404.html', {'request_path': request.path}) response.status_code = 404 return response
import json

path = r"C:\Users\土豆\Desktop\数据驱动读取json.json"
with open(path, "r", encoding="utf-8") as m:
    a = m.read()
lis = json.loads(a)  # parse the file contents read above
from matplotlib import pyplot as plt


def plot(history, from_epoch=0):
    try:
        acc = history.history['acc'][from_epoch:]
        val_acc = history.history['val_acc'][from_epoch:]
        # summarize history for accuracy
        plt.plot(acc)
        plt.plot(val_acc)
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()
    except KeyError:
        print('There is no accuracy log in history variable')
    try:
        loss = history.history['loss'][from_epoch:]
        val_loss = history.history['val_loss'][from_epoch:]
        # summarize history for loss
        plt.plot(loss)
        plt.plot(val_loss)
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()
    except KeyError:
        print('There is no loss log in history variable')
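A usage sketch (the model and data names are illustrative; recent Keras versions log 'accuracy'/'val_accuracy' instead of 'acc'/'val_acc', in which case the keys above would need to match):

# history = model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=20)
# plot(history, from_epoch=5)  # skip the first 5 noisy epochs in the curves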
""" Script to read the root files from positron simulation and saved their hittime distributions to txt file. These hittime distributions can then be analyzed further with pulse_shape_analysis_v1.py as reference to the prompt signal of IBD-like NC events (to compare hittime distributions of positrons and NC events). Procedure to get the hittime distribution with vertex reconstruction and time smearing of PMTs: 1. apply same cuts like on prompt signals of NC events: 1.1 energy cut on prompt signal: only positrons with energy from 10 MeV to 100 MeV (uniformly distributed) are simulated -> energy cut is applied automatically 1.2 volume cut: must be same like for NC events (take initial position of initial particle -> smear it with vertex resolution with function position_smearing()) 2. calculate time of flight: 2.1 for every photon, that hits a PMT (20inch and 3inch), take the PMT position (via PMT ID from file PMT_position.root) and calculate the time-of-flight distance with the reconstructed position from above. 2.2 with the time-of-flight distance, calculate the time-of-flight of this photon from production to PMT by considering an effective speed of light in the LS. 3. consider TTS of PMTs: 3.1 for every photon, that hits a PMT (20inch and 3inch), take the time resolution (sigma) of the PMT (via PMT ID either from file PmtData.root for the 20inch PMTs or set TTS = 5 ns for 3inch PMTs.) 3.2 the TTS of the PMT is FWHM. Therefore calculate sigma from TTS (FWHM = 2*sqrt(2*ln(2)) * sigma). 3.3 smear hittime of detsim with gaussian of sigma (time resolution) around the value of detsim hittime to get the smeared hittime 4. for every photon, calculate the 'real' hittime (= smeared hittime - time_of_flight) and store it in array 5. Do points 2. to 4. for every photon. Then you get the correct hittime of this event. Build histogram with correct hittimes and save histogram value in txt file and display histogram in png file """ import datetime import ROOT import sys import NC_background_functions import numpy as np from matplotlib import pyplot as plt def get_hittime_from_rootfile_fixenergy(input_path, output_path, first_file, last_file, num_evts, kin_energy, min_t, max_t, t_limit, bin_width, radius, now): """ function to read events of the root files of positron simulation and save hittime distribution to png and txt file. function is used for positron with fixed kinetic energy. IMPORTANT: time of flight correction is NOT correct!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
:param input_path: path, where root-files are saved :param output_path: path, where png and txt files of hittime distribution are saved :param first_file: number of the first file to read :param last_file: number of the last file to read :param num_evts: number of events per file :param kin_energy: kinetic energy of positron in MeV :param min_t: minimum of time window of whole signal in ns :param max_t: maximum of time window of whole signal in ns :param t_limit: time in ns, where prompt signal should be 0 :param bin_width: bin-width of hittime-distribution in ns :param radius: cut radius in mm :param now: actual time :return: """ # preallocate array, where total nPE of prompt signal per event of all files is stored: num_pe_total = np.array([]) # number of events that are analyzed (pass volume cut): number_analyzed = 0 # loop over root files with positron simulation: for index in range(first_file, last_file + 1, 1): # load user_positron_{}.root file: rfile = ROOT.TFile(input_path + "user_positron_{0:d}_MeV_{1:d}.root".format(kin_energy, index)) print("... read {0}...".format(rfile)) # get the "evt"-TTree from the TFile: rtree_evt = rfile.Get("evt") # get geninfo tree from TFile: rtree_geninfo = rfile.Get("geninfo") # get the number of events in the 'evt' Tree: num_events_evt = rtree_evt.GetEntries() # check number of events: if num_events_evt != num_evts: sys.exit("ERROR: number of events in root file ({0:d}) != {1:d}" .format(num_events_evt, num_evts)) # loop over the events: for event in range(num_evts): # check volume cut: rtree_geninfo.GetEntry(event) # get number of initial particles: n_init_particles = int(rtree_geninfo.GetBranch('nInitParticles').GetLeaf('nInitParticles').GetValue()) if n_init_particles != 1: sys.exit("ERROR: more than 1 initial particles in event {0:d}".format(event)) # get initial x, y, z position: x_init = float(rtree_geninfo.GetBranch('InitX').GetLeaf('InitX').GetValue()) y_init = float(rtree_geninfo.GetBranch('InitY').GetLeaf('InitY').GetValue()) z_init = float(rtree_geninfo.GetBranch('InitZ').GetLeaf('InitZ').GetValue()) # calculate distance to center: r_init = np.sqrt(x_init**2 + y_init**2 + z_init**2) if r_init >= radius: print("file {0:d}, event = {1:d}: r_init = {2:0.2f} mm".format(index, event, r_init)) continue # get event of 'evt'-tree: rtree_evt.GetEntry(event) # get evtID of the tree and compare with event: evt_id = int(rtree_evt.GetBranch('evtID').GetLeaf('evtID').GetValue()) if evt_id != event: sys.exit("ERROR: evtID of tree ({0:d}) != {1:d}".format(evt_id, event)) print("\nanalyze event {0:d}".format(evt_id)) # increment number_analyzed: number_analyzed += 1 # get number of photons of this event: n_photons = int(rtree_evt.GetBranch('nPhotons').GetLeaf('nPhotons').GetValue()) # preallocate empty array to build default hittime-histogram: hittime_array = [] # loop over every photon in the event: for index1 in range(n_photons): # get PMT ID, where photon is absorbed: pmt_id = int(rtree_evt.GetBranch('pmtID').GetLeaf('pmtID').GetValue(index1)) # only 20 inch PMTs (PMT ID of 20 inch PMTs are below 21000, PMT ID of 3 inch PMTs start at 290000): if pmt_id < 25000: # get nPE for this photon: n_pe = int(rtree_evt.GetBranch('nPE').GetLeaf('nPE').GetValue(index1)) # check, if photon produces only 1 PE: if n_pe != 1: print("{1:d} PE for 1 photon in event {0:d} in file user_positron_{3:d}_MeV_{2:d}.root" .format(evt_id, n_pe, index, kin_energy)) # get hittime of this photon: hit_time = float(rtree_evt.GetBranch('hitTime').GetLeaf('hitTime').GetValue(index1)) # 
append hittime to array: hittime_array.append(hit_time) else: continue """ analyze prompt signal: """ # build histogram, where hittimes are saved: # set bin-edges of hittime histogram in ns: bins_hittime = np.arange(min_t, max_t + 2 * bin_width, bin_width) # build hittime histogram: npe_per_hittime, bin_edges_hittime = np.histogram(hittime_array, bins_hittime) # get index of bins_hittime corresponding to min_time (should be index = 0): index_min_hittime_prompt = int(min_t / bin_width) # Where does prompt signal end? # get index of bins_hittime corresponding to t_limit: index_time_limit_prompt = int(t_limit / bin_width) # check if npe_per_hittime is 0 for this index: if npe_per_hittime[index_time_limit_prompt] == 0: # prompt signal already 0: index_max_hittime_prompt = index_time_limit_prompt else: # prompt signal not yet 0. # loop over npe_per_hittime from index_time_limit_prompt until npe_per_hittime is 0: for index2 in range(index_time_limit_prompt, index_time_limit_prompt+200): if npe_per_hittime[index2] == 0: index_max_hittime_prompt = index2 break # calculate nPE as function of hittime only for prompt time window (from min_hittime_prompt to # max_hittime_prompt+1 (last index should be included)): npe_per_hittime_prompt = npe_per_hittime[index_min_hittime_prompt:index_max_hittime_prompt+1] # bin edges of hittime histogram only for prompt time window: bins_hittime_prompt = bin_edges_hittime[index_min_hittime_prompt:index_max_hittime_prompt+1] # get the minimum and maximum time of the prompt signal time window in ns: min_time_prompt = bins_hittime_prompt[0] max_time_prompt = bins_hittime_prompt[-1] # sum up the values of npe_per_hittime_prompt to get the total number of pe of the prompt signal: number_pe_prompt = np.sum(npe_per_hittime_prompt) # append number of pe to array: num_pe_total = np.append(num_pe_total, number_pe_prompt) h1 = plt.figure(1) plt.step(bins_hittime_prompt, npe_per_hittime_prompt, label="number of pe = {0:d}".format(number_pe_prompt)) plt.xlabel("hit-time in ns") plt.ylabel("number of p.e. 
per bin (bin-width = {0:0.2f} ns)".format(bin_width)) plt.title("Hit-time distribution of prompt time window of event {0:d}".format(evt_id)) plt.xlim(xmin=min_time_prompt, xmax=max_time_prompt) plt.legend() plt.grid() plt.savefig(output_path + "file{1:d}_evt{0:d}_positron_{2:d}_MeV.png".format(evt_id, index, kin_energy)) plt.close() # plt.show() # save npe_per_hittime_prompt to txt file: # build list, where 0th entry is start-hittime in ns, 1st entry is last-hittime in ns, 2nd entry is binwidth # in ns and the following entries are nPE of each hittime-bin of prompt signal: npe_per_hittime_prompt_save = [min_time_prompt, max_time_prompt, bin_width] npe_per_hittime_prompt_save.extend(npe_per_hittime_prompt) np.savetxt(output_path + "file{0:d}_evt{1:d}_positron_{2:d}_MeV.txt".format(index, evt_id, kin_energy), npe_per_hittime_prompt_save, fmt='%1.2f', header="Number of pe as function of the hittime of the prompt positron signal of file " "user_positron_{6:d}_MeV_{0:d}.root," "\nevent = {1:d}, (analyzed with hittime_distribution_positron.py, {2}):" "\ntime window of hittime: from {3:.3f} ns to {4:.3f} ns with bin-width = {5:0.3f} ns:" .format(index, evt_id, now, min_time_prompt, max_time_prompt, bin_width, kin_energy)) return num_pe_total, number_analyzed # get the date and time, when the script was run: date = datetime.datetime.now() NOW = date.strftime("%Y-%m-%d %H:%M") """ define time window and bin width: """ # set time window of whole signal in ns: min_time = -50 max_time = 1000000 # set time in ns, where the prompt signal should be 0: time_limit_prompt = 500 # Set bin-width of hittime histogram in ns: binwidth = 5.0 """ set parameter for volume cut (must be the same like for NC events): """ # cut-radius in mm: radius_cut = 16000 # path, where root files of positron simulation are saved: input_path_positron = "/local/scratch1/pipc51/astro/blum/positron_output/" # path, where hittime distributions (png and txt) are saved: output_path_positron = "/home/astro/blum/juno/atmoNC/data_NC/output_PSD/positron_hittime/" """ analyze positron hittime distribution for kinetic energy from 10 MeV to 100 MeV (uniformly distributed): """ # first file of positron simulation: first_file_positron = 0 # last file of positron simulation: last_file_positron = 99 # number of events per file: number_evts_per_file = 100 # total number of positron events: number_evts_total = (last_file_positron - first_file_positron + 1) * number_evts_per_file # preallocate number of events that are analyzed (pass volume cut): number_analyzed = 0 """ load position of the PMTs and corresponding PMT ID from file PMT_position.root: """ file_PMT_position = "/home/astro/blum/juno/atmoNC/PMT_information/PMT_position.root" # array with PMT ID and corresponding x, y, z position in mm: pmtID_pos_file, x_pos_pmt, y_pos_pmt, z_pos_pmt = NC_background_functions.get_pmt_position(file_PMT_position) """ load 'time resolution' in ns of the 20 inch PMTs and corresponding PMT ID from file PmtData.root: """ file_PMT_time = "/home/astro/blum/juno/atmoNC/PMT_information/PmtData.root" # array with PMT ID and corresponding sigma in ns: pmtID_time_file, sigma_time_20inch = NC_background_functions.get_20inchpmt_tts(file_PMT_time) # set TTS (FWHM) of the 3inch PMTs in ns: tts_3inch = 5.0 # calculate time resolution (sigma) for the 3inch PMTs in ns: sigma_time_3inch = tts_3inch / (2 * np.sqrt(2 * np.log(2))) # set effective speed of light in the liquid scintillator in mm/ns (see page 7 of c_effective_JUNO-doc-3144-v2.pdf in # folder 
/home/astro/blum/PhD/paper/Pulse_Shape_Discrimination/). Effective refraction index in LS n_eff = 1.54. # c/n_eff = 299792458 m / 1.54 s ~ 194670427 m/s = 194670427 * 10**(-6) mm/ns ~ 194.67 mm/ns: c_effective = 194.67 # loop over root files with positron simulation: for index in range(first_file_positron, last_file_positron + 1, 1): # load user_positron_{}.root file: rfile = ROOT.TFile(input_path_positron + "user_positron_{0:d}.root".format(index)) print("... read {0}...".format(rfile)) # get the "evt"-TTree from the TFile: rtree_evt = rfile.Get("evt") # get geninfo tree from TFile: rtree_geninfo = rfile.Get("geninfo") # get prmtrkdep tree from TFile: rtree_prmtrkdep = rfile.Get("prmtrkdep") # get the number of events in the 'evt' Tree: num_events_evt = rtree_evt.GetEntries() # check number of events: if num_events_evt != number_evts_per_file: sys.exit("ERROR: number of events in root file ({0:d}) != {1:d}" .format(num_events_evt, number_evts_per_file)) # loop over the events: for event in range(num_events_evt): # print("\nanalyze event {0:d}".format(evt_id)) """ check volume cut: """ # get current event in prmtrkdep tree: rtree_prmtrkdep.GetEntry(event) # get number of initial particles: n_init_part = int(rtree_prmtrkdep.GetBranch('nInitParticles').GetLeaf('nInitParticles').GetValue()) if n_init_part != 1: # check if there is just one initial positron: sys.exit("ERROR: more than 1 initial particles in event {0:d}".format(event)) # get quenched deposited energy of the initial particle in MeV: qedep_prmtrkdep = float(rtree_prmtrkdep.GetBranch("Qedep").GetLeaf("Qedep").GetValue()) # get current event in geninfo tree: rtree_geninfo.GetEntry(event) # get number of initial particles: n_init_particles = int(rtree_geninfo.GetBranch('nInitParticles').GetLeaf('nInitParticles').GetValue()) if n_init_particles != 1: # check if there is just one initial positron: sys.exit("ERROR: more than 1 initial particles in event {0:d}".format(event)) # get initial x, y, z position: x_init = float(rtree_geninfo.GetBranch('InitX').GetLeaf('InitX').GetValue()) y_init = float(rtree_geninfo.GetBranch('InitY').GetLeaf('InitY').GetValue()) z_init = float(rtree_geninfo.GetBranch('InitZ').GetLeaf('InitZ').GetValue()) # do vertex reconstruction with function position_smearing(): # Smear x,y and z position of the initial position (returns reconstructed position in mm): x_reconstructed = NC_background_functions.position_smearing(x_init, qedep_prmtrkdep) y_reconstructed = NC_background_functions.position_smearing(y_init, qedep_prmtrkdep) z_reconstructed = NC_background_functions.position_smearing(z_init, qedep_prmtrkdep) # calculate distance to detector center in mm: r_reconstructed = np.sqrt(x_reconstructed**2 + y_reconstructed**2 + z_reconstructed**2) # check if event passes the volume cut: if r_reconstructed >= radius_cut: # event is rejected by volume cut. print("file {0:d}, event = {1:d}: r_init = {2:0.2f} mm".format(index, event, r_reconstructed)) # go to next event continue else: # event passes volume cut. 
increment number_analyzed: number_analyzed += 1 """ calculate the real hittime distribution (time of flight correction with reconstructed position and time smearing with TTS for each hit): """ # get event of 'evt'-tree: rtree_evt.GetEntry(event) # get evtID of the tree and compare with event: evt_id = int(rtree_evt.GetBranch('evtID').GetLeaf('evtID').GetValue()) if evt_id != event: sys.exit("ERROR: evtID of tree ({0:d}) != {1:d}".format(evt_id, event)) # get number of photons of this event: n_photons = int(rtree_evt.GetBranch('nPhotons').GetLeaf('nPhotons').GetValue()) # preallocate list, where corrected (real) hittimes are saved: hittime_array = [] # loop over every photon in the event: for index1 in range(n_photons): # get number of pe per photon and check if it is equal to 1: npe = int(rtree_evt.GetBranch('nPE').GetLeaf('nPE').GetValue(index1)) if npe != 1: sys.exit("ERROR: more than one p.e. per photon in event {0:d}, file {1}".format(event, index)) # get the pmtID of the hit PMT: pmtID = int(rtree_evt.GetBranch('pmtID').GetLeaf('pmtID').GetValue(index1)) """ time of flight correction: """ # get hittime of PMT from tree in ns: hittime = float(rtree_evt.GetBranch('hitTime').GetLeaf('hitTime').GetValue(index1)) # get position of the PMT with specific pmtID (pmtID is ascending number from 0 to 17738 (17739 large PMTs) # and from 300000 to 336571 (36572 small PMTs)). # For large PMTs -> For 20inch PMTs, the pmtID is equal to index of x,y,z_pos_pmt array. # For small PMTs -> For 3inch PMTs, the pmtID - (300000 - 17739) is equal to index of x,y,z_pos_pmt array. # check if PMT is 20 inch or 3inch (pmtID < 50000 means 20inch PMT): if pmtID < 50000: # 20inch PMT: # get PMT position in mm from arrays: x_pmt = x_pos_pmt[pmtID] y_pmt = y_pos_pmt[pmtID] z_pmt = z_pos_pmt[pmtID] else: # 3inch PMT: # calculate index of pos_pmt array that correspond to pmtID of 3inch PMTs (for example: # first small PMT: 300000-282261 = 17739, last small PMT: 336571-282261 = 54310) index_3inch = pmtID - 282261 # get PMT position in mm from arrays: x_pmt = x_pos_pmt[index_3inch] y_pmt = y_pos_pmt[index_3inch] z_pmt = z_pos_pmt[index_3inch] # calculate distance between reconstructed position of event and position of PMT (in mm): distance_tof = np.sqrt((x_reconstructed - x_pmt)**2 + (y_reconstructed - y_pmt)**2 + (z_reconstructed - z_pmt)**2) # calculate time of flight in ns: time_of_flight = distance_tof / c_effective """ time resolution of PMT: """ # get time resolution of PMT with specific pmtID (pmtID is ascending number from 0 to 17738 (17739 large # PMTs)) -> For 20inch PMTs, the pmtID is equal to index of sigma_time_20inch array. 
# check if PMT is 20 inch or 3inch (pmtID < 50000 means 20inch PMT): if pmtID < 50000: # 20inch PMT: # get time resolution (sigma) of PMT in ns from array: sigma_pmt = sigma_time_20inch[pmtID] else: # 3inch PMT: sigma_pmt = sigma_time_3inch # consider time resolution of PMT by generating normal distributed random number with mu = hittime and # sigma = sigma_pmt (only the hittime at the PMT must be smeared, not the time-of-flight): hittime_tts = np.random.normal(hittime, sigma_pmt) """ calculate the 'real' hittime of the photon in ns: """ hittime_real = hittime_tts - time_of_flight if hittime_real < min_time: print("------") print(hittime_real) print(pmtID) print(sigma_pmt) # append real hittime to array: hittime_array.append(hittime_real) """ analyze prompt signal: """ # build histogram, where hittimes are saved: # set bin-edges of hittime histogram in ns: bins_hittime = np.arange(min_time, max_time + 2 * binwidth, binwidth) # build hittime histogram: npe_per_hittime, bin_edges_hittime = np.histogram(hittime_array, bins_hittime) # get index of bins_hittime corresponding to min_time (should be index = 0): index_min_hittime_prompt = 0 # Where does prompt signal end? # get index of bins_hittime corresponding to t_limit: index_time_limit_prompt = int((time_limit_prompt + np.abs(min_time)) / binwidth) # check if npe_per_hittime (and following two bins) are 0 for this index: if (npe_per_hittime[index_time_limit_prompt] == npe_per_hittime[index_time_limit_prompt+1] == npe_per_hittime[index_time_limit_prompt+2] == 0): # prompt signal already 0: index_max_hittime_prompt = index_time_limit_prompt else: # prompt signal not yet 0. # loop over npe_per_hittime from index_time_limit_prompt until npe_per_hittime (and following two bins) # are 0: for index2 in range(index_time_limit_prompt, index_time_limit_prompt + 200): if npe_per_hittime[index2] == npe_per_hittime[index2+1] == npe_per_hittime[index2+2] == 0: index_max_hittime_prompt = index2 break # calculate nPE as function of hittime only for prompt time window (from min_hittime_prompt to # max_hittime_prompt+1 (last index should be included)): npe_per_hittime_prompt = npe_per_hittime[index_min_hittime_prompt:index_max_hittime_prompt + 1] # bin edges of hittime histogram only for prompt time window: bins_hittime_prompt = bin_edges_hittime[index_min_hittime_prompt:index_max_hittime_prompt + 1] # get the minimum and maximum time of the prompt signal time window in ns: min_time_prompt = bins_hittime_prompt[0] max_time_prompt = bins_hittime_prompt[-1] # sum up the values of npe_per_hittime_prompt to get the total number of pe of the prompt signal: number_pe_prompt = np.sum(npe_per_hittime_prompt) """ save hittime distribution in png file """ h1 = plt.figure(1) plt.step(bins_hittime_prompt, npe_per_hittime_prompt, label="number of pe = {0:d}".format(number_pe_prompt)) plt.xlabel("hit-time in ns") plt.ylabel("number of p.e. 
per bin (bin-width = {0:0.2f} ns)".format(binwidth)) plt.title("Hit-time distribution of prompt time window of event {0:d}".format(evt_id)) plt.xlim(xmin=min_time_prompt, xmax=max_time_prompt) plt.legend() plt.grid() plt.savefig(output_path_positron + "file{1:d}_evt{0:d}_positron.png".format(evt_id, index)) plt.close() # plt.show() # save npe_per_hittime_prompt to txt file: # build list, where 0th entry is start-hittime in ns, 1st entry is last-hittime in ns, 2nd entry is binwidth # in ns and the following entries are nPE of each hittime-bin of prompt signal: npe_per_hittime_prompt_save = [min_time_prompt, max_time_prompt, binwidth] npe_per_hittime_prompt_save.extend(npe_per_hittime_prompt) np.savetxt(output_path_positron + "file{0:d}_evt{1:d}_positron.txt".format(index, evt_id), npe_per_hittime_prompt_save, fmt='%1.2f', header="Number of pe as function of the corrected hittime (time-of-flight correction and TTS " "smearing) of the prompt positron signal of file " "user_positron_{0:d}.root," "\nevent = {1:d}, (analyzed with hittime_distribution_positron.py, {2}):" "\ntime window of hittime: from {3:.3f} ns to {4:.3f} ns with bin-width = {5:0.3f} ns:" .format(index, evt_id, NOW, min_time_prompt, max_time_prompt, binwidth)) print("total number of events = {0:d}".format(number_evts_total)) print("number of analyzed events = {0:d}".format(number_analyzed)) """ analyze 10 MeV positron hittime distributions """ # # event per root file: # number_evts_positron = 10 # # # first file of positron simulation for 10 MeV: # first_file_10 = 0 # # last file of positron simulation for 10 MeV: # last_file_10 = 99 # # kinetic energy of positrons in MeV: # energy_positron_10 = 10 # # total number of positron events with 10 MeV: # number_evts_total_10 = (last_file_10 - first_file_10 + 1) * number_evts_positron # # # get hittime distribution (return values: number of pe of each event, number of events that are analyzed): # number_pe_10, number_analyzed_10 = get_hittime_from_rootfile_fixenergy(input_path_positron, output_path_positron, # first_file_10, last_file_10, # number_evts_positron, energy_positron_10, # min_time, max_time, time_limit_prompt, # binwidth, radius_cut, NOW) # # # check the distribution of total nPE for all events with positrons of 10 MeV kinetic energy: # # calculate mean of number of PE: # mean_10_MeV = np.mean(number_pe_10) # # display number_pe_10 in histogram: # h1 = plt.figure(1, figsize=(15, 8)) # n_PE_10MeV, bins_10MeV, patches1 = plt.hist(number_pe_10, align='mid', bins=100, # label="{0:d} positrons with kinetic energy = {1:d} MeV" # .format(number_evts_total_10, energy_positron_10)) # plt.vlines(mean_10_MeV, ymin=0, ymax=max(n_PE_10MeV), label="mean = {0:0.2f} nPE".format(mean_10_MeV)) # plt.xlabel("number of PE (per positron)", fontsize=13) # plt.ylabel("entries per bin", fontsize=13) # plt.title("Number of PE of 10 MeV positrons", fontsize=18) # plt.legend() # plt.grid() # plt.savefig(output_path_positron + "hist_nPE_10_MeV.png") # plt.close() """ analyze 100 MeV positron hittime distributions """ # # event per root file: # number_evts_positron = 10 # # # first file of positron simulation for 100 MeV: # first_file_100 = 0 # # last file of positron simulation for 100 MeV: # last_file_100 = 99 # # kinetic energy of positrons in MeV: # energy_positron_100 = 100 # # total number of positron events with 10 MeV: # number_evts_total_100 = (last_file_100 - first_file_100 + 1) * number_evts_positron # # number_pe_100, number_analyzed_100 = 
get_hittime_from_rootfile_fixenergy(input_path_positron, output_path_positron, # first_file_100, last_file_100, # number_evts_positron, energy_positron_100, # min_time, max_time, time_limit_prompt, # binwidth, radius_cut, NOW) # # # check the distribution of total nPE for all events with positrons of 100 MeV kinetic energy: # # calculate mean of number of PE: # mean_100_MeV = np.mean(number_pe_100) # # display number_pe_100 in histogram: # h2 = plt.figure(2, figsize=(15, 8)) # n_PE_100MeV, bins_100MeV, patches2 = plt.hist(number_pe_100, align='mid', bins=100, # label="{0:d} positrons with kinetic energy = {1:d} MeV" # .format(number_evts_total_100, energy_positron_100)) # plt.vlines(mean_100_MeV, ymin=0, ymax=max(n_PE_100MeV), label="mean = {0:0.2f} nPE".format(mean_100_MeV)) # plt.xlabel("number of PE (per positron)", fontsize=13) # plt.ylabel("entries per bin", fontsize=13) # plt.title("Number of PE of 100 MeV positrons", fontsize=18) # plt.legend() # plt.grid() # plt.savefig(output_path_positron + "hist_nPE_100_MeV.png") # plt.close()
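# A minimal sketch (hypothetical helper, not part of the script above) for reading one of the
# saved hittime txt files back in, assuming the layout written by np.savetxt above: entry 0 is
# the start hittime in ns, entry 1 the last hittime in ns, entry 2 the bin width in ns, and the
# remaining entries are the nPE per hittime bin of the prompt signal.
import numpy as np

def read_prompt_hittime_file(filename):
    # np.loadtxt skips the '#' header lines written by np.savetxt by default
    data = np.loadtxt(filename)
    min_time_prompt = data[0]
    max_time_prompt = data[1]
    bin_width = data[2]
    npe_per_bin = data[3:]
    return min_time_prompt, max_time_prompt, bin_width, npe_per_bin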
# single neuron: weighted sum of the inputs plus a bias
inputs = [1.2, 5.1, 2.1]
weights = [3.1, 2.1, 8.7]
bias = 3
output = inputs[0]*weights[0] + inputs[1]*weights[1] + inputs[2]*weights[2] + bias
print(output)
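# A minimal sketch (not part of the snippet above) of the same weighted sum written with
# NumPy's dot product; the import and array conversion are assumptions added for illustration.
import numpy as np

np_inputs = np.array([1.2, 5.1, 2.1])
np_weights = np.array([3.1, 2.1, 8.7])
np_bias = 3
np_output = np.dot(np_inputs, np_weights) + np_bias  # same value as the explicit sum above
print(np_output)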
# -*- coding: utf-8 -*- import itertools class Solution: def combine(self, n, k): return [list(el) for el in itertools.combinations(range(1, n + 1), k)] if __name__ == "__main__": solution = Solution() assert [ [1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4], ] == solution.combine(4, 2)
class Dispatcher(object):
    def __init__(self, handlers=None):
        # avoid a mutable default argument; fall back to an empty handler list
        self.handlers = handlers if handlers is not None else []

    def handle_request(self, request):
        # pass the request through every handler in order, feeding each result to the next
        for handle in self.handlers:
            request = handle(request)
        return request


def function_1(in_string):
    print(in_string)
    return "".join([x for x in in_string if x != "1"])


def function_2(in_string):
    print(in_string)
    return "".join([x for x in in_string if x != "2"])


def function_3(in_string):
    print(in_string)
    return "".join([x for x in in_string if x != "3"])


def function_4(in_string):
    print(in_string)
    return "".join([x for x in in_string if x != "4"])


def main(request):
    dispatcher = Dispatcher([function_1, function_2, function_3, function_4])
    dispatcher.handle_request(request)


if __name__ == '__main__':
    main("1221345439")
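# A small, hypothetical follow-up to the dispatcher above: capture the value returned by
# handle_request instead of discarding it. Each handler strips one digit, so the input
# "1221345439" loses its 1s, 2s, 3s and 4s and only "59" remains.
pipeline = Dispatcher([function_1, function_2, function_3, function_4])
final = pipeline.handle_request("1221345439")
print(repr(final))  # expected: '59'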
from dataclasses import *


@dataclass
class TelephonBook:
    name: str
    mail: str
    tel: str
    remark: str
    member: str


def load(new):
    address = []
    with open(r"C:\Users\admin\OneDrive\デスクトップ\python1\08\20k1026-07-address.txt", encoding="UTF8") as file:
        for line in file:
            info = line.rstrip("\n")  # strip the trailing newline
            data = info.split(",")  # split the line into a list
            address.append(data)
    print(address)  # before adding the new entry
    new = [new.name, new.mail, new.tel, new.remark, new.member]
    address.append(new)
    print(address)  # after adding the new entry
    return address


def save(address):
    # with open(r"C:\Users\admin\OneDrive\デスクトップ\python1\08\20k1026-07-sample.txt", encoding="UTF8", mode="a") as file:
    with open(r"C:\Users\admin\OneDrive\デスクトップ\python1\08\20k1026-07-address.txt", encoding="UTF8", mode="w") as file:
        for x in address:
            file.write(f"{x[0]},{x[1]},{x[2]},{x[3]},{x[4]}\n")


def add():
    name = input("Name:")
    mail = input("Email address:")
    tel = input("Phone number:")
    remark = input("Birthday:")
    member = input("Club:")
    new_address = TelephonBook(name, mail, tel, remark, member)
    return new_address


new_address = add()
address = load(new_address)
save(address)
from treadmill.infra.setup import base_provision from treadmill.infra import configuration, constants, exceptions, connection from treadmill.api import ipa class LDAP(base_provision.BaseProvision): def setup( self, image, count, key, cidr_block, tm_release, instance_type, app_root, ipa_admin_password, proid, subnet_name, ): ipa_server_hostname, = self.hostnames_for( roles=[constants.ROLES['IPA']] ) if not ipa_server_hostname: raise exceptions.IPAServerNotFound() _ldap_hostnames = self._hostname_cluster(count=count) _ipa = ipa.API() for _idx in _ldap_hostnames.keys(): _ldap_h = _ldap_hostnames[_idx] otp = _ipa.add_host(hostname=_ldap_h) _ipa.service_add('ldap', _ldap_h, { 'domain': connection.Connection.context.domain, 'hostname': _ldap_h, }) self.name = _ldap_h self.configuration = configuration.LDAP( tm_release=tm_release, app_root=app_root, hostname=_ldap_h, ipa_admin_password=ipa_admin_password, ipa_server_hostname=ipa_server_hostname, otp=otp, proid=proid ) super().setup( image=image, count=count, cidr_block=cidr_block, key=key, instance_type=instance_type, subnet_name=subnet_name, sg_names=[constants.COMMON_SEC_GRP], )
from django.db import models
from accounts.models import User

# Create your models here.


class Order(models.Model):
    username = models.CharField(max_length=200, blank=True, null=True)
    order_id = models.CharField(max_length=200, blank=True, null=True)
    address = models.CharField(max_length=200, blank=True, null=True)
    total = models.CharField(max_length=200, blank=True, null=True)
    contact = models.CharField(max_length=200, blank=True, null=True)
    email = models.EmailField(max_length=100, blank=True, null=True)
    paid = models.BooleanField(default=False)
    details = models.TextField(max_length=2000, null=True, blank=True)

    def __str__(self):
        if self.username is not None:
            return "Username : " + self.username + " Order ID: " + str(self.order_id) + " Email : " + self.email + " Contact : " + self.contact
        return "Order ID: " + str(self.order_id) + " Email : " + self.email + " Contact : " + self.contact


class Order_Count(models.Model):
    email = models.EmailField(max_length=100, blank=True, null=True)
    Ordercount = models.IntegerField(default=0, null=True, blank=True)

    def __str__(self):
        return str(self.email)
# This file is part of beets. # Copyright 2016, Blemjhoo Tezoulbr <baobab@heresiarch.info>. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. """ Clears tag fields in media files.""" import re from beets.plugins import BeetsPlugin from mediafile import MediaFile from beets.importer import action from beets.ui import Subcommand, decargs, input_yn import confuse __author__ = 'baobab@heresiarch.info' class ZeroPlugin(BeetsPlugin): def __init__(self): super().__init__() self.register_listener('write', self.write_event) self.register_listener('import_task_choice', self.import_task_choice_event) self.config.add({ 'auto': True, 'fields': [], 'keep_fields': [], 'update_database': False, }) self.fields_to_progs = {} self.warned = False """Read the bulk of the config into `self.fields_to_progs`. After construction, `fields_to_progs` contains all the fields that should be zeroed as keys and maps each of those to a list of compiled regexes (progs) as values. A field is zeroed if its value matches one of the associated progs. If progs is empty, then the associated field is always zeroed. """ if self.config['fields'] and self.config['keep_fields']: self._log.warning( 'cannot blacklist and whitelist at the same time' ) # Blacklist mode. elif self.config['fields']: for field in self.config['fields'].as_str_seq(): self._set_pattern(field) # Whitelist mode. elif self.config['keep_fields']: for field in MediaFile.fields(): if (field not in self.config['keep_fields'].as_str_seq() and # These fields should always be preserved. field not in ('id', 'path', 'album_id')): self._set_pattern(field) def commands(self): zero_command = Subcommand('zero', help='set fields to null') def zero_fields(lib, opts, args): if not decargs(args) and not input_yn( "Remove fields for all items? (Y/n)", True): return for item in lib.items(decargs(args)): self.process_item(item) zero_command.func = zero_fields return [zero_command] def _set_pattern(self, field): """Populate `self.fields_to_progs` for a given field. Do some sanity checks then compile the regexes. """ if field not in MediaFile.fields(): self._log.error('invalid field: {0}', field) elif field in ('id', 'path', 'album_id'): self._log.warning('field \'{0}\' ignored, zeroing ' 'it would be dangerous', field) else: try: for pattern in self.config[field].as_str_seq(): prog = re.compile(pattern, re.IGNORECASE) self.fields_to_progs.setdefault(field, []).append(prog) except confuse.NotFoundError: # Matches everything self.fields_to_progs[field] = [] def import_task_choice_event(self, session, task): if task.choice_flag == action.ASIS and not self.warned: self._log.warning('cannot zero in \"as-is\" mode') self.warned = True # TODO request write in as-is mode def write_event(self, item, path, tags): if self.config['auto']: self.set_fields(item, tags) def set_fields(self, item, tags): """Set values in `tags` to `None` if the field is in `self.fields_to_progs` and any of the corresponding `progs` matches the field value. 
Also update the `item` itself if `update_database` is set in the config. """ fields_set = False if not self.fields_to_progs: self._log.warning('no fields, nothing to do') return False for field, progs in self.fields_to_progs.items(): if field in tags: value = tags[field] match = _match_progs(tags[field], progs) else: value = '' match = not progs if match: fields_set = True self._log.debug('{0}: {1} -> None', field, value) tags[field] = None if self.config['update_database']: item[field] = None return fields_set def process_item(self, item): tags = dict(item) if self.set_fields(item, tags): item.write(tags=tags) if self.config['update_database']: item.store(fields=tags) def _match_progs(value, progs): """Check if `value` (as string) is matching any of the compiled regexes in the `progs` list. """ if not progs: return True for prog in progs: if prog.search(str(value)): return True return False
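# A small, hypothetical usage sketch of the module-level _match_progs helper defined in the
# zero plugin above, assuming it is called from within (or imported from) the plugin module:
# with an empty prog list every value matches (the field is always zeroed); otherwise at least
# one compiled regex has to match the stringified value.
import re

progs = [re.compile("eac", re.IGNORECASE), re.compile("lame", re.IGNORECASE)]
print(_match_progs("Ripped with EAC", progs))  # True  -> field would be zeroed
print(_match_progs("no match here", progs))    # False -> field kept
print(_match_progs("anything", []))            # True  -> empty prog list always matches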
import logging from django.urls import reverse from django.contrib import admin from django.db.models import Model from django.db.models import CASCADE from django.db.models import PROTECT from django.db.models import Sum, Max from django.db.models import CharField from django.db.models import TextField from django.db.models import ForeignKey from django.db.models import DecimalField from django.db.models import DateTimeField from django.db.models import PositiveIntegerField from django.core.validators import validate_comma_separated_integer_list from .dish import Dish class Map(Model): name = CharField(verbose_name='название', max_length=250) source = CharField(verbose_name='источник', blank=True, max_length=250, null=True) approved = ForeignKey('People', verbose_name='утверждено', null=False, blank=True, default=None, related_name='map_approved', on_delete=PROTECT) agreed = ForeignKey('People', verbose_name='согласовано', null=False, blank=True, default=None, related_name='map_agreed', on_delete=PROTECT) technology = TextField(verbose_name='технология приготовления') created_at = DateTimeField(verbose_name='создано', auto_now_add=True) batch_output = CharField(verbose_name='выход порции', validators=[ validate_comma_separated_integer_list], max_length=255, blank=True) unit = ForeignKey('Unit', verbose_name='ед. изм', null=True, blank=True, default=None, on_delete=PROTECT) def __str__(self): return 'ТЕХНОЛОГИЧЕСКАЯ КАРТА:{}'.format(self.name) @property def netto(self): return self.items.aggregate(sum=Sum('netto'))['sum'] @property def brutto(self): return self.items.aggregate(sum=Sum('brutto'))['sum'] def save(self, *args, **kwargs): self.name = self.name.capitalize() super(Map, self).save(*args, **kwargs) if not self.dish.exists(): dish = Dish(tech_map=self) else: dish = self.dish.all()[0] dish.name = self.name dish.out = self.batch_output dish.unit = self.unit dish.save() def copy(self): new_map = self new_map.pk = None new_map.name += '_copy' new_map.save() for old_map_item in self.items.all(): logging.error(old_map_item) new_map_item = MapItems(map_doc_id=new_map.pk, product=old_map_item.product, brutto=old_map_item.brutto, netto=old_map_item.netto) new_map_item.save() logging.error(new_map_item) def get_absolute_url(self): return reverse('map-update', kwargs={'pk': self.pk}) class Meta: app_label = 'calculation' verbose_name = 'Технологическая карта' verbose_name_plural = 'Технологические карты' ordering = ['name'] class MapItems(Model): map_doc = ForeignKey(Map, verbose_name='документ', null=False, blank=True, default=None, on_delete=CASCADE, related_name='items') position = PositiveIntegerField(verbose_name='№', editable=False, db_index=True) product = ForeignKey('Dish', verbose_name='сырье', null=False, blank=True, default=None, on_delete=PROTECT) brutto = DecimalField(max_digits=15, decimal_places=3, default=0, verbose_name='брутто') netto = DecimalField(max_digits=15, decimal_places=3, blank=True, null=True, default=0, verbose_name='неттто') def __str__(self): return self.product.name def save(self, *args, **kwargs): if not self.position: position = self.map_doc.items.aggregate( Max('position'))['position__max'] or 0 self.position = position + 1 super(MapItems, self).save(*args, **kwargs) class MapItemsInline(admin.TabularInline): model = MapItems fields = ( 'position', 'product', 'brutto', 'netto', ) readonly_fields = ('position',) ordering = ['position'] @admin.register(Map) class MapAdmin(admin.ModelAdmin): fieldsets = (('', {'fields': (('name', 'source'), ('approved', 
'agreed'), ('batch_output', 'unit', 'technology')) }), ) inlines = [ MapItemsInline, ] list_display = ('name', 'approved', 'agreed',) search_fields = ('name', 'source',) list_display_links = ('name',)
""" mod_cmds.py Created: March 13, 2019 by Mimi Sun Purpose: cog with mod commands """ import discord from discord.ext import commands import typing class Mod_cmds(commands.Cog): def __init__(self, bot): self.client = bot #mass ban members @commands.command(aliases=[]) @commands.has_permissions(ban_members = True) @commands.bot_has_permissions(ban_members = True) async def ban(self, ctx, members: commands.Greedy[discord.Member], delete_days: typing.Optional[int] = 0, *, reason: str): """ Mass ban members with an optional argument to delete messages from the past [0-7] days this command can be invoked by: $ban @member @member2 spam bot $ban @member 7 spam bot $ban @member spam """ member_list = '' if member is not None: for member in members: await member.ban(delete_message_days = delete_days, reason=reason) member_list += member.name + ' ' await ctx.send(f"{member_list} banned.") @commands.command(hidden=True) @commands.has_permissions(ban_members = True) @commands.bot_has_permissions(ban_members = True) async def unban(self, ctx, user: int=None, *reason): '''Unban a member with a reason MUST be followed by user ID ''' user = discord.User(id=user) if user is not None: if reason: reason = ' '.join(reason) else: reason = None await ctx.guild.unban(user, reason=reason) await ctx.send(f"{str(user)} has been unbanned!") else: await ctx.send('**:no_entry:** | No users were unbanned.!') @commands.command() @commands.has_permissions(kick_members = True) @commands.bot_has_permissions(ban_members = True) async def bans(self, ctx): '''Shows a list of currently banned users''' users = await ctx.guild.bans() if len(users) > 0: msg = f'`{"ID":21}{"Name":25} reason \n' for entry in users: userId = entry.user.id userName = str(entry.user) if entry.user.bot: username = ':robot:' + userName reason = str(entry.reason) msg += f'{userId:<21}{userName:25} {reason}\n' embed = discord.Embed(color=0xe74c3c) #Red embed.set_thumbnail(url=ctx.guild.icon_url) embed.set_footer(text=f'Server: {ctx.guild.name}') embed.add_field(name='Banned users', value=msg + '`', inline=True) await ctx.send(embed=embed) else: await ctx.send('No banned users were found.') @commands.command() @commands.has_permissions(manage_messages = True) @commands.bot_has_permissions(manage_messages = True) async def purgebot(self, ctx): """ Delete bot messages """ await ctx.trigger_typing() def is_bot(m): return m.author == self.client.user deleted = await ctx.channel.purge(limit=100, check= is_bot) await ctx.send(f"Deleted {len(deleted)} message(s).") @commands.command() @commands.has_permissions(manage_messages = True) @commands.bot_has_permissions(manage_messages = True) async def purge(self, ctx, a: int): """ Mass delete previous [a] messages """ await ctx.trigger_typing() deleted = await ctx.channel.purge(limit=a) await ctx.send(f"Deleted {len(deleted)} message(s).") @purge.error @purgebot.error @bans.error @ban.error @unban.error async def commands_error(error, ctx): if isinstance(error, discord.ext.commands.error.MissingPermissions): errors = '' for e in missing_perms: errors += e + '\n' await ctx.send(e) def setup(bot): bot.add_cog(Mod_cmds(bot))
import os from app import create_app, db from app.models import User, Card, Notification, Task, Tag, Tagging app = create_app() @app.shell_context_processor def make_shell_context(): request_ctx = app.test_request_context() request_ctx.push() return { "db": db, "User": User, "Card": Card, "Notification": Notification, "Tag": Tag, "Task": Task, "Tagging": Tagging, } @app.template_filter() def get_env(key): return os.environ.get(key) app.jinja_env.filters["get_env"] = get_env
from PyTsetlinMachineCUDA.tm import RegressionTsetlinMachine from PyTsetlinMachineCUDA.tools import Booleanizer #from pyTsetlinMachineParallel.tm import RegressionTsetlinMachine #from pyTsetlinMachineParallel.tools import Binarizer import numpy as np from time import time from sklearn.model_selection import train_test_split import pandas as pd import numpy as np f1=open("../dataPool/win/data.csv","r") data=pd.read_csv(f1)[['people_vaccinated_per_hundred', 'people_fully_vaccinated_per_hundred', 'people_vaccinated', 'confirmed']].to_numpy() f2=open("../dataPool/win/death.csv","r") target=pd.read_csv(f2).to_numpy().flatten() X = data Y =target b = Booleanizer(max_bits_per_feature = 10) #b= Binarizer(max_bits_per_feature=32) b.fit(X) X_transformed = b.transform(X) ############################Description of argument############################### # Clauses: # Num of clauses,decides the expression power of the RTM # T: # A larger T requires more literal products to reach a particular value y. # Thus, increasing T makes the regression function increasingly fine-grained # A higher T reduces the overall probability of feedback, resulting in more conservative learning # Produces an ensemble effect by stimulating up to T clauses to output 2 for each input, but # not more than T # s: # Type Ib feedback is provided to TAs stochastically using this user set parameter # Is used by the TM to control the granularity of the clauses, playing a similar role as so-called # support in frequent itemset mining # Controls how fine-grained patterns the TM seeks ############################Description ends here################################## tm = RegressionTsetlinMachine(8000, 4000,12.5,max_weight=4) #tm = RegressionTsetlinMachine(80000, 20000,32,max_weight=14,number_of_state_bits=6) print("\nRMSD per runs:\n") tm_results = np.empty(0) X_train, X_test, Y_train, Y_test = train_test_split(X_transformed, Y) tm.fit(X_train, Y_train, epochs=100) tm_results = np.append(tm_results, np.sqrt(((tm.predict(X_test) - Y_test)**2).mean())) out=pd.DataFrame(tm.predict(X)).to_csv("output.csv") print("RMSD: %.2f " % (tm_results.mean()))
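# A minimal, hypothetical follow-up to the run above: booleanize a new raw feature row with the
# already-fitted Booleanizer and predict with the trained RegressionTsetlinMachine. The sample
# values are made up for illustration and follow the same column order as data.csv
# (people_vaccinated_per_hundred, people_fully_vaccinated_per_hundred, people_vaccinated, confirmed).
new_sample = np.array([[45.0, 30.0, 1.2e6, 5.0e5]])
new_sample_transformed = b.transform(new_sample)
print(tm.predict(new_sample_transformed))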
#Write a void function to draw a star, where the length of each side is 100 units. (Hint: You should turn the turtle #by 144 degrees at each point.) import turtle paper = turtle.Screen() leonardo = turtle.Turtle() def draw_star(n): """ Draw star :param n: length of side :return: """ for i in range (5): leonardo.right(144) leonardo.forward(n) draw_star(100) paper.exitonclick()
""" Stack Class Inherits from SLLIST (Single Linked List) Supports operations: Push, Pop, Top, Length, Is_Empty """ from data_structure_and_algorithms import SingleLinkedList class Stack: def __init__(self): """ """ self._linked_list = SingleLinkedList() self._length = 0 def is_empty(self): if self.length == 0: return True else: return False def push(self, value): """ Adds a new value to the stack. """ self._linked_list.push_to_front(value) def pop(self): """ Removes the first value from the stack and returns it. """ return self._linked_list.pop_front() def top(self): """ Get the value of the last element pushed to the stack but don't remove it's node. """ if self._linked_list.begin: return self._linked_list.begin.value else: return None @property def length(self): return self._linked_list.count()
from __future__ import unicode_literals # install django-multiselectfield from multiselectfield import MultiSelectField from django.db import models # Status du drone. En recopiant les l'attribut system_status de l'objet vehicule cree STATUS_drone = ((1, 'UNINIT'), (2, 'BOOT'), (3, 'CALIBRATING'), (4, 'STANDBY'), (5, 'ACTIVE'), (6, 'CRITICAL'), (7, 'EMERGENCY'), (8, 'POWEROFF')) # Etat de la livraison STATUS_delivery = ((1, 'NOT STARTED'), (2, 'STARTED'), (3, 'ABORTED'), (4, 'FINISHED')) STATUS_packet = ((1, 'Waiting'), (2, 'Delivering'), (3, 'Delivered')) class Customer(models.Model): name = models.CharField(max_length=255, blank=True, null=True) def __str__(self): return "\n Nom : {0}\n".format(self.name) class Meta: managed = True db_table = 'customer' class Drone(models.Model): status = MultiSelectField(choices=STATUS_drone, max_choices=1, null=True) height = models.IntegerField(blank=True, null=True) consumption = models.FloatField(blank=True, null=True) name = models.CharField(max_length=45, blank=True, null=True) radius = models.IntegerField(blank=True, null=True) weight = models.IntegerField(blank=True, null=True) customer_id = models.ForeignKey(Customer, on_delete=models.CASCADE, null= True) def __str__(self): return "\n Nom : {0}, Statut : {1}, Client : {2}".format(self.name, self.status, self.customer_id) class Meta: managed = True db_table = 'drone' class Droneposition(models.Model): position = models.CharField(max_length=255, blank=True, null=True) drone_id = models.ForeignKey(Drone, on_delete=models.CASCADE, null= True) creation_date = models.DateTimeField(auto_now_add=True) timestamp_value = models.DateTimeField(auto_now=True) def __str__(self): return "\n Drone : {0}, Position : {1}, Altitude : {2}, Timestamp : {3}\n".format(self.drone_id, self.position,self.altitude, self.timestamp_value) class Meta: managed = True db_table = 'droneposition' class Stock(models.Model): name = models.CharField(max_length=255, blank=True, null=True) position = models.CharField(max_length=255, blank=True, null=True) def __str__(self): return "\n Name : {0} , Position : {1}".format(self.name,self.position) class Meta: managed = True db_table = 'stock' class Packet(models.Model): name = models.CharField(max_length=255, blank=True, null=True) status = MultiSelectField(choices=STATUS_packet,max_choices=1, null=True) weight = models.FloatField(db_column='packetWeight', blank=True, null=True) stock_id = models.ForeignKey(Stock, on_delete=models.CASCADE, null= True) def __str__(self): return "\n Code : {0}, Statut : {1}, Stock : {2}, Weight : {3}]".format(self.name, self.status, self.stock_id,self.weight) class Meta: managed = True db_table = 'packet' class Delivery(models.Model): name = models.CharField(max_length=255, blank=True, null=True) position = models.CharField(max_length=255, blank=True, null=True) drone_id = models.ForeignKey(Drone, on_delete=models.CASCADE, null= True) stock_id = models.ForeignKey(Stock, on_delete=models.CASCADE, null= True) packet = models.ForeignKey(Packet, on_delete=models.CASCADE, null=True) status = MultiSelectField(choices=STATUS_delivery, max_choices=1, null=True) def __str__(self): return "\nTitre : {0}, Drone : {1}, Stock : {2}, Produit : {3}, Destination position : {4}".format(self.name, self.drone_id, self.stock_id, self.packet,self.position) class Meta: managed = True db_table = 'delivery' class Station(models.Model): name = models.CharField(max_length=255, blank=True, null=True) long = models.FloatField(blank=True, null=True) lat = 
models.FloatField(blank=True, null=True) num_charged_battery = models.IntegerField(blank=True, null=True) anticipated_charged_battery = models.IntegerField(blank=True, null=True) def __str__(self): return "\nNom : {0}, Longitude : {1}, Latitude : {2}, Nombre de batteries chargees : {3}".format(self.name, self.long, self.lat, self.num_charged_battery) class Meta: managed = True db_table = 'Station' class Charginglog(models.Model): station_id = models.ForeignKey(Station, on_delete=models.CASCADE, null= True) drone_id = models.ForeignKey(Drone, on_delete=models.CASCADE, null= True) creation_date = models.DateTimeField(auto_now_add=True) timestamp_value = models.DateTimeField(auto_now=True) def __str__(self): return "\n Drone : {0}, Station : {1}, Date de creation : {2}, Timestamp : {3}".format(self.drone_id, self.station_id, self.creation_date, self.timestamp_value) class Meta: managed = True db_table = 'charginglog'
from enum import Enum class Vulnerability: def __init__(self, kind=None, description=None, transactions=None): self.type = kind self.description = description self.transactions = transactions self.tested = False self.confirmed = False def __str__(self): return """ Type = {type} Description = {description} Transactions = {transactions} """.format( type=self.type, description=self.description, transactions=self.transactions ) class VulnerabilityType(Enum): KILL_ONLY = 0 KILL_AND_WITHDRAW = 1 ETHER_THEFT = 2
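# A small, hypothetical usage sketch of the Vulnerability / VulnerabilityType classes above
# (the description and transaction value are made up for illustration):
v = Vulnerability(kind=VulnerabilityType.ETHER_THEFT,
                  description="unprotected function allows ether withdrawal",
                  transactions=["0x..."])
print(v)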
lb = int(input("enter lower bound: "))
ub = int(input("enter upper bound: "))

# print numbers in [lb, ub] that are divisible by 9 but not by 5
for n in range(lb, ub + 1):
    if n % 9 == 0 and n % 5 != 0:
        print(n)
class mysolution:
    def copybook(self, data, k):
        # split data into k contiguous groups of equal size
        # (assumes len(data) is divisible by k, as in the example below)
        length = len(data)
        size = length // k
        result = []
        for i in range(0, k):
            result.append(tuple(data[i * size:(i + 1) * size]))
        return result


way = mysolution()
data = (2, 5, 4, 3)
k = 2
res = way.copybook(data, k)
print(res)  # result: [(2, 5), (4, 3)]
print("LETTER T HAS BEEN SUCCESSFULLY EXECUTED")
from django.urls import path from .views import stock_count_by_date, stock_count_by_person, stock_count_in_channel, channel_count_by_stock, person_count_by_stock urlpatterns = [ path("count/date", stock_count_by_date, name="CountStockFromDate"), path("count/sender", stock_count_by_person, name="CountStockByPerson"), path("count/channel", stock_count_in_channel, name="CountStockByChannel"), path("count/channelByStock", channel_count_by_stock, name="CountChannelByStock"), path("count/personByStock", person_count_by_stock,name="PersonByStockCount") ]
class triangulo:
    def __init__(self):
        self.LadoA = None
        self.LadoB = None
        self.LadoC = None

    def perim(self):
        # perimeter: sum of the three sides
        return self.LadoA + self.LadoB + self.LadoC

    def getMaiorLado(self):
        # largest of the three sides
        return max(self.LadoA, self.LadoB, self.LadoC)

    def getArea(self):
        # area via Heron's formula from the semi-perimeter
        s = self.perim() / 2
        return (s * (s - self.LadoA) * (s - self.LadoB) * (s - self.LadoC)) ** 0.5
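# A small usage sketch for the triangulo class above (the 3-4-5 sides are made-up values):
t = triangulo()
t.LadoA, t.LadoB, t.LadoC = 3, 4, 5
print(t.perim())         # 12
print(t.getMaiorLado())  # 5
print(t.getArea())       # 6.0 for a 3-4-5 right triangle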
import os import sys # import imgaug # https://github.com/aleju/imgaug (pip3 install imgaug) import time # Import Mask RCNN ROOT_DIR = os.path.abspath("../../") sys.path.append(ROOT_DIR) # To find local version of the library # Root directory of the project from samples.coco.coco import CocoConfig, CocoDataset from mrcnn import model as modellib import argparse # Path to trained weights file MODEL_PATH = os.path.join(ROOT_DIR, 'weights') # Directory to save logs and model checkpoints, if not provided # through the command line argument --logs DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs") DEFAULT_DATASET_YEAR = "2017" if __name__ == '__main__': # Parse command line arguments parser = argparse.ArgumentParser(description='Train Mask R-CNN on MS COCO.') parser.add_argument('--local', help='input directory of videos', action='store', default=False) parser.add_argument('--dataset', required=False, default='/specific/netapp5_2/gamir/DER-Roei/datasets/MSCoco', metavar="/path/to/coco/", help='Directory of the MS-COCO dataset') parser.add_argument('--year', required=False, default=DEFAULT_DATASET_YEAR, metavar="<year>", help='Year of the MS-COCO dataset (2014 or 2017) (default=2014)') parser.add_argument('--model', required=False, default="coco", metavar="/path/to/weights.h5", help="Path to weights .h5 file or 'coco'") parser.add_argument('--logs', required=False, default=DEFAULT_LOGS_DIR, metavar="/path/to/logs/", help='Logs and checkpoints directory (default=logs/)') parser.add_argument('--limit', required=False, default=500, metavar="<image count>", help='Images to use for evaluation (default=500)') parser.add_argument('--download', required=False, default=False, metavar="<True|False>", help='Automatically download and unzip MS-COCO files (default=False)', type=bool) parser.add_argument('--gpu', required=False, default=0, metavar="0, 1, ...", help='GPU number ro run', type=int) parser.add_argument('--workers', required=False, default=5, metavar="0, 1, ...", help='Number of workers', type=int) parser.add_argument('--queue_size', required=False, default=200, metavar="0, 1, ...", help='Number of workers', type=int) args = parser.parse_args() print("Model: ", args.model) print("Dataset: ", args.dataset) print("Year: ", args.year) print("Logs: ", args.logs) print("Auto Download: ", args.download) print("GPU: ", args.gpu) print("Number of Workers: ", args.workers) print("Number of Queue Size: ", args.queue_size) # # Define GPU training # os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu) # Use Local params if args.local: args.dataset = "/Users/roeiherzig/Datasets/MSCoco" args.model = "coco" args.workers = 0 args.queue_size = 10 # Configurations training config = CocoConfig() config.display() # Create model model = modellib.MaskRCNN(mode="training", config=config, model_dir=args.logs) # Select weights file to load if args.model.lower() == "coco": model_path = model.get_imagenet_weights() elif args.model.lower() == "last": # Find last trained weights model_path = model.find_last() elif args.model.lower() == "imagenet": # Start from ImageNet trained weights model_path = model.get_imagenet_weights() else: model_path = args.model # Load weights print("Loading weights ", model_path) model.load_weights(model_path, by_name=True) # # Save in a new locations # stmp = time.strftime("%c").replace(" ", "_") # model_path = os.path.join(MODEL_PATH, stmp) # create_folder(model_path) # model_path = os.path.join(model_path, stmp, "mask_rcnn.h5") # Training dataset. 
Use the training set and 35K from the validation set, as as in the Mask RCNN paper. dataset_train = CocoDataset() dataset_train.load_coco(args.dataset, "train", year=args.year, auto_download=args.download) # dataset_train.load_coco(args.dataset, "valminusminival", year=args.year, auto_download=args.download) dataset_train.prepare() # Validation dataset dataset_val = CocoDataset() dataset_val.load_coco(args.dataset, "val", year=args.year, auto_download=args.download) dataset_val.prepare() # Image Augmentation # Right/Left flip 50% of the time # augmentation = imgaug.augmenters.Fliplr(0.5) augmentation = None # Training - Fine tune all layers print("Fine tune all layers") model.train(dataset_train, dataset_val, learning_rate=config.LEARNING_RATE / 10, epochs=160, layers='all', augmentation=augmentation, workers_nb=args.workers, queue_size=args.queue_size)
from flask_wtf import FlaskForm from wtforms import TextAreaField, SubmitField from wtforms.validators import DataRequired, Length class MessageForm(FlaskForm): message = TextAreaField('Message', validators=[DataRequired(), Length(min=0, max=140)]) submit = SubmitField('Send')
# coding: utf-8 from flask_wtf import FlaskForm from flask import session from wtforms import StringField, PasswordField, SubmitField, SelectField, SelectMultipleField, TextAreaField from wtforms.validators import DataRequired, ValidationError from app.modles import User record_type = [(0, 'A'), (1, 'NS'), (2, 'CNAME'), (3, 'MX')] host_type = [(0, 'hk.zhxfei.com'), (1, 'sh.zhxfei.com'), (2, 'hkweb.zhxfei.com'), (3, 'qd.zhxfei.com')] class UserLoginForm(FlaskForm): account = StringField(label='账号', validators=[DataRequired('请输入账号!'), ], description='账号', render_kw={ 'class': "form-control", 'placeholder': "请输入账号!", 'required': 'required' } ) password = PasswordField(label='密码', validators=[DataRequired('请输入密码'), ], description='密码', render_kw={ 'class': "form-control", 'placeholder': "请输入密码!", 'required': 'required' } ) submit = SubmitField(label='登录', render_kw={ 'class': 'btn btn-primary btn-block btn-flat', 'id': "btn-sub" } ) def validate_account(self, field): account = field.data users = User.query.filter_by(name=account).count() if users == 0: raise ValidationError('账号不存在!') class PwdForm(FlaskForm): old_pwd = PasswordField(label='旧密码', validators=[DataRequired('请输入旧密码'), ], description='旧密码', render_kw={ 'style': 'width: 300px', 'class': "form-control", 'id': 'input_pwd', 'placeholder': "请输入旧密码!", 'required': 'required' } ) new_pwd = PasswordField(label='新密码', validators=[DataRequired('请输入新密码'), ], description='新密码', render_kw={ 'style': 'width: 300px', 'class': "form-control", 'id': 'input_newpwd', 'placeholder': "请输入新密码!", 'required': 'required' } ) submit = SubmitField( label='修改', render_kw={ 'class': 'btn btn-primary', 'id': "btn-sub" } ) def validate_old_pwd(self, field): input_old_pwd = field.data name = session['admin'] admin = User.query.filter_by(name=name).first() if not admin.check_pwd(input_old_pwd): raise ValidationError('旧密码输入错误!') class RecordAddForm(FlaskForm): name = StringField(label='记录名称', validators=[DataRequired('记录名!'), ], description='record name', render_kw={ 'style': "width: 80px; margin: auto; text-align:center", 'required': 'required' } ) value = StringField(label='value', validators=[DataRequired('value!'), ], description='record value', render_kw={ 'style': "width: 80px; margin: auto; text-align:center", 'required': 'required' } ) type = SelectField( label='record type', description='record type', coerce=int, choices=record_type, render_kw={ 'style': 'width: 80px; margin: auto;', 'class': 'form-control' } ) submit = SubmitField( label='添加', render_kw={ 'class': 'btn btn-primary btn-sm', 'id': "btn-sub" } ) class CommandCommitForm(FlaskForm): host = SelectMultipleField( label='主机', description='command will be execute in hosts', coerce=int, choices=host_type, render_kw={ 'class': 'form-control', 'id': 'select_host', 'data - placeholder': "Select Host", 'style': 'width: 30%' } ) content = StringField(label='command content', validators=[DataRequired('请输入执行命令!'), ], description='record name', render_kw={ 'class': "form-control", 'style': 'width: 50%' } ) submit = SubmitField( label='Send command', render_kw={ 'class': ' btn btn-default', 'id': "btn-sub", 'type': "submit" } ) result = TextAreaField( label='执行结果', render_kw={ 'class': "form-control", 'placeholder': "command execute result", 'readonly': "True", } )
from ._layout import LayoutValidator from ._data import DataValidator
import os def get_query(query_file: str): path = os.path.dirname(os.path.abspath(__file__)) graphql_file = os.path.join(path, query_file) with open(graphql_file, 'r') as query_file: query = query_file.read() return query comments_graphql_query = get_query('comments.graphql') pull_request_graphql_query = get_query('pull_request.graphql') pull_requests_graphql_query = get_query('pull_requests.graphql') reviews_graphql_query = get_query('reviews.graphql') user_graphql_query = get_query('user.graphql')
# This is a helper module that contains conveniences to access the MS COCO # dataset. You can modify at will. In fact, you will almost certainly have # to, or implement otherwise. # Limit GPU usage from os import environ print("Limiting gpu usage") environ['CUDA_VISIBLE_DEVICES'] = '2' import sys # This is evil, forgive me, but practical under the circumstances. # It's a hardcoded access to the COCO API. COCOAPI_PATH = '/scratch/lt2316-h18-resources/cocoapi/PythonAPI/' TRAIN_ANNOTATION_FILE = '/scratch/lt2316-h18-resources/coco/annotations/instances_train2017.json' VALIDATION_ANNOTATION_FILE = '/scratch/lt2316-h18-resources/coco/annotations/instances_val2017.json' TRAIN_CAPTION_FILE = '/scratch/lt2316-h18-resources/coco/annotations/captions_train2017.json' VAL_CAPTION_FILE = '/scratch/lt2316-h18-resources/coco/annotations/captions_val2017.json' TRAIN_IMAGE_DIR = '/scratch/lt2316-h18-resources/coco/train2017/' VALIDATION_IMAGE_DIR = '/scratch/lt2316-h18-resources/coco/val2017/' annotation_file = TRAIN_ANNOTATION_FILE caption_file = TRAIN_CAPTION_FILE image_directory = TRAIN_IMAGE_DIR sys.path.append(COCOAPI_PATH) from pycocotools.coco import COCO annotation_coco = None caption_coco = None category_dictionary = {} # OK back to normal. import random import skimage.io as io import skimage.transform as tform import numpy as np def setmode(mode): """ Set entire module's mode as 'train' or 'test' for the purpose of data extraction. """ global annotation_file global caption_file global image_directory global annotation_coco, caption_coco global category_dictionary if mode == "train": annotation_file = TRAIN_ANNOTATION_FILE caption_file = TRAIN_CAPTION_FILE image_directory = TRAIN_IMAGE_DIR elif mode == "test": annotation_file = VALIDATION_ANNOTATION_FILE caption_file = VAL_CAPTION_FILE image_directory = VALIDATION_IMAGE_DIR else: raise ValueError annotation_coco = COCO(annotation_file) caption_coco = COCO(caption_file) # To facilitate category lookup. cats = annotation_coco.getCatIds() category_dictionary = {x: (annotation_coco.loadCats(ids=[x])[0]['name']) for x in cats} def query(queries, exclusive=True): """ Collects mutually-exclusive lists of COCO ids by queries, so returns a parallel list of lists. (Setting 'exclusive' to False makes the lists non-exclusive.) e.g., exclusive_query([['toilet', 'boat'], ['umbrella', 'bench']]) to find two mutually exclusive lists of images, one with toilets and boats, and the other with umbrellas and benches in the same image. """ if not annotation_coco: raise ValueError image_sets = [set(annotation_coco.getImgIds(catIds=annotation_coco.getCatIds(catNms=x))) for x in queries] if len(queries) > 1: if exclusive: common = set.intersection(*image_sets) return [[x for x in y if x not in common] for y in image_sets] else: return [list(y) for y in image_sets] else: return [list(image_sets[0])] def get_captions_for_ids(id_list): annotation_ids = caption_coco.getAnnIds(imgIds=id_list) annotations = caption_coco.loadAnns(annotation_ids) return [ann['caption'] for ann in annotations] def get_cats_for_img(image_id): """ Takes an image id and gets a category list for it. """ if not annotation_coco: raise ValueError image_nn_ids = annotation_coco.getAnnIds(imgIds=image_id) image_nns = annotation_coco.loadAnns(image_nn_ids) return list(set([category_dictionary[x['category_id']] for x in image_nns])) def iter_captions(id_lists, categories, batch=1): """ Obtains the corresponding captions from multiple COCO id lists. Randomizes the order. 
Returns an infinite iterator (do not convert to list!) that returns tuples (captions, categories) as parallel lists at size of batch. """ if not caption_coco: raise ValueError if batch < 1: raise ValueError full = [] for z in zip(id_lists, categories): for x in z[0]: full.append((x, z[1])) while True: random_list = random.sample(full, k=len(full)) captions = [] labels = [] for p in random_list: annotation_ids = caption_coco.getAnnIds(imgIds=[p[0]]) annotations = caption_coco.loadAnns(annotation_ids) for ann in annotations: captions.append(ann['caption']) # For LSTM you may want to do more with the captions # or otherwise distribute the data. labels.append(p[1]) if len(captions) % batch == 0: yield (captions, labels) captions = [] labels = [] def iter_captions_cats(id_lists, cats, batch=1): """ Obtains the corresponding captions from multiple COCO id lists alongside all associated image captions per image. Randomizes the order. Returns an infinite iterator (do not convert to list!) that returns tuples (captions, categories) as parallel lists at size of batch. """ if not caption_coco: raise ValueError if batch < 1: raise ValueError full = [] for z in zip(id_lists, cats): for x in z[0]: full.append((x, z[1])) while True: random_list = random.sample(full, k=len(full)) captions = [] labels = [] for p in random_list: annotation_ids = caption_coco.getAnnIds(imgIds=[p[0]]) annotations = caption_coco.loadAnns(annotation_ids) for ann in annotations: image_id = ann['image_id'] cats = get_cats_for_img(image_id) captions.append((ann['caption'], cats)) # For LSTM you may want to do more with the captions # or otherwise distribute the data. labels.append(p[1]) if len(captions) % batch == 0: yield (captions, labels) captions = [] labels = [] def iter_all_images(): return iter_images([annotation_coco.getImgIds()], ['any']) def iter_images(id_lists, cats, size=(200, 200), batch=1): """ Obtains the corresponding image data as numpy array from multiple COCO id lists. Returns an infinite iterator (do not convert to list!) that returns tuples (imagess, categories) as parallel lists at size of batch. By default, randomizes the order and resizes the image. """ if not annotation_coco: raise ValueError if batch < 1: raise ValueError if not size: raise ValueError # size is mandatory full = [] for z in zip(id_lists, cats): for x in z[0]: full.append((x, z[1])) while True: random_list = random.sample(full, k=len(full)) images = [] labels = [] for r in random_list: image_file = annotation_coco.loadImgs([r[0]])[0]['file_name'] image = io.imread(image_directory + image_file) image_scaled = tform.resize(image, size) # Colour images only. if image_scaled.shape == (size[0], size[1], 3): images.append(image_scaled) labels.append(r[1]) if len(images) % batch == 0: yield (np.array(images), np.array(labels)) images = [] labels = []
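# A minimal, hypothetical usage sketch of the COCO helper module above: pick two mutually
# exclusive image sets and pull one batch of resized images with their labels (the label
# strings are made up for illustration).
setmode("train")
id_lists = query([['toilet', 'boat'], ['umbrella', 'bench']])
image_iter = iter_images(id_lists, ['toilet_boat', 'umbrella_bench'], size=(200, 200), batch=4)
images, labels = next(image_iter)
print(images.shape)  # typically (4, 200, 200, 3): colour images resized to 200x200
print(labels)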
# Enter your code here. Read input from STDIN. Print output to STDOUT from cmath import phase z=complex(input()) print(abs(z)) print(phase(z))
# Authentication with the old founder dating backend, to be used for transitioning. # Validate the password with the old PHP method. If it passes, convert the user to # a "new" account by changing the password to the django method. # # This always returns None, so the django method will be called next. from django.contrib.auth.models import User import subprocess import settings class LegacyBackend: supports_object_permissions = False supports_anonymous_user = False def authenticate(self, username=None, password=None): users = User.objects.filter(username=username) if len(users) < 1: # No matching user return None u = users[0] if '$' in u.password: # It's already a django style password, not the old fd style. return None # Exec out to the php auth to validate old_valid = subprocess.call(['php', settings.ROOT_PATH + '/oldfd/Auth.php', password, u.password]) if old_valid == 0: # if found, reset to django style password u.set_password(password) u.save() return None
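# A hypothetical settings sketch showing how a transitional backend like this is usually
# ordered: the legacy backend runs first (it only converts old-style passwords and always
# returns None), then Django's standard ModelBackend authenticates with the updated hash.
# The dotted path to LegacyBackend is an assumption for illustration.
AUTHENTICATION_BACKENDS = [
    'accounts.backends.LegacyBackend',
    'django.contrib.auth.backends.ModelBackend',
]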
#!/usr/bin/env python # coding: utf-8 # In[ ]: import pandas as pd import numpy as np import matplotlib.pyplot as plt get_ipython().run_line_magic('matplotlib', 'inline') df = pd.DataFrame({ 'x': [1, 5, 7, 6.5, 9.5, 13.75, 17.15, 14, 12, 16], 'y':[3, 2, 4, 1.5, 6.25, 8.5, 11.25, 10.6, 8, 19.5]}) np.random.seed(200) k = 3 # pusat[i] = [x, y] pusat = {i+1: [np.random.randint(0, 25), np.random.randint(0, 25)]for i in range(k)} fig = plt.figure plt.scatter(df['x'], df['y'], color='k') colmap = {1: 'r', 2: 'g', 3: 'b'} for i in pusat.keys(): plt.scatter(*pusat[i], color=colmap[i]) plt.xlim(0, 20) plt.ylim(0, 20) plt.show() # In[ ]: # In[10]: # Inisialisasi import pandas as pd import numpy as np import matplotlib.pyplot as plt get_ipython().run_line_magic('matplotlib', 'inline') df = pd.DataFrame({ 'x': [1, 5, 7, 6.5, 9.5, 13.75, 17.15, 14, 12, 16], 'y':[3, 2, 4, 1.5, 6.25, 8.5, 11.25, 10.6, 8, 19.5]}) np.random.seed(200) k = 3 # pusat[i] = [x, y] pusat = {i+1: [np.random.randint(0, 25), np.random.randint(0, 25)]for i in range(k)} fig = plt.figure plt.scatter(df['x'], df['y'], color='k',) colmap = {1: 'r', 2: 'r', 3: 'r'} for i in pusat.keys(): plt.scatter(*pusat[i], color=colmap[i], marker='*',s=50) plt.xlim(0, 20) plt.ylim(0, 20) plt.show() # In[12]: ## Penugasan def penugasan(df, pusat): for i in pusat.keys(): # sqrt((x1 - x2)^2 - (y1 - y2)^2) df['jarakdari_{}'.format(i)] = ( np.sqrt((df['x'] - pusat[i][0]) ** 2+ (df['y'] - pusat[i][1]) ** 2)) pusat_jarak_cols = ['jarakdari_{}'.format(i) for i in pusat.keys()] df['terdekat'] = df.loc[:, pusat_jarak_cols].idxmin(axis=1) df['terdekat'] = df['terdekat'].map(lambda x: int(x.lstrip('jarakdari_'))) df['warna'] = df['terdekat'].map(lambda x: colmap[x]) return df df = penugasan(df, pusat) print(df.head()) fig = plt.figure plt.scatter(df['x'], df['y'], color=df['warna'], alpha=0.5, edgecolor='k') for i in pusat.keys(): plt.scatter(*pusat[i], color=colmap[i],marker='*') plt.xlim(0, 20) plt.ylim(0, 20) plt.show() # In[15]: import copy pusat_lama = copy.deepcopy(pusat) def update(k): for i in pusat.keys(): pusat[i][0] = np.mean(df[df['terdekat'] == i]['x']) pusat[i][1] = np.mean(df[df['terdekat'] == i]['y']) return k pusat = update(pusat) fig = plt.figure ax = plt.axes() plt.scatter(df['x'], df['y'], color=df['warna'], alpha=0.5, edgecolor='k') for i in pusat.keys(): plt.scatter(*pusat[i], color=colmap[i],marker='*',s=50) plt.xlim(0, 20) plt.ylim(0, 20) for i in pusat_lama.keys(): x_lama = pusat_lama[i][0] y_lama = pusat_lama[i][1] dx = (pusat[i][0] - pusat_lama[i][0]) * 0.75 dy = (pusat[i][1] - pusat_lama[i][1]) * 0.75 plt.show() # In[17]: ## Pengulangan penugasan df = penugasan(df, pusat) # Plot results fig = plt.figure plt.scatter(df['x'], df['y'], color=df['warna'], alpha=0.5, edgecolor='k') for i in pusat.keys(): plt.scatter(*pusat[i], color=colmap[i],marker='*',s=50) plt.xlim(0, 20) plt.ylim(0, 20) plt.show() # In[18]: # Dilanjutkan hingga pusat tidak berubah while True: pusat_terdekat = df['terdekat'].copy(deep=True) pusat = update(pusat) df = penugasan(df, pusat) if pusat_terdekat.equals(df['terdekat']): break fig = plt.figure plt.scatter(df['x'], df['y'], color=df['warna'], alpha=0.5, edgecolor='k') for i in pusat.keys(): plt.scatter(*pusat[i], color=colmap[i],marker='*',s=50) plt.xlim(0, 20) plt.ylim(0, 20) plt.show() # In[ ]:
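# For comparison, a hypothetical sketch of the same clustering done with scikit-learn's KMeans
# instead of the manual assignment/update loop above (k=3 and the seed mirror the code above):
from sklearn.cluster import KMeans
import numpy as np

X = np.column_stack((df['x'], df['y']))
km = KMeans(n_clusters=3, random_state=200, n_init=10).fit(X)
print(km.cluster_centers_)  # final centroids
print(km.labels_)           # cluster index per point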
def bmi(weight_lb=180, height_in=71):
    # What is your height and weight?
    # My height is 5 feet 11 inches (71 inches)
    # I weigh about 180 pounds.
    # BMI = weight in lbs / height in inches squared * 703
    return 703 * weight_lb / height_in ** 2
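# A minimal usage sketch for the bmi() helper above (added for illustration, not part of
# the original snippet); the default weight/height come from the comments in that file.
if __name__ == "__main__":
    print("BMI: {:.1f}".format(bmi()))          # ~25.1 for 180 lb at 71 in
    print("BMI: {:.1f}".format(bmi(150, 65)))   # hypothetical second example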
import tensorflow as tf import numpy as np import matplotlib matplotlib.use('Agg') from multiprocessing import Pool from queue import Queue from sklearn.model_selection import ParameterGrid from sklearn import datasets from sklearn.model_selection import train_test_split from pandas import read_csv from sklearn.preprocessing import MinMaxScaler import matplotlib.pyplot as plt import pandas as pd from tensorflow.contrib import rnn # from model.utils.preprocessing_data import Timeseries # preprocessing_data_forBNN from model.encoder_decoder import Model as encoder_decoder from model.BNN import Model as BNN import traceback queue = Queue() def train_model(item): # try: sliding_encoder = item["sliding_encoder"] sliding_decoder = item["sliding_decoder"] sliding_inference = item["sliding_inference"] batch_size = item["batch_size"] num_units_LSTM = item["num_unit_LSTM"] num_layer = item["num_layer"] activation_decoder = item["activation_decoder"] activation_inference = item["activation_inference"] input_dim = item["input_dim"] num_units_inference = item["num_units_inference"] model = BNN(dataset_original, train_size, valid_size, sliding_encoder = sliding_encoder, sliding_decoder = sliding_decoder, sliding_inference = sliding_inference, batch_size = batch_size, num_units_LSTM = num_units_LSTM, num_layers = num_layer, activation_decoder = activation_decoder, activation_inference = activation_inference, learning_rate = learning_rate, epochs_encoder_decoder = epochs_encoder_decoder, epochs_inference = epochs_inference, input_dim = input_dim, num_units_inference = num_units_inference, patience = patience ) model.fit() # except: # traceback.print_stack() # producer # define constant link = './data/google_trace_timeseries/data_resource_usage_5Minutes_6176858948.csv' colnames = ['cpu_rate','mem_usage','disk_io_time','disk_space'] df = read_csv(link, header=None, index_col=False, names=colnames, usecols=[3,4,9,10], engine='python') scaler = MinMaxScaler(feature_range=(0, 1)) cpu = df['cpu_rate'].values.reshape(-1,1) mem = df['mem_usage'].values.reshape(-1,1) disk_io_time = df['disk_io_time'].values disk_space = df['disk_space'].values dataset_original = cpu train_size = int(0.6 * len(cpu)) # print (train_size) valid_size = int(0.2 * len(cpu)) sliding_encoders = [4] sliding_decoders = [2] sliding_inferences = [10] batch_size_arr = [16] num_units_LSTM_arr = [8] num_layers = [1] # activation for inference and decoder layer : - 1 is sigmoid # - 2 is relu # - 3 is tanh # - 4 is elu activation_decoder = [1] activation_inferences = [1] learning_rate = 0.01 epochs_encoder_decoder = 20 epochs_inference = 20 patience = 40 #number of epoch checking for early stopping # num_units_LSTM_arr - array number units lstm for encoder and decoder input_dim = [1] num_units_inference_arr = [16] # n_output_encoder_decoder = 1 param_grid = { 'sliding_encoder': sliding_encoders, 'sliding_decoder': sliding_decoders, 'sliding_inference': sliding_inferences, 'batch_size': batch_size_arr, 'num_unit_LSTM': num_units_LSTM_arr, 'num_layer': num_layers, 'activation_decoder': activation_decoder, 'activation_inference': activation_inferences, 'input_dim': input_dim, 'num_units_inference': num_units_inference_arr } # Create combination of params. 
print ("check") a = ParameterGrid(param_grid) print(type(a)) # print (a.__len__()) for item in list(ParameterGrid(param_grid)) : queue.put_nowait(item) # Consumer if __name__ == '__main__': pool = Pool(16) pool.map(train_model, list(queue.queue)) pool.close() pool.join() pool.terminate() # for metric in metrics: # for sliding_encoder in sliding_encoders: # for sliding_decoder in slding_decoders: # for sliding_inference in sliding_inferences: # for activation_inference in activation_inferences: # for num_units_LSTM in num_units_LSTM_arr: # for batch_size in batch_size_arr: # for num_units_inference in num_units_inference_arr: # if(metric==1): # print('mem1') # model = BNN(mem, train_size, valid_size, # sliding_encoder, sliding_decoder, sliding_inference, batch_size, # num_units_LSTM, num_layers, activation_decoder, activation_inference, # # n_input = None, n_output = None, # learning_rate, epochs_encoder_decoder,epochs_inference, input_dim, num_units_inference ) # model.fit() # else: # model = BNN(cpu, train_size, valid_size, # sliding_encoder, sliding_decoder, sliding_inference, batch_size, # num_units, num_layers, activation_decoder, activation_inference, # # n_input = None, n_output = None, # learning_rate, epochs_encoder_decoder,epochs_inference, input_dim, num_units_inference ) # model.fit() # sliding_encoder = 12 # sliding_decoder = 4 # sliding_inference = 6 # activation_decoder = 1 # activation_inference = 1 # num_units = 2 # num_layers = 1 # learning_rate = 0.01 # epochs_encoder_decoder = 200 # epochs_inference = 200 # input_dim = 1 # n_output_encoder_decoder = 1 # batch_size = 4 # num_units_inference = 20 # patience = 20 # model = BNN(cpu, train_size, valid_size, # sliding_encoder, sliding_decoder, sliding_inference, batch_size, # num_units, num_layers, activation_decoder, activation_inference, # # n_input = None, n_output = None, # learning_rate, epochs_encoder_decoder,epochs_inference, input_dim, num_units_inference, patience) # model.fit()
from flask import Flask, request, make_response, render_template from flask_restful import Resource, Api import scraper app = Flask(__name__) api = Api(app) class Home(Resource): def get(self): headers = {'Content-Type': 'text/html'} return make_response(render_template('index.html', test = "TEST"),200, headers) class tours(Resource): def get(self): start = request.args.get('start', type = str, default = None) end = request.args.get('end', type = str, default = None) tour = scraper.get_tours(start, end) return tour class guides(Resource): def get(self): guide = scraper.get_guide_info() return guide api.add_resource(Home, '/') api.add_resource(tours, '/tours') api.add_resource(guides, '/guides') if __name__ == '__main__': app.run()
import re
import urllib.request

# Scrape publisher names from Douban and save them to a text file
# (the output file name 出版社信息.txt means "publisher info").
pattern = 'title="(.+?)"'
data = urllib.request.urlopen('https://book.douban.com/publishers/').read().decode('utf-8')
res = re.compile(pattern).findall(str(data))
f = open('出版社信息.txt', 'w', encoding='utf-8')
for i in res:
    try:
        f.write(i + '\n')
        print('%s written successfully' % str(i))
    except UnicodeEncodeError as ue:
        print(ue)
f.close()
from safedelete.managers import SafeDeleteManager class UnemploymentManager(SafeDeleteManager): pass
##encoding=utf-8 """ Import Command -------------- from archives.urlencoder import urlencoder """ import random class UrlEncoder(): base_url = "http://www.archives.com/member/" available_activity_id = [ "32d47e7f-1b40-44af-b6a1-93501b7c2a59", ] def __init__(self): self.birth_record_query_url_template = ( "http://www.archives.com/member/Default.aspx?_act=VitalSearchResult" "&LastName=%s" "&BirthYear=%s" "&Country=US&State=&Location=US&ShowSummaryLink=1&RecordType=1" "&activityID=%s" "&pagesize=%s" "&pageNumber=%s" "&pagesizeAP=%s" "&pageNumberAP=%s" ) self.death_record_query_url_template = ( "http://www.archives.com/member/Default.aspx?_act=VitalSearchResult" "&LastName=%s" "&DeathYear=%s" "&Country=US&State=&Location=US&ShowSummaryLink=1&RecordType=2" "&activityID=%s" "&pagesize=%s" "&pageNumber=%s" "&pagesizeAP=%s" "&pageNumberAP=%s" ) self.marriage_record_query_url_template = ( "http://www.archives.com/member/Default.aspx?_act=VitalSearchResult" "&LastName=%s" "&MarriageYear=%s" "&Country=US&State=&Location=US&ShowSummaryLink=1&RecordType=3" "&activityID=%s" "&pagesize=%s" "&pageNumber=%s" "&pagesizeAP=%s" "&pageNumberAP=%s" ) self.divorce_record_query_url_template = ( "http://www.archives.com/member/Default.aspx?_act=VitalSearchResult" "&LastName=%s" "&DivorceYear=%s" "&Country=US&State=&Location=US&ShowSummaryLink=1&RecordType=4" "&activityID=%s" "&pagesize=%s" "&pageNumber=%s" "&pagesizeAP=%s" "&pageNumberAP=%s" ) def get_random_activity_id(self): return random.choice(self.available_activity_id) def url_birth_record(self, lastname, birthyear, pagesize, pagenumber): return self.birth_record_query_url_template % ( lastname, birthyear, self.get_random_activity_id(), pagesize, pagenumber, pagesize, pagenumber, ) def url_death_record(self, lastname, deathyear, pagesize, pagenumber): return self.death_record_query_url_template % ( lastname, deathyear, self.get_random_activity_id(), pagesize, pagenumber, pagesize, pagenumber, ) def url_marriage_record(self, lastname, marriageyear, pagesize, pagenumber): return self.marriage_record_query_url_template % ( lastname, marriageyear, self.get_random_activity_id(), pagesize, pagenumber, pagesize, pagenumber, ) def url_divorce_record(self, lastname, divorceyear, pagesize, pagenumber): return self.divorce_record_query_url_template % ( lastname, divorceyear, self.get_random_activity_id(), pagesize, pagenumber, pagesize, pagenumber, ) urlencoder = UrlEncoder() if __name__ == "__main__": import unittest print(urlencoder.url_birth_record("smith", 2000, 1000, 1)) print(urlencoder.url_death_record("smith", 2000, 1000, 1)) print(urlencoder.url_marriage_record("smith", 2000, 1000, 1)) print(urlencoder.url_divorce_record("smith", 2000, 1000, 1))
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class MovieTimeItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    # cinema name
    cinema_name = scrapy.Field()
    # movie name
    movie_name = scrapy.Field()
    # screening time list
    # begin time
    begin_time = scrapy.Field()
    # end time
    end_time = scrapy.Field()
    # language
    lang = scrapy.Field()
    # hall
    hall = scrapy.Field()
    # sell price
    sellprice = scrapy.Field()
import math import random from game import Game # Check how much of each win condition # I occupy, and pick the one # that looks the best class OccupyBot: def getMove(self, board, whichPlayerAmI): winBoard = self.createWinBoard(board, whichPlayerAmI) enemiesWinBoard = self.createWinBoard(board, 1 if whichPlayerAmI == 0 else 0) compositeWinBoard = [False if x is False else round(x + 0.5 * y,1) for x, y in zip(winBoard, enemiesWinBoard)] maxScore = max(compositeWinBoard) if maxScore > 0 : bestMoveNumerals = [i for i, j in enumerate(compositeWinBoard) if j == maxScore] else: #nothing but stalemates left. Python's max() returns False for [False, 0.0] so pick non-false/valid moves bestMoveNumerals = [i for i, j in enumerate(compositeWinBoard) if j is not False] # print('win') # Game.printBoard(winBoard) # print('loss') # Game.printBoard(enemiesWinBoard) # print('composite') # Game.printBoard(compositeWinBoard) # print(' Score: ', maxScore) # print(' Moves: ', bestMoveNumerals) return random.choice(bestMoveNumerals) def createWinBoard(self, board, whichPlayer): winBoard = [0] * 9 for winCondition in Game.WIN_CONDITIONS: testResult = self.testWinCondition(winCondition, board, whichPlayer) for boardIndex in winCondition: if Game.isMoveValid(board, boardIndex): if testResult is not False: winBoard[boardIndex] += testResult else: winBoard[boardIndex] = False return winBoard #returns an integer with how many moves already in place or false if not available def testWinCondition(self, winCondition, board, whichPlayerAmI): points = 0.0 for boardIndex in winCondition: occupant = board[boardIndex] if occupant is not None and occupant != whichPlayerAmI: return False # player occupies this spot. Can't win elif occupant == whichPlayerAmI: if points >= 1: # I already have one of these spots, this makes 2. I CAN WIN! return 100 points += 1 # I have this spot. Solution looking good else: points += 0.1 # space uncocupied, but i can still win in this solution return points
from .modelfactory import * from .optim import * from .trainer import * from .evaluator import *
from django.contrib.auth.hashers import make_password from django.contrib.auth.password_validation import validate_password from django.contrib.auth.models import User from rest_framework import serializers class UserSerializer(serializers.ModelSerializer): """Serializes a user profile object""" def create(self, validated_data): """Create and return a new user""" user = User.objects.create_user( username=validated_data['username'], email=validated_data['email'], password=validated_data['password'] ) return user # def update(self, instance, validated_data): # # if 'username' in validated_data: # instance.username = validated_data['username'] # if 'email' in validated_data: # instance.email = validated_data['email'] # if 'password' in validated_data: # instance.password = make_password(validated_data['password']) # instance.save() # return instance class Meta: model = User fields = ['id', 'username', 'email', 'password'] # fields = '__all__' extra_kwargs = { 'password': { 'write_only': True, 'style': {'input_type': 'password'} } } class ChangePasswordSerializer(serializers.Serializer): """ Serializer for password change endpoint. """ old_password = serializers.CharField(required=True) new_password = serializers.CharField(required=True)
def mean(L): S = 0 for x in L: S += x return S / len(L)
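# A short usage sketch for the mean() helper above (added for illustration, not part of
# the original snippet). It assumes a non-empty list, since len(L) == 0 would divide by zero.
print(mean([1, 2, 3, 4]))   # 2.5
print(mean([10]))           # 10.0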
def myfunc(*args): print(sum(args)*0.05) myfunc(50,50)
from __future__ import annotations import sys import click from ai.backend.client.session import Session from ai.backend.client.output.fields import agent_fields from ..types import CLIContext from . import admin @admin.group() def agent(): """ Agent administration commands. """ @agent.command() @click.pass_obj @click.argument('agent_id') def info(ctx: CLIContext, agent_id: str) -> None: """ Show the information about the given agent. """ fields = [ agent_fields['id'], agent_fields['status'], agent_fields['region'], agent_fields['architecture'], agent_fields['first_contact'], agent_fields['cpu_cur_pct'], agent_fields['available_slots'], agent_fields['occupied_slots'], agent_fields['hardware_metadata'], agent_fields['live_stat'], ] with Session() as session: try: item = session.Agent.detail(agent_id=agent_id, fields=fields) ctx.output.print_item(item, fields) except Exception as e: ctx.output.print_error(e) sys.exit(1) @agent.command() @click.pass_obj @click.option('-s', '--status', type=str, default='ALIVE', help='Filter agents by the given status.') @click.option('--scaling-group', '--sgroup', type=str, default=None, help='Filter agents by the scaling group.') @click.option('--filter', 'filter_', default=None, help='Set the query filter expression.') @click.option('--order', default=None, help='Set the query ordering expression.') @click.option('--offset', default=0, help='The index of the current page start for pagination.') @click.option('--limit', default=None, type=int, help='The page size for pagination.') def list( ctx: CLIContext, status: str, scaling_group: str | None, filter_: str | None, order: str | None, offset: int, limit: int | None, ) -> None: """ List agents. (super-admin privilege required) """ fields = [ agent_fields['id'], agent_fields['status'], agent_fields['architecture'], agent_fields['scaling_group'], agent_fields['region'], agent_fields['first_contact'], agent_fields['cpu_cur_pct'], agent_fields['mem_cur_bytes'], agent_fields['available_slots'], agent_fields['occupied_slots'], ] try: with Session() as session: fetch_func = lambda pg_offset, pg_size: session.Agent.paginated_list( status, scaling_group, fields=fields, page_offset=pg_offset, page_size=pg_size, filter=filter_, order=order, ) ctx.output.print_paginated_list( fetch_func, initial_page_offset=offset, page_size=limit, ) except Exception as e: ctx.output.print_error(e) sys.exit(1) @admin.group() def watcher(): """ Agent watcher commands. Available only for Linux-based agents. """ @watcher.command() @click.pass_obj @click.argument('agent', type=str) def status(ctx: CLIContext, agent: str) -> None: """ Get agent and watcher status. (superadmin privilege required) \b AGENT: Agent id. """ with Session() as session: try: status = session.AgentWatcher.get_status(agent) print(status) except Exception as e: ctx.output.print_error(e) sys.exit(1) @watcher.command() @click.pass_obj @click.argument('agent', type=str) def agent_start(ctx: CLIContext, agent: str) -> None: """ Start agent service. (superadmin privilege required) \b AGENT: Agent id. """ with Session() as session: try: status = session.AgentWatcher.agent_start(agent) print(status) except Exception as e: ctx.output.print_error(e) sys.exit(1) @watcher.command() @click.pass_obj @click.argument('agent', type=str) def agent_stop(ctx: CLIContext, agent: str) -> None: """ Stop agent service. (superadmin privilege required) \b AGENT: Agent id. 
""" with Session() as session: try: status = session.AgentWatcher.agent_stop(agent) print(status) except Exception as e: ctx.output.print_error(e) sys.exit(1) @watcher.command() @click.pass_obj @click.argument('agent', type=str) def agent_restart(ctx: CLIContext, agent: str) -> None: """ Restart agent service. (superadmin privilege required) \b AGENT: Agent id. """ with Session() as session: try: status = session.AgentWatcher.agent_restart(agent) print(status) except Exception as e: ctx.output.print_error(e) sys.exit(1)
from tkinter import * from tkinter.filedialog import askopenfilename # from tkinter import ttk from PIL import Image, ImageTk#import Image, ImageTk calibUnitChoices = { 'um': 1e6, 'mm': 1e3, 'cm': 1e2, 'm': 1, 'km': 1e-3, 'in': 39.3701, 'ft': 3.28084, 'mi': 0.000621371, } def calibrateBtn(): print('Button Pressed') calibBtn(gui, text='Calibrate', state=DISABLED) if __name__ == "__main__": gui = Tk() gui.title('Measure Distance from Image - by Anatomy3D') # Create interface items calibVariable = StringVar(gui) calibVariable.set(calibUnitChoices['m']) # Pack Style Interface calibMenu = OptionMenu(gui, calibVariable, *calibUnitChoices.keys()).pack(side='top', fill='y') calibLbl = Label(gui, text='TEST').pack(side='top', fill='x') calibBtn = Button(gui, text='Calibrate', command=calibrateBtn).pack(side='top', fill='y') #setting up a tkinter canvas with scrollbars frame = Frame(gui, bd=2, relief=SUNKEN) frame.grid_rowconfigure(0, weight=1) frame.grid_columnconfigure(0, weight=1) xscroll = Scrollbar(frame, orient=HORIZONTAL) xscroll.grid(row=1, column=0, sticky=E+W) yscroll = Scrollbar(frame) yscroll.grid(row=0, column=1, sticky=N+S) canvas = Canvas(frame, bd=0, xscrollcommand=xscroll.set, yscrollcommand=yscroll.set) canvas.grid(row=0, column=0, sticky=N+S+E+W) xscroll.config(command=canvas.xview) yscroll.config(command=canvas.yview) frame.pack(fill=BOTH,expand=1) #adding the image # File = askopenfilename(parent=gui, initialdir="~/Documents/git/measureFromImage/",title='Choose an image.') file = '/home/yuanchueh/Documents/git/measureFromImage/car.png' # # # Load File into Pillow for Picture Dimension Sizes # # img = Image.open(file) # # img = img.convert('RGBA') # # height = img.height # # width = img.width # # mode = img.mode # # print (width, height, mode) # # img.show() # # Load Image into TKinter Interface image = ImageTk.PhotoImage(Image.open(file)) height = image.height() width = image.width() # print(ht, wd) canvas.create_image(0,0,image=image,anchor="nw") canvas.config(scrollregion=canvas.bbox(ALL)) # # # # # # #function to be called when mouse is clicked # def printcoords(event): # #outputting x and y coords to console # print (event.x,event.y) # #mouseclick event # canvas.bind("<Button 1>",printcoords) # gui.minsize(width=1024,height=600); gui.mainloop()
from PIL import Image import sys print sys.argv def check(palette, copy): palette = sorted(Image.open(palette).convert('RGB').getdata()) copy = sorted(Image.open(copy).convert('RGB').getdata()) print 'Success' if copy == palette else 'Failed' check('Goth.png', 'test.png')
from fractions import Fraction def reduce(fraction): """ Shadows built-in name 'reduce' (forced by Codewars) """ f = Fraction(*fraction) return [f.numerator, f.denominator]
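# A brief usage sketch (not part of the original solution): Fraction normalizes the sign
# and divides out the greatest common divisor, which is exactly the reduction the kata asks for.
print(reduce([45, 120]))   # [3, 8]
print(reduce([-4, 8]))     # [-1, 2]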
# Copyright (C) 2020 THL A29 Limited, a Tencent company. # All rights reserved. # Licensed under the BSD 3-Clause License (the "License"); you may # not use this file except in compliance with the License. You may # obtain a copy of the License at # https://opensource.org/licenses/BSD-3-Clause # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" basis, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. # See the AUTHORS file for names of contributors. import unittest import sys import torch import turbo_transformers from transformers.modeling_albert import AlbertConfig, AlbertModel import numpy import os sys.path.append(os.path.dirname(__file__)) import test_helper def create_test(batch_size, seq_length): class TestAlbertModel(unittest.TestCase): def init_data(self, use_cuda: bool) -> None: self.test_device = torch.device('cuda:0') if use_cuda else \ torch.device('cpu:0') if not use_cuda: torch.set_num_threads(4) turbo_transformers.set_num_threads(4) torch.set_grad_enabled(False) self.cfg = AlbertConfig() self.torch_model = AlbertModel(self.cfg) if torch.cuda.is_available(): self.torch_model.to(self.test_device) self.torch_model.eval() self.hidden_size = self.cfg.hidden_size self.input_tensor = torch.randint(low=0, high=self.cfg.vocab_size - 1, size=(batch_size, seq_length), device=self.test_device) self.turbo_model = turbo_transformers.AlbertModel.from_torch( self.torch_model) def check_torch_and_turbo(self, use_cuda): self.init_data(use_cuda=use_cuda) device = "GPU" if use_cuda else "CPU" num_iter = 1 turbo_model = lambda: self.turbo_model( self.input_tensor, attention_mask=None, head_mask=None) turbo_result, turbo_qps, turbo_time = \ test_helper.run_model(turbo_model, use_cuda, num_iter) print( f"AlbertLayer \"({batch_size},{seq_length:03})\" ", f"{device} TurboTransform QPS, {turbo_qps}, time, {turbo_time}" ) torch_model = lambda: self.torch_model(input_ids=self.input_tensor, attention_mask=None, head_mask=None) with turbo_transformers.pref_guard("albert_perf") as perf: torch_result, torch_qps, torch_time = \ test_helper.run_model(torch_model, use_cuda, num_iter) print(f"AlbertModel \"({batch_size},{seq_length:03})\" ", f"{device} Torch QPS, {torch_qps}, time, {torch_time}") # print(turbo_result[-1]) # print(turbo_result, torch_result[0]) # TODO(jiaruifang) Error is too high. Does tensor core introduce more differences? tolerate_error = 1e-2 self.assertTrue( torch.max(torch.abs(torch_result[0] - turbo_result[0])) < tolerate_error) with open("albert_model_res.txt", "a") as fh: fh.write( f"\"({batch_size},{seq_length:03})\", {torch_qps}, {torch_qps}\n" ) def test_layer(self): self.check_torch_and_turbo(use_cuda=False) if torch.cuda.is_available() and \ turbo_transformers.config.is_compiled_with_cuda(): self.check_torch_and_turbo(use_cuda=True) globals()[f"TestAlbertModel{batch_size}_{seq_length:03}"] = \ TestAlbertModel with open("albert_model_res.txt", "w") as fh: fh.write(", torch, turbo_transformers\n") for batch_size in [1, 2]: for seq_length in [10]: create_test(batch_size, seq_length) if __name__ == '__main__': unittest.main()
from .base import BaseEventTestCase from graphql_relay import to_global_id from django.db import transaction from api.models import Interest class InterestTestCase(BaseEventTestCase): """ Test interest queries """ def test_user_can_join_and_unjoin_category(self): # Test for joining a category query = f''' mutation{{ joinCategory(input:{{ categories:"{[to_global_id("CategoryNode", self.category2.id)]}", }}){{ joinedCategoryList{{ id followerCategory{{ id name description }} }} }} }} ''' self.request.user = self.user2 result = self.client.execute(query, context_value=self.request) self.assertMatchSnapshot(result) # Tests unjoining a category query = f''' mutation{{ unjoinCategory(input:{{ categoryId:"{to_global_id("CategoryNode", self.category2.id)}", }}){{ unjoinedCategory{{ id followerCategory{{ id name description }} }} }} }} ''' self.request.user = self.user2 result = self.client.execute(query, context_value=self.request) self.assertMatchSnapshot(result) def test_user_cannot_join_same_category_twice(self): query = f''' mutation{{ joinCategory(input:{{ categories:"{[to_global_id("CategoryNode", self.category2.id)]}", }}){{ joinedCategoryList{{ id followerCategory{{ id name description }} }} }} }} ''' with transaction.atomic(): Interest.objects.create( follower=self.andela_user2, follower_category=self.category2 ) self.request.user = self.user2 result = self.client.execute(query, context_value=self.request) self.assertMatchSnapshot(result) def test_user_cannot_unjoin_category_they_do_not_belong_to(self): query = ''' mutation{ unjoinCategory(input:{ categoryId:"Q2F0ZWdvcnlOb2RlOjI=" }){ unjoinedCategory{ id followerCategory{ id name description } } } } ''' self.request.user = self.user2 result = self.client.execute(query, context_value=self.request) self.assertMatchSnapshot(result)
''' Date:201211 Functionally about collect, clean and wrangling methods data. ''' import pandas as pd #DATAFRAME 1 - 6 BEST MARATHONS MAJORS def checkingdata(): majors = pd.read_csv("/Users/ariadnapuigventos/Documents/CURSOS/BRIDGE/DS_Ejercicios_Python/BootCamp_TheBridge/Proyecto_Navidad_Ariadna/documentation/world_marathon_majors.csv", sep = ";") #It wants to check for the datatypes because there is a time column, that maybe It had to change it to int. table_check = majors.dtypes print(table_check) majors = pd.read_csv("/Users/ariadnapuigventos/Documents/CURSOS/BRIDGE/DS_Ejercicios_Python/BootCamp_TheBridge/Proyecto_Navidad_Ariadna/documentation/world_marathon_majors.csv", sep = ";") def topandtail(): #To display the top 10 in the best marathon majors in the over the world and the last 5 in the ranking. bestones = majors.head(10) print(bestones) print("......") lastones = majors.tail(5) print(lastones) def dimention(): #To knows how many columns and rows and if there are some duplicates rows. dimention_table = majors.shape print(dimention_table) mostrardeduplicados = majors[majors.duplicated()] print("number of duplicate rows: ", mostrardeduplicados) majorsOK = majors.drop_duplicates() print(majorsOK) print(majorsOK.shape) def repite_pais(): #To knows if there are some country who has won more than one time the same marathon or different. nacionalidad = majors.country.value_counts().head(15) print(nacionalidad) def repetidores(): #To knows if there are some winners who has won more than one time the same marathon or different. repetidores = majors.winner.value_counts().head(15) print(repetidores) def changetype(): #This function pretends to changes time column in major dataframe to timedelta to sum() in the future. time_column = pd.to_timedelta(majors["time"].str.strip()) print(time_column) in_seconds = time_column.astype('timedelta64[s]') print(in_seconds) #DATAFRAME 2 - ALTITUDE BY COUNTRIES df = pd.read_csv("/Users/ariadnapuigventos/Documents/CURSOS/BRIDGE/DS_Ejercicios_Python/BootCamp_TheBridge/Proyecto_Navidad_Ariadna/documentation/altitud_countries.csv", sep= ";") def droppingcolumns(): df = pd.read_csv("/Users/ariadnapuigventos/Documents/CURSOS/BRIDGE/DS_Ejercicios_Python/BootCamp_TheBridge/Proyecto_Navidad_Ariadna/documentation/altitud_countries.csv", sep= ";") altitude = df.drop(["country", "Unnamed: 4", "Unnamed: 5", "Unnamed: 6"], axis=1).rename(columns={"name": "country"}) print(altitude) # DATAFRAME TIME SPENT HOURS dict2 = {"Tasks": ["Searching datasets", "Path control", "Working in modules: functions, bucles...", "Wrangling Data: NaN, duplicates, outliers", "Visualization Data: graphics, boxplots", "Analyzing results", "Researching for value added"], "Hours": [32, 15, 72, 24, 192, 24, 12]} Tasks = pd.DataFrame(dict2)
import random import numpy as np import time, datetime from collections import deque import gym import pylab import sys import pickle import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' from tensorflow.python.framework import ops ops.reset_default_graph() import tensorflow as tf from typing import List env = gym.make('CartPole-v1') # get size of state and action from environment state_size = env.observation_space.shape[0] action_size = env.action_space.n game_name = sys.argv[0][:-3] model_path = "save_model/" + game_name graph_path = "save_graph/" + game_name # Make folder for save data if not os.path.exists(model_path): os.makedirs(model_path) if not os.path.exists(graph_path): os.makedirs(graph_path) # DQN Agent for the Cartpole # it uses Neural Network to approximate q function # and replay memory & target q network class DQN: """ Implementation of deep q learning algorithm """ def __init__(self, session: tf.Session, state_size: int, action_size: int, name: str="main") -> None: self.render = False # get size of state and action self.session = session self.progress = " " self.state_size = state_size self.action_size = action_size # train time define self.training_time = 5*60 # These are hyper parameters for the DQN self.learning_rate = 0.001 self.discount_factor = 0.99 self.epsilon_max = 1.0 # final value of epsilon self.epsilon_min = 0.0001 self.epsilon_decay = 0.0005 self.epsilon = self.epsilon_max self.step = 0 self.score = 0 self.episode = 0 self.hidden1, self.hidden2 = 64, 64 self.ep_trial_step = 500 # Parameter for Experience Replay self.size_replay_memory = 5000 self.batch_size = 64 # Experience Replay self.memory = deque(maxlen=self.size_replay_memory) # Parameter for Target Network self.target_update_cycle = 200 self.net_name = name self.model = self.build_model() # def build_model(self, H_SIZE_01 = 256, H_SIZE_02 = 256, H_SIZE_03 = 256, self.learning_rate=0.001) -> None: def build_model(self): with tf.variable_scope(self.net_name): self._X = tf.placeholder(dtype=tf.float32, shape= [None, self.state_size], name="input_X") self._Y = tf.placeholder(dtype=tf.float32, shape= [None, self.action_size], name="output_Y") net_0 = self._X net_1 = tf.layers.dense(net_0, self.hidden1, activation=tf.nn.relu) net_16 = tf.layers.dense(net_1, self.action_size) self._Qpred = net_16 self.Loss = tf.losses.mean_squared_error(self._Y, self._Qpred) optimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate) self._train = optimizer.minimize(self.Loss) def predict(self, state: np.ndarray) -> np.ndarray: x = np.reshape(state, [-1, self.state_size]) return self.session.run(self._Qpred, feed_dict={self._X: x}) def update(self, x_stack: np.ndarray, y_stack: np.ndarray) -> list: feed = { self._X: x_stack, self._Y: y_stack } return self.session.run([self.Loss, self._train], feed) # get action from model using epsilon-greedy policy def get_action(self, state): # choose an action_arr epsilon greedily action_arr = np.zeros(self.action_size) action = 0 if random.random() < self.epsilon: # print("----------Random action_arr----------") action = random.randrange(self.action_size) action_arr[action] = 1 else: # Predict the reward value based on the given state Q_value = self.predict(state) action = np.argmax(Q_value[0]) action_arr[action] = 1 return action_arr, action # save sample <s,a,r,s'> to the replay memory def append_sample(self,state,action,reward,next_state,done): #in every action put in the memory self.memory.append((state,action,reward,next_state,done)) while len(self.memory) > 
self.size_replay_memory: self.memory.popleft() def Copy_Weights(*, dest_scope_name: str, src_scope_name: str) -> List[tf.Operation]: # Copy variables src_scope to dest_scope op_holder = [] src_vars = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, scope=src_scope_name) dest_vars = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, scope=dest_scope_name) for src_var, dest_var in zip(src_vars, dest_vars): op_holder.append(dest_var.assign(src_var.value())) return op_holder def train_model(agent, target_agent): minibatch = random.sample(agent.memory, agent.batch_size) for state,action,reward,next_state, done in minibatch: q_update = reward if not done: #Obtain the Q' values by feeding the new state through our network q_update = (reward + agent.discount_factor*np.amax(target_agent.predict(next_state)[0])) q_values = agent.predict(state) q_values[0][action] = q_update agent.update(state,q_values) if agent.epsilon > agent.epsilon_min: agent.epsilon -= agent.epsilon_decay else : agent.epsilon = agent.epsilon_min def main(): # with tf.Session() as sess: sess = tf.Session() agent = DQN(sess, state_size, action_size, name="main") target_agent = DQN(sess, state_size, action_size, name="target") init = tf.global_variables_initializer() saver = tf.train.Saver() sess.run(init) avg_score = 0 episodes, scores = [], [] # start training # Step 3.2: run the game display_time = datetime.datetime.now() print("\n\n",game_name, "-game start at :",display_time,"\n") start_time = time.time() # Initialize target network. copy_ops = Copy_Weights(dest_scope_name="target", src_scope_name="main") sess.run(copy_ops) while time.time() - start_time < agent.training_time and avg_score < 490: state = env.reset() done = False agent.score = 0 ep_step = 0 state = np.reshape(state, [1, state_size]) while not done and ep_step < agent.ep_trial_step: if len(agent.memory) < agent.size_replay_memory: agent.progress = "Exploration" else: agent.progress = "Training" ep_step += 1 agent.step += 1 if agent.render: env.render() action_arr, action = agent.get_action(state) # run the selected action and observe next state and reward next_state, reward, done, _ = env.step(action) next_state = np.reshape(next_state, [1, state_size]) if done: reward = -100 # store the transition in memory agent.append_sample(state, action, reward, next_state, done) # update the old values state = next_state # only train if done observing if agent.progress == "Training": # Training! 
train_model(agent, target_agent) if done or ep_step % agent.target_update_cycle == 0: # return# copy q_net --> target_net sess.run(copy_ops) agent.score = ep_step if done or ep_step == agent.ep_trial_step: if agent.progress == "Training": agent.episode += 1 scores.append(agent.score) episodes.append(agent.episode) avg_score = np.mean(scores[-min(30, len(scores)):]) print('episode :{:>6,d}'.format(agent.episode),'/ ep step :{:>5,d}'.format(ep_step), \ '/ time step :{:>7,d}'.format(agent.step),'/ status :', agent.progress, \ '/ epsilon :{:>1.4f}'.format(agent.epsilon),'/ last 30 avg :{:> 4.1f}'.format(avg_score) ) break save_path = saver.save(sess, model_path + "/model.ckpt") print("\n Model saved in file: %s" % save_path) pylab.plot(episodes, scores, 'b') pylab.savefig("./save_graph/cartpole_Nature2015.png") e = int(time.time() - start_time) print(' Elasped time :{:02d}:{:02d}:{:02d}'.format(e // 3600, (e % 3600 // 60), e % 60)) # Replay the result episode = 0 scores = [] while episode < 20: state = env.reset() done = False ep_step = 0 state = np.reshape(state, [1, state_size]) while not done and ep_step < 500: env.render() ep_step += 1 q_value = agent.predict(state) action = np.argmax(q_value) next_state, reward, done, _ = env.step(action) state = next_state score = ep_step if done or ep_step == 500: episode += 1 scores.append(score) print("episode : {:>5d} / reward : {:>5d} / avg reward : {:>5.2f}".format(episode, score, np.mean(scores))) if __name__ == "__main__": main()
#!/usr/bin/env python import uproot fname = '/Users/ploskon/data/HFtree_trains/13-06-2019/488_20190613-0256/unmerged/child_1/0001/AnalysisResults.root' print('[i] reading from', fname) file = uproot.open(fname) print(file.keys) all_ttrees = dict(file.allitems(filterclass=lambda cls: issubclass(cls, uproot.tree.TTreeMethods))) print(all_ttrees) tracks = all_ttrees[b'PWGHF_TreeCreator/tree_Particle;1'] print('track keys:',tracks.keys()) tracks['run_number'].show() print('dumping some tracks') pds_trks = tracks.pandas.df() # entrystop=10) print(pds_trks) print('dumping some events') events = all_ttrees[b'PWGHF_TreeCreator/tree_event_char;1'] pds_evs = events.pandas.df() print('number of events', len(pds_evs)) print(pds_evs[0:5]) ntrk = 0 for i, e in pds_evs.head().iterrows(): _ntrk = int(e['n_tracks']) print(i, 'n_tracks =', _ntrk) _trks = tracks.pandas.df(entrystart=ntrk, entrystop=ntrk + _ntrk) for it, t in _trks.head().iterrows(): print(it, t) break ntrk += _ntrk for i, e in pds_evs.head().iterrows(): iev_id = int(e['ev_id']) print(pds_trks.loc[pds_trks['ev_id'] == iev_id])
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). import logging from dataclasses import dataclass from pants.backend.kotlin.lint.ktlint.skip_field import SkipKtlintField from pants.backend.kotlin.lint.ktlint.subsystem import KtlintSubsystem from pants.backend.kotlin.target_types import KotlinSourceField from pants.core.goals.fmt import FmtResult, FmtTargetsRequest from pants.core.goals.generate_lockfiles import GenerateToolLockfileSentinel from pants.core.util_rules.partitions import PartitionerType from pants.engine.internals.selectors import Get from pants.engine.process import ProcessResult from pants.engine.rules import collect_rules, rule from pants.engine.target import FieldSet, Target from pants.engine.unions import UnionRule from pants.jvm.jdk_rules import InternalJdk, JvmProcess from pants.jvm.resolve import jvm_tool from pants.jvm.resolve.coursier_fetch import ToolClasspath, ToolClasspathRequest from pants.jvm.resolve.jvm_tool import GenerateJvmLockfileFromTool, GenerateJvmToolLockfileSentinel from pants.util.logging import LogLevel from pants.util.strutil import pluralize logger = logging.getLogger(__name__) @dataclass(frozen=True) class KtlintFieldSet(FieldSet): required_fields = (KotlinSourceField,) source: KotlinSourceField @classmethod def opt_out(cls, tgt: Target) -> bool: return tgt.get(SkipKtlintField).value class KtlintRequest(FmtTargetsRequest): field_set_type = KtlintFieldSet tool_subsystem = KtlintSubsystem partitioner_type = PartitionerType.DEFAULT_SINGLE_PARTITION class KtlintToolLockfileSentinel(GenerateJvmToolLockfileSentinel): resolve_name = KtlintSubsystem.options_scope @rule(desc="Format with Ktlint", level=LogLevel.DEBUG) async def ktlint_fmt( request: KtlintRequest.Batch, tool: KtlintSubsystem, jdk: InternalJdk ) -> FmtResult: lockfile_request = await Get(GenerateJvmLockfileFromTool, KtlintToolLockfileSentinel()) tool_classpath = await Get(ToolClasspath, ToolClasspathRequest(lockfile=lockfile_request)) toolcp_relpath = "__toolcp" extra_immutable_input_digests = { toolcp_relpath: tool_classpath.digest, } args = [ "com.pinterest.ktlint.Main", "-F", *request.files, ] result = await Get( ProcessResult, JvmProcess( jdk=jdk, argv=args, classpath_entries=tool_classpath.classpath_entries(toolcp_relpath), input_digest=request.snapshot.digest, extra_jvm_options=tool.jvm_options, extra_immutable_input_digests=extra_immutable_input_digests, extra_nailgun_keys=extra_immutable_input_digests, output_files=request.files, description=f"Run Ktlint on {pluralize(len(request.files), 'file')}.", level=LogLevel.DEBUG, ), ) return await FmtResult.create(request, result, strip_chroot_path=True) @rule def generate_ktlint_lockfile_request( _: KtlintToolLockfileSentinel, tool: KtlintSubsystem ) -> GenerateJvmLockfileFromTool: return GenerateJvmLockfileFromTool.create(tool) def rules(): return [ *collect_rules(), *jvm_tool.rules(), *KtlintRequest.rules(), UnionRule(GenerateToolLockfileSentinel, KtlintToolLockfileSentinel), ]
# -*- coding: utf-8 -*- """ Created on Mon Sep 30 09:23:42 2013 @author: bejar """ import scipy.io import numpy as np from scipy import corrcoef from sklearn.cluster import spectral_clustering,affinity_propagation import matplotlib.pyplot as plt from pylab import * from sklearn.metrics import silhouette_score from sklearn.manifold import spectral_embedding from matplotlib.colors import ListedColormap from mpl_toolkits.mplot3d import Axes3D from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.cross_validation import cross_val_score import pylab as pl from scipy import corrcoef cpath='/home/bejar/MEG/Data/' cres='/home/bejar/Documentos/Investigacion/MEG/res/' #name='MMN-201205251030' name='sensorpos' mats=scipy.io.loadmat( cpath+name+'.mat') data= mats['chanpos'] l= mats['label'] llabels=[] mpos=None for i in range(l.shape[0]): cname=l[i][0][0] if cname[0]=='A': llabels.append(cname) if mpos==None: mpos=data[i] else: mpos=np.vstack((mpos,data[i])) X=mpos fig = plt.figure() ax = fig.gca(projection='3d') #ax=pl.subplot(1, 1, 1, projection='3d') pl.scatter(X[:,1],X[:,2],zs=X[:,0],s=25) for i,lab in list(enumerate(llabels)): print i, lab ax.text(X[i,1],X[i,2],X[i,0],lab,'x') pl.show()
# test adding comments to source import marshaltools # pick a test source name = "ZTF19aabfyxn" # load your program prog = marshaltools.ProgramList("AMPEL Test", load_sources=True, load_candidates=False) # try to post a message twice (should fail the second time) prog.comment(name, "AMPEL test comment: to be posted twice (but should appear only once)", comment_type='comment', duplicate_mode='no') prog.comment(name, "AMPEL test comment: to be posted twice (but should appear only once)", comment_type='comment', duplicate_mode='no') input("go to http://skipper.caltech.edu:8080/cgi-bin/growth/view_source.cgi?name=ZTF19aabfyxn and look at the comments") # now post a message and then delete ot prog.comment(name, "AMPEL test comment: to be deleted", comment_type='comment') input("go and refresh your browser: a new comment should be there") prog.delete_comment(name, comment_text="AMPEL test comment: to be deleted", comment_type='comment') input("now it should be gone") # now post another comment prog.comment(name, "AMPEL test comment: to be duplicated", comment_type='comment', duplicate_mode='add') prog.comment(name, "AMPEL test comment: to be duplicated", comment_type='comment', duplicate_mode='add') prog.comment(name, "AMPEL test comment: to be duplicated", comment_type='comment', duplicate_mode='add') input("now you should have 3 duplicated comments. press enter to delete them") prog.delete_comment(name, comment_text="AMPEL test comment: to be duplicated", comment_type='comment') input("no more duplicated comments") # now put another comment and edit it prog.comment(name, "AMPEL test comment: to be edited", comment_type='comment') input("go look for the comment to be edited") # find the id of this comment comments = prog.read_comments(name, comment_type='comment', comment_text="AMPEL test comment: to be edited") print (comments) c_id = comments[0]['id'] prog.comment(name, "AMPEL test comment: has now been edited", duplicate_mode='edit', comment_id=c_id) input("go look for the comment to be edited") # remove all the comments to this source #prog.delete_comment(name, comment_author='self', comment_type='comment') print ("all the comments from your user to this sources have been removed")
import turtle paper = turtle.Screen() leo = turtle.Turtle() paper.bgcolor("lightgreen") leo.shape("arrow") leo.color("pink") leo.pensize(3) def draw_star(n): """ Draw star :param n: length of side :return: """ for i in range (5): leo.right(144) leo.forward(n) def draw_5_stars (n): for i in range(5): draw_star(n) #leo.penup() leo.forward(350) leo.right(144) #leo.pendown() draw_5_stars (100) paper.exitonclick()
import pytest from enumerate_data import enumerate_names_countries expected_lines = ['1. Julian Australia', '2. Bob Spain', '3. PyBites Global', '4. Dante Argentina', '5. Martin USA', '6. Rodolfo Mexico'] @pytest.mark.parametrize("line", expected_lines) def test_enumerate_names_countries(capfd, line): enumerate_names_countries() output = capfd.readouterr()[0] assert line in output, f'{line} not in output'
#User function Template for python3

# Function to check if string
# starts and ends with 'gfg' (case-insensitive)
def gfg(a):
    b = a.lower()
    # after lower(), a single lowercase check covers both 'gfg' and 'GFG'
    if b.startswith('gfg') and b.endswith('gfg'):
        print("Yes")
    else:
        print("No")
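# A couple of example calls for gfg() above (added for illustration; the original template
# only defines the function).
gfg("GfgIsBestGfg")    # Yes
gfg("gfgmeansgeeks")   # No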
import os from win32com.client import Dispatch def conectar_com(): CATIA = Dispatch('CATIA.Application') CATIA.Visible = True return CATIA def crear_documento(nombre,objeto_com): parte = objeto_com.Documents.Add('Part') product1 = parte.GetItem("Part1") product1.PartNumber = nombre return parte def crear_circunferencia(coord_ax1,coord_ax2,radio,sketch): newcircle = sketch.Factory2D.CreateClosedCircle(coord_ax1, coord_ax2, radio) return newcircle def abrir_documento(ruta,objeto_cad): partDocument1 = objeto_cad.Documents.Open(ruta) return partDocument1 if __name__ == "__main__": objeto = conectar_com() parte = abrir_documento("C:\\Users\\sandu\\Desktop\\modulos_com_catia\\anillo.CATPart",objeto) #se navega a traves de la infraestructura del com para acceder a los atributos del modelo parte1 = parte.Part geometrias = parte1.GeometricElements figura = geometrias.Item("Circle.1") print(figura.Radius) figura2 = geometrias.Item("Circle.2") print(figura2.Radius) """ referencia_parte = crear_documento('anillo',objeto) Xcoord=100 Ycoord=100 Zcoord=100 NewPoint = objeto.ActiveDocument.Part.HybridShapeFactory.AddNewPointCoord(Xcoord, Ycoord, Zcoord) Mainbody = objeto.ActiveDocument.Part.MainBody Mainbody.InsertHybridShape(NewPoint) AxisXY = objeto.ActiveDocument.Part.OriginElements.PlaneXY Referenceplane = objeto.ActiveDocument.Part.CreateReferenceFromObject(AxisXY) Referencepoint = objeto.ActiveDocument.Part.CreateReferenceFromObject(NewPoint) NewPlane = objeto.ActiveDocument.Part.HybridShapeFactory.AddNewPlaneOffsetPt(Referenceplane, Referencepoint) Mainbody.InsertHybridShape(NewPlane) sketches1 = objeto.ActiveDocument.Part.Bodies.Item("PartBody").Sketches reference1 = referencia_parte.part.OriginElements.PlaneXY NewSketch = sketches1.Add(reference1) objeto.ActiveDocument.Part.InWorkObject = NewSketch NewSketch.OpenEdition() newcircle = crear_circunferencia(0, 0, 20.215757,NewSketch) newellipse = crear_circunferencia(0, 0, 10.069477,NewSketch) NewSketch.CloseEdition() #grosor del bloque LengthBlock=11 NewBlock = objeto.ActiveDocument.Part.ShapeFactory.AddNewPad (NewSketch, LengthBlock) objeto.ActiveDocument.Part.Update() product1 = referencia_parte.GetItem("Rotor_dinamico") """
# -*-coding=utf-8-*- # @Time : 2020/1/1 0:08 # @File : trend.py # 统计发帖趋势 import datetime import numpy as np import pymongo import pandas as pd from settings import send_aliyun,llogger from config import QQ_MAIL logger = llogger('log/trend_.log') db = pymongo.MongoClient('192.168.10.48',17001) doc= db['db_parker']['jsl'] total_list = [] date = datetime.datetime.now() + datetime.timedelta(days=-365) # 一年内的数据 for item in doc.find({'last_resp_date':{'$gt':date}},{'html':0,'resp':0,'content':0}): del item['_id'] total_list.append(item) df = pd.DataFrame(total_list) df['createTime']=pd.to_datetime(df['createTime']) df=df.set_index('createTime',drop=True) new_df = df.resample('W').count() show_data=new_df[['creator']].iloc[:-5:-1] # print(show_data) # 最大值与 max_index = new_df['creator'].idxmax().to_pydatetime().strftime('%Y-%m-%d') max_v=new_df['creator'].max() current = datetime.datetime.now().strftime('%Y-%m-%d') title=f'jsl一周发帖数量分析 {current}' percentage=np.round((show_data['creator'].values[:-1]-show_data['creator'].values[1:])/show_data['creator'].values[1:]*100,0) content = '| 日期 | 贴数 | 环比 |\n' # print(percentage) percentage=np.append(percentage,np.nan) start_index=0 for index,item in show_data.iterrows(): print(index,item['creator']) py_date = index.to_pydatetime().strftime('%Y-%m-%d') count=item['creator'] content+=f'| {py_date} | {count} | {percentage[start_index]}% |\n' start_index+=1 content+=f'最大值发生在 {max_index},贴数为 {max_v}\n' logger.info(title) logger.info(content) try: send_aliyun(title,content,TO_MAIL_=QQ_MAIL) except Exception as e: logger.error(e)
import pygame from pygame import * import sys import random import time # window setup win = pygame.Surface WIDTH = 700 HEIGHT = 700 gameDisplay = pygame.display.set_mode((WIDTH,HEIGHT)) # song setup pygame.mixer.pre_init(44100, -16, 2, 2048) pygame.init() pygame.mixer.init() pygame.mixer.music.load('song.mp3') mixer.music.set_volume(0.3) pygame.mixer.music.play(-1) # set variables WHITE = (255, 255, 255) BLACK = (0, 0, 0) RED = (255, 0, 0) GREEN = (0, 255, 0) BLUE = (66, 135, 245) class Player(pygame.sprite.Sprite): # player sprite def __init__(self): pygame.sprite.Sprite.__init__(self) self.image = win((40, 40)) self.image.fill(BLUE) self.rect = self.image.get_rect() self.rect.center = (WIDTH / 2, HEIGHT - 70) class Mob(pygame.sprite.Sprite): # enemy sprite def __init__(self): pygame.sprite.Sprite.__init__(self) self.image = pygame.Surface((30,40)) self.image.fill(RED) self.rect = self.image.get_rect() self.rect.x = random.randrange(WIDTH - self.rect.width) self.rect.y = random.randrange(-100, -40) self.speedy = random.randrange(1, 2) def update(self): self.rect.y += self.speedy if self.rect.top > HEIGHT + 10: self.rect.x = random.randrange(WIDTH - self.rect.width) self.rect.y = random.randrange(-100, -40) self.speedy = random.randrange(1, 2) def text_objects(text, font): textSurface = font.render(text, True, WHITE) return textSurface, textSurface.get_rect() def message_display(text):# allows you to show text to user using function largeText = pygame.font.Font('ROGFontsv1.6-Regular.ttf',115) TextSurf, TextRect = text_objects(text, largeText) TextRect.center = ((WIDTH/2),(HEIGHT/2)) gameDisplay.blit(TextSurf, TextRect) pygame.display.update() def crash(): # game over function running = False message_display('Game Over') # initialize pygame and create window pygame.init() screen = pygame.display.set_mode((WIDTH, HEIGHT)) pygame.display.set_caption("Blocky Dodge") clock = pygame.time.Clock() pygame.key.set_repeat(True) all_sprites = pygame.sprite.Group() mobs = pygame.sprite.Group() player = pygame.sprite.Group() player = Player() all_sprites.add(player) for i in range(5): m = Mob() all_sprites.add(m) mobs.add(m) # Game loop running = True while running: mixer.music.set_volume(0.3) # Process input for event in pygame.event.get(): # check for closing window if event.type == pygame.QUIT: pygame.quit() sys.exit() # sprite controls elif event.type == pygame.KEYDOWN: if event.key == pygame.K_UP or event.key == K_w: player.rect.centery -= 2 if event.key == pygame.K_SPACE: running = True elif event.key == pygame.K_DOWN or event.key == K_s: player.rect.centery += 2 elif event.key == pygame.K_LEFT or event.key == K_a: player.rect.left -= 2 elif event.key == pygame.K_RIGHT or event.key == K_d: if player.rect.x + player.image.get_width() < WIDTH: player.rect.right += 2 # Update all_sprites.update() # hitboxes hits = pygame.sprite.spritecollide(player, mobs, False) if hits: # game over sequence crash() mixer.music.set_volume(0.1) time.sleep(1) # Draw / render screen.fill((18,18,18)) all_sprites.draw(screen) # *after* drawing everything, flip the display pygame.display.flip()
from datetime import datetime, timedelta import json from celery.decorators import task from celery.utils.log import get_task_logger from .fitbit_push import call_push_api logger = get_task_logger(__name__) @task(name="fitbit.store_health_data") def store_fitbit_data(data): ''' Celery task to store fitbit health data into db ''' try: call_push_api(data) logger.info("Stored fitbit data successfully") except Exception as e: message = """ Storing fitbit data failed REQUEST DATA :{} ERROR MESSAGE:{}""" logger.error(message.format(data,str(e)), exc_info=True)
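# A minimal usage sketch (not from the original module): a Celery task decorated with @task
# is normally queued with .delay() or .apply_async(). The payload below is a hypothetical
# shape chosen for illustration only; the real structure expected by call_push_api is not
# shown in the original file.
data = {"user_id": 1, "steps": 4200}  # assumed payload, for illustration only
store_fitbit_data.delay(data)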
class Solution(object):
    def lengthOfLongestSubstring(self, s):
        """
        :type s: str
        :rtype: int
        """
        longest = 0
        currentSubStr = ''
        for letter in s:
            # shrink the window from the left until `letter` is no longer in it
            while currentSubStr.find(letter) != -1:
                currentSubStr = currentSubStr[1:]
            currentSubStr += letter
            if longest < len(currentSubStr):
                longest = len(currentSubStr)
        return longest
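# A short usage sketch for the sliding-window solution above (added for illustration,
# not part of the original submission).
sol = Solution()
print(sol.lengthOfLongestSubstring("abcabcbb"))  # 3 ("abc")
print(sol.lengthOfLongestSubstring("bbbbb"))     # 1 ("b")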
import math

# Read integers until -1, then approximate the cube root of each with Newton's method.
value = []
i = 1
while True:
    z = int(input())
    if z == -1:
        break
    value.append(z)

for x in range(1, len(value) + 1):
    li = []
    i = 1
    li.append(value[x - 1] / 2)
    while True:
        # Newton step for f(r) = r**3 - value: r_next = r - (r**3 - value) / (3 * r**2)
        li.append(li[i - 1] - ((li[i - 1] ** 3 - value[x - 1]) / (3 * li[i - 1] ** 2)))
        if math.fabs(li[i] ** 3 - value[x - 1]) < 0.00001 * value[x - 1]:
            break
        i += 1
    # the last iterate is the converged approximation
    print(li[-1])
function [y,stop] = fcn(u1,u2) persistent pathcount; persistent pathlength; persistent path; %error in code, initially used path but this is already a matlab function persistent pathPre; if isempty(pathcount) pathcount=1; pathPre=u2; [pathlength,~]=size(pathPre); path=[pathPre; pathPre(pathlength,1)+100 pathPre(pathlength,2)+100]; %sim takes multiple iterations to stop after reaching final point, so this prevents it from breaking end pos=[u1(5) u1(6)]; % [x,y] currentPoint=path(pathcount,:); target=path(pathcount+1,:); A = [(currentPoint(2)-target(2)), target(1)-currentPoint(1); target(1)-currentPoint(1), target(2)-currentPoint(2)]; b = [ target(1)*currentPoint(2)-currentPoint(1)*target(2); pos(1)*(target(1)-currentPoint(1)) + pos(2)*(target(2)-currentPoint(2))]; proj = (A\b)'; %projected point on the line between . projLen=dot(proj-currentPoint,target-currentPoint)/norm(target-currentPoint)^2; %the distance along the projected line, where 0 is the start point and 1 is the end point if ((projLen>1)) pathcount=pathcount+1; end %check if at destination and terminate sim if there pathflag=0; if (pathcount==pathlength) pathflag=1; end if ((pathcount==(pathlength-1))||(pathcount==(pathlength))) a=path(pathcount,:); b=path(pathcount+1,:); post=path(pathcount+1,:); else a=path(pathcount,:); b=path(pathcount+1,:); post=path(pathcount+2,:); end y = [a(1);a(2);b(1);b(2);post(1);post(2)]; stop=pathflag;
#!/usr/bin/python
import time
import json

DATA_FILE = "SenseHat.json"
CACHE_ALIVE = 5  # seconds
PRESSURE_OFFSET = 24.5  # = 206 m altitude


class SenseHat2(object):
    def __init__(self):
        self.data = None
        self.humidity = None
        self.tempH = None
        self.pressure = None
        self.tempP = None
        self.mesureTime = 0

    def loadDataFile(self):
        try:
            file = open(DATA_FILE, "r")
            self.data = json.load(file)
            file.close()
            self.humidity = self.data['humidity']
            self.tempH = self.data['tempH']
            self.pressure = self.data['pressure']
            self.tempP = self.data['tempP']
            self.mesureTime = self.data['mesureTime']
        except:
            print('Error loading data file')

    def saveDataFile(self):
        try:
            file = open(DATA_FILE, "w")
            # cache the timestamp of the most recent sensor reading together with the values
            self.data = {'mesureTime': self.mesureTime,
                         'humidity': self.humidity,
                         'tempH': self.tempH,
                         'pressure': self.pressure,
                         'tempP': self.tempP}
            json.dump(self.data, file)
            file.close()
        except IOError:
            print('Error saving data file')

    def readSensor(self):
        from sense_hat import SenseHat
        senseHat = SenseHat()
        self.humidity = senseHat.get_humidity()
        self.tempH = senseHat.get_temperature_from_humidity()
        self.pressure = senseHat.get_pressure() + PRESSURE_OFFSET
        self.tempP = senseHat.get_temperature_from_pressure()
        self.mesureTime = time.time()

    def main(self):
        self.loadDataFile()
        if time.time() > (self.mesureTime + CACHE_ALIVE):
            try:
                self.readSensor()
                self.saveDataFile()
            except OSError:
                print('Error reading sensor')
                self.humidity = self.data['humidity']
                self.tempH = self.data['tempH']
                self.pressure = self.data['pressure']
                self.tempP = self.data['tempP']

        print('TempH: {0:.2f} *C - Humidity: {1:.2f} %'.format(self.tempH, self.humidity))
        print('TempP: {0:.2f} *C - Pressure: {1:.2f} mbar'.format(self.tempP, self.pressure))


if __name__ == '__main__':
    SenseHat2().main()
#!/usr/bin/env python # -*- coding:utf-8 -*- import os import sys class getsizeERRO(Exception): pass def get_file_size(file_name): s = os.path.getsize(file_name) if s == 0: raise getsizeERRO('File Size value 0') else : return s def save(file_name,data): f = open(file_name, 'w') f.write(data) f.close() def strbin_to_byte(strbin): a = '' for s in strbin: for b in s : a += chr(int(b,2)) return a def byte_to_strbin(s): a = [] for c in s: a.append(bin(ord(c)).replace('0b', '')) return a def read_file(file_name,read_interval): print '* Read interval:%s' % str(read_interval) f = open(file_name,'r') data = '' dlen = 0 file_bin = [] f.seek(read_interval[0], 1) for i in xrange(read_interval[1]-read_interval[0]): data = data + f.read(1) dlen+=1 if len(data) == 32: file_bin.append(byte_to_strbin(data)) data = '' else : pass file_bin.append(byte_to_strbin(data)) data = '' f.close() print '* Read done.\n* Datalen:%s' % dlen return file_bin if __name__ == '__main__' : mode = 'test' if len(sys.argv) > 1 : for a in sys.argv: al = a.split('=') if al[0] == 'source_file': file_name = al[1] if al[0] == 'save_file': save_file = al[1] if al[0] == 'mode': mode = al[1] else : pass else : print 'crypto_byte.py mode={encrypt|decrypt} source_file=/bin/bash save_file=bash' exit(0) try : file_size = get_file_size(file_name) except getsizeERRO as e: print '*' ,e ,', getFilesizeERRO block device?' file_size = int(raw_input('* Input stop_byte :')) except Exception as e: print e exit() if mode == 'test': strbin = read_file(file_name,[0,file_size]) save(save_file,strbin_to_byte(strbin)) elif mode == 'encrypt' : read_file(file_name,[0,file_size]) elif mode == 'decrypt' : save(save_file,strbin_to_byte(strbin))
from tabulate import tabulate
from typing import List, Optional


class Table:
    def __init__(self, header: Optional[List[str]] = None):
        # Avoid a mutable default argument: each instance gets its own header list.
        self.header: List[str] = header if header is not None else []
        self.rows: List[List[str]] = []
        self.style: str = "psql"

    def set_header(self, header: List[str]):
        self.header = header

    def add_row(self, row: List[str]):
        self.rows.append(row)

    def render(self) -> str:
        return tabulate(self.rows, headers=self.header, tablefmt=self.style)
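A minimal usage sketch for the Table wrapper above; the column names and rows are made up for illustration, and the class is assumed to be importable from the module where it is defined:

table = Table(header=["name", "status"])
table.add_row(["db-01", "healthy"])
table.add_row(["db-02", "degraded"])
print(table.render())  # prints a psql-style ASCII grid, one line per row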
import torch import hydra import sys from train import train, evaluate from dataset import VQADataset from models.base_model import VQAModel from torch.utils.data import DataLoader from utils import main_utils, train_utils from utils.train_logger import TrainLogger from omegaconf import DictConfig, OmegaConf from tools.calc_std_mean import calc_std_mean from compute_softscore import load_v2 import os @hydra.main(config_path="config", config_name='config') def evaluate_hw2(cfg: DictConfig) -> float: main_utils.init(cfg) load_v2() # Load dataset path_image_train = '/datashare/train2014/COCO_train2014_' path_question_train = '/datashare/v2_OpenEnded_mscoco_train2014_questions.json' train_dataset = VQADataset(path_answers=cfg['main']['paths']['train'], path_image=path_image_train, path_questions=path_question_train) path_image_val = '/datashare/val2014/COCO_val2014_' path_question_train = '/datashare/v2_OpenEnded_mscoco_val2014_questions.json' val_dataset = VQADataset(path_answers=cfg['main']['paths']['validation'], path_image=path_image_val, path_questions=path_question_train, word_dict=train_dataset.word_dict) eval_loader = DataLoader(val_dataset, cfg['train']['batch_size'], shuffle=True, num_workers=cfg['main']['num_workers']) image_dim = train_dataset.pic_size output_dim = 2410 model = VQAModel(batch_size=cfg['train']['batch_size'], word_vocab_size=train_dataset.vocab_size, lstm_hidden=cfg['train']['num_hid'], output_dim=output_dim, dropout=cfg['train']['dropout'], word_embedding_dim=cfg['train']['word_embedding_dim'], question_output_dim = cfg['train']['question_output_dim'], image_dim= image_dim, last_hidden_fc_dim= cfg['train']['last_hidden_fc_dim']) if torch.cuda.is_available(): model = model.cuda() model.load_state_dict(torch.load('model.pkl',map_location=lambda storage, loc: storage)['model_state']) model.train(False) eval_score, eval_loss = evaluate(model, eval_loader) print(f"The evaluation score is {eval_score}") return eval_score def rel_to_abs_path(path): script_dir = os.path.dirname(__file__) # <-- absolute dir the script is in return os.path.join(script_dir, path) if __name__ == '__main__': evaluate_hw2()
import finitefield import truthtable import primpoly def DFT(array, dpoly): """ Given an array of polynomials, maps it to its discrete fourier transform using as its nth root of unity the root of the primitive polynomial dpoly !!note that order of root must equal size of array!! """ f = [] uroot = [0,1] for i in xrange(len(array)): A = [0] for j in xrange(len(array)): a = finitefield.fieldExp(uroot,j*i,dpoly) x = array[j] A = finitefield.fieldAdd(A, finitefield.fieldMult(x,a,dpoly)) f.append(A) return f def IDFT(array, dpoly): """Given an array of polynomials, maps it to its inverse discrete fourier trandform using as its nth root of unity the root of the primitive polynomial dpoly !!note that order of root must equal size of array!! """ f = [] uroot = [0,1] for i in xrange(len(array)): A1 = [0] for j in xrange(len(array)): a = finitefield.fieldExp(uroot,-1*j*i,dpoly) x = array[j] A1 = finitefield.fieldAdd(A1, finitefield.fieldMult(x,a,dpoly)) A2 = finitefield.fieldMult(finitefield.fieldInv([len(array)%2], dpoly), A1, dpoly) f.append(A2) return f print IDFT(DFT([[1],[0],[1],[1],[1],[1],[1],[1],[0],[1], [0], [0], [0], [1], [0], [0], [1], [1], [0], [0], [0], [1], [0], [1], [0], [1], [1], [0], [0], [0], [1,0,1]], [1,0,1,0,0,1]),[1,0,1,0,0,1]) print DFT([[1,0,1]]*63,[1,1,0,0,0,0,1])
from .login_attempt_record import LoginAttemptRecord from .login_record import LogRecord
from flask import Flask, render_template, g, request
import re
import praw
import sqlite3
import os

app = Flask(__name__)
DATABASE = os.path.join(os.getcwd(), 'database.db')


def get_db():
    db = getattr(g, '_database', None)
    if db is None:
        db = g._database = sqlite3.connect(DATABASE)
    return db


@app.teardown_appcontext
def close_connection(exception):
    # The attribute name must match the one set in get_db().
    db = getattr(g, '_database', None)
    if db is not None:
        db.close()


# Insert information from your registered account with Reddit's API here:
reddit = praw.Reddit(
    client_id="Update",
    client_secret="Update",
    user_agent="Update"
)

commonWords = ['this', 'it', 'but', 'a', 'an', 'the', 'just', 'like', 'me', 'be', 'into', 'you', 'that', 'is', 'to',
               'i', 'sec', 'wsb', 'moon', 'says', 'vs', 'yolo', 'get', 'for', 'now', 'go', 'get', 'gang', 'etf',
               'hold', 'on', 'out', 'red', 'us', 'ev']


def queryTitles():
    titles = []
    db = get_db()
    # Query wallstreetbets 'hot' posts for their titles (adding a space in the front for regex ease)
    for submission in reddit.subreddit("wallstreetbets").hot(limit=50):
        titles.append(' ' + submission.title)
    posts = db.execute(
        'SELECT DISTINCT title FROM mentions'
    ).fetchall()
    listPosts = []
    for post in posts:
        listPosts.append(post[0])
    # Skip titles that have already been stored.
    titles = [x for x in titles if x not in listPosts]
    return titles


def matches(titles):
    listMatch = {}
    tickerRegex = re.compile(r'(?:\s|\$)[A-Z]{1,4}(?=\s)')
    # Add all matches for the regex to the dict of matches, keyed by ticker
    for title in titles:
        matches = set(tickerRegex.findall(title))
        matches = [x.strip(' ').strip('$') for x in matches]
        tickers = [x for x in matches if x.lower() not in commonWords]
        for ticker in tickers:
            if ticker not in listMatch:
                listMatch[ticker] = [title]
            else:
                listMatch[ticker].append(title)
    db = get_db()
    for key, values in listMatch.items():
        for value in values:
            db.execute(
                'INSERT INTO mentions (tickerTitle, ticker, title) VALUES (?, ?, ?)', (key + value, key, value)
            )
            db.commit()


@app.route("/", methods=['GET', 'POST'])
def index():
    if request.method == "POST":
        matches(queryTitles())
    db = get_db()
    posts = db.execute('SELECT DISTINCT ticker, count(ticker) FROM mentions GROUP BY ticker').fetchall()
    return render_template("index.html", posts=posts)
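As a quick illustration of how the ticker regex above behaves, here is a self-contained snippet with a made-up post title; it mirrors the strip-and-filter steps from matches():

import re

ticker_regex = re.compile(r'(?:\s|\$)[A-Z]{1,4}(?=\s)')
title = " Bought more $GME and AMC today, YOLO "
found = [m.strip(' ').strip('$') for m in ticker_regex.findall(title)]
print(found)  # ['GME', 'AMC', 'YOLO']; 'YOLO' is then removed by the commonWords filter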
# -*- coding: utf-8 -*- __author__ = 'lish' import io,MySQLdb #import ImageDraw from PIL import Image,ImageDraw try: # Python2 from urllib2 import urlopen except ImportError: # Python3 from urllib.request import urlopen import requests import hashlib import base64 import sys,os,time,uuid reload(sys) sys.setdefaultencoding('utf-8') base_path='/opt/www/api/attachment/imread/booksheet/cover/' # isExists=os.path.exists(base_path) # if not isExists: # os.makedirs(base_path) def linkSQL(host,user,passwd,db): global cursor,conn conn=MySQLdb.connect(host=host,user=user,passwd=passwd,charset="utf8",db=db) cursor = conn.cursor() return conn def gaininfos(sheetid): try: host="rdsljqv187pt04s68726.mysql.rds.aliyuncs.com" user="crawl" passwd="vDwoiExZ26jYaMsyZokz" conn=linkSQL(host,user,passwd,'ebook_con') sql=""" SELECT a.big_thumb FROM con_book a, ( SELECT content_id,sheet_id,create_time FROM con_booksheet_content WHERE content_type = 1 AND sheet_id = """+str(sheetid)+""" order by id desc ) b WHERE a.book_id = b.content_id order by b.create_time desc limit 3""" n = cursor.execute(sql) infos=[] for row in cursor.fetchall(): infos.append(row[0]) print infos return infos except Exception, e: print e def mergepicture(sheetid): try: picurl=gaininfos(sheetid) #print picurl mxsize =600 mysize =600 toImage = Image.new('RGBA', (mxsize,mysize),'#e7e7e7') lowImage = Image.new('RGBA', (600, 80),'#ddd4c3') toImage.paste(lowImage,( 0, 520)) i=0 for url in picurl[2::-1]: i+=1 try: image_bytes = urlopen(url).read() except: continue data_stream = io.BytesIO(image_bytes) # open as a PIL image object fromImage = Image.open(data_stream) #fromImage = Image.open(url) fromImage_resized = fromImage.resize((180, 240), Image.ANTIALIAS) xsize,ysize=fromImage_resized.size #print xsize,xsize if i==3: x=135 y=80 fromImage_resized = fromImage.resize((330, 440), Image.ANTIALIAS) elif i==1: x=280 y=160 fromImage_resized = fromImage.resize((270, 360), Image.ANTIALIAS) elif i==2: x=50 y=160 fromImage_resized = fromImage.resize((270, 360), Image.ANTIALIAS) toImage.paste(fromImage_resized,( x, y)) lists = os.listdir(base_path) #print lists for tt in lists: if sheetid in tt: print tt os.remove(base_path+tt) image_path=base_path+'cover180240_'+str(sheetid)+'_'+str(uuid.uuid1()).replace('-','')+'.jpg' toImage.save(image_path) image_url=str(image_path.replace('/opt/www/','http://static.imread.com/')) sql="update ebook_con.con_booksheet set image_url='"+image_url+"' where sheet_id="+str(sheetid) n=cursor.execute(sql) conn.commit() cursor.close() conn.close() # host="192.168.0.34" # user="ebook" # passwd="ebook%$amRead" # conn=linkSQL(host,user,passwd,'ebook_con') print image_url release_cdn(image_url) except Exception, e: raise e def release_cdn(image_url): passwd=hashlib.md5(hashlib.sha1('KEUswIa+Tc5/L').hexdigest()).hexdigest() url = 'http://push.dnion.com/cdnUrlPush.do' data = dict( username ='51ss', password = passwd, url = image_url, type='0' ) r = requests.post(url,data=data) print r.headers print r.status_code if __name__ == '__main__': #gaininfos() mergepicture(sys.argv[1])
from sqlalchemy.orm import sessionmaker import creTable Session_class = sessionmaker(bind=creTable.engine) session = Session_class() # b1 = creTable.Book(name = 'Python With Alex',pub_date='2014-05-02') b2 = creTable.Book(name = 'C++ 网络编程',pub_date='2014-05-02') # b3 = creTable.Book(name = 'PHP With Alex',pub_date='2014-05-02') # # a1 = creTable.Author(name = 'Alex') a2 = creTable.Author(name = 'Jack') a3 = creTable.Author(name = 'Rain') # # b1.authors = [a1,a3] b2.authors = [a2,a3] # b3.authors = [a1,a2,a3] # session.add_all([b1,b2,b3,a1,a2,a3]) session.add(b2) author_obj = session.query(creTable.Author).filter(creTable.Author.name == 'jack').first() print(author_obj) book_obj = session.query(creTable.Book).filter(creTable.Book.id == 5).first() print(book_obj.authors) # book_obj.authors.remove(author_obj) session.commit()
import time from multiprocessing import Queue, Process from bot import Bot from helpers import load_configs def start_bot(config, messages, id): bot = Bot(config, messages, bot_id=id) bot.resume() while True: bot.loop() time.sleep(2) def get_queue_and_start(): queue = Queue() id = 0 for config in load_configs(): p = Process(target=start_bot, args=(config, queue, id)) p.start() id += 1 return queue if __name__ == '__main__': id = 0 q = Queue() for config in load_configs(): start_bot(config, q, id) id += 1
"""AppConfig for stats.""" import collections from django.apps import AppConfig from django.utils.translation import gettext, gettext_lazy as _ GLOBAL_PARAMETERS_STRUCT = collections.OrderedDict([ ("general", { "label": _("General"), "params": collections.OrderedDict([ ("logfile", { "label": _("Path to the log file"), "help_text": _("Path to log file used to collect statistics") }), ("rrd_rootdir", { "label": _("Directory to store RRD files"), "help_text": _("Path to directory where RRD files are stored") }), ("greylist", { "label": _("Show greylisted messages"), "help_text": _("Differentiate between hard and soft rejects (greylisting)") }) ]) }) ]) def load_maillog_settings(): """Load app settings.""" from modoboa.maillog import forms from modoboa.maillog.api.v2 import serializers from modoboa.parameters import tools as param_tools param_tools.registry.add( "global", forms.ParametersForm, gettext("Statistics")) param_tools.registry.add2( "global", "maillog", gettext("Statistics"), GLOBAL_PARAMETERS_STRUCT, serializers.MaillogGlobalParemetersSerializer ) class MaillogConfig(AppConfig): """App configuration.""" name = "modoboa.maillog" verbose_name = "Modoboa graphical statistics" def ready(self): load_maillog_settings() from . import handlers
# Sort first, then backtrack with a little pruning.
from typing import List


class Solution:
    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
        candidates.sort()
        n = len(candidates)
        res = []

        def backtrack(i, tmp_sum, tmp):
            # Unpruned version kept for reference:
            # if tmp_sum > target or i == n:
            #     return
            # if tmp_sum == target:
            #     res.append(tmp)
            #     return
            for j in range(i, n):
                if tmp_sum + candidates[j] > target:
                    break  # candidates are sorted, so every later value overshoots too
                if tmp_sum + candidates[j] == target:
                    res.append(tmp + [candidates[j]])
                    break
                backtrack(j, tmp_sum + candidates[j], tmp + [candidates[j]])

        backtrack(0, 0, [])
        return res
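A quick sanity check of the solution above on the standard example input (this assumes the Solution class above is in scope):

if __name__ == "__main__":
    print(Solution().combinationSum([2, 3, 6, 7], 7))
    # Expected (order of the sublists may vary): [[2, 2, 3], [7]]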
# -*- coding: utf-8 -*-

import requests
from architect.manager.client import BaseClient
from celery.utils.log import get_logger

logger = get_logger(__name__)

DEFAULT_RESOURCES = [
    'spinnaker_account',
    'spinnaker_application',
    # 'spinnaker_artifact',
    'spinnaker_pipeline_config',
    'spinnaker_pipeline',
    # 'spinnaker_stage',
]


class SpinnakerClient(BaseClient):

    def __init__(self, **kwargs):
        super(SpinnakerClient, self).__init__(**kwargs)

    def auth(self):
        status = True
        return status

    def client(self, path):
        response = requests.get('{}{}'.format(self.metadata['auth_url'], path))
        return response.json()

    def update_resources(self, resources=None):
        if self.auth():
            if resources is None:
                resources = DEFAULT_RESOURCES
            for resource in resources:
                metadata = self.get_resource_metadata(resource)
                self.process_resource_metadata(resource, metadata)
                count = len(self.resources.get(resource, {}))
                logger.info("Processed {} {} resources".format(count, resource))
            self.process_relation_metadata()

    def get_resource_status(self, kind, metadata):
        return 'unknown'

    def get_resource_metadata(self, kind):
        logger.info("Getting {} resources".format(kind))
        metadata = []
        if kind == 'spinnaker_account':
            metadata = self.client('/credentials')
        elif kind == 'spinnaker_application':
            metadata = self.client('/applications')
        elif kind == 'spinnaker_pipeline':
            applications = self.client('/applications')
            for app in applications:
                data = self.client('/applications/{}/'
                                   'pipelines'.format(app['name']))
                if len(data) > 0:
                    metadata += data
        elif kind == 'spinnaker_pipeline_config':
            applications = self.client('/applications')
            for app in applications:
                data = self.client('/applications/{}/'
                                   'pipelineConfigs'.format(app['name']))
                if len(data) > 0:
                    metadata += data
        return metadata

    def process_resource_metadata(self, kind, metadata):
        if kind == 'spinnaker_account':
            for resource in metadata:
                self._create_resource(resource['name'],
                                      resource['name'],
                                      'spinnaker_account',
                                      metadata=resource)
        elif kind == 'spinnaker_application':
            for resource in metadata:
                self._create_resource(resource['name'],
                                      resource['name'],
                                      'spinnaker_application',
                                      metadata=resource)
        elif kind == 'spinnaker_pipeline':
            for resource in metadata:
                self._create_resource(resource['id'],
                                      resource['name'],
                                      'spinnaker_pipeline',
                                      metadata=resource)
        elif kind == 'spinnaker_pipeline_config':
            for resource in metadata:
                logger.info(resource)
                self._create_resource(resource['id'],
                                      resource['name'],
                                      'spinnaker_pipeline_config',
                                      metadata=resource)

    def process_relation_metadata(self):
        # Define relationships between pipeline configs and applications
        for resource_id, resource in self.resources.get('spinnaker_pipeline_config',
                                                        {}).items():
            self._create_relation(
                'application_pipeline_config',
                resource_id,
                resource['metadata']['application'])

        # Define relationships between pipelines and applications
        for resource_id, resource in self.resources.get('spinnaker_pipeline',
                                                        {}).items():
            self._create_relation(
                'application_pipeline',
                resource_id,
                resource['metadata']['application'])

        # Define relationships between pipelines and pipeline configurations
        for resource_id, resource in self.resources.get('spinnaker_pipeline',
                                                        {}).items():
            self._create_relation(
                'application_pipeline',
                resource_id,
                resource['metadata']['pipelineConfigId'])
import os import pathlib from tempfile import NamedTemporaryFile import gzip import re import json from itertools import groupby from collections import Counter from fabric.api import sudo, get from fabric.contrib.files import exists from fabtools import require from appconfig.tasks import * init() def sql(app, sql_): """ Run some SQL remotely on the app's database """ sudo("""mysql -e "{0}" -D {1}""".format(sql_.replace('\n', ' '), app.name)) def read_from_db(app, sql_): """ - Run some SQL remotely on the app's database, which writes results to OUTFILE. - Transfer the resulting file to the local machine. - Return the result rows as list of lists. """ remote_path = "/tmp/query.txt" if exists(remote_path): sudo('rm {0}'.format(remote_path)) sql(app, sql_.replace('OUTFILE', "OUTFILE '{0}'".format(remote_path))) sudo('gzip -f {0}'.format(remote_path)) local_file = NamedTemporaryFile(delete=False, suffix='.gz') get(remote_path + '.gz', local_file.name) sudo('rm {0}.gz'.format(remote_path)) with gzip.open(local_file.name, mode='rt', encoding='utf8') as fp: res = [l.replace('\n', '').split('\t') for l in fp.readlines()] os.remove(local_file.name) return res @task_app_from_environment def shutdown(app): sudo('systemctl stop php7.0-fpm.service') upload_db_to_cdstar(app, dbname='v4') @task_app_from_environment def backup_to_cdstar(app): upload_db_to_cdstar(app, dbname='v4') # # SQL query retrieving information to map soundfiles to existing Transcriptions. # TRANSCRIPTIONS = """\ select distinct l.FilePathPart, w.SoundFileWordIdentifierText, t.LanguageIx, t.IxElicitation, t.IxMorphologicalInstance, t.AlternativePhoneticRealisationIx, t.AlternativeLexemIx into OUTFILE from Transcriptions as t, Languages as l, Words as w where l.LanguageIx = t.LanguageIx and t.IxElicitation = w.IxElicitation and t.IxMorphologicalInstance = w.IxMorphologicalInstance order by l.FilePathPart; """ LANGUAGES = """\ select FilePathPart, LanguageIx into OUTFILE from Languages; """ WORDS = """\ select SoundFileWordIdentifierText, IxElicitation, IxMorphologicalInstance into OUTFILE from Words; """ @task_app_from_environment def load_contributorimages_catalog(app, catalog): """ load available contributor and speaker images into the db Usage: fab load_contributorimages_catalog:production,/path/to/soundcomparisons-data/imagefiles/catalog.json :param catalog: Path to imagesfiles/catalog.json in a clone of the repos clld/soundcomparisons-data """ with pathlib.Path(catalog).open() as fp: cat = json.load(fp) table = NamedTemporaryFile(suffix='.gz', delete=False) with gzip.open(table.name, mode='wt', encoding='utf8') as tbl: for oid, data in cat.items(): md = data['metadata'] if(md['name']): tag = md['name'] fpp = '' # if tag represents a FilePathPart_\d+ (speaker image[s]) # remove trailing indices and fill column 'filepathpart' with FilePathPart only arr = re.split(r'_\d+$', tag) if len(arr) == 2: fpp = arr[0] tbl.write('\t'.join([ tag, fpp, 'https://cdstar.shh.mpg.de/bitstreams/{0}/{1}'.format( oid, md['path'])]) + '\n') remote_path = '/tmp/contributorimages.txt.gz' require.files.file(path=remote_path, source=table.name, use_sudo=True, mode='644') os.remove(table.name) sudo('gunzip -f {0}'.format(remote_path)) remote_path = remote_path[:-3] tsql = """\ CREATE OR REPLACE TABLE soundcomparisons.contributorimages ( tag varchar(255) NOT NULL, filepathpart varchar(255) DEFAULT '', url TEXT NOT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8;""" sql(app, tsql) sudo('mysqlimport {0} {1}'.format(app.name, remote_path)) sudo('rm 
{0}'.format(remote_path)) sudo('systemctl restart php7.0-fpm.service') @task_app_from_environment def load_soundfile_catalog(app, catalog): """ load available soundfiles into the db Usage: fab load_soundfile_catalog:production,/path/to/soundcomparisons-data/soundfiles/catalog.json :param catalog: Path to soundfiles/catalog.json in a clone of the repos clld/soundcomparisons-data """ with pathlib.Path(catalog).open() as fp: cat = json.load(fp) # Restructure catalog into {"stem":["uid", [".EXT1", ".EXT2", ...]]} catalog = {} for (k, v) in cat.items(): stem = v['metadata']['name'] fmts = [pathlib.Path(bs['bitstreamid']).suffix for bs in v['bitstreams']] catalog[stem] = [k, fmts] del cat # We keep track of soundfiles for existing Transcriptions by deleting the corresponding catalog # keys from this set: ckeys = set(catalog.keys()) def urls_from_catalog(stem): oid, fmts = catalog[stem] return [ 'https://cdstar.shh.mpg.de/bitstreams/{0}/{1}{2}'.format( oid, stem, fmt) for fmt in list(set(fmts) & set(['.mp3','.ogg']))] transcriptions = read_from_db(app, TRANSCRIPTIONS) def transcription_grouper(row): """ Transcriptions are identified uniquely by the 5-tuple - LanguageIx, - IxElicitation, - IxMorphologicalInstance, - AlternativePhoneticRealisationIx, - AlternativeLexemIx Associated soundfiles OTOH are identified by FilePathPart and SoundFileWordIdentifierText which may vary across Transcriptions for the same (Language, Word). """ return tuple(row[2:]) def write_row(tbl, ixl, ixe, ixm, pron, lex, urls): if urls: tbl.write('\t'.join([ixl, ixe, ixm, str(pron), str(lex), json.dumps(urls)]) + '\n') table = NamedTemporaryFile(suffix='.gz', delete=False) with gzip.open(table.name, mode='wt', encoding='utf8') as tbl: for key, tts in groupby( sorted(transcriptions, key=transcription_grouper), transcription_grouper): LanguageIx, IxElicitation, IxMorphologicalInstance, pron, lex = key pron, lex = int(pron), int(lex) suffix = '' if lex > 1: suffix += '_lex{0}'.format(lex) if pron > 1: suffix += '_pron{0}'.format(pron) urls = [] for tt in tts: stem = tt[0] + tt[1] + suffix if stem in catalog: if stem in ckeys: ckeys.remove(stem) urls.extend(urls_from_catalog(stem)) write_row(tbl, LanguageIx, IxElicitation, IxMorphologicalInstance, pron, lex, urls) # # To be able to assign soundfiles to dummy transcriptions, we need to map soundfile names # to LanguageIx, IxElicitation, IxMorphologicalInstance. # languages = {r[0]: r[1] for r in read_from_db(app, LANGUAGES)} # We sort the FilePathPart by descending length, to prevent matching short prefixes. lkeys = sorted(languages, key=lambda w: len(w), reverse=True) words = {r[0]: r[1:] for r in read_from_db(app, WORDS)} # We sort the SoundFileWordIdentifierText by descending length, to prevent matching # short prefixes. sfwits = sorted(words, key=lambda w: len(w), reverse=True) uwords = Counter() rems = Counter() lex_pattern = re.compile('_lex(?P<n>[0-9])$') pron_pattern = re.compile('_pron(?P<n>[0-9])$') for key in sorted(ckeys): # Loop over soundfiles which haven't been assigned yet. for fpp in lkeys: if key.startswith(fpp): sfwit = key[len(fpp):] break else: # No matching FilePathPart was found! continue for word in sfwits: if sfwit.startswith(word): sfwit = word break else: # No matching SoundFileWordIdentifierText was found! uwords.update([sfwit]) continue lex, pron = 0, 0 # Default for dummy transcriptions. rem = key[len(fpp) + len(sfwit):] if rem: # Check unmatched suffixes of the soundfile stem. 
m = lex_pattern.match(rem) if m: lex = int(m.group('n')) m = pron_pattern.match(rem) if m: pron = int(m.group('n')) if not (lex or pron): rems.update([rem]) continue # The suffix didn't match the lex or pron pattern. ixe, ixm = words[sfwit] write_row(tbl, languages[fpp], ixe, ixm, pron, lex, urls_from_catalog(key)) remote_path = '/tmp/soundfiles.txt.gz' require.files.file(path=remote_path, source=table.name, use_sudo=True, mode='644') os.remove(table.name) sudo('gunzip -f {0}'.format(remote_path)) remote_path = remote_path[:-3] tsql = """\ CREATE OR REPLACE TABLE soundcomparisons.soundfiles ( LanguageIx bigint(20) unsigned NOT NULL, IxElicitation int(10) unsigned NOT NULL, IxMorphologicalInstance tinyint(3) unsigned NOT NULL, AlternativePhoneticRealisationIx tinyint(3) unsigned, AlternativeLexemIx tinyint(3) unsigned, urls TEXT ) ENGINE=InnoDB DEFAULT CHARSET=utf8;""" sql(app, tsql) sudo('mysqlimport {0} {1}'.format(app.name, remote_path)) sudo('rm {0}'.format(remote_path)) #print('unknown words referenced in soundfiles ({0}):'.format(sum(uwords.values()))) #print(uwords) #print('suffixes ({0}):'.format(sum(rems.values()))) #print(rems) sudo('systemctl restart php7.0-fpm.service')
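For reference, the catalog restructuring and URL building at the start of load_soundfile_catalog can be pictured with a tiny, self-contained sketch; the object id and soundfile stem below are invented for illustration only:

import json
import pathlib

# One entry of a hypothetical catalog.json, keyed by CDSTAR object id.
cat = {
    "EAEA0-0000-0000-0000": {
        "metadata": {"name": "Germanic_Berlin_word001"},
        "bitstreams": [{"bitstreamid": "Germanic_Berlin_word001.mp3"},
                       {"bitstreamid": "Germanic_Berlin_word001.ogg"}],
    }
}

# Restructure into {"stem": ["uid", [".EXT1", ".EXT2", ...]]} as described in the task above.
catalog = {v['metadata']['name']: [k, [pathlib.Path(bs['bitstreamid']).suffix for bs in v['bitstreams']]]
           for k, v in cat.items()}

oid, fmts = catalog["Germanic_Berlin_word001"]
urls = ['https://cdstar.shh.mpg.de/bitstreams/{0}/{1}{2}'.format(oid, "Germanic_Berlin_word001", fmt)
        for fmt in set(fmts) & {'.mp3', '.ogg'}]
print(json.dumps(urls, indent=2))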
from ex1 import *
import random


def choose_element_list(list_in_which_to_choose: list) -> all:
    # randint is inclusive on both ends, so use len(...) - 1 to avoid an IndexError.
    nombre_de_la_liste_tirer = random.randint(0, len(list_in_which_to_choose) - 1)
    nombre_retourner = list_in_which_to_choose[nombre_de_la_liste_tirer]
    return nombre_retourner


liste_alea = gen_list_random_int()
print("BASE LIST:", liste_alea, "random element from the list:", choose_element_list(liste_alea))
from flask import Blueprint from flask import request from flask import render_template from flask_login import login_required from .controller import Log bp = Blueprint('loginfo', __name__) @bp.route('/loginfo/search') @login_required def loginfo_search(): data = Log.find_by_condition() return render_template('/loginfo/search.html', data=data) @bp.route('/loginfo/search/logdisplay_sub', methods=['POST']) @login_required def logdisplay_sub(): uid = request.form['uid'] return render_template('/loginfo/search_logdisplay_sub.html', data=uid)