text
stringlengths 8
6.05M
|
|---|
from django.contrib import admin
from api.models import Item,Cart,Order

# Register each persistent model with the default admin site so it can be
# browsed and edited through the Django admin UI.
for model in (Item, Cart, Order):
    admin.site.register(model)
|
#!/usr/bin/env python3
import sys
import time
from Nav_funs import Distance, Direction
import rospy
from geometry_msgs.msg import Point
#from mcu_control.msg import RoverPosition, AntennaGoal
def subscriber_callback(message):
    """Handle an incoming rover GPS fix (geometry_msgs/Point).

    message.x carries the latitude and message.y the longitude; values
    below -900 are treated as "no fix" sentinels and ignored.  Once the
    antenna base position is known, publishes the rotator heading and the
    distance to the rover on the antenna goal topic.
    """
    rospy.loginfo(message)
    if message.x < -900 or message.y < -900:
        return
    rover['latitude'] = message.x
    rover['longitude'] = message.y
    if gotAntennaPos:
        # Bearing and distance from the base-station antenna to the rover.
        BS_to_Rover_dir = Direction(antenna['latitude'], antenna['longitude'], rover['latitude'], rover['longitude'])
        BS_to_Rover_dis = Distance(antenna['latitude'], antenna['longitude'], rover['latitude'], rover['longitude'])
        # Convert the absolute bearing into an angle relative to the
        # antenna's initial heading, then normalize into [0, 360].
        rotatorAngle = BS_to_Rover_dir - antenna['startDir'] + 180
        if rotatorAngle < 0:
            rotatorAngle += 360
        elif rotatorAngle > 360:
            rotatorAngle -= 360
        msg = Point()
        msg.x = rotatorAngle/10  # NOTE(review): /10 presumably matches the rotator's unit scaling -- confirm
        msg.y = BS_to_Rover_dis
        antennaPub.publish(msg)
    else:
        rospy.loginfo('Waiting for antenna position ROS parameters to be set')
    return
if __name__ == '__main__':
    node_name = 'antenna_node'
    rospy.init_node(node_name, anonymous=False)  # only allow one node of this type
    rospy.loginfo('Initialized "'+node_name+'" node for pub/sub functionality')

    def shutdown_hook():
        """Log the shutdown and give ROS time to deal with the node closing
        (rosbridge especially)."""
        rospy.logwarn('This node ('+node_name+') is shutting down')
        time.sleep(1)

    # BUGFIX: the hook was previously defined and registered only AFTER the
    # spin loop below had already exited, so it could never run on a normal
    # shutdown.  Register it before spinning.
    rospy.on_shutdown(shutdown_hook)

    subscribe_topic = '/rover_position'
    rospy.loginfo('Beginning to subscribe to "'+subscribe_topic+'" topic')
    sub = rospy.Subscriber(subscribe_topic, Point, subscriber_callback)

    antenna_pub_topic = '/antenna_goal'
    rospy.loginfo('Beginning to publish to "'+antenna_pub_topic+'" topic')
    antennaPub = rospy.Publisher(antenna_pub_topic, Point, queue_size=10)

    # Shared state consumed by subscriber_callback.
    gotAntennaPos = False
    antenna = {'latitude':None, 'longitude':None, 'startDir':None, 'recommendedDir':None}
    rover = {'latitude':None, 'longitude':None, 'distance':None}

    rospy.loginfo('This node needs the antenna starting position (antenna_latitude, '+ \
        'antenna_longitude, antenna_start_dir) and will wait until it receives that')
    try:
        rate = rospy.Rate(100)  # hoisted: no need to rebuild the Rate object every iteration
        while not rospy.is_shutdown():
            if not gotAntennaPos:
                try: #rospy.has_param(param) works but requires code rethinking
                    antenna['latitude'] = rospy.get_param('antenna_latitude')
                    antenna['longitude'] = rospy.get_param('antenna_longitude')
                    antenna['startDir'] = rospy.get_param('antenna_start_dir')
                    gotAntennaPos = True
                    rospy.loginfo('Got antenna starting position!')
                except KeyError: # param not defined yet
                    pass
            rate.sleep()
    except rospy.ROSInterruptException:
        pass
|
# Written by Muhammad Sarmad
# Date : 31st July
# This file is only to ascertain the value of ZMAX and ZMIN
import torch
import torch.utils.data
import torch.nn.parallel
import Datasets
import models
import numpy as np
import argparse
import torchvision.transforms as transforms
import pc_transforms
# Seed numpy and torch so dataset splits and scans are reproducible.
np.random.seed(2)
torch.manual_seed(2)
dataset_names = sorted(name for name in Datasets.__all__)
model_names = sorted(name for name in models.__all__)
parser = argparse.ArgumentParser(
    description='Point Cloud Training Autoencoder and Shapecompletion Training on Three Datasets',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Arguments for Data Loader
parser.add_argument('-d', '--data', metavar='DIR',
                    default='/home/sarmad/Desktop/data/shape_net_core_uniform_samples_2048', help='Path to Data Set')
parser.add_argument('-n', '--dataName', metavar='Data Set Name', default='shapenet', choices=dataset_names)
parser.add_argument('-ad', '--adddata', metavar='aDIR', default='', help='Additional path to dataset')
# BUGFIX: the numeric options below previously had no type=, so any value
# supplied on the command line arrived as a string instead of a number.
parser.add_argument('-s', '--split_value', type=float, default=0.9, help='Ratio of train and test data split')
# Arguments for Torch Data Loader
parser.add_argument('-b', '--batch_size', type=int, default=8, help='input batch size')
parser.add_argument('-w', '--workers', type=int, default=8, help='Set the number of workers')
# Arguments for Model Settings
parser.add_argument('-m', '--model', default='AE_RSnet', help='Chose Your Model Here')
parser.add_argument('--bs', type=float, default=1.0, help='size of each block')
parser.add_argument('--stride', type=float, default=0.5, help='stride of block')
parser.add_argument('--rx', type=float, default=0.02, help='slice resolution in x axis')
parser.add_argument('--ry', type=float, default=0.02, help='slice resolution in y axis')
parser.add_argument('--rz', type=float, default=0.02, help='slice resolution in z axis')
parser.add_argument('--ZMAX', type=float, default=0.499, help='ZMax of the data')  # Check Zmax by running this script
parser.add_argument('--ZMIN', type=float, default=-0.499, help='ZMin of the data')  # BUGFIX: help text said "ZMax"
args = parser.parse_args()
def main():
    """Scan the train and validation splits and print the global maximum
    (ZMAX) and minimum (ZMIN) coordinate values of the point-cloud data.

    Side effects: prints 'ZMAX:'/'ZMIN:' lines to stdout.
    """
    input_transforms = transforms.Compose([
        pc_transforms.ArrayToTensor(),
    ])
    target_transforms = transforms.Compose([
        pc_transforms.ArrayToTensor(),
    ])
    # Data loader: split the chosen dataset into train / validation parts.
    # (The unused co_transform and the `input, target = train_dataset[1]`
    # probe from the original were dead code and have been removed.)
    [train_dataset, valid_dataset] = Datasets.__dict__[args.dataName](input_root=args.data, target_root=None,
                                                                      split=args.split_value, net_name='auto_encoder',
                                                                      input_transforms=input_transforms,
                                                                      target_transforms=target_transforms)
    # BUGFIX: the extrema were initialised to 0.0, which silently clamps
    # ZMAX to >= 0 and ZMIN to <= 0 for data that never crosses zero.
    omax = float('-inf')
    omin = float('inf')

    def _update(sample):
        """Fold one point cloud's min/max into the running extrema."""
        nonlocal omax, omin
        omax = max(omax, torch.max(sample).item())
        omin = min(omin, torch.min(sample).item())

    for input_train, _target in train_dataset:
        _update(input_train)
    for input_valid, _target in valid_dataset:
        _update(input_valid)
    print('ZMAX:', omax)
    print('ZMIN:', omin)


if __name__ == '__main__':
    main()
|
from django.http import HttpResponse,Http404
from django.shortcuts import render
import datetime
def hello(request):
    """Plain-text greeting view."""
    greeting = 'Hello world'
    return HttpResponse(greeting)
def root_page(request):
    """View served at the site root."""
    body = 'The root page!'
    return HttpResponse(body)
def current_date(request):
    """Render the current server time using the current_datetime.html template."""
    context = {'current_date': datetime.datetime.now()}
    return render(request, 'current_datetime.html', context)
def hours_ahead(request, offset):
    """Render the time ``offset`` hours from now.

    Raises:
        Http404: when ``offset`` is not a valid integer.
    """
    try:
        offset = int(offset)
    except ValueError:
        raise Http404
    dt = datetime.datetime.now() + datetime.timedelta(hours=offset)
    # BUGFIX: removed an unused second datetime.now() call.
    html ="<html><body>In %s hour(s),it will be %s.</body></html>"%(offset,dt)
    return HttpResponse(html)
|
#!/usr/bin/env python3
from io import StringIO
from textwrap import dedent
import matplotlib.pyplot as plt
from pandas import read_csv
# %%
# Inline dataset: photodetachment phase eta_fd versus photon energy for the
# theory curve and two experimental extraction methods (neg_p, g_fitting).
data = StringIO(dedent("""\
category,photon,eta_fd
theory,14.5,4.572
theory,15.0,4.448
theory,15.5,4.367
theory,16.0,4.307
theory,16.5,4.258
theory,17.0,4.216
theory,17.5,4.179
theory,18.0,4.146
theory,18.5,4.117
theory,19.0,4.091
theory,19.5,4.068
theory,20.0,4.066
theory,20.5,4.024
theory,21.0,4.007
theory,21.5,3.987
theory,22.0,3.960
theory,22.5,3.924
theory,23.0,3.884
theory,23.5,3.850
theory,24.0,3.822
neg_p,15.9,4.626
neg_p,14.3,4.854
neg_p,19.1,4.538
g_fitting,15.9,4.643
g_fitting,14.3,4.762
g_fitting,19.1,4.466"""
))
df = read_csv(data)
# %%
# Split the table into the theory curve and the experimental points.
df_theory = df[df['category'] == 'theory']
df_exp = df[df['category'] != 'theory']
plt.figure(figsize=(4, 6))
plt.subplot(211)  # top panel: raw eta_fd vs photon energy
plt.plot(df_theory['photon'], df_theory['eta_fd'], '.-', label='theory')
# Overlay each experimental method as separate point markers.
for k in df_exp['category'].unique():
    df_roi = df_exp[df_exp['category'] == k]
    plt.plot(df_roi['photon'], df_roi['eta_fd'], 'o', label=k)
# plt.xlabel('photon (eV)')
plt.ylabel('eta_fd (rad)')
plt.xlim(14, 19.5)
plt.ylim(4, 5)
plt.grid(True)
def center(arr):
    """Midpoints between consecutive samples of *arr* (length len(arr)-1)."""
    left, right = arr[:-1], arr[1:]
    return (left + right) / 2
def diff(arr):
    """First difference of consecutive samples of *arr* (length len(arr)-1)."""
    head, tail = arr[:-1], arr[1:]
    return tail - head
plt.subplot(212)  # bottom panel: tau_21 from the numerical derivative d(eta_fd)/d(photon)
# NOTE(review): the constants 24.1888432651 / 0.0367493 / 2 presumably convert
# rad/eV into attoseconds (atomic-unit conversion factors) -- confirm.
plt.plot(center(df_theory['photon'].values),
         diff(df_theory['eta_fd'].values) / diff(df_theory['photon'].values)
         * 24.1888432651 / 0.0367493 / 2,
         '.-', label='theory')
for k in df_exp['category'].unique():
    df_roi = df_exp[df_exp['category'] == k]
    plt.plot(center(df_roi['photon'].values),
             diff(df_roi['eta_fd'].values) / diff(df_roi['photon'].values)
             * 24.1888432651 / 0.0367493 / 2,
             'o', label=k)
plt.xlabel('photon (eV)')
plt.ylabel('tau_21 (as)')
plt.xlim(14, 19.5)
plt.ylim(-80, -10)
plt.grid(True)
plt.tight_layout()
plt.legend()
plt.show()
|
from .WebHelper import WebHelper
from .models import Query, Counter
def create_counters():
    """Create and persist one Counter per stored Query.

    For each query, the current hit count is fetched from the web via
    WebHelper and saved as a new Counter row linked to that query.
    """
    for query in Query.objects.all():
        helper = WebHelper(query.phrase, query.region)
        counter = Counter()
        counter.count = helper.get_count()
        counter.query = query
        counter.save()
    print('counters are saved')
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import json
import os.path as op
# ----------------------------------------------------------------------
# Script that plots the result figures from the training of a model.
# ----------------------------------------------------------------------
# --- Parameters --- #
model_name = 'RChimps3T60'
learning = 'training'  # 'training' or 'transfer_learning'
folds = 5  # number of cross-validation folds (None -> single evaluation csv)
cohort = 'Chimps3T'  # full cohort
cohort_train = 'Chimps3T60'  # cohort the model was trained on
cohort_eval = 'Chimps3T60TestSet'  # cohort used for evaluation
hemi = 'R'  # hemisphere: 'R' or 'L'
remove = True  # drop the 'unknown' and 'ventricle*' labels from the plots
path = '/host/nfs/neurospin/dico/pauriau/data'  # working path
figures = [1, 2, 3, 4, 5]  # which figures to draw
save = False  # save the figures under path_to_save_figures
path_to_save_figures = ''
# --- Data Import --- #
cohort_name = 'cohort-' + cohort + '_hemi-' + hemi
cohort_train_name = 'cohort-' + cohort_train + '_hemi-' + hemi
cohort_eval_name = 'cohort-' + cohort_eval + '_hemi-' + hemi
# csv file(s): with cross-validation, average the per-fold evaluations row-wise.
if folds is not None:
    fold_evals = []
    for cv in range(folds):
        csv_file = op.join(path, learning, 'evaluations', model_name, model_name + '_cv' + str(cv), cohort_eval_name + '.csv')
        # NOTE: local renamed from `eval`, which shadowed the builtin.
        fold_evals.append(pd.read_csv(csv_file))
    big_eval = pd.concat(fold_evals)
    evaluation = big_eval.groupby(big_eval.index).mean()
else:
    csv_file = op.join(path, learning, 'evaluations', model_name, cohort_eval_name + '.csv')
    evaluation = pd.read_csv(csv_file)
# json files -- BUGFIX: use context managers; json.load(open(...)) leaked file handles.
#data file
data_file = op.join(path, learning, 'data', cohort_train_name + '.json')
with open(data_file, 'r') as f:
    data = json.load(f)
#result file
result_file = op.join(path, learning, 'results', model_name + '.json')
with open(result_file, 'r') as f:
    result = json.load(f)
#cohort file
cohort_file = op.join(path, learning, 'data', cohort_name + '.json')
with open(cohort_file, 'r') as f:
    cohort_data = json.load(f)
# --- Compute Results --- #
### DATA FILE ###
sulci_side_list = data['sulci_side_list']
### COHORT FILE ###
full_sulci_side_list = cohort_data['sulci_side_list']
full_dict_names = cohort_data['dict_names']
# For every sulcus: its size per subject and the number of subjects it appears in.
dico_cohort = {s: {} for s in full_sulci_side_list}
for sulci in full_sulci_side_list:
    dico_cohort[sulci]['sizes'] = [n.count(sulci) for n in full_dict_names.values() if sulci in n]
    dico_cohort[sulci]['occurrences'] = np.sum([1 for n in full_dict_names.values() if sulci in n])
# Sort sulci by decreasing mean size so the bar plots are consistently ordered.
sorted_sulci_side_list = sorted(sulci_side_list, key=lambda x: np.mean(dico_cohort[x]['sizes']), reverse=True)
if remove:
    sorted_sulci_side_list.remove('unknown')
    # BUGFIX: the original removed items while iterating the same list,
    # which skips the element after each removal; filter into a new list.
    sorted_sulci_side_list = [s for s in sorted_sulci_side_list if not s.startswith('ventricle')]
### CSV FILE ###
# Aggregate the per-subject evaluation metrics per sulcus.
dico_eval = {s: {} for s in sulci_side_list}
for sulci in sulci_side_list:
    dico_eval[sulci]['accuracy'] = evaluation['acc_' + sulci].mean()
    dico_eval[sulci]['sensitivity'] = evaluation['sens_' + sulci].mean()
    dico_eval[sulci]['specificity'] = evaluation['spec_' + sulci].mean()
    dico_eval[sulci]['balanced_accuracy'] = evaluation['bacc_' + sulci].mean()
    dico_eval[sulci]['esi'] = evaluation['ESI_' + sulci].mean()
    # Full per-subject series kept (unaggregated) for the boxplot in figure 5.
    dico_eval[sulci]['elocal'] = evaluation['Elocal_' + sulci]
    dico_eval[sulci]['elocal_mean'] = evaluation['Elocal_' + sulci].mean()
    dico_eval[sulci]['elocal_max'] = evaluation['Elocal_' + sulci].max()
    # Intersection-over-union derived from the confusion-matrix counts.
    dico_eval[sulci]['iou'] = (evaluation['TP_' + sulci] / (evaluation['TP_' + sulci] + evaluation['FN_' + sulci] + evaluation['FP_' + sulci])).mean()
### RESULT FILE ###
# Per-fold training curves and best-epoch statistics.
epoch_loss_train = result['epoch_loss_train']
epoch_loss_val = result['epoch_loss_val']
epoch_acc_train = result['epoch_acc_train']
epoch_acc_val = result['epoch_acc_val']
best_epoch = result['best_epoch']
best_acc = result['best_acc']
# ---- Print Results ------ #
print('Nombre de sillons dans la cohorte : ', len(full_sulci_side_list))
print("Nombre de sillons dans la cohorte d'entraînement : ", len(sulci_side_list))
# ESI is reported here as 1 - accuracy.
print('Average ESI on : ')
print('\t-Train Set: ', 1 - np.mean([np.max(result['epoch_acc_train'][i]) for i in range(folds)]))
print('\t-Validation Set : ', 1 - np.mean(result['best_acc']))
print('\t-Test Set : ', np.mean(evaluation['ESI']))
print('Average best epoch : ', np.mean(result['best_epoch']))
# --- Figures --- #
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
plt.rc('legend', fontsize='x-small', title_fontsize='x-small')
plt.rc('figure', titlesize='small')
plt.rc('axes', titlesize='small')
colors = ['blue', 'orange', 'green', 'red', 'purple']  # one colour per CV fold
# n = number of suffix characters to strip from sulcus names in tick labels
# (hemisphere suffix, e.g. '_right' -> 6 chars, '_left' -> 5 chars).
if hemi == 'R':
    n = 6
elif hemi == 'L':
    n = 5
else:
    # BUGFIX: `n` was silently left undefined for any other hemi value,
    # producing a confusing NameError much later in the plotting code.
    raise ValueError("hemi must be 'R' or 'L', got %r" % hemi)
# Helper that annotates each horizontal bar with a value at the bar's end.
def addlabels(y, x, data):
    """Write data[i] next to bar i (row y[i], bar length x[i]); NaNs are skipped.

    Bars close to the longest one get their label drawn on a white box so
    it stays readable over the bar itself.
    """
    x_max = np.max(x)
    for pos, length, value in zip(y, x, data):
        if np.isnan(value):
            continue
        label = round(value, 3)
        if length > x_max - 0.1:
            plt.text(length + 0.01, pos - 0.3, label, ha='left', fontsize='xx-small', color='black',
                     bbox=dict(boxstyle="square, pad=0.5", color='white'))
        else:
            plt.text(length + 0.01, pos - 0.3, label, ha='left', fontsize='xx-small', color='black')
if 1 in figures:
    # Figure 1: training/validation loss curves, one colour per CV fold.
    plt.figure(1, figsize=(4, 4))
    plt.title('Training of Model:' + model_name + '\nOn Cohort:' + cohort_eval + ' | Hemi: ' + hemi)
    for cv in range(len(epoch_loss_train)):
        plt.plot(epoch_loss_train[cv], linestyle='--', alpha=0.7, color=colors[cv])
        plt.plot(epoch_loss_val[cv], linestyle='-', alpha=0.7, color=colors[cv])
    # Proxy artists so the legend shows the two line styles rather than folds.
    a, = plt.plot([0], [0], color='black', linestyle='--', label='train set')
    b, = plt.plot([0], [0], color='black', linestyle='-', label='val set')
    plt.legend(handles=[a, b])
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.subplots_adjust(left=0.125, right=0.95, bottom=0.12, top=0.92)
    if save:
        plt.savefig(op.join(path_to_save_figures, model_name + '_loss.png'))
if 2 in figures:
    # Figure 2: training/validation accuracy curves, one colour per CV fold.
    plt.figure(2, figsize=(4, 4))
    plt.title('Training of Model:' + model_name + '\nOn Cohort:' + cohort_eval + ' | Hemi: ' + hemi)
    for cv in range(len(epoch_loss_train)):
        plt.plot(epoch_acc_train[cv], linestyle='--', alpha=0.8, color=colors[cv])
        plt.plot(epoch_acc_val[cv], linestyle='-', alpha=0.7, color=colors[cv])
    # Proxy artists so the legend shows the two line styles rather than folds.
    a, = plt.plot([0], [0], color='black', linestyle='--', label='train set')
    b, = plt.plot([0], [0], color='black', linestyle='-', label='val set')
    plt.legend(handles=[a, b])
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.subplots_adjust(left=0.125, right=0.95, bottom=0.12, top=0.92)
    if save:
        plt.savefig(op.join(path_to_save_figures, model_name + '_acc.png'))
if 3 in figures:
    # Figure 3: mean local error per sulcus as horizontal bars, annotated
    # with the standard deviation across subjects.
    plt.figure(3, figsize=(4, 6))
    plt.title('Evaluation of Model: ' + model_name + '\nOn Cohort:' + cohort_eval + '/ Hemi: ' + hemi)
    plt.barh(y=range(len(sorted_sulci_side_list)), width=[dico_eval[s]['elocal_mean'] for s in sorted_sulci_side_list], height=0.8, align='center')
    xerr = [np.std(dico_eval[s]['elocal']) for s in sorted_sulci_side_list]
    # Strip the n-character hemisphere suffix from each sulcus label.
    plt.yticks(range(len(sorted_sulci_side_list)), [s[:-n] for s in sorted_sulci_side_list], fontsize='x-small')
    plt.xlabel('$E_{local}^{mean}$')
    addlabels(range(len(sorted_sulci_side_list)), [dico_eval[s]['elocal_mean'] for s in sorted_sulci_side_list], xerr)
    plt.subplots_adjust(left=0.25, right=0.96, bottom=0.07, top=0.92)
    if save:
        plt.savefig(op.join(path_to_save_figures, model_name + '_elocal_mean.png'))
if 4 in figures:
    # Figure 4: worst-case (max) local error per sulcus as horizontal bars.
    plt.figure(4, figsize=(4, 6))
    plt.title('Evaluation of Model: ' + model_name + '\nOn Cohort:' + cohort_eval + '/ Hemi: ' + hemi)
    plt.barh(y=range(len(sorted_sulci_side_list)), width=[dico_eval[s]['elocal_max'] for s in sorted_sulci_side_list], height=0.8, align='center')
    # Strip the n-character hemisphere suffix from each sulcus label.
    plt.yticks(range(len(sorted_sulci_side_list)), [s[:-n] for s in sorted_sulci_side_list], fontsize='x-small')
    plt.subplots_adjust(left=0.28, right=0.92, bottom=0.08, top=0.92)
    plt.xlabel('$E_{local}^{max}$')
    if save:
        plt.savefig(op.join(path_to_save_figures, model_name + '_elocal_max.png'))
if 5 in figures:
    # Figure 5: distribution of the per-subject local error per sulcus
    # (horizontal boxplots, outliers hidden, orange median line).
    plt.figure(5, figsize=(4, 6))
    plt.title('Evaluation of Model: ' + model_name + '\nOn Cohort:' + cohort_eval + '/ Hemi: ' + hemi)
    plt.boxplot([dico_eval[sulci]['elocal'] for sulci in sorted_sulci_side_list], vert=False,
                showfliers=False, notch=False, showmeans=False, meanprops={}, medianprops={'color': 'orange', 'linewidth': 2},
                boxprops={'color': 'blue'}, capprops={'color': 'blue'}, whiskerprops={'color': 'blue'},
                positions=range(len(sorted_sulci_side_list)))
    # Strip the n-character hemisphere suffix from each sulcus label.
    plt.yticks(range(len(sorted_sulci_side_list)), [s[:-n] for s in sorted_sulci_side_list], fontsize='x-small')
    plt.xlabel('$E_{local}$')
    plt.subplots_adjust(left=0.25, right=0.98, bottom=0.07, top=0.92)
    if save:
        plt.savefig(op.join(path_to_save_figures, model_name + '_elocal.png'))
plt.show()
|
from os import listdir, getcwd
from os.path import isfile, join
import json
def merge_file():
    """Merge every JSON list under service_workers/data/ into result.json.

    Each file in the data directory is expected to contain a JSON array;
    the arrays are concatenated and written out as one array.
    """
    c_dir = join(getcwd(), 'service_workers', 'data')
    data = []
    for filename in listdir(c_dir):
        # BUGFIX: the loop previously opened a mangled hard-coded path
        # instead of the current `filename`.
        with open(join(c_dir, filename), 'r') as fp:
            data += json.load(fp)
    # BUGFIX: sorted() returns a new list; the original discarded the result.
    # NOTE(review): key=id sorts by object identity, which is meaningless --
    # presumably a record field was intended; kept for compatibility. TODO.
    data = sorted(data, key=id)
    # BUGFIX: write mode -- appending ('a') produced invalid JSON on re-runs.
    with open(join(getcwd(), 'service_workers', 'result.json'), 'w') as fp:
        json.dump(data, fp)
merge_file()
# Sanity check: re-read the merged file and report how many records it holds.
with open(getcwd() + f'/service_workers/result.json', 'r') as fp:
    data = json.load(fp)
print(len(data))
|
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from model import Todo
app = FastAPI()
from database import(
fetchOneTodo,
fetchAllTodos,
createTodo,
updateTodo,
removeTodo,
)
# Front-end dev servers allowed to call this API cross-origin.
origins = ['http://localhost:3000', 'http://localhost:4000']
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials= True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.get("/")
def read_root():
    """Welcome / health-check endpoint."""
    payload = {"Hello": "Kiki"}
    return payload
@app.get("/api/todo")
async def get_todo():
    """Return every stored todo."""
    return await fetchAllTodos()
@app.get("/api/todo/{title}", response_model=Todo)
async def get_todo_by_id(title):
    """Fetch a single todo by title; 404 when no such item exists."""
    todo = await fetchOneTodo(title)
    if not todo:
        raise HTTPException(404,f"No Todo Item by that {title}")
    return todo
# Create
@app.post("/api/todo/", response_model=Todo)
async def post_todo(todo:Todo):
    """Create a new todo from the request body."""
    created = await createTodo(todo.dict())
    if not created:
        raise HTTPException(400,"Something went wrong. Bad Request")
    return created
# Update
@app.put("/api/todo/{title}/", response_model=Todo)
async def put_todo(title:str, desc:str):
    """Update the description of the todo identified by title; 404 when absent."""
    updated = await updateTodo(title, desc)
    if not updated:
        raise HTTPException(404, f"No Todo Item by that {title}")
    return updated
# Delete
@app.delete("/api/todo/{title}")
async def delete_todo(title):
    """Delete the todo identified by title; 404 when absent."""
    removed = await removeTodo(title)
    if not removed:
        raise HTTPException(404, f"There is no TODO item with title {title}")
    return "Deleted todo item"
'''
Can run the server with command below
uvicorn main:app --reload --port 4000
'''
|
import RPi.GPIO as GPIO
import time
# Hardware setup: physical pin 12 (BOARD numbering) drives a servo with
# 50 Hz PWM; 7.5% duty cycle starts it at the neutral position.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(12, GPIO.OUT)
p = GPIO.PWM(12, 50)
p.start(7.5)
class DistanceSensor:
    """Container for the servo-motion and corner-tracking helpers.

    NOTE(review): the original body did not compile (C-style ``!``, ``=``
    used in comparisons, missing colons, methods without ``self``).  This
    is a minimal syntactic repair that preserves the apparent intent;
    ``leftServo``, ``rightServo``, ``front``, ``minDist`` and ``radius``
    are still undefined at runtime and must be wired up -- TODO.
    """

    class servoMotion:
        """Tracks the heading (theta) and travelled distance of the robot."""

        def __init__(self, angle, distance):
            # NOTE(review): the original ignored the constructor arguments
            # and always started at zero; behaviour kept as-is.
            self.angle = 0
            self.distance = 0
            self.theta = 0  # was a discarded local; promoted so turn*() can use it

        def wallFinder(self):
            # Drive forward until a wall is found.
            self.goFront()

        def goFront(self, degree=0):
            # front / minDist / radius come from the distance sensor -- TODO wire up.
            if front != minDist:
                self.distance = degree * radius

        def turnRight(self):
            leftServo.write(-270)
            rightServo.write(270)
            self.theta = self.theta - 90

        def turnLeft(self):
            leftServo.write(270)
            rightServo.write(-270)
            self.theta = self.theta + 90

        def goBack(self):
            leftServo.write(360)
            rightServo.write(360)
            time.sleep(3)  # original called sleep(3000); presumably milliseconds -- TODO confirm

        def keepRight(self):
            if not self.goFront():
                self.turnRight()

        def keepLeft(self):
            if not self.keepRight():
                self.turnLeft()

    class points:
        """Tracks the (x, y) position of detected corners."""

        def __init__(self, x, y):
            # NOTE(review): the original ignored the x/y arguments; kept as-is.
            self.x = 0
            self.y = 0
            # NOTE(review): servoMotion has no class-level `angle` value; an
            # instance should be injected here instead -- TODO.
            self.angle = DistanceSensor.servoMotion.angle

        def markCorner(self):
            """Advance the tracked corner coordinate along the current heading.

            NOTE(review): the original used `=` instead of `==` in every
            comparison; rewritten to compile while keeping the apparent
            intent.  TODO: `step` should be the distance travelled, not the
            goFront method object.
            """
            step = DistanceSensor.servoMotion.goFront
            if self.angle == 0 or self.angle == 360 or self.angle == -360:
                self.x = self.x + step
            elif self.angle == 90 or self.angle == -270:
                self.y = self.y + step
            elif self.angle == -90 or self.angle == 270:
                self.y = self.y - step
            elif self.angle == 180 or self.angle == -180:
                self.x = self.x - step

        def cornerFind(self):
            """Hug the wall and mark a corner when a turn happens.

            NOTE(review): the original truth-tested bound methods; kept as a
            compiling placeholder -- TODO implement real turn detection.
            """
            motion = DistanceSensor.servoMotion(0, 0)
            motion.wallFinder()
            if motion.turnRight or motion.turnLeft:
                self.markCorner()
# NOTE(review): duplicated hardware setup -- this repeats the block at the
# top of the file verbatim (same pin, same PWM settings); it looks like two
# scripts were pasted together. Keep only one copy when refactoring.
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
GPIO.setup(12, GPIO.OUT)
p = GPIO.PWM(12, 50)
p.start(7.5)
class servo:
    """Differential-drive robot control via two continuous-rotation servos.

    NOTE(review): the original body did not compile (an int followed by a
    string literal, C-style ``!``, undefined names).  Minimal syntactic
    repair; ``leftServo``, ``rightServo`` and ``front`` must still be
    provided -- TODO.
    """

    minDist = 50   # mm -- distance threshold to a wall
    radius = 37    # mm -- wheel radius
    x = 0
    y = 0
    angle = 0
    distance = 0

    def __init__(self, degree):
        # NOTE(review): the original ignored `degree`; behaviour kept as-is.
        self.currentPos = 0

    def rotateServo(self, degree):
        """Move the servo to `degree` and remember the commanded position."""
        self.currentPos = degree
        # BUGFIX: the original concatenated str + int, raising TypeError.
        print("current servo position " + str(self.currentPos))

    def wallFinder(self):
        # Drive forward until a wall is found.
        self.goFront()

    def goFront(self, degree=0):
        # `front` is the forward distance reading -- TODO wire to the sensor.
        if front != servo.minDist:
            self.distance = degree * servo.radius

    def turnRight(self):
        leftServo.write(-270)
        rightServo.write(270)

    def turnLeft(self):
        leftServo.write(270)
        rightServo.write(-270)

    def goBack(self):
        leftServo.write(360)
        rightServo.write(360)
        time.sleep(3)  # original: sleep(3000), presumably milliseconds -- TODO confirm

    def keepRight(self):
        if not self.goFront():
            self.turnRight()

    def keepLeft(self):
        if not self.keepRight():
            self.turnLeft()

    def cornerFind(self):
        self.wallFinder()
        # TODO: the original tested an undefined `condition` here.
|
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
import pandas as pd
import sklearn as sk
from sklearn import decomposition
from ICA_noise import FastICA
mydata = pd.read_csv("/Users/tangyehui/UG_Project/dataset/personality/UKDA-7656-tab/tab/bbc_individual_level_data_file.tab", sep ="\t")
# BUGFIX: DataFrame.as_matrix() was deprecated and removed in pandas 1.0;
# .values is the documented replacement and also works on older versions.
Y = mydata.values
# Columns 34:78 hold the 44 personality-item responses (see `labs` for labels).
X = Y[:, 34:78]
# Fit a 5-component factor-analysis model to the item responses.
fa = sk.decomposition.FactorAnalysis(n_components=5)
fa.fit(X)
print(np.shape(X))
La = np.abs(fa.components_)
# The 44 BFI (Big Five Inventory) item statements in questionnaire order;
# used as row labels for the loading plots below.
labs = [
    "Is talkative", "Tends to find fault with others",
    "Does a thorough job",
    "Is depressed, blue",
    "Is original, comes up with new ideas",
    "Is reserved",
    "Is helpful and unselfish with others",
    "Can be somewhat careless",
    "Is relaxed, handles stress well",
    "Is curious about many different things",
    "Is full of energy",
    "Starts quarrels with others",
    "Is a reliable worker",
    "Can be tense",
    "Is ingenious, a deep thinker",
    "Generates a lot of enthusiasm",
    "Has a forgiving nature",
    "Tends to be disorganized",
    "Worries a lot",
    "Has an active imagination",
    "Tends to be quiet",
    "Is generally trusting",
    "Tends to be lazy",
    "Is emotionally stable, not easily upset",
    "Is inventive",
    "Has an assertive personality",
    "Can be cold and aloof",
    "Perseveres until the task is finished",
    "Can be moody",
    "Values artistic, aesthetic experiences",
    "Is sometimes shy, inhibited",
    "Is considerate and kind to almost everyone",
    "Does things efficiently",
    "Remains calm in tense situations",
    "Prefers work that is routine",
    "Is outgoing, sociable",
    "Is sometimes rude to others",
    "Makes plans and follows through with them",
    "Gets nervous easily",
    "Likes to reflect, play with ideas",
    "Has few artistic interests",
    "Likes to cooperate with others",
    "Is easily distracted",
    "Is sophisticated in art, music, or literature"
]
# Sanity check: should print 44, matching the X[:, 34:78] column slice.
print(len(labs))
# print(np.transpose([labs, fa.noise_variance_, np.sum(La**2,0)]))
# ica = sk.decomposition.FastICA(n_components=5)
# ica.fit(X)
# om = ica.components_
#
# Noise-aware ICA variant with 5 components, fitted on the same item
# responses to compare against the factor-analysis loadings.
ica2 = FastICA(n_components=5)
ica2.fit(X)
om2 = ica2.components_
# plt.figure(figsize=(2.5,2.25))
# ii = np.array([1,6,11,16,21,26,31,36,41])
#
# plt.subplot(1,2,1)
# plt.pcolor(La[:,ii].T,cmap='bwr')
# li = [labs[i] for i in ii]
# plt.yticks(0.5+np.arange(0,len(li)), li, size='small')
# plt.xticks(0.5+np.arange(0,5),np.arange(1,6))
# plt.xlabel('Factor')
#
# plt.subplot(1, 2, 2)
# plt.pcolor(om2[:, ii].T, cmap='bwr')
# plt.yticks(0.5+np.arange(0, len(li)), [], size='small')
# plt.xticks(0.5+np.arange(0, 5), np.arange(1, 6))
# plt.xlabel('IC')
# plt.savefig('/Users/tangyehui/UG_Project/figures/factors_agree.pdf', format='pdf', bbox_inches='tight', pad_inches=0.1)
# Side-by-side heatmaps for the item subset at indices 3,8,...,38:
# left panel shows factor-analysis loadings, right panel IC weights.
plt.figure(figsize=(2.5,2.25))
ii = np.array([3,8,13,18,23,28,33,38])
plt.subplot(1,2,1)
plt.pcolor(La[:,ii].T,cmap='bwr')
li = [labs[i] for i in ii]
plt.yticks(0.5+np.arange(0,len(li)), li, size='small')
plt.xticks(0.5+np.arange(0,5),np.arange(1,6))
plt.xlabel('Factor')
plt.subplot(1, 2, 2)
plt.pcolor(om2[:, ii].T, cmap='bwr')
plt.yticks(0.5+np.arange(0, len(li)), [], size='small')  # labels shared with the left panel
plt.xticks(0.5+np.arange(0, 5), np.arange(1, 6))
plt.xlabel('IC')
plt.savefig('/Users/tangyehui/UG_Project/figures/factors_neuro1.pdf', format='pdf', bbox_inches='tight', pad_inches=0.1)
# plt.figure(figsize=(2.5,2.25))
# ii = np.array([0,5,10,15,20,25,30,35])
# plt.subplot(1,2,1)
# plt.pcolor(La[:,ii].T,cmap='bwr')
# li = [labs[i] for i in ii]
# plt.yticks(0.5+np.arange(0,len(li)), li, size='small')
# plt.xticks(0.5+np.arange(0,5),np.arange(1,6))
# plt.xlabel('Factor')
#
# plt.subplot(1, 2, 2)
# plt.pcolor(om2[:, ii].T, cmap='bwr')
# plt.yticks(0.5+np.arange(0, len(li)), [], size='small')
# plt.xticks(0.5+np.arange(0, 5), np.arange(1, 6))
# plt.xlabel('IC')
# plt.savefig('/Users/tangyehui/UG_Project/figures/factors_extra.pdf', format='pdf', bbox_inches='tight', pad_inches=0.1)
# plt.figure(figsize=(2.5,2.25))
# ii = np.array([4,9,14,19,24,29,34,39,43])
#
# s = 0
# for i in ii:
# s += fa.noise_variance_[i]
# print(s/9)
# plt.subplot(1,2,1)
# plt.pcolor(La[:,ii].T,cmap='bwr')
# li = [labs[i] for i in ii]
# plt.yticks(0.5+np.arange(0,len(li)), li, size='small')
# plt.xticks(0.5+np.arange(0,5),np.arange(1,6))
# plt.xlabel('Factor')
#
# plt.subplot(1, 2, 2)
# plt.pcolor(om2[:, ii].T, cmap='bwr')
# plt.yticks(0.5+np.arange(0, len(li)), [], size='small')
# plt.xticks(0.5+np.arange(0, 5), np.arange(1, 6))
# plt.xlabel('IC')
# plt.savefig('/Users/tangyehui/UG_Project/figures/factors_open.pdf', format='pdf', bbox_inches='tight', pad_inches=0.1)
# plt.figure(figsize=(2.5,2.25))
# ii = np.array([2,7,12,17,22,27,32,37,42])
#
# s = 0
# for i in ii:
# s += fa.noise_variance_[i]
# print(s/9)
# plt.subplot(1,2,1)
# plt.pcolor(La[:,ii].T,cmap='bwr')
# li = [labs[i] for i in ii]
# plt.yticks(0.5+np.arange(0,len(li)), li, size='small')
# plt.xticks(0.5+np.arange(0,5),np.arange(1,6))
# plt.xlabel('Factor')
#
# plt.subplot(1, 2, 2)
# plt.pcolor(om2[:, ii].T, cmap='bwr')
# plt.yticks(0.5+np.arange(0, len(li)), [], size='small')
# plt.xticks(0.5+np.arange(0, 5), np.arange(1, 6))
# plt.xlabel('IC')
# plt.savefig('/Users/tangyehui/UG_Project/figures/factors_conc1.pdf', format='pdf', bbox_inches='tight', pad_inches=0.1)
|
from django.apps import AppConfig


class ArwebConfig(AppConfig):
    """Django application configuration for the Arweb app."""
    name = 'Arweb'
|
from tortoise import Model, fields
class TimestampMixin:
    """Adds automatic creation/update timestamps to a model."""
    created_at = fields.DatetimeField(auto_now_add=True)  # set once on insert
    updated_at = fields.DatetimeField(auto_now=True)  # refreshed on every save
class AbstractBaseModel(Model):
    """Abstract base model with an integer surrogate primary key."""
    id = fields.IntField(pk=True)

    class Meta:
        # Abstract: no table is created for this model itself.
        abstract = True
class User(AbstractBaseModel, TimestampMixin):
    """A Telegram user of the bot."""
    telegram_id = fields.IntField(unique=True)  # Telegram's numeric user id
    # NOTE(review): duplicates TimestampMixin.created_at -- probably redundant.
    date_created = fields.DatetimeField(auto_now_add=True)
class EnglishWord(AbstractBaseModel, TimestampMixin):
    """An English vocabulary word, stored once and shared between users."""
    word = fields.CharField(max_length=120, unique=True)
class UserWord(AbstractBaseModel, TimestampMixin):
    """Link between a user and one of their translation pairs."""
    # NOTE(review): related_name='users' reads backwards for a reverse
    # accessor on User -- confirm whether 'words' was intended.
    user = fields.ForeignKeyField('models.User', on_delete=fields.CASCADE, related_name='users')
    pair_words = fields.ForeignKeyField("models.Translation", on_delete=fields.CASCADE)
class Translation(AbstractBaseModel, TimestampMixin):
    """A pairing of an English word with one of its Russian translations."""
    english_word = fields.ForeignKeyField("models.EnglishWord", on_delete=fields.CASCADE)
    russian_word = fields.ForeignKeyField("models.TranslationEnglishWord", on_delete=fields.CASCADE)
class TranslationEnglishWord(AbstractBaseModel, TimestampMixin):
    """Russian translation text for English words."""
    # NOTE(review): redundant -- AbstractBaseModel already declares this pk.
    id = fields.IntField(pk=True)
    russian_translation = fields.CharField(max_length=120, unique=True)
__all__ = ['User', 'EnglishWord', 'UserWord', 'TranslationEnglishWord']
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-08 10:00
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated migration: redefine annonce.date_debut / date_fin as
    DateTimeField (verbose names are French UI labels: start/end dates)."""

    dependencies = [
        ('annonces', '0002_auto_20160308_1056'),
    ]

    operations = [
        migrations.AlterField(
            model_name='annonce',
            name='date_debut',
            field=models.DateTimeField(verbose_name='Date de début'),
        ),
        migrations.AlterField(
            model_name='annonce',
            name='date_fin',
            field=models.DateTimeField(verbose_name='Date de fin'),
        ),
    ]
|
from googletrans import Translator
import eel
def translate(text, dest, src):
    """Translate `text` from language `src` to `dest` via googletrans and
    push the translated string to the UI through eel's view_log_js callback."""
    result = Translator().translate(text, dest=dest, src=src)
    eel.view_log_js(result.text)
|
import copy
import os
from operator import itemgetter
#pos1 = copy.deepcopy(pos) # copier une liste d'objets
def creer_sudoku() :
    """Build an empty 9x9 grid where every cell holds its own candidate
    list [1..9].  Each cell gets a distinct list object so that removing
    a candidate from one cell never affects another.

    BUGFIX/cleanup: the original filled a large literal grid and then
    immediately overwrote every cell -- the literal was dead code.
    """
    return [[list(range(1, 10)) for _y in range(9)] for _x in range(9)]
def print_sudoku(sudoku) :
    """Pretty-print the grid: solved cells show their singleton candidate
    list, unsolved cells show "[ ]"; a ruled separator is printed after
    every third row to mark the 3x3 bands."""
    for y in range(0,8+1) :
        print(" ",format_case(sudoku[0][y]),format_case(sudoku[1][y]),format_case(sudoku[2][y])," ",format_case(sudoku[3][y]),format_case(sudoku[4][y]),format_case(sudoku[5][y])," ",format_case(sudoku[6][y]),format_case(sudoku[7][y]),format_case(sudoku[8][y]))
        if y == 2 or y == 5 or y == 8 :
            print(" ")
            print("===========================================")
            print(" ")
def format_case(table) :
    """Render one cell for printing: "[ ]" when several candidates remain,
    the singleton list itself when solved (an empty cell yields None)."""
    nb = len(table)
    if nb == 1 :
        return table
    if nb > 1 :
        return "[ ]"
def sudoku_valide(sudoku) :
    """A grid is still viable as long as no cell has run out of candidates."""
    return all(cell != [] for colonne in sudoku for cell in colonne)
def ligne(sudoku) :
    """Basic row/column elimination: whenever another cell in the same row
    (fixed y1) or column (fixed x1) is already solved, remove its value
    from this cell's candidate list.  Mutates `sudoku` in place."""
    for x1 in range(0,8+1) :
        for y1 in range(0,8+1) :
            # Scan the row y1 for solved cells other than (x1, y1).
            for x in range(0,8+1) :
                if x != x1 and len(sudoku[x][y1]) == 1 and sudoku[x1][y1].count(sudoku[x][y1][0]) == 1 :
                    sudoku[x1][y1].remove(sudoku[x][y1][0])
            # Scan the column x1 likewise.
            for y in range(0,8+1) :
                if y != y1 and len(sudoku[x1][y]) == 1 and sudoku[x1][y1].count(sudoku[x1][y][0]) == 1 :
                    sudoku[x1][y1].remove(sudoku[x1][y][0])
def range_region(a) :
    """Return the index range of the 3x3 band containing index `a`:
    0-2 -> range(0, 3), 3-5 -> range(3, 6), 6-8 -> range(6, 9).

    Simplified from an if/elif chain to integer arithmetic; unlike the
    original, inputs outside 0..8 now still yield a range instead of
    silently returning None.
    """
    debut = (a // 3) * 3
    return range(debut, debut + 3)
def region(sudoku) :
    """3x3-region elimination: whenever another cell of the same region is
    already solved, remove its value from this cell's candidate list.
    Mutates `sudoku` in place."""
    for x1 in range(0,8+1) :
        for y1 in range(0,8+1) :
            for x in range_region(x1) :
                for y in range_region(y1) :
                    if ( x != x1 or y != y1 ) and len(sudoku[x][y]) == 1 and sudoku[x1][y1].count(sudoku[x][y][0]) == 1 :
                        sudoku[x1][y1].remove(sudoku[x][y][0])
def ligne_exclusif(sudoku) :
    """"Hidden single" on rows and columns: if one of a cell's candidates
    can appear in no other cell of its row (resp. column), that candidate
    must be the cell's value.  Mutates `sudoku` in place."""
    for x1 in range(0,8+1) :
        for y1 in range(0,8+1) :
            if len(sudoku[x1][y1]) > 1 :
                # Collect candidates of (x1, y1) that no other cell of row y1 holds.
                table = []
                for k in range(0,len(sudoku[x1][y1])) :
                    a = 0
                    for x in range(0,8+1) :
                        if x != x1 and sudoku[x][y1].count(sudoku[x1][y1][k]) == 1 :
                            a = 1
                            break
                    if a == 0 :
                        table.insert( len(table), sudoku[x1][y1][k])
                # Exactly one such candidate -> it must be placed here.
                if len(table) == 1 :
                    sudoku[x1][y1] = [ table[0] ]
                # Same reasoning along column x1.
                table = []
                for k in range(0,len(sudoku[x1][y1])) :
                    a = 0
                    for y in range(0,8+1) :
                        if y != y1 and sudoku[x1][y].count(sudoku[x1][y1][k]) == 1 :
                            a = 1
                            break
                    if a == 0 :
                        table.insert( len(table), sudoku[x1][y1][k])
                if len(table) == 1 :
                    sudoku[x1][y1] = [ table[0] ]
def region_exclusif(sudoku) :
    """"Hidden single" inside each 3x3 region: a candidate of (x1, y1) that
    no other cell of the region can take must be this cell's value.
    Mutates `sudoku` in place."""
    for x1 in range(0,8+1) :
        for y1 in range(0,8+1) :
            table = []
            if len(sudoku[x1][y1]) > 1 :
                for k in range(0,len(sudoku[x1][y1])) :
                    a = 0
                    for x in range_region(x1) :
                        for y in range_region(y1) :
                            if ( x != x1 or y != y1 ) and sudoku[x][y].count(sudoku[x1][y1][k]) == 1 :
                                a = 1
                                break
                        # NOTE(review): this break exits only the inner y-loop;
                        # remaining x values are still scanned (harmless, just slower).
                    if a == 0 :
                        table.insert(len(table), sudoku[x1][y1][k])
                # Exactly one exclusive candidate -> it must be placed here.
                if len(table) == 1 :
                    sudoku[x1][y1] = [ table[0] ]
def ligne_paire_exclusif(sudoku) :
    """"Naked pair" on rows and columns: when two cells of the same line
    hold exactly the same two candidates, those two values can be removed
    from every other cell of that line.  Mutates `sudoku` in place."""
    for x1 in range(0,8+1) :
        for y1 in range(0,8+1) :
            if len(sudoku[x1][y1]) == 2 :
                # Look for a second cell in row y1 with the identical pair.
                pair = False
                for x in range(0,8+1) :
                    if x != x1 and len(sudoku[x][y1]) == 2 and sudoku[x][y1].count(sudoku[x1][y1][0]) == 1 and sudoku[x][y1].count(sudoku[x1][y1][1]) == 1 :
                        pair = True
                        pair2x = x
                        break
                if pair == True :
                    # Strip both paired values from the rest of the row.
                    for x in range(0,8+1) :
                        if x != x1 and x != pair2x and len(sudoku[x][y1]) > 1 :
                            if sudoku[x][y1].count(sudoku[x1][y1][0]) == 1 and len(sudoku[x][y1]) > 1 :
                                sudoku[x][y1].remove(sudoku[x1][y1][0])
                            if sudoku[x][y1].count(sudoku[x1][y1][1]) == 1 and len(sudoku[x][y1]) > 1 :
                                sudoku[x][y1].remove(sudoku[x1][y1][1])
                # Same search along column x1.
                pair = False
                for y in range(0,8+1) :
                    if y != y1 and len(sudoku[x1][y]) == 2 and sudoku[x1][y].count(sudoku[x1][y1][0]) == 1 and sudoku[x1][y].count(sudoku[x1][y1][1]) == 1 :
                        pair = True
                        pair2y = y
                        break
                if pair == True :
                    for y in range(0,8+1) :
                        if y != y1 and y != pair2y and len(sudoku[x1][y]) > 1 :
                            if sudoku[x1][y].count(sudoku[x1][y1][0]) == 1 and len(sudoku[x1][y]) > 1 :
                                sudoku[x1][y].remove(sudoku[x1][y1][0])
                            if sudoku[x1][y].count(sudoku[x1][y1][1]) == 1 and len(sudoku[x1][y]) > 1 :
                                sudoku[x1][y].remove(sudoku[x1][y1][1])
def region_paire_exclusif(sudoku) :
    """Naked-pair elimination inside each 3x3 region: when two cells of a
    region hold the identical two candidates, those two values are removed
    from every other multi-candidate cell of the region."""
    for x1 in range(0,8+1) :
        for y1 in range(0,8+1) :
            #table = []
            #pair = False
            if len(sudoku[x1][y1]) == 2 :
                pair = False
                # Look for another cell of the same region holding exactly
                # the same two candidates.
                for x in range_region(x1) :
                    for y in range_region(y1) :
                        if ( x != x1 or y != y1 ) and len(sudoku[x][y]) == 2 and sudoku[x][y].count(sudoku[x1][y1][0]) == 1 and sudoku[x][y].count(sudoku[x1][y1][1]) == 1 :
                            pair = True
                            pair2x = x
                            pair2y = y
                            break
                    # NOTE(review): this `break` only leaves the inner y loop; if a
                    # third cell held the same pair, pair2x/pair2y would end up on
                    # the last match (such a grid is contradictory anyway).
                if pair == True :
                    # Strip both pair values from every other cell of the region,
                    # keeping the two paired cells themselves untouched.
                    for x in range_region(x1) :
                        for y in range_region(y1) :
                            if ( x != x1 or y != y1 ) and ( x != pair2x or y != pair2y ) and len(sudoku[x][y]) > 1 :
                                if sudoku[x][y].count(sudoku[x1][y1][0]) == 1 and len(sudoku[x][y]) > 1 :
                                    sudoku[x][y].remove(sudoku[x1][y1][0])
                                if sudoku[x][y].count(sudoku[x1][y1][1]) == 1 and len(sudoku[x][y]) > 1 :
                                    sudoku[x][y].remove(sudoku[x1][y1][1])
# Layout of table_choix: a list of entries [x, y, len(candidates), candidates], e.g.
# table_choix = [ [x1,y1,len(table1),table1] , [x2,y2,len(table2),table2] , ... ]
# table_choix[0]       -> first entry [x1, y1, len(table1), table1]
# table_choix[0][0]    -> x1 (first grid index)
# table_choix[0][1]    -> y1 (second grid index)
# table_choix[0][2]    -> len(table1), the number of candidates
# table_choix[0][3]    -> table1 = [a1, a2, a3, a4], the candidate values
# table_choix[0][3][0] -> a1, the first candidate
# Reminder: list.append(obj) appends the object obj to the list.
def creer_table_choix(sudoku):
    """Collect every unresolved cell as [x, y, candidate_count, candidates],
    sorted so cells with the fewest candidates come first — the cheapest
    guesses for the trial-and-error solver."""
    choices = [
        [x, y, len(sudoku[x][y]), sudoku[x][y]]
        for x in range(9)
        for y in range(9)
        if len(sudoku[x][y]) > 1
    ]
    return sorted(choices, key=itemgetter(2))
def choix_multiples(sudoku) :
    """Trial-and-error fallback: for each unresolved cell (fewest
    candidates first), tentatively fix each candidate on a deep copy of
    the grid and recurse through resoudre().  Returns the solved grid as
    soon as one guess succeeds, or False when every guess dead-ends."""
    result = False
    for entry in creer_table_choix(sudoku):
        x, y = entry[0], entry[1]
        for value in entry[3]:
            # Guess on a copy so a failed branch never corrupts the grid.
            trial = copy.deepcopy(sudoku)
            trial[x][y] = [value]
            result = resoudre(trial)
            if result != False:
                return result
    return result
def sudoku_finis(sudoku) :
    """Return True when the grid is completely solved.

    A grid counts as finished exactly when every cell has one single
    candidate left; a cell with zero or several candidates means the
    solver still has work to do (or hit a contradiction).

    Parameters:
        sudoku: 9x9 grid of candidate lists.
    """
    # Idiomatic short-circuit scan instead of nested loops with early return.
    return all(len(sudoku[x][y]) == 1 for x in range(9) for y in range(9))
def resoudre(sudo) :
    """Solve the grid by repeated constraint propagation, falling back to
    guessing when stuck.  Returns the solved 9x9 grid on success, or False
    when the grid is contradictory.  `sudo` itself is never modified."""
    sudoku = copy.deepcopy(sudo) # work on a copy so the caller's grid stays untouched
##    os.system('cls')
##    print_sudoku(sudoku)
##    os.system('pause')
    old_sudoku = []
    # Apply every deduction rule in a round-robin until the grid is solved,
    # becomes invalid, or a full pass makes no progress (fixed point).
    while sudoku_finis(sudoku) == False and sudoku_valide(sudoku) and old_sudoku != sudoku :
        old_sudoku = copy.deepcopy(sudoku)
        ligne(sudoku)
        region(sudoku)
        ligne_exclusif(sudoku)
        region_exclusif(sudoku)
        ligne_paire_exclusif(sudoku)
        region_paire_exclusif(sudoku)
        #os.system('cls')
        #print_sudoku(sudoku)
        #os.system('pause')
    # The three branches below cover exactly the three loop-exit conditions,
    # so `fin` is always bound before the return.
    if sudoku_finis(sudoku) :
        fin = sudoku          # fully solved grid
    elif sudoku_valide(sudoku) == False :
        fin = False           # contradiction: an upstream guess was wrong
    elif old_sudoku == sudoku :
        fin = choix_multiples(sudoku)  # stuck: resort to trial and error
    return fin
def string_remplir(sudoku,string,y) :
    """Load one row of clues: each non-zero digit of `string` fixes the
    cell at position (digit index, y) to that single value."""
    for x, ch in enumerate(string):
        if ch != "0":
            sudoku[x][y] = [int(ch)]
# --- Script entry: build an empty grid, load a puzzle (one string per row,
# '0' marking an empty cell) and solve it.  Earlier test grids are kept
# below, commented out.
pos = creer_sudoku()
##string_remplir(pos,"070000000",0)
##string_remplir(pos,"900400000",1)
##string_remplir(pos,"000000506",2)
##string_remplir(pos,"800904000",3)
##string_remplir(pos,"060030280",4)
##string_remplir(pos,"510000430",5)
##string_remplir(pos,"100080000",6)
##string_remplir(pos,"002003910",7)
##string_remplir(pos,"040060800",8)
##string_remplir(pos,"021080350",0)
##string_remplir(pos,"060910000",1)
##string_remplir(pos,"000020610",2)
##string_remplir(pos,"008279160",3)
##string_remplir(pos,"600531002",4)
##string_remplir(pos,"210468905",5)
##string_remplir(pos,"006040500",6)
##string_remplir(pos,"745090000",7)
##string_remplir(pos,"182050496",8)
##string_remplir(pos,"050400108",0)
##string_remplir(pos,"000035000",1)
##string_remplir(pos,"009080500",2)
##string_remplir(pos,"296310005",3)
##string_remplir(pos,"003650010",4)
##string_remplir(pos,"175800300",5)
##string_remplir(pos,"900203051",6)
##string_remplir(pos,"000501903",7)
##string_remplir(pos,"531978200",8)
##string_remplir(pos,"000020000",0) ## hard
##string_remplir(pos,"750000000",1)
##string_remplir(pos,"038605000",2)
##string_remplir(pos,"003956017",3)
##string_remplir(pos,"000004020",4)
##string_remplir(pos,"090001400",5)
##string_remplir(pos,"000500260",6)
##string_remplir(pos,"300100054",7)
##string_remplir(pos,"800070100",8)
##string_remplir(pos,"048090370",0) ## easy diabolical
##string_remplir(pos,"009000500",1)
##string_remplir(pos,"000863000",2)
##string_remplir(pos,"084000210",3)
##string_remplir(pos,"030271040",4)
##string_remplir(pos,"000000000",5)
##string_remplir(pos,"800406005",6)
##string_remplir(pos,"001000900",7)
##string_remplir(pos,"007000400",8)
string_remplir(pos,"100007090",0) ## diabolical: "AI Escargot",
string_remplir(pos,"030020008",1) ## reputedly the hardest sudoku
string_remplir(pos,"009600500",2)
string_remplir(pos,"005300900",3)
string_remplir(pos,"010080002",4)
string_remplir(pos,"600004000",5)
string_remplir(pos,"300000010",6)
string_remplir(pos,"040000007",7)
string_remplir(pos,"007000300",8)
sudoku_resolu = resoudre(pos)
print_sudoku(pos) ## the starting grid
print_sudoku(sudoku_resolu)
#os.system('pause')
|
from Utils.parse import tfrecord_reader_dataset
import os
import tensorflow as tf
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # How many times the test set is repeated when iterating it below.
    epoch = 1
    # 1. Build the list of all TFRecord files.
    dir = "D:\\data\\COVID19\\TFRecord"  # NOTE(review): `dir` shadows the builtin
    files_name = os.listdir(dir)
    train_files_name = [os.path.join(dir, name) for name in files_name if name.startswith("train")]
    test_files_name = [os.path.join(dir, name) for name in files_name if name.startswith("test")]
    train_dataset = tfrecord_reader_dataset(train_files_name, batch_size=32)
    test_dataset = tfrecord_reader_dataset(test_files_name, batch_size=32).repeat(epoch)
    # With plain TensorFlow (graph mode), create a dataset iterator and run it
    # in a session each step.  NOTE(review): make_one_shot_iterator/tf.Session
    # are TF1-style APIs — requires TF 1.x or tf.compat.v1; confirm.
    iterator = test_dataset.make_one_shot_iterator()
    # With Keras you can instead pass the datasets straight to fit(train_dataset, test_dataset).
    # Keras alternative: pass the dataset's iterator directly:
    # model.fit(dataset.make_one_shot_iterator(), epochs=10, steps_per_epoch=10)
    next_element = iterator.get_next()
    # Drain the test set, displaying the first image/mask of every batch.
    with tf.Session() as sess:
        while True:
            try:
                features = sess.run(next_element)
                imgs = features["image"]
                masks = features["mask"]
                plt.imshow(imgs[0], cmap="gray")
                plt.show()
                plt.imshow(masks[0], cmap="gray")
                plt.show()
            except tf.errors.OutOfRangeError:
                # Raised once the (repeated) dataset is exhausted.
                break
|
"""盤面の列挙
盤面を列挙し、gmlファイルに出力する。
"""
import gobbletgobblers as game
import player_ai as ai # ゲームAI.ミニマックスによる行動.ランダムな行動.
import networkx as nx
def main():
    """Enumerate the board states reached during one play-through and
    write them as a directed graph to a GML file under ``output/``."""
    print()
    print("盤面の列挙を行います。")
    print("出力するファイル名を打ち込んで下さい(.gml含む):", end="")
    filename = input()
    if not filename:
        print("filenameが正常に入力されませんでした")
        # Bug fix: without a filename the final write_gml() would fail,
        # so bail out early instead of falling through.
        return
    # Initialise the game state (class "State" from gobbletgobblers).
    state = game.State()
    # Initialise the directed graph.
    G = nx.DiGraph()
    # Add the starting position (binary-encoded piece layout) as a node.
    G.add_node(state.get_pieces_for_binary())
    # Loop until the game is over (checked via State.is_done()).
    while not state.is_done():
        # Binary encoding of the state before acting.
        binary_state = state.get_pieces_for_binary()
        # Pick an action: MiniMax for the first player, Random for the second.
        action_modes = ("MiniMax", "Random")
        action_mode = action_modes[0] if state.is_first_player() else action_modes[1]
        action = ai.action(state, action_mode)
        # Advance to the next state.  Bug fix: this update (and the
        # binary_state assignment above) was commented out, which made the
        # loop spin forever and left `binary_state` undefined at add_edge.
        state = state.next(action)
        print(state)
        print()
        # Add the new node and the transition edge from the previous state.
        G.add_node(state.get_pieces_for_binary())
        G.add_edge(binary_state, state.get_pieces_for_binary())
    # Write the network out.
    nx.readwrite.gml.write_gml(G, "output/" + filename)
if __name__ == "__main__":
    # Run the enumeration only when executed as a script.
    main()
|
import behaviors
import unittest
class TestTargetFollowingBehavior(unittest.TestCase):
    """Steering tests for behaviors.TargetFollowingBehavior.

    Compass / coordinate convention exercised by the tests::

        {-45}-------{ 0 }-------{45}
          |   +-----[ +y]-----+   |
          |   |               |   |
          |   |               |   |
        {-90} [-x]   [0,0]   [+x] {90}
          |   |               |   |
          |   |               |   |
          |   +-----[ -y]-----+   |
        {-135}-------{180}-------{135}
    """

    def setUp(self):
        # A heading error small enough for proportional steering, and one
        # large enough to force a pivot (full counter-rotation) turn.
        self.small_angle = 5.0
        self.large_angle = 50.0

    def _run(self, coordinates, compass_heading, repeat=None):
        """Build a behavior aimed at the origin, compute motor speeds for
        the given sensor snapshot and return (speeds, behavior)."""
        behavior = behaviors.TargetFollowingBehavior()
        if repeat is not None:
            behavior.repeat = repeat
        behavior.set_target_coordinates_list([[0.0, 0.0]])
        sensor_data = {
            'coordinates': coordinates,
            'compass_heading': compass_heading}
        return behavior.calculate_motor_speeds(sensor_data), behavior

    def test_calculate_motor_speeds_no_correction_facing_north(self):
        speeds, _ = self._run([0.0, 1000.0], 0.0)
        self.assertGreater(speeds[0], 0.0, "speeds[0]")
        self.assertEqual(round(speeds[0], 5), round(speeds[1], 5), "speeds[0] == speeds[1]")

    def test_calculate_motor_speeds_should_slightly_correct_to_the_right_facing_north(self):
        speeds, _ = self._run([0.0, 1000.0], -self.small_angle)
        self.assertGreater(speeds[1], 0.0, "speeds[1]")
        self.assertGreater(speeds[0], speeds[1], "speeds[0] > speeds[1]")

    def test_calculate_motor_speeds_should_slightly_correct_to_the_left_facing_north(self):
        speeds, _ = self._run([0.0, 1000.0], self.small_angle)
        # Fixed assertion label: this check is about speeds[0], not speeds[1].
        self.assertGreater(speeds[0], 0.0, "speeds[0]")
        self.assertGreater(speeds[1], speeds[0], "speeds[0] < speeds[1]")

    def test_calculate_motor_speeds_should_strongly_correct_to_the_right_facing_north(self):
        speeds, _ = self._run([0.0, 1000.0], -self.large_angle)
        self.assertGreater(speeds[0], 0.0, "speeds[0]")
        self.assertEqual(round(speeds[0], 5), round(-speeds[1], 5), "speeds[0] == -speeds[1]")

    def test_calculate_motor_speeds_should_strongly_correct_to_the_right_facing_left(self):
        speeds, _ = self._run([0.0, 1000.0], self.large_angle)
        self.assertGreater(speeds[1], 0.0, "speeds[1]")
        self.assertEqual(round(speeds[0], 5), round(-speeds[1], 5), "speeds[0] == -speeds[1]")

    def test_calculate_motor_speeds_no_correction_facing_south(self):
        speeds, _ = self._run([0.0, -1000.0], 180.0)
        self.assertGreater(speeds[0], 0.0, "speeds[0]")
        self.assertEqual(round(speeds[0], 5), round(speeds[1], 5), "speeds[0] == speeds[1]")

    def test_calculate_motor_speeds_should_slightly_correct_to_the_right_facing_south(self):
        speeds, _ = self._run([0.0, -1000.0], 180.0 - self.small_angle)
        self.assertGreater(speeds[1], 0.0, "speeds[1]")
        self.assertGreater(speeds[0], speeds[1], "speeds[0] > speeds[1]")

    def test_calculate_motor_speeds_should_slightly_correct_to_the_left_facing_south(self):
        speeds, _ = self._run([0.0, -1000.0], 180.0 + self.small_angle)
        # Fixed assertion label: this check is about speeds[0], not speeds[1].
        self.assertGreater(speeds[0], 0.0, "speeds[0]")
        self.assertGreater(speeds[1], speeds[0], "speeds[0] < speeds[1]")

    def test_calculate_motor_speeds_no_correction_facing_east(self):
        speeds, _ = self._run([-1000.0, 0.0], 90.0)
        self.assertGreater(speeds[0], 0.0, "speeds[0]")
        self.assertEqual(round(speeds[0], 5), round(speeds[1], 5), "speeds[0] == speeds[1]")

    def test_calculate_motor_speeds_should_slightly_correct_to_the_right_facing_east(self):
        speeds, _ = self._run([-1000.0, 0.0], 90.0 - self.small_angle)
        self.assertGreater(speeds[1], 0.0, "speeds[1]")
        self.assertGreater(speeds[0], speeds[1], "speeds[0] > speeds[1]")

    def test_calculate_motor_speeds_should_slightly_correct_to_the_left_facing_east(self):
        speeds, _ = self._run([-1000.0, 0.0], 90.0 + self.small_angle)
        # Fixed assertion label: this check is about speeds[0], not speeds[1].
        self.assertGreater(speeds[0], 0.0, "speeds[0]")
        self.assertGreater(speeds[1], speeds[0], "speeds[0] < speeds[1]")

    def test_calculate_motor_speeds_should_return_zero_speeds_and_done_should_return_true_when_target_reached_exactly(self):
        speeds, behavior = self._run([0.0, 0.0], 0.0, repeat=False)
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual instead.
        self.assertEqual(speeds[0], 0.0, "speeds[0]")
        self.assertEqual(speeds[1], 0.0, "speeds[1]")
        self.assertEqual(behavior.done(), True, "done")
if __name__ == '__main__':
    # Allow running the test module directly with `python <file>.py`.
    unittest.main()
|
from django.urls import path
from . import views
# URL namespace: reverse these routes as 'chains:index', 'chains:view_chain', ...
app_name = 'chains'
urlpatterns = [
    path('', views.index, name='index'),
    path('view_chain/<int:story_id>/', views.view_chain, name='view_chain'),
    path('get_details/<int:story_id>/', views.get_details, name='get_details')
    # path('<int:question_id>/', views.detail, name='detail'),
    # path('<int:question_id>/results/', views.results, name='results'),
    # path('<int:question_id>/vote/', views.vote, name='vote'),
]
|
__author__ = "Panagiotis Garefalakis"
__copyright__ = "Imperial College London"
# The MIT License (MIT)
#
# Copyright (c) 2016 Panagiotis Garefalakis
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import numpy as np
import pandas as pd
import plots.utils as utils
import brewer2mpl
# brewer2mpl.get_map args: set name  set type  number of colors
# bmap = brewer2mpl.get_map('RdBu', 'Diverging', 5)
bmap = brewer2mpl.get_map('Set1', 'Qualitative', 5)
colors = bmap.mpl_colors
# Input CSVs, one per placement technique.
files = ["CPLEX-off_stats.csv", "CPLEX-on_stats.csv", "GR-NODE_CAND_stats.csv", "GR-SERIAL_stats.csv", "GR-RANDOM_stats.csv"]
# NOTE(review): this list appears unused below (labels_map drives the legend)
# and has 4 entries for 5 files — confirm before relying on it.
labels = ["ILP-offline", "ILP-online", "Node Candidates", "Random"]
# Maps the technique column value found in the CSVs to its legend label.
labels_map={"CPLEX-on": "ILP-online", "CPLEX-off": "ILP-offline",
            "GR-NODE_CAND": "Node Candidates", "GR-RANDOM": "Greedy", "GR-SERIAL": "Aurora-Prelim"}
# colors = ['r', 'g', 'b', 'black', 'c', 'm']
markers = ['o', '^', 'v', 'h', 'x']
linestyle_list = ['--', '-', ':', '-', '-.']
# Global style configuration
utils.set_rcs()
def latency_logscale(data):
fig = utils.plt.figure()
ax = fig.add_subplot(111)
space = 0.25
conditions = np.unique(data[:, 0])
categories = np.unique(data[:, 1])
# n = len(conditions)
n = len(labels_map)
width = (1 - space) / n
print "width:", width
i = 0
for cond in conditions:
print "cond:", cond
y_vals = data[data[:, 0] == cond][:, 2].astype(np.float)
x_vals = data[data[:, 0] == cond][:, 1].astype(np.int)
pos = [j - (1 - space) / 2. + i * width for j in range(1, len(categories) + 1)]
if labels_map.has_key(str(cond).strip()):
ax.plot(x_vals, y_vals, label=labels_map[str(cond).strip()], color=colors[i], linestyle=linestyle_list[i],
marker=markers[i], linewidth=1.5,)
# , edgecolor=get_colors()[i+1],hatch=hatch_patterns[i])
i +=1
indexes = range(1, len(categories) + 1)
print "Indexes: ", indexes
print "Categories: ", categories
# Add the axis labels
ax.set_ylabel("Latency (ms)")
ax.set_xlabel("Number of Nodes")
# Make Y axis logscale
utils.plt.yscale('log', nonposy='clip')
# Add a legend
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1])
utils.plt.tight_layout()
# Create some space for the last marker
utils.plt.xlim((0, x_vals[len(x_vals)-1]+10))
return fig, ax
def file_parser(fnames):
file_data = (pd.read_csv(f) for f in fnames)
all_data = pd.concat(file_data, ignore_index=True)
# grouped_data = all_data.groupby([' Plan technique', ' totJobs'])[' ObjectiveValue '].mean()
print all_data.columns.values
# print grouped_data
numpyMatrix = all_data[[' Plan technique', ' clusterSize', ' runTime(ms)']].values
# print numpyMatrix
return numpyMatrix
if __name__ == '__main__':
    print "Sytem Path {}".format(os.environ['PATH'])
    # The directory holding the *_stats.csv files is expected as argv[1].
    if len(sys.argv) < 2:
        print "Usage: bars_efficiency.py.py <input PATH>"
        sys.exit(-1)
    outname = "placement_latency_log"
    fpaths = []
    for file in files:  # NOTE(review): `file` shadows the Python 2 builtin
        fpaths.append(sys.argv[1]+"/"+file)
        # labels.append(sys.argv[2 + i])
    print 'Files given: {}'.format(" | ".join(fname for fname in fpaths))
    # print 'Labels given: {}'.format(" | ".join(label for label in labels))
    # print brewer2mpl.print_maps()
    # Parse the CSVs, draw the figure, then apply global style and save it.
    data = file_parser(fpaths)
    fig, axes = latency_logscale(data)
    utils.set_rcs()
    utils.prepare_legend(legend_loc="upper left", legend_font=15)
    utils.writeout("%s"%outname)
|
# Generated by Django 2.2 on 2021-06-07 01:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema change: drop Cart.amount and add User.total
    (nullable integer defaulting to 0)."""
    dependencies = [
        ('store_app', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='cart',
            name='amount',
        ),
        migrations.AddField(
            model_name='user',
            name='total',
            field=models.IntegerField(default=0, null=True),
        ),
    ]
|
# Work with Python 3.6
import os
import asyncio
import discord
from itertools import cycle
from discord.ext.commands import Bot
# Every bot command must start with this prefix character.
BOT_PREFIX = "."
client = Bot(command_prefix=BOT_PREFIX)
# Extension (cog) module names loaded at startup.
extensions = ['Events', 'Fun', 'Roller']
# Presence messages the bot cycles through while running.
status = [
    'against the Fallen.',
    'against the Vex.',
    'against the Taken.',
    'against the Scourge.',
    'against the Cabal.'
]
@client.event
# Whenever a message is sent.
async def on_message(message):
    """
    The callback invoked when the bot receives a message.
    The only difference from the default is that when an error occurs,
    the message and error are printed here.
    :param message: The message that was received.
    """
    try:
        await client.process_commands(message)
    except Exception as error:
        # Broad catch is deliberate: a failing command must not kill the bot.
        print(message, error)
@client.event
async def on_ready():
    # Fired once the bot has connected and is ready to receive events.
    print('Bot Online.')
@client.event
# Rotates through status' every few minutes.
async def change_status():
    # Background task (scheduled in __main__): cycle the presence message
    # every 5 minutes.  NOTE(review): `client.is_closed` used as an attribute
    # and `change_presence(game=...)` are pre-1.0 discord.py APIs — confirm
    # the installed discord.py version matches.
    await client.wait_until_ready()
    msgs = cycle(status)
    while not client.is_closed:
        current_status = next(msgs)
        await client.change_presence(game=discord.Game(name=current_status))
        await asyncio.sleep(300)
@client.event
# Lists the currently connected servers' in console
async def list_servers():
    # Background task: print the connected servers every 10 minutes.
    # NOTE(review): `client.servers` is a pre-1.0 discord.py attribute
    # (renamed `guilds` in 1.x) — confirm library version.
    await client.wait_until_ready()
    while not client.is_closed:
        print("Current servers:")
        for server in client.servers:
            print(server.name)
        await asyncio.sleep(600)
if __name__ == '__main__':
    # Load each extension, schedule the two background tasks, then start
    # the bot with the token taken from the TOKEN environment variable.
    for extension in extensions:
        try:
            client.load_extension(extension)
        except Exception as err:
            print('{} cannot be loaded. [{}]'.format(extension, err))
    client.loop.create_task(list_servers())
    client.loop.create_task(change_status())
    client.run(os.getenv('TOKEN'))
|
# Working with files
file = open('Company.txt', 'r', encoding='utf-8')
print(file.read())
file.close()
# Use `with` so the file is closed automatically.
with open('Company.txt', 'r', encoding='utf-8') as file:
    print(file.read())
# Read the text one line at a time
file = open('Company.txt', 'r')
for line in file:
    print(line)
file.close()
# Read one line at a time (while loop)
file = open('Company.txt', 'r')
line = file.readline()
while line: # keep looping as long as a line could be read
    print(line)
    line = file.readline()
file.close()
# Strip the trailing newline from each line
file = open('Company.txt', 'r')
line = file.readline()
while line: # keep looping as long as a line could be read
    line = line.strip()
    print(line)
    line = file.readline()
file.close()
# Branch depending on whether the file exists
import os
if os.path.isfile('Company.txt'):
    print('Company.txt', 'があります')
else:
    print('Company.txt', 'はありませんでした')
if os.path.isfile('ABC.txt'):
    print('ABC.txt', 'があります')
else:
    print('ABC.txt', 'はありませんでした')
# Reading several files at once
import glob # declare the glob module
print(glob.glob("GameCompany_*.txt"))
# ['GameCompany_1.txt', 'GameCompany_2.txt']
for file in glob.glob("GameCompany_*.txt"):
    file = open(file, "r")
    # NOTE(review): these handles are never closed — consider using `with`.
    print(file.read())
# Writing
file = open('GameCompany_3.txt', 'w', encoding='utf-8')
file.write('Craft Egg \n')
file.write('BANDAI ')
file.write('Cygames ')
file.close();
# Appending
file = open('GameCompany_3.txt', 'a')
file.write('\nSquareEnix')
file.close();
|
#!/usr/bin/python
import math
n = 100000000
count = 3  # NOTE(review): result accumulator pre-seeded with 3, presumably to
           # cover small solutions the main loop below skips — confirm.
# Sieve of Eratosthenes over [0, n]: after this loop isPrime[i] is True for
# every prime i >= 2 (indices 0 and 1 stay True; the code below only
# indexes the sieve at values >= 2).
isPrime = [True] * (n + 1)
k = int(math.sqrt(n))
for i in range(2, k + 1):
    if isPrime[i]:
        # Cross out multiples starting at i*i (smaller ones already handled).
        j = i * i
        while j <= n:
            isPrime[j] = False
            j += i
def valid(num):
    """Return True when, for every divisor d of num, d + num // d is prime
    (relies on the module-level isPrime sieve)."""
    limit = int(math.sqrt(num))
    for d in range(1, limit + 1):
        if num % d != 0:
            continue
        if not isPrime[d + num // d]:
            return False
    return True
# Main scan: check i = 6, 10, 14, ... (i ≡ 2 mod 4).  The two cheap
# pre-filters (1 + i prime, 2 + i/2 prime) are the d = 1 and d = 2 cases
# of valid() and reject most candidates before the full divisor check.
# NOTE(review): with count seeded to 3 this appears to be a Project
# Euler 357 style sum — confirm against the intended problem statement.
for i in range(6, n, 4):
    if not isPrime[1 + i]:
        continue
    if not isPrime[2 + i // 2]:
        continue
    if valid(i):
        count += i
print count
|
from django.db import models
from django.urls import reverse
from django.core.files.storage import FileSystemStorage
from django.contrib.auth.models import User
# Storage backend rooted at /media/img.  NOTE(review): `fs` is defined but not
# passed to any field below — confirm whether ImageField should use storage=fs.
fs = FileSystemStorage(location='/media/img')
class Product(models.Model):
    """A shop item: name, price, stock count, description, optional image."""
    name = models.CharField(max_length=200, verbose_name = 'Название продукта' )
    price = models.FloatField(verbose_name = 'Стоимость продукта')
    count = models.IntegerField(verbose_name = 'Количество продукта')
    description = models.TextField(verbose_name = 'Описание продукта')
    image = models.ImageField(max_length=100, verbose_name ='Изображение продукта',
                            upload_to='img', blank=True, null=True)
    def __str__(self):
        # Human-readable representation used by the admin and templates.
        return self.name
    class Meta:
        verbose_name = 'Товар'
        verbose_name_plural = 'Товары'
    def get_absolute_url(self):
        # Detail page for this product.
        return reverse("product_url", kwargs={"product_id": self.pk})
    def get_update_url(self):
        # Edit page for this product.
        return reverse("update_product_url", kwargs={"product_id": self.pk})
    def get_delete_url(self):
        # Delete-confirmation page for this product.
        return reverse("delete_product_url", kwargs={"product_id": self.pk})
class Comment(models.Model):
    """A user comment attached to a Product (reverse accessor: `comments`)."""
    comment = models.TextField(verbose_name='Текст комментария')
    author = models.CharField( verbose_name='Автор комментария',max_length=200)
    time_stamp = models.DateTimeField(auto_now=True, verbose_name='Дата комментария')
    fk_product = models.ForeignKey(Product, on_delete=models.CASCADE, verbose_name='Продукт', related_name = 'comments')
    class Meta:
        verbose_name ="Комментарий"
        verbose_name_plural ="Комментарии"
    def __str__(self):
        # Shown in the admin list: the comment's author.
        return self.author
    # def get_absolute_url(self):
    #     return reverse("product_url", kwargs={"product_id": self.pk})
class Order(models.Model):
    """A user's order: which product, who ordered it, quantity and cost."""
    product_fk = models.ForeignKey(Product, on_delete=models.CASCADE,
                                   verbose_name = 'Продукт')
    user_fk = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name = 'Пользователь')
    product_count = models.IntegerField(verbose_name = 'Количество')
    cost = models.FloatField(verbose_name = 'Сумма')
    order_code = models.CharField(max_length=20, verbose_name = 'Код заказа', blank=True, null=True)
    date_order = models.DateTimeField(auto_now=True,
                                      verbose_name = 'Время и дата заказа')
    accepted = models.BooleanField(verbose_name='Статус заказа', default=False)
    def __str__(self):
        # Bug fix: was `'{}'.formft(...)`, a typo that raised AttributeError
        # whenever the order was rendered as a string.
        return '{}'.format(self.date_order)
|
'''
Utility functions for the monitoring and log generation.
'''
import time
import logging
def init_logger():
    '''
    Initialise the logger.

    Returns:
        logger (Logger): Logger to use for the monitoring.
    '''
    log_format = '%(asctime)s %(levelname)s %(message)s'
    logger = logging.getLogger('Log monitoring console')
    logger.setLevel(logging.INFO)
    # getLogger() returns a process-wide singleton: guard against attaching
    # a duplicate file handler when init_logger() is called more than once.
    if not logger.handlers:
        # Use '-' instead of ':' in the timestamp so the file name is also
        # valid on Windows (':' is not allowed in Windows file names).
        fh = logging.FileHandler(
            'simulation-' + time.strftime('%d-%b-%Y-%H-%M-%S') + '.log'
        )
        fh.setLevel(logging.INFO)
        fh.setFormatter(logging.Formatter(log_format))
        logger.addHandler(fh)
    return logger
|
import os
import pickle
import subprocess
import warnings
from datetime import datetime
import fsspec
from rubicon_ml import client, domain
from rubicon_ml.client.utils.exception_handling import failsafe
from rubicon_ml.client.utils.tags import filter_children
from rubicon_ml.exceptions import RubiconException
class ArtifactMixin:
    """Adds artifact support to a client object."""
    def _validate_data(self, data_bytes, data_file, data_object, data_path, name):
        """Raises a `RubiconException` if the data to log as
        an artifact is improperly provided.

        Returns the resolved ``(data_bytes, name)`` pair.
        """
        if not any([data_bytes, data_file, data_object, data_path]):
            raise RubiconException(
                "One of `data_bytes`, `data_file`, `data_object` or `data_path` must be provided."
            )
        # Default the artifact name to the file name when only a path is given.
        if name is None:
            if data_path is not None:
                name = os.path.basename(data_path)
            else:
                raise RubiconException("`name` must be provided if not using `data_path`.")
        # Resolve the payload by precedence: bytes, then object, then file, then path.
        if data_bytes is None:
            if data_object is not None:
                data_bytes = pickle.dumps(data_object)
            else:
                if data_file is not None:
                    f = data_file
                elif data_path is not None:
                    f = fsspec.open(data_path, "rb")
                with f as open_file:
                    data_bytes = open_file.read()
        return data_bytes, name
    @failsafe
    def log_artifact(
        self,
        data_bytes=None,
        data_file=None,
        data_object=None,
        data_path=None,
        name=None,
        description=None,
        tags=[],
    ):
        """Log an artifact to this client object.
        Parameters
        ----------
        data_bytes : bytes, optional
            The raw bytes to log as an artifact.
        data_file : TextIOWrapper, optional
            The open file to log as an artifact.
        data_object : python object, optional
            The python object to log as an artifact.
        data_path : str, optional
            The absolute or relative local path or S3 path
            to the data to log as an artifact. S3 paths
            must be prepended with 's3://'.
        name : str, optional
            The name of the artifact file. Required if
            `data_path` is not provided.
        description : str, optional
            A description of the artifact. Use to provide
            additional context.
        tags : list of str, optional
            Values to tag the experiment with. Use tags to organize and
            filter your artifacts.
        Notes
        -----
        Only one of `data_bytes`, `data_file`, `data_object`, and `data_path`
        should be provided. If more than one is given, the order
        of precedence is `data_bytes`, `data_object`, `data_file`, `data_path`.
        Returns
        -------
        rubicon.client.Artifact
            The new artifact.
        Examples
        --------
        >>> # Log with bytes
        >>> experiment.log_artifact(
        ...     data_bytes=b'hello rubicon!', name='bytes_artifact', description="log artifact from bytes"
        ... )
        >>> # Log with file
        >>> with open('some_relevant_file', 'rb') as f:
        >>>     project.log_artifact(
        ...         data_file=f, name='file_artifact', description="log artifact from file"
        ...     )
        >>> # Log with file path
        >>> experiment.log_artifact(
        ...     data_path="./path/to/artifact.pkl", description="log artifact from file path"
        ... )
        """
        # NOTE(review): `tags=[]` is a mutable default; it is not mutated here,
        # but confirm callees never append to it.
        if not isinstance(tags, list) or not all([isinstance(tag, str) for tag in tags]):
            raise ValueError("`tags` must be `list` of type `str`")
        data_bytes, name = self._validate_data(data_bytes, data_file, data_object, data_path, name)
        artifact = domain.Artifact(
            name=name,
            description=description,
            parent_id=self._domain.id,
            tags=tags,
        )
        project_name, experiment_id = self._get_identifiers()
        self.repository.create_artifact(
            artifact, data_bytes, project_name, experiment_id=experiment_id
        )
        return client.Artifact(artifact, self)
    def _get_environment_bytes(self, export_cmd):
        """Get the working environment as a sequence of bytes.
        Parameters
        ----------
        export_cmd : list of str
            The command to export the environment.
        Returns
        -------
        bytes
            A bytes sequence of the environment.
        """
        try:
            completed_process = subprocess.run(export_cmd, check=True, capture_output=True)
        except subprocess.CalledProcessError as e:
            # Surface the failing command's stderr to the caller.
            raise RubiconException(e.stderr)
        return completed_process.stdout
    @failsafe
    def log_conda_environment(self, artifact_name=None):
        """Log the conda environment as an artifact to this client object.
        Useful for recreating your exact environment at a later date.
        Parameters
        ----------
        artifact_name : str, optional
            The name of the artifact (the exported conda environment).
        Returns
        -------
        rubicon.client.Artifact
            The new artifact.
        Notes
        -----
        Relies on running with an active conda environment.
        """
        # Default to a timestamped name so repeated exports don't collide.
        if artifact_name is None:
            artifact_name = f"environment-{datetime.now().strftime('%Y_%m_%d-%I_%M_%S_%p')}.yml"
        env_bytes = self._get_environment_bytes(["conda", "env", "export"])
        artifact = self.log_artifact(data_bytes=env_bytes, name=artifact_name)
        return artifact
    @failsafe
    def log_pip_requirements(self, artifact_name=None):
        """Log the pip requirements as an artifact to this client object.
        Useful for recreating your exact environment at a later date.
        Parameters
        ----------
        artifact_name : str, optional
            The name of the artifact (the exported pip environment).
        Returns
        -------
        rubicon.client.Artifact
            The new artifact.
        """
        # Default to a timestamped name so repeated exports don't collide.
        if artifact_name is None:
            artifact_name = f"requirements-{datetime.now().strftime('%Y_%m_%d-%I_%M_%S_%p')}.txt"
        requirements_bytes = self._get_environment_bytes(["pip", "freeze"])
        artifact = self.log_artifact(data_bytes=requirements_bytes, name=artifact_name)
        return artifact
    @failsafe
    def artifacts(self, name=None, tags=[], qtype="or"):
        """Get the artifacts logged to this client object.
        Parameters
        ----------
        name : str, optional
            The name value to filter results on.
        tags : list of str, optional
            The tag values to filter results on.
        qtype : str, optional
            The query type to filter results on. Can be 'or' or
            'and'. Defaults to 'or'.
        Returns
        -------
        list of rubicon.client.Artifact
            The artifacts previously logged to this client object.
        """
        project_name, experiment_id = self._get_identifiers()
        artifacts = [
            client.Artifact(a, self)
            for a in self.repository.get_artifacts_metadata(
                project_name, experiment_id=experiment_id
            )
        ]
        self._artifacts = filter_children(artifacts, tags, qtype, name)
        return self._artifacts
    @failsafe
    def artifact(self, name=None, id=None):
        """Get an artifact logged to this project by id or name.
        Parameters
        ----------
        id : str
            The id of the artifact to get.
        name : str
            The name of the artifact to get.
        Returns
        -------
        rubicon.client.Artifact
            The artifact logged to this project with id `id` or name 'name'.
        """
        # Exactly one of `name` and `id` must be provided.
        if (name is None and id is None) or (name is not None and id is not None):
            raise ValueError("`name` OR `id` required.")
        if name is not None:
            artifacts = self.artifacts(name=name)
            if len(artifacts) == 0:
                raise RubiconException(f"No artifact found with name '{name}'.")
            if len(artifacts) > 1:
                warnings.warn(
                    f"Multiple artifacts found with name '{name}'. Returning most recently logged."
                )
            # When several share the name, the most recently logged wins.
            artifact = artifacts[-1]
        else:
            project_name, experiment_id = self._get_identifiers()
            artifact = client.Artifact(
                self.repository.get_artifact_metadata(project_name, id, experiment_id), self
            )
        return artifact
    @failsafe
    def delete_artifacts(self, ids):
        """Delete the artifacts logged to with client object
        with ids `ids`.
        Parameters
        ----------
        ids : list of str
            The ids of the artifacts to delete.
        """
        project_name, experiment_id = self._get_identifiers()
        for artifact_id in ids:
            self.repository.delete_artifact(project_name, artifact_id, experiment_id=experiment_id)
class DataframeMixin:
    """Adds dataframe support to a client object."""
    @failsafe
    def log_dataframe(self, df, description=None, name=None, tags=None):
        """Log a dataframe to this client object.

        Parameters
        ----------
        df : pandas.DataFrame or dask.dataframe.DataFrame
            The `dask` or `pandas` dataframe to log.
        description : str, optional
            The dataframe's description. Use to provide
            additional context.
        name : str, optional
            The name of the dataframe.
        tags : list of str, optional
            The values to tag the dataframe with.

        Returns
        -------
        rubicon.client.Dataframe
            The new dataframe.
        """
        # `None` default instead of a shared mutable `[]` default
        # (avoids the classic mutable-default-argument pitfall).
        tags = [] if tags is None else tags
        if not isinstance(tags, list) or not all([isinstance(tag, str) for tag in tags]):
            raise ValueError("`tags` must be `list` of type `str`")
        dataframe = domain.Dataframe(
            parent_id=self._domain.id,
            description=description,
            name=name,
            tags=tags,
        )
        project_name, experiment_id = self._get_identifiers()
        self.repository.create_dataframe(dataframe, df, project_name, experiment_id=experiment_id)
        return client.Dataframe(dataframe, self)
    @failsafe
    def dataframes(self, name=None, tags=None, qtype="or"):
        """Get the dataframes logged to this client object.

        Parameters
        ----------
        name : str, optional
            The name value to filter results on.
        tags : list of str, optional
            The tag values to filter results on.
        qtype : str, optional
            The query type to filter results on. Can be 'or' or
            'and'. Defaults to 'or'.

        Returns
        -------
        list of rubicon.client.Dataframe
            The dataframes previously logged to this client object.
        """
        # Same mutable-default fix as `log_dataframe`.
        tags = [] if tags is None else tags
        project_name, experiment_id = self._get_identifiers()
        dataframes = [
            client.Dataframe(d, self)
            for d in self.repository.get_dataframes_metadata(
                project_name, experiment_id=experiment_id
            )
        ]
        self._dataframes = filter_children(dataframes, tags, qtype, name)
        return self._dataframes
    @failsafe
    def dataframe(self, name=None, id=None):
        """Get the dataframe logged to this client object.

        Exactly one of `name` or `id` must be provided.

        Parameters
        ----------
        id : str
            The id of the dataframe to get.
        name : str
            The name of the dataframe to get.

        Returns
        -------
        rubicon.client.Dataframe
            The dataframe logged to this project with id `id` or name 'name'.

        Raises
        ------
        ValueError
            If both or neither of `name` and `id` are given.
        RubiconException
            If `name` is given and no dataframe has that name.
        """
        if (name is None and id is None) or (name is not None and id is not None):
            raise ValueError("`name` OR `id` required.")
        elif name is not None:
            dataframes = self.dataframes(name=name)
            if len(dataframes) == 0:
                raise RubiconException(f"No dataframe found with name '{name}'.")
            elif len(dataframes) > 1:
                warnings.warn(
                    f"Multiple dataframes found with name '{name}'."
                    " Returning most recently logged."
                )
            # Most recently logged wins when names collide.
            dataframe = dataframes[-1]
        else:
            project_name, experiment_id = self._get_identifiers()
            dataframe = client.Dataframe(
                self.repository.get_dataframe_metadata(
                    project_name, experiment_id=experiment_id, dataframe_id=id
                ),
                self,
            )
        return dataframe
    @failsafe
    def delete_dataframes(self, ids):
        """Delete the dataframes with ids `ids` logged to
        this client object.

        Parameters
        ----------
        ids : list of str
            The ids of the dataframes to delete.
        """
        project_name, experiment_id = self._get_identifiers()
        for dataframe_id in ids:
            self.repository.delete_dataframe(
                project_name, dataframe_id, experiment_id=experiment_id
            )
class TagMixin:
    """Adds tag support to a client object."""
    def _get_taggable_identifiers(self):
        """Resolve the (project_name, experiment_id, entity_identifier)
        triple the tag repository calls need for this entity type."""
        project_name, experiment_id = self._parent._get_identifiers()
        # Experiments are the entity itself, so they carry no extra
        # entity identifier -- their own id becomes the experiment id.
        if isinstance(self, client.Experiment):
            return project_name, self.id, None
        # Dataframes and artifacts are identified by their `id`s.
        if isinstance(self, (client.Dataframe, client.Artifact)):
            return project_name, experiment_id, self.id
        # Everything else is identified by its `name`.
        return project_name, experiment_id, self.name
    @failsafe
    def add_tags(self, tags):
        """Add tags to this client object.

        Parameters
        ----------
        tags : list of str
            The tag values to add.
        """
        is_str_list = isinstance(tags, list) and all(isinstance(t, str) for t in tags)
        if not is_str_list:
            raise ValueError("`tags` must be `list` of type `str`")
        project_name, experiment_id, entity_identifier = self._get_taggable_identifiers()
        self._domain.add_tags(tags)
        self.repository.add_tags(
            project_name,
            tags,
            experiment_id=experiment_id,
            entity_identifier=entity_identifier,
            entity_type=type(self).__name__,
        )
    @failsafe
    def remove_tags(self, tags):
        """Remove tags from this client object.

        Parameters
        ----------
        tags : list of str
            The tag values to remove.
        """
        project_name, experiment_id, entity_identifier = self._get_taggable_identifiers()
        self._domain.remove_tags(tags)
        self.repository.remove_tags(
            project_name,
            tags,
            experiment_id=experiment_id,
            entity_identifier=entity_identifier,
            entity_type=type(self).__name__,
        )
    def _update_tags(self, tag_data):
        """Replay each add/remove record in `tag_data` onto the
        in-memory domain object."""
        for record in tag_data:
            self._domain.add_tags(record.get("added_tags", []))
            self._domain.remove_tags(record.get("removed_tags", []))
    @property
    def tags(self):
        """Get this client object's tags (refreshed from the repository)."""
        project_name, experiment_id, entity_identifier = self._get_taggable_identifiers()
        stored_tag_data = self.repository.get_tags(
            project_name,
            experiment_id=experiment_id,
            entity_identifier=entity_identifier,
            entity_type=type(self).__name__,
        )
        self._update_tags(stored_tag_data)
        return self._domain.tags
|
# -*- coding: utf-8 -*-
class Coche(object):
    """Simple car model that tracks its remaining fuel in litres."""

    def __init__(self, gasolina):
        # Remaining fuel, in litres.
        self.gasolina = gasolina
        print("Tenemos " + str(gasolina) + " litros")

    def arrancar(self):
        """Report whether the engine starts (requires fuel left)."""
        if self.gasolina <= 0:
            print("No arranca")
            return
        print("Arranca")

    def conducir(self):
        """Drive once, burning one litre, or report an empty tank."""
        if self.gasolina <= 0:
            print("No se mueve")
            return
        self.gasolina -= 1
        print("Quedan " + str(self.gasolina) + " litros")
# Demo run: a car with five litres goes dry after five trips, then the
# two extra trips and the final start attempt report failure.
micoche = Coche(5)
micoche.arrancar()
for _ in range(7):
    micoche.conducir()
micoche.arrancar()
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2015 Marcos Organizador de Negocios SRL http://marcos.do
# Write by Eneldo Serrata (eneldo@marcos.do)
#
##############################################################################
import models
import wizard
import account
|
# -*- coding: utf8 -*-
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.acs_exception.exceptions import ClientException
from aliyunsdkcore.acs_exception.exceptions import ServerException
from aliyunsdkecs.request.v20140526 import DescribeInstancesRequest
from aliyunsdkecs.request.v20140526 import StopInstanceRequest
from app.utils.personal_config import aliAccessKeyID, aliAccessKeySecret, region_lst_ali
import json, re
from app.utils.tx_cvm_info_sync import parse_message_dict
# 创建 AcsClient 实例
def get_instance(region):
    """Return the raw instance descriptions for one Aliyun region.

    Issues a DescribeInstances call (first page, up to 10 entries) and
    returns the ``Instances.Instance`` list from the JSON response.
    """
    acs_client = AcsClient(aliAccessKeyID, aliAccessKeySecret, region)
    request = DescribeInstancesRequest.DescribeInstancesRequest()
    request.set_PageSize(10)
    # The SDK returns the response body as bytes.
    raw_response = acs_client.do_action_with_exception(request)
    payload = json.loads(raw_response.decode('utf-8'))
    return payload['Instances']['Instance']
def parse_info(instance_info):
    """Normalize a raw Aliyun ECS instance description into the
    internal VPS-info schema.

    Parameters
    ----------
    instance_info : dict
        One entry of ``Instances.Instance`` from DescribeInstances.

    Returns
    -------
    dict
        Always contains ``sp_name='aliyun'``; other keys (zoneName,
        cpu, wanIpSet, bandwidth, mem, createTime, lanIp, deadlineTime,
        status, os) appear only when the source field was present.
    """
    # Raw string: '\d' in a non-raw literal is an invalid escape
    # (DeprecationWarning, SyntaxWarning on newer Pythons).
    date_pattern = re.compile(r'(\d+)-(\d+)-(\d+)')

    def _date_only(timestamp):
        # '2020-03-15T08:00Z' -> '2020-03-15'
        return '-'.join(date_pattern.findall(timestamp)[0])

    res_info = dict()
    res_info['sp_name'] = 'aliyun'
    # Keys are distinct, so an elif chain replicates the original
    # independent-if behavior (including dict-order dependence between
    # InnerIpAddress and VpcAttributes for 'lanIp').
    for k, v in instance_info.items():
        if k == 'ZoneId':
            res_info['zoneName'] = v
        elif k == 'Cpu':
            res_info['cpu'] = v
        elif k == 'PublicIpAddress':
            res_info['wanIpSet'] = ' '.join(v['IpAddress'])
        elif k == 'InternetMaxBandwidthOut':
            res_info['bandwidth'] = v
        elif k == 'Memory':
            # API reports MiB; store GiB.
            res_info['mem'] = v / 1024
        elif k == 'CreationTime':
            res_info['createTime'] = _date_only(v)
        elif k == 'InnerIpAddress':
            # Classic-network private IP(s).
            res_info['lanIp'] = ' '.join(v['IpAddress'])
        elif k == 'VpcAttributes':
            # VPC private IPs take precedence when present.
            if v['PrivateIpAddress']['IpAddress']:
                res_info['lanIp'] = ' '.join(v['PrivateIpAddress']['IpAddress'])
        elif k == 'ExpiredTime':
            res_info['deadlineTime'] = _date_only(v)
        elif k == 'Status':
            res_info['status'] = v
        elif k == 'OSName':
            res_info['os'] = v
    return res_info
def get_ali_vps_data():
    """Collect and normalize instance info from every configured
    Aliyun region, in region order."""
    return [
        parse_info(raw_instance)
        for region in region_lst_ali
        for raw_instance in get_instance(region)
    ]
|
from zutils.task.task_redis import TaskRedis
from zutils.tensorflow.tf_session import TFSession
from zutils.convert import convert_dict,convert_dict_log
from zutils.logger import Logger
from zutils.utils import relative_project_path
import traceback
import time
import json
import os
class Task:
    """Redis-polling worker: pulls tasks, runs a model on them, and
    pushes converted results back.

    NOTE(review): the semantics of the zutils helpers (TaskRedis,
    TFSession, Logger) are inferred from usage here -- confirm against
    their implementations.
    """
    def __init__(self, task_name, model_class, model_config,
                 redis_host, redis_port, redis_timeout,
                 cuda, gpu_mem, allow_growth,
                 log_level, is_debug):
        # GPU id is stringified and appended to form a unique worker name.
        cuda = str(cuda)
        self.task_name = task_name
        self.task_uname = task_name + cuda
        self.task_redis = TaskRedis(task_name, redis_host, redis_port, redis_timeout)
        # Publish this model's converters via redis, then instantiate
        # local input/output converter objects from the returned classes.
        self.task_redis.upload_data_convert(model_class)
        self.input_convert, self.output_convert = self.task_redis.create_data_convert()
        self.input_convert = self.input_convert()
        self.output_convert = self.output_convert()
        # NOTE(review): `cuda` was str()-converted above, so it is never
        # None here (str(None) == 'None'); this else branch looks
        # unreachable -- confirm whether the check should precede str().
        if cuda is not None:
            self.sess = TFSession(cuda, gpu_mem, allow_growth).get_sess()
        else:
            self.sess = None
        self.logger = Logger(log_level, self.task_uname, is_debug)
        self.model_class_instance = model_class(self.sess, self.task_redis,model_config)
        self.task_run = self.model_class_instance.run
        # Replace any previous worker process for this task/GPU pair.
        self.kill_last_task()
    def kill_last_task(self):
        """Kill the previous worker recorded in this task's pid file
        (if still running) and record the current process's pid."""
        os.makedirs(relative_project_path('logs', 'pid'), exist_ok=True)
        pid_filepath = relative_project_path(relative_project_path('logs', 'pid', self.task_uname))
        if os.path.isfile(pid_filepath):
            with open(pid_filepath) as f:
                pid = f.readline()
            # Only kill when a matching 'src' process with that pid exists.
            if len(os.popen('ps -ef | grep %s | grep src | grep -v grep' % pid).readlines()) > 0:
                os.system('kill -9 %s' % pid)
        with open(pid_filepath, 'w') as f:
            f.write(str(os.getpid()))
    def get_task(self, get_func):
        """Fetch one task via `get_func` and convert its input.

        Returns None when the queue is empty or on any error; errors
        are logged and followed by a 5s backoff.
        """
        try:
            task = get_func()
            if task is None:
                self.logger().info(self.task_uname + ' is free')
                return None
            self.logger().info('RECV: ' + task['taskId'])
            task = self.input_convert.server_convert(task)
            return task
        except:
            self.logger().error('%s' % traceback.format_exc())
            time.sleep(5)
            return None
    def set_task_result(self, set_func, result):
        """Convert `result` and hand it to `set_func` wrapped in a
        {code, data, errorInfo} envelope; on conversion failure an
        error envelope (code 1) is sent instead.

        NOTE(review): the SEND log below passes two positional args to
        .info() and reads result['taskId'] after conversion -- confirm
        the Logger supports this and that 'taskId' survives conversion.
        """
        try:
            result = self.output_convert.server_convert(result)
            # JSON-bound setters need the payload made serializable first.
            if 'json' in getattr(set_func, '__name__'):
                convert_dict(result)
            r = {
                'code': 0,
                'data': result,
                'errorInfo': ""
            }
        except Exception as e:
            self.logger().error('%s' % traceback.format_exc())
            r = {
                'code': 1,
                'errorInfo': str(e)
            }
        try:
            set_func(r)
            self.logger().info('SEND:', result['taskId'])
        except:
            self.logger().error('%s' % traceback.format_exc())
            time.sleep(5)
    def run(self):
        # Main-loop hook; concrete tasks override this.
        pass
class B(Task):
    """Minimal Task subclass (appears to be a usage example)."""
    def xxx(self):
        # Demo method; prints a marker.
        print('xx')
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Create the Newspaper and Pillar models and add the optional
    Newspaper.pillar foreign key (auto-generated; schema only)."""
    dependencies = [
        ('query', '0004_auto_20150507_1024'),
    ]
    operations = [
        migrations.CreateModel(
            name='Newspaper',
            fields=[
                # Natural 9-character key instead of an auto id.
                ('id', models.CharField(max_length=9, serialize=False, primary_key=True)),
                ('title', models.CharField(max_length=500)),
                ('start_date', models.DateField()),
                ('end_date', models.DateField()),
                ('editions', models.PositiveIntegerField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Pillar',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=200)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='newspaper',
            name='pillar',
            field=models.ForeignKey(to='query.Pillar', null=True),
            preserve_default=True,
        ),
    ]
|
from flask import Blueprint, jsonify, request
from mybib.bibtext.reader import load_from_string
from mybib.graphql.access_layer import EntityAlreadyExistsError
from mybib.graphql.access_layer import get_paper as get_paper_da
from mybib.graphql.access_layer import insert_paper
from mybib.web.authentication import requires_auth
# Blueprint grouping all /api/papers endpoints.
papers_api = Blueprint("papers_api", __name__)
@papers_api.route("/api/papers/<identifier>", methods=["GET"])
def get_paper(identifier):
    """Return the paper with the given identifier as JSON.

    Bug fix: the rule was `<paper_id:identifier>`, which Flask parses
    as converter `paper_id` (which does not exist) applied to variable
    `identifier`; registration would fail with a LookupError. The plain
    `<identifier>` form uses the default string converter and binds to
    the view's `identifier` argument.
    """
    return jsonify(get_paper_da(identifier))
@papers_api.route("/api/papers", methods=["POST"])
@requires_auth
def post_paper():
    """Create a paper from a BibTeX request body.

    Expects exactly one BibTeX entry; stores the raw text under the
    `_bibtex` key. Responds 201 on success, or 409 with a Location
    header pointing at the existing paper on duplicate ID.
    """
    bibtex_text = request.data.decode("utf-8")
    # Single-entry unpack: raises ValueError if the body parses to
    # zero or multiple BibTeX entries.
    [paper_dict] = load_from_string(bibtex_text)
    paper_dict["_bibtex"] = bibtex_text
    response = jsonify()
    try:
        insert_paper(paper_dict)
        response.status_code = 201
        response.autocorrect_location_header = False
    except EntityAlreadyExistsError:
        response.status_code = 409
        response.headers["location"] = f"/api/papers/{paper_dict['ID']}"
    return response
@papers_api.route("/api/papers/search", methods=["GET"])
def search_papers():
    """Search papers by title.

    NOTE(review): this endpoint is a stub -- `title` is read but never
    used and the response is always JSON `null`; the search backend
    still needs to be wired in.
    """
    title = request.args["title"]
    res = None
    return jsonify(res)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Set Portuguese verbose names on the rentacar model and add the
    `is_back` (already returned?) flag (auto-generated; schema only)."""
    dependencies = [
        ('rentacar', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='rentacar',
            options={'verbose_name': 'Aluguel', 'verbose_name_plural': 'Alugu\xe9is'},
        ),
        migrations.AddField(
            model_name='rentacar',
            name='is_back',
            field=models.BooleanField(default=False, verbose_name=b'J\xc3\xa1 foi devolvido?'),
        ),
    ]
|
from blog.models import Entry, Comment, Category
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from datetime import date
# Create your views here.
def list(request, template='web/blog/list.html'):
categories, dates = blog_base()
print 'list'
if 'category_id' in request.GET:
category_id = request.GET['category_id']
print 'category_id',category_id
category = categories.filter(id=category_id);
queryset = Entry.objects.filter(category=category)
else:
queryset = Entry.objects.all()
paginator = Paginator(queryset, 5)
page = request.GET.get('page')
try:
posts = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
posts = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
posts = paginator.page(paginator.num_pages)
return render_to_response('web/blog/list.html', {"posts": posts,"categories":categories,"dates":dates }, context_instance=RequestContext(request))
def blog_base():
    """Build the sidebar data shared by the blog views.

    Returns (categories, dates): every Category with a freshly
    recomputed `article_num`, and the distinct (year, month) archive
    dates of all entries as date objects pinned to day 1.

    NOTE(review): this recounts and save()s every category on every
    call -- O(categories * entries) plus one DB write per category;
    an annotate(Count(...)) query would avoid the writes entirely.
    """
    dates = []
    entryies = Entry.objects.all()
    for entry in entryies:
        #if entry.post_date.year not in years:
        # years.append(entry.post_date.year)
        # Collapse each post date to the first of its month.
        blog_date = date(entry.post_date.year, entry.post_date.month, 1)
        if blog_date not in dates:
            dates.append(blog_date)
    categories = Category.objects.all()
    # Recompute and persist the per-category article count.
    for category in categories:
        category.article_num = 0
        for entry in entryies:
            if entry.category == category:
                category.article_num += 1
        category.save()
    return categories, dates;
def list_category(request, category_id):
    """Render the blog list filtered to one category (all posts when
    `category_id` is falsy), five entries per page."""
    categories = Category.objects.all()
    if category_id:
        selected = categories.filter(id=category_id)
        entries = Entry.objects.filter(category=selected)
    else:
        entries = Entry.objects.all()
    paginator = Paginator(entries, 5)
    requested_page = request.GET.get('page')
    try:
        posts = paginator.page(requested_page)
    except PageNotAnInteger:
        # Non-numeric page -> first page.
        posts = paginator.page(1)
    except EmptyPage:
        # Out-of-range page -> last page.
        posts = paginator.page(paginator.num_pages)
    context = {"posts": posts, "categories": categories, "request": request}
    return render_to_response('web/blog/list.html', context,
                              context_instance=RequestContext(request))
def comment_list(id):
    """Return all comments attached to the entry with primary key `id`."""
    return Entry.objects.get(id=id).comment_set.all()
def entry(request, id):
    """Render a single blog post and increment its hit counter."""
    print 'entry'
    categories, dates = blog_base()
    post = Entry.objects.get(pk=id)
    # Count this view. NOTE(review): read-modify-write is racy under
    # concurrent requests; F('hits') + 1 would be atomic.
    post.hits += 1
    post.save()
    comments = comment_list(id)
    return render_to_response("web/blog/entry.html", {'post':post, "categories":categories, 'comments':comments, "dates": dates}, context_instance=RequestContext(request))
import json
def comment_insert(request, id):
    """POST endpoint: attach a new comment to entry `id`.

    Expects POST fields `name`, `email`, `message`; responds with a
    JSON status flag ("1" on success, "0" for non-POST requests).
    """
    print 'comment_insert'
    entry = Entry.objects.get(id=id)
    if request.method == 'POST':
        name = request.POST['name']
        email = request.POST['email']
        content = request.POST['message']
        comment = Comment(name=name, content=content, email=email, entry=entry)
        comment.save()
        return HttpResponse(json.dumps({"status": "1", "msg": "insert message success"}), content_type= 'Application/json')
    else:
        return HttpResponse(json.dumps({"status": "0", "msg": "insert message failed"}), content_type= 'Application/json')
from django_ajax.decorators import ajax
@ajax
def ajax_comment_insert(request, id):
    """AJAX variant of comment_insert; the @ajax decorator wraps the
    returned dict into a JSON response."""
    print 'ajax_comment_insert'
    entry = Entry.objects.get(id=id)
    if request.method == 'POST':
        print 'post'
        name = request.POST['name']
        email = request.POST['email']
        content = request.POST['message']
        comment = Comment(name=name, content=content, email=email, entry=entry)
        comment.save()
        return {"status": "1", "msg": "insert message success"}
    else:
        return {"status": "0", "msg": "insert message failed"}
|
# Generated by Django 3.0.3 on 2020-02-13 22:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the blog app: Article, plus Comment with a
    cascading foreign key to Article (auto-generated; schema only)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('article_title', models.CharField(max_length=200, verbose_name='Name of article')),
                ('article_text', models.TextField(verbose_name='Content of article')),
                ('date_of_publication', models.DateTimeField(verbose_name='Date of publishing')),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author_of_comment', models.CharField(max_length=50, verbose_name='Author of comment')),
                ('text_of_comment', models.CharField(max_length=200, verbose_name='Text of comment')),
                # Deleting an article removes its comments.
                ('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mblogone.Article')),
            ],
        ),
    ]
|
from pylearn2.utils import serial
from glob import glob
import pickle
import os
import numpy as np
from copy import deepcopy
from sklearn.metrics import confusion_matrix
import itertools
import logging
from collections import Counter, OrderedDict
from braindecode.util import dict_equal, dict_is_subset
import shutil
log = logging.getLogger(__name__)
class QuickResult(object):
    """Lightweight accumulator mapping names to lists of values."""
    def __init__(self):
        self.dict = dict()
    def __getitem__(self, name):
        # Reading an unknown name lazily creates an empty list for it.
        return self.dict.setdefault(name, [])
    def append(self, name, val):
        # Same lazy-create semantics as __getitem__.
        self.dict.setdefault(name, []).append(val)
class Result:
    """Container for the values produced by one training run."""
    def __init__(self, parameters, templates, training_time,
            monitor_channels, predictions, targets):
        # Store every constructor argument under its own name.
        self.parameters = parameters
        self.templates = templates
        self.training_time = training_time
        self.monitor_channels = monitor_channels
        self.predictions = predictions
        self.targets = targets
    def get_misclasses(self):
        """Return {channel-name-minus-'_misclass': per-epoch values}
        for every monitored channel whose name contains '_misclass'."""
        return dict(
            (name.replace('_misclass', ''), values)
            for name, values in self.monitor_channels.items()
            if '_misclass' in name)
def save_result(model_or_models, result_save_path, training_time, parameters=None,
        templates=None):
    """Create Result object(s) for the given model(s) and pickle them.

    Parameters
    ----------
    model_or_models : model or list of models
        A single trained model, or a list of per-fold models (cross
        validation); a list yields a list of Result objects.
    result_save_path : str
        Destination path for the pickled result(s).
    training_time : float
        Training duration stored alongside the results.
    parameters, templates : dict, optional
        Experiment metadata copied into each Result. Defaults are
        created per call (fixes the shared-mutable-default `{}`
        arguments; passing an explicit dict behaves as before).
    """
    parameters = {} if parameters is None else parameters
    templates = {} if templates is None else templates
    if isinstance(model_or_models, list):
        result = [_create_result_for_model(model, parameters, templates,
                                           training_time)
                  for model in model_or_models]
    else:
        result = _create_result_for_model(model_or_models, parameters,
                                          templates, training_time)
    # Overwrite shouldn't happen I think?
    serial.save(result_save_path, result, on_overwrite="ignore")
def _create_result_for_model(model, parameters, templates, training_time):
    """Wrap one trained model's channels/predictions into a Result."""
    return Result(parameters, templates, training_time,
                  model.monitor_channels, model.info_predictions,
                  model.info_targets)
class ResultPool():
    """Loads pickled experiment results from a folder and aggregates
    their misclass curves, training times, and (constant vs varying)
    parameters. Handles both single train/test results and lists of
    per-fold results from cross validation.

    NOTE: uses dict.iteritems/has_key -- Python 2 only.
    """
    def load_results(self, folder_name, start=None, stop=None, params=None):
        """Load all '*.result.pkl' files from `folder_name`.

        `start`/`stop` slice the numerically sorted file list; `params`
        keeps only results whose parameters match all given key/values.
        """
        self._determine_file_names(folder_name, start, stop)
        self._load_result_objects()
        if params is not None:
            self._filter_by_params(params)
        self._collect_results()
        self._collect_parameters()
        self._split_parameters_into_constant_and_varying_parameters()
    def _determine_file_names(self, folder_name, start, stop):
        """Glob the result file names and sort them numerically."""
        # get all result_obj file names, should always have digit and then .pkl at end
        # (results have result.pkl instead)
        self._result_file_names = glob(os.path.join(folder_name, '*.result.pkl'))
        if len(self._result_file_names) == 0:
            log.warn("No result files found..")
        # sort numerically by last part of filename (before .pkl extension)
        # i.e. sort bla/12.pkl and bla/1.pkl
        # -> bla/bla/1.pkl, bla/12.pkl
        self._result_file_names.sort(
            key=lambda s:int(s.split('.result.pkl')[0].split('/')[-1]))
        if start is not None:
            self._result_file_names = self._result_file_names[start:]
        if stop is not None:
            self._result_file_names = self._result_file_names[:stop]
        if len(self._result_file_names) == 0:
            log.warn("No result files left after start stop..")
    def _load_result_objects(self):
        """Unpickle every result file; a list as first object marks a
        cross-validation run (one Result per fold)."""
        self._result_objects = []
        for file_name in self._result_file_names:
            results = serial.load(file_name)
            self._result_objects.append(results)
        if (len(self._result_objects) > 0 and
            isinstance(self._result_objects[0], list)):
            self._cross_validation = True
        else:
            self._cross_validation = False
    def _filter_by_params(self, params):
        """Drop results whose parameters do not match every entry of
        `params`."""
        # check given params if result should be included or not
        include_mask = []
        for results_or_result in self._result_objects:
            include_result=True
            res_params = dict()
            if (isinstance(results_or_result, list)):
                # all result objects should have same parameters/templates
                # so just take first result object for parameters/templates
                res_params = results_or_result[0].parameters
            else:
                res_params = results_or_result.parameters
            for key in params:
                if res_params.get(key, None) != params[key]:
                    include_result=False
            include_mask.append(include_result)
        include_mask = np.array(include_mask)
        if np.sum(include_mask) == 0 and len(self._result_objects) > 0:
            log.warn("Removed all results by param filter...")
        self._result_objects = np.array(self._result_objects)[include_mask]
        self._result_file_names = np.array(self._result_file_names)[include_mask]
    def _collect_results(self):
        """Gather misclass curves (as {name: array of experiments x
        folds}) and training times across all loaded results."""
        self._misclasses = {}
        self._training_times = []
        result_obj = None
        debug_i = 1
        for result_or_results in self._result_objects:
            if (isinstance(result_or_results, list)):
                result_objs = result_or_results
                misclasses = self._collect_cross_validation_results(result_objs)
                # all result objects should have same parameters/templates
                # so just take first result object for parameters/templates
                result_obj = result_objs[0]
            else:
                result_obj = result_or_results
                misclasses = self._collect_result(result_obj)
            # Append misclasses from this experiment
            for key in misclasses:
                misclasses_so_far = self._misclasses.get(key, [])
                misclasses_so_far.append(misclasses[key])
                self._misclasses[key] = misclasses_so_far
            try:
                self._training_times.append(result_obj.training_time)
            except:
                # Older results may lack training_time; use -1 marker.
                print("No Info for {:d}".format(debug_i))
                self._training_times.append(-1)
            debug_i += 1
        # Convert to numpy array and add empty dimension for train_test results
        # making np.mean on axis=1 work for cross val and train_test
        for key in self._misclasses:
            this_misclasses = np.array(self._misclasses[key])
            if (this_misclasses.ndim == 1):
                this_misclasses = np.expand_dims(this_misclasses, 1)
            self._misclasses[key] = this_misclasses
    def _collect_cross_validation_results(self, result_objects):
        """ Collect results from single cross validation"""
        misclasses = {}
        all_misclasses = [self._collect_result(res) for res in result_objects]
        # init keys using first fold (should all be same)
        for key in all_misclasses[0]:
            misclasses[key] = []
        for fold_misclass in all_misclasses:
            for key in fold_misclass:
                misclasses[key].append(fold_misclass[key])
        return misclasses
    def _collect_result(self, result_obj):
        """Return the misclass dict of a single Result object."""
        misclasses = result_obj.get_misclasses()
        return misclasses
    def _collect_parameters(self):
        """Gather the parameter and template dicts of every result."""
        self._parameters = []
        self._templates = []
        debug_i = 1
        for result_or_results in self._result_objects:
            if (isinstance(result_or_results, list)):
                # all result objects should have same parameters/templates
                # so just take first result object for parameters/templates
                result_obj = result_or_results[0]
            else:
                result_obj = result_or_results
            try:
                self._templates.append(result_obj.templates)
                self._parameters.append(result_obj.parameters)
            except:
                # Older results may lack these attributes; use empties.
                print("No Info for {:d}".format(debug_i))
                self._templates.append({})
                self._parameters.append({})
            debug_i += 1
    def _split_parameters_into_constant_and_varying_parameters(self):
        """Split parameters into those equal across all experiments
        (_constant_params) and per-experiment dicts of the differing
        ones (_varying_params, '-' marking values a dict lacks)."""
        params = deepcopy(self._parameters)
        varying_params_keys = []
        constant_params = params[0]
        # go through parameters, if parameters with different values
        # appear remove from constant params and add to varying params
        # do same if new parameter appears that does not exist in constant params
        for param_dict in params:
            for param_name, value in param_dict.iteritems():
                if (constant_params.has_key(param_name) and
                    constant_params[param_name] != value):
                    constant_params.pop(param_name)
                    varying_params_keys.append(param_name)
                elif ((not constant_params.has_key(param_name)) and
                    (param_name not in varying_params_keys)):
                    varying_params_keys.append(param_name)
            # also check if all constant params are in this dict, otherwise add them
            missing = set(constant_params.keys()) - set(param_dict.keys())
            missing = missing - set(varying_params_keys)
            for missing_key in missing:
                varying_params_keys.append(missing_key)
                constant_params.pop(missing_key)
        self._constant_params = constant_params
        # create varying param dicts by removing constant parameters
        # from all/original parameters
        varying_params = deepcopy(self._parameters)
        for paramdict in varying_params:
            for constant_key in self._constant_params:
                # key may not even be present, hence the ", None" default
                paramdict.pop(constant_key, None)
            for varying_key in varying_params_keys:
                if (not paramdict.has_key(varying_key)):
                    paramdict[varying_key] = "-"
        self._varying_params = varying_params
    def have_varying_datasets(self):
        """True if the experiments differ in any dataset filename key."""
        # now all varying params should have same keys
        # so just use first dict to see if there are different filesets
        return ('dataset_filename' in self._varying_params[0].keys() or
            'filename' in self._varying_params[0].keys() or
            'trainer_filename' in self._varying_params[0].keys())
    def have_varying_leave_out(self):
        """True if the experiments differ in the transfer leave-out."""
        return 'transfer_leave_out' in self._varying_params[0].keys()
    def get_misclasses(self):
        """ Get misclassifications as array of dicts (one dict per experiment)"""
        num_experiments = len(self._parameters)
        misclass_array = []
        for i in range(0, num_experiments):
            experiment_misclass = {}
            for key in self._misclasses:
                experiment_misclass[key] = self._misclasses[key][i]
            misclass_array.append(experiment_misclass)
        return misclass_array
    def num_experiments(self):
        """Number of loaded (and kept) result files."""
        return len(self._result_file_names)
    def constant_params(self):
        """Parameters shared by all experiments."""
        return self._constant_params
    def varying_params(self):
        """Per-experiment dicts of parameters that differ somewhere."""
        return self._varying_params
    def result_file_names(self):
        """File names of the loaded results, index-aligned with the
        other accessors."""
        return self._result_file_names
    def training_times(self):
        """Training time of each experiment (-1 where unavailable)."""
        return self._training_times
    def template(self):
        "All templates should be same, so return first one"
        return self._templates[0]
    def result_objects(self):
        """The raw loaded Result objects (or per-fold lists)."""
        return self._result_objects
class DatasetAveragedResults:
    """Groups a ResultPool's experiments that share all parameters
    except the dataset file (or transfer leave-out), so metrics can
    later be averaged across datasets."""
    def extract_results(self, result_pool):
        """Group `result_pool`'s experiments by identical
        dataset-independent parameters and build per-group results."""
        self._result_pool = result_pool
        experiments_same_params = self._extract_experiments_with_same_params()
        results = self.create_results(experiments_same_params)
        self._results = results
    def create_results(self, experiments_same_params, ):
        """One result list per group of experiment ids."""
        results = []
        for experiment_ids in experiments_same_params:
            result = self._create_results_one_param_set(experiment_ids)
            results.append(result)
        return results
    def _extract_experiments_with_same_params(self):
        """ Extract experiment ids of experiments that have the same parameters
        except for the dataset filename or the leave out from transfer."""
        params_without_dataset = deepcopy(self._result_pool.varying_params())
        sorted_params_without_dataset = []
        for params in params_without_dataset:
            # Strip every dataset-identifying key before comparison.
            params.pop('filename', None)
            params.pop('dataset_filename', None)
            params.pop('transfer_leave_out', None)
            params.pop('test_filename', None)
            params.pop('trainer_filename', None)
            # sort param dicts in same way to be able to compare
            # all param dicts with the __str__ method...
            sorted_keys = sorted(params.keys())
            sorted_params = OrderedDict()
            for key in sorted_keys:
                sorted_params[key] = params[key]
            sorted_params_without_dataset.append(sorted_params)
        params_without_dataset = sorted_params_without_dataset
        params_to_experiment = {}
        for experiment_i in range(len(params_without_dataset)):
            params = params_without_dataset[experiment_i]
            # check if params already exist, if yes, add at appropriate parts
            if str(params) in params_to_experiment:
                params_to_experiment[str(params)].append(experiment_i)
            else:
                params_to_experiment[str(params)] = [experiment_i]
        # same param ids will be like
        # [[0,2,4], [3,5,8,9]] in case experiments 0,2,4 and 3,5,8,9 have the
        # same parameters
        same_param_ids = params_to_experiment.values()
        # sort so that list of lists is sorted
        # by lowest experiment id in each list =>
        # appear in same order as in original table
        same_param_ids = sorted(same_param_ids, key=np.min)
        # check that there are no duplicate filenames among same parameter "blocks"
        original_params = self._result_pool.varying_params()
        for i_averaged_result, same_param_id_arr in enumerate(same_param_ids):
            all_dataset_filenames = [original_params[i]['dataset_filename'] for i in same_param_id_arr]
            if len(all_dataset_filenames) != len(np.unique(all_dataset_filenames)):
                log.warn("Duplicate filenames for dataset averaged result "
                    "{:d}".format(i_averaged_result))
                # from http://stackoverflow.com/a/11528581
                duplicates = [item for item, count in Counter(all_dataset_filenames).iteritems() if count > 1]
                log.warn("Duplicates {:s}".format(duplicates))
        # sort so that list of lists is sorted
        # by lowest experiment id in each list =>
        # appear in same order as in original table
        return same_param_ids
    def _create_results_one_param_set(self, experiment_ids):
        """ Create result for one sequence of experiment ids with same parameters"""
        results = []
        varying_params = self._result_pool.varying_params()
        misclasses = self._result_pool.get_misclasses()
        training_times = self._result_pool.training_times()
        result_objects = self._result_pool.result_objects()
        for experiment_id in experiment_ids:
            this_result = {'parameters': varying_params[experiment_id],
                'misclasses': misclasses[experiment_id],
                'training_time': training_times[experiment_id],
                'result_objects': result_objects[experiment_id]}
            results.append(this_result)
        return results
    def results(self):
        """Return the grouped results built by extract_results()."""
        return self._results
def get_confusion_mats(results):
    """Confusion matrix for each result object, in order."""
    mats = []
    for single_result in results:
        mats.append(get_confusion_mat(single_result))
    return mats
def get_confusion_mat(result):
    """Confusion matrix of one result's targets vs its predictions."""
    return confusion_matrix(result.targets, result.predictions)
def load_result_objects_for_folder(result_folder):
    """Load and return all result objects stored in `result_folder`."""
    pool = ResultPool()
    pool.load_results(result_folder)
    return pool.result_objects()
def load_dataset_grouped_result_objects_for(result_folder, params):
    """Load results matching `params` and return their result objects
    grouped by identical dataset-independent parameter sets."""
    pool = ResultPool()
    pool.load_results(result_folder, params=params)
    grouped = DatasetAveragedResults()
    grouped.extract_results(pool)
    return [[entry['result_objects'] for entry in group]
            for group in grouped.results()]
def delete_results(result_folder, params, test=True):
    """Delete result files matching `params`, together with their
    sibling .yaml/.pkl/.npy/lock files.

    Parameters
    ----------
    result_folder : str
        Folder containing the '*.result.pkl' files.
    params : dict
        Parameter filter passed to ResultPool.load_results.
    test : bool, optional
        Dry-run flag. Bug fix: previously the files were deleted even
        when test=True, despite only logging "Would delete"; now
        test=True logs and deletes nothing.
    """
    res_pool = ResultPool()
    res_pool.load_results(result_folder, params=params)
    n_found = len(res_pool._result_file_names)
    if test:
        log.warn("Would delete {:d} results from {:s}".format(
            n_found, result_folder))
    else:
        log.warn("Deleting {:d} results from {:s}".format(
            n_found, result_folder))
    for file_name in res_pool._result_file_names:
        if test:
            log.info("Would delete {:s}".format(file_name))
            continue
        log.info("Deleting {:s}".format(file_name))
        # Remove the result plus every derived sibling file.
        for related_name in (file_name,
                             file_name.replace('.result.pkl', '.yaml'),
                             file_name.replace('.result.pkl', '.pkl'),
                             file_name.replace('.result.pkl', '.npy'),
                             file_name.replace('.result.pkl', 'lock.pkl')):
            delete_if_exists(related_name)
def set_result_parameters_to(result_folder, params, **update_params):
    """Overwrite parameter values in stored result files.

    For every result file matching `params`, update its `parameters`
    dict with `update_params` and re-pickle it in place.

    NOTE(review): np.load on a pickled Result and re-pickling via
    open(file_name, 'w') (text mode) are Python-2-era idioms; under
    Python 3 this would need allow_pickle=True and mode 'wb' -- confirm
    the intended interpreter.
    """
    res_pool = ResultPool()
    res_pool.load_results(result_folder, params=params)
    for file_name in res_pool._result_file_names:
        result = np.load(file_name)
        # check if result already same, mainly for info
        if not dict_is_subset(update_params, result.parameters):
            log.info("Updating result {:s}".format(file_name))
        result.parameters.update(**update_params)
        pickle.dump(result, open(file_name,'w'))
def set_nonexisting_parameters_to(result_folder, params, **update_params):
    """For every result matching ``params``, set each parameter from
    ``update_params`` that the result does not have yet, and save the
    result back to disk if anything was added.

    Existing parameter values are never overwritten.
    """
    res_pool = ResultPool()
    res_pool.load_results(result_folder, params=params)
    for file_name in res_pool._result_file_names:
        result = np.load(file_name)
        # check if result already has parameter, else set it
        params_changed = False
        for param in update_params:
            if param not in result.parameters:
                result.parameters[param] = update_params[param]
                # Fixed: the flag was never set to True, so changed results
                # were silently never written back to disk.
                params_changed = True
        if params_changed:
            log.info("Updating result {:s}".format(file_name))
            pickle.dump(result, open(file_name,'w'))
def delete_if_exists(filename):
    """Remove ``filename`` from disk; a missing file is silently ignored."""
    try:
        os.remove(filename)
    except OSError:
        # File was not there (or not removable) -- nothing to do.
        pass
def delete_duplicate_results(result_folder):
    """Delete results whose varying parameters duplicate an earlier result.

    For each duplicate, the result file plus its companion
    experiment/model(outdated, used to exist)/param files are removed.
    """
    pool = ResultPool()
    pool.load_results(result_folder)
    file_names = pool.result_file_names()
    seen_params = []
    duplicate_file_names = []
    for file_name, params in zip(file_names, pool.varying_params()):
        if np.any([dict_equal(params, seen) for seen in seen_params]):
            log.warn("Duplicate result {:s}".format(file_name))
            duplicate_file_names.append(file_name)
        else:
            seen_params.append(params)
    # Remove every companion file sharing the duplicate's number.
    for file_name in duplicate_file_names:
        for extension in ('.result.pkl', '.yaml', '.pkl', '.npy'):
            delete_if_exists(file_name.replace('.result.pkl', extension))
def mark_duplicate_results(result_folder, tag_dict):
    """Find results whose varying parameters duplicate an earlier result and
    merge ``tag_dict`` into their parameters on disk."""
    pool = ResultPool()
    pool.load_results(result_folder)
    file_names = pool.result_file_names()
    seen_params = []
    duplicate_file_names = []
    for file_name, params in zip(file_names, pool.varying_params()):
        if np.any([dict_equal(params, seen) for seen in seen_params]):
            log.warn("Duplicate result {:s}".format(file_name))
            duplicate_file_names.append(file_name)
        else:
            seen_params.append(params)
    # Tag each duplicate and write it back.
    for file_name in duplicate_file_names:
        result = np.load(file_name)
        result.parameters.update(tag_dict)
        pickle.dump(result, open(file_name, 'w'))
def move_results(source_folder, target_folder, params):
    """Move all results matching ``params`` (plus their companion
    yaml/model/param files) from ``source_folder`` into ``target_folder``,
    renumbering them above the highest result/lock number already there.
    """
    # Load results to determine filenames
    res_pool = ResultPool()
    res_pool.load_results(source_folder, params=params)
    # Determine largest number of existing result to determine new
    # filenames
    existing_result_files = glob(os.path.join(target_folder, "*[0-9].result.pkl"))
    existing_result_nrs = [int(file_name.split('/')[-1][:-len('.result.pkl')])\
        for file_name in existing_result_files]
    existing_lock_files = glob(os.path.join(target_folder, "*[0-9].lock.pkl"))
    existing_lock_nrs = [int(file_name.split('/')[-1][:-len('.lock.pkl')])\
        for file_name in existing_lock_files]
    # New numbers start just above any existing result or lock number.
    lower_offset = max([0] + existing_lock_nrs + existing_result_nrs)
    # Do actual copying for all possible existing files
    for i_res, res_file_name in enumerate(res_pool.result_file_names()):
        for extension in ('.result.pkl', '.yaml', '.pkl', '.npy'):
            existing_file_name = res_file_name.replace('.result.pkl', extension)
            if os.path.exists(existing_file_name):
                new_file_name = os.path.join(target_folder,
                    str(i_res + lower_offset + 1) + extension)
                # Never overwrite anything already in the target folder.
                assert not os.path.exists(new_file_name)
                log.info("Moving {:s} to {:s}".format(existing_file_name,
                    new_file_name))
                shutil.move(existing_file_name, new_file_name)
def extract_combined_results(folder, params, folder_2, params_2):
    """Load the single sorted result group from each of two folders and
    concatenate them into one array."""
    first_group = extract_single_group_result_sorted(folder, params=params)
    second_group = extract_single_group_result_sorted(folder_2, params=params_2)
    return np.concatenate((first_group, second_group))
def sort_results_by_filename(results):
    """Return the results sorted by their 'dataset_filename' parameter."""
    def filename_key(result):
        return result.parameters['dataset_filename']
    return sorted(results, key=filename_key)
def get_final_misclasses(results, set_type='test'):
    """Final (last-epoch) misclassification rate of ``set_type`` for each
    result, as a numpy array."""
    final_rates = [result.get_misclasses()[set_type][-1] for result in results]
    return np.array(final_rates)
def get_training_times(results):
    """Training time of each result, collected into a numpy array."""
    times = [result.training_time for result in results]
    return np.array(times)
def get_all_misclasses(results):
    """Per-epoch misclassification curves for every result, keyed by
    'train'/'valid'/'test'."""
    return {
        set_type: [result.get_misclasses()[set_type] for result in results]
        for set_type in ('train', 'valid', 'test')
    }
def get_padded_chan_vals(all_exp_chan_vals, pad_by='last'):
    """Pad values for several experiments to maximum number of epochs across experiments.

    Pad with the last value for given experiment. For example if you have two
    experiments with values::

        [0.5,0.2,0.1]
        [0.6,0.4]

    Then pad to::

        [0.5,0.2,0.1]
        [0.6,0.4,0.4]

    Parameters
    --------
    all_exp_chan_vals: list of 1d arrays
    pad_by: 'last' or a concrete value

    Returns
    -------
    padded_values: 2d array
        Values padded to same length/number of epochs.
    n_exps_by_epoch: 1d array
        Number of experiments that were still running in per epoch.
    """
    n_epochs_max = max(len(vals) for vals in all_exp_chan_vals)
    padded_values = np.full((len(all_exp_chan_vals), n_epochs_max), np.nan)
    n_exps_by_epoch = np.zeros(n_epochs_max)
    for i_exp, vals in enumerate(all_exp_chan_vals):
        n_epochs = len(vals)
        padded_values[i_exp, :n_epochs] = vals
        # Choose the per-experiment fill value for the missing epochs.
        fill_value = vals[-1] if pad_by == 'last' else pad_by
        padded_values[i_exp, n_epochs:] = fill_value
        n_exps_by_epoch[:n_epochs] += 1
    if pad_by is not np.nan:
        # Unless padding with NaN was requested, no NaNs may remain.
        assert not np.any(np.isnan(padded_values))
    return padded_values, n_exps_by_epoch
def compute_confusion_matrix(result_objects):
    """Confusion matrix over the test-set predictions of all given results.

    ``result_objects`` may be a flat list of result objects or a list of
    per-dataset collections of fold results; both layouts are handled.
    Targets/predictions are expected as one-hot arrays; labels are taken
    as the argmax over axis 1.
    """
    # Narrowed from a bare except: iterating a plain (non-iterable) result
    # raises TypeError, a missing attribute raises AttributeError. A bare
    # except would also have swallowed KeyboardInterrupt etc.
    try:
        targets = [fold_res.targets for dataset_result_obj in result_objects
                   for fold_res in dataset_result_obj]
    except (TypeError, AttributeError):
        targets = [dataset_result_obj.targets for dataset_result_obj in result_objects]
    test_targets = [target['test'] for target in targets]
    test_labels = [np.argmax(test_target, axis=1) for test_target in test_targets]
    test_labels_flat = list(itertools.chain(*test_labels))
    try:
        predictions = [fold_res.predictions for dataset_result_obj in result_objects
                       for fold_res in dataset_result_obj]
    except (TypeError, AttributeError):
        predictions = [dataset_result_obj.predictions for dataset_result_obj in result_objects]
    test_predictions = [prediction['test'] for prediction in predictions]
    test_predicted_labels = [np.argmax(test_prediction, axis=1) for test_prediction in test_predictions]
    test_predicted_labels_flat = list(itertools.chain(*test_predicted_labels))
    confusion_mat = confusion_matrix(y_true=test_labels_flat,
                                     y_pred=test_predicted_labels_flat)
    return confusion_mat
def compute_confusion_matrix_csp(result_objects):
    """Confusion matrix over the test folds of CSP results."""
    # Labels are nested per dataset and per fold, so flatten two levels.
    # TODELAY: in this case i guess number of folds always same so maybe just
    # wrap with np array and then flatten? instead of itertools chain?
    per_result_labels = [r.multi_class.test_labels for r in result_objects]
    true_labels = list(itertools.chain(*itertools.chain(*per_result_labels)))
    per_result_predicted = [r.multi_class.test_predicted_labels
                            for r in result_objects]
    predicted_labels = list(itertools.chain(*itertools.chain(*per_result_predicted)))
    return confusion_matrix(y_true=true_labels, y_pred=predicted_labels)
def extract_single_group_result_sorted(folder, params):
    """Return the single group of results matching ``params``, sorted by
    dataset filename. Asserts that exactly one group exists."""
    groups = load_dataset_grouped_result_objects_for(folder, params=params)
    assert len(groups) == 1, "Assuming just one group result here"
    # sort by filename!!
    return sort_results_by_filename(groups[0])
def extract_single_group_misclasses_sorted(folder, params):
    """Final test misclassification rates for the single sorted result
    group matching ``params``."""
    sorted_results = extract_single_group_result_sorted(folder, params)
    return get_final_misclasses(sorted_results)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-03-06 05:38
from __future__ import unicode_literals
from django.db import migrations, models
import tinymce.models
class Migration(migrations.Migration):
    """Initial migration: creates the ``student_data`` table."""

    # First migration of this app.
    initial = True

    # No prior migrations to depend on.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='student_data',
            fields=[
                # The roll number is the primary key (no auto 'id' column).
                ('roll_no', models.CharField(max_length=20, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('sem', models.IntegerField(default=0)),
                # Optional contact / profile fields.
                ('mobile', models.CharField(blank=True, max_length=10, null=True)),
                ('email', models.CharField(blank=True, max_length=200, null=True)),
                ('linkedin_url', models.CharField(blank=True, max_length=500, null=True)),
                ('github_url', models.CharField(blank=True, max_length=500, null=True)),
                ('photo', models.ImageField(default='media/default.png', upload_to='student_images/')),
                # Rich-text skills field rendered via TinyMCE.
                ('skill', tinymce.models.HTMLField()),
            ],
        ),
    ]
|
import numpy as np
import sys
def sub_partition(S):
    # NOTE(review): this function is unfinished/broken -- `new_p` is never
    # defined (NameError on first use), the commented-out remove/add steps
    # are not implemented, and nothing is returned.
    types = np.unique(S)
    tuple_number = choose(len(types),2)  # NOTE(review): computed but unused
    sub_partitions = []
    for i in types:
        for j in types:
            if i != j:
                # remove the first ith and jth elements
                S = [e for e in S]  # NOTE(review): only copies S; removes nothing
                # add (i+j)th element
                sub_partitions = np.append(sub_partitions, new_p)  # NOTE(review): `new_p` undefined
def get_partitions_of(num):
    # NOTE(review): unfinished -- the trailing `p =` line is a syntax error,
    # so this file cannot even be imported; the merging logic of the else
    # branch was never written. Uses Python 2 `xrange`.
    p = np.array([1 for i in xrange(num)])
    types = np.unique(p)  # NOTE(review): computed once, never updated in the loop
    while len(p) > 1:
        if len(types) == 1:
            # Merge the last two elements into their sum.
            p = np.append(p[:-2], sum(p[-2:]))
        else:
            # get the number of new elements that can be made
            tuple_number = choose(len(types),2)
            for i in xrange(tuple_number):
                p =
def choose(n,k):
    """Binomial coefficient C(n, k) as an exact integer.

    Fixed: previously delegated to the local recursive ``factorial``, which
    never terminates for an argument of 0 -- so ``choose(n, n)`` and
    ``choose(n, 0)`` recursed forever. ``math.factorial`` handles 0
    correctly, and integer division keeps the result exact.
    """
    from math import factorial as math_factorial  # local import: file has no math import
    return math_factorial(n) // (math_factorial(n - k) * math_factorial(k))
def factorial(n):
    """Return n! for n >= 0.

    Fixed: the base case is now ``n <= 1`` so that ``factorial(0) == 1``;
    the old base case only handled ``n == 1`` and recursed forever for 0.
    """
    if n <= 1: return 1
    else: return n*factorial(n-1)
if __name__ == '__main__':
    # NOTE(review): `main` is not defined anywhere in this file, so running
    # the script raises NameError. TODO: implement main(num) or call
    # get_partitions_of(num) directly.
    main(int(sys.argv[1]))
|
"""
La idea es crear un script que tenga una funcion que me de Rc'\D' y R0'/D'
dandole como entrada beta, para toda i posible. Luego, dar un intervalo para
Rc/Ro y para R0/D y de la salida que me dio ver cuales valores de i dan un R0
y Rc que caigan en el intervalo, y guardar esos valores de i en un archivo, y
graficar despues
"""
#import libprop
import numpy as np
import argparse
import matplotlib.pyplot as plt
from scipy.optimize import bisect, leastsq
from equation6 import Shell
import json
from scipy.interpolate import interp1d
"""
* First step: Set interval for R0 and Rc
* Second step: Create the bowshock NA
* Third step: Measure Ro and Rc NA
* 2nd A step: Read the file with de R0-Rc curves
Repeat second A -and third step- for all possible inclinations (End of first test)
* Fourth step: Check if R0 and Rc fits into the interval (Possible results: Yes or No)
* Fifth step: if forurth is true, graph the corresponding b & i, if false, don't do anything
* Sixth step: Repeat from second to fifth step for many i's and b's
"""
"""
set interval:
For LV3, for example:
R0/D = 0.336 +- 1e-3
Rc/R0 = 2.0342262 +- 0.139
And so on for the rest of the proplyds
"""
# Precomputed Rc-R0 model curves, keyed by beta (stored as string).
shelldata = json.load(open("rc-r0.json"))
# input of observational measurements of R0/D
proplyd = ["LV2","LV3","LV4","LV5","177-341","167-328"]
# One matplotlib marker style per proplyd.
color = ['r.','g.','b.','y.','c.','m.']
R0m = np.array([0.2385,0.336,0.188,0.2125,0.132,0.096])
# input of observational inputs for Rc and errorbars
Rcm = np.array([1.468,2.034,1.987,1.501,1.405,1.297])
Rcd = np.array([0.194,0.139,0.072,0.146,0.118,0.269])
# Allowed Rc/R0 band per proplyd: observed value +/- its uncertainty.
ylow = Rcm-Rcd
yhigh = Rcm+Rcd
for j,p in enumerate(proplyd):
    for model in shelldata.items():
        uni_beta,radii = model
        beta = float(uni_beta)
        r0 = np.array(radii["R0'"])
        rc = np.array(radii['Rc'])
        inc = np.array(radii['inc'])
        # Interpolators: Rc/R0 and inclination as functions of R0/D.
        f = interp1d(r0,rc/r0)
        g = interp1d(r0,inc)
        # choose the matching radii with observations, supposing that we can neglect the errorbars in R0/D
        # and checking if the interpolated Rc/R0 value matches with the y errorbar
        try:
            m1 = (f(R0m[j]) < yhigh[j]) & (f(R0m[j]) > ylow[j])
        except ValueError:
            # Observed R0/D lies outside this model's interpolation range.
            continue
        # if the m1 condition is satisfied, then plot the data, the y axis is beta and the
        # x axis is the inclination. This only applies for LV3 so far
        # print R0m,f(R0m),beta,ylow,yhigh,m1
        if m1 == True:
            plt.plot(np.degrees(g(R0m[j])),beta,color[j]) #How to set the legend just once?
# Add the reference data from GAH (2002) with inclination error bars.
obs_beta = [0.126,0.061,0.040,0.073,0.135]
obs_inc = [60,45,45,45,60]
del_inc = [7,15,15,15,7]
plt.errorbar(obs_inc,obs_beta,xerr=del_inc,fmt='ko')
plt.yscale('log')
plt.grid()
plt.xlim(0,90)
plt.ylim(0.001 - 1e-4,0.08 + 1e-4)
plt.xlabel("i(deg)")
plt.ylabel("beta")
plt.title("i vs beta plausible for proplyds")
plt.savefig("i-beta.pdf")
|
# -*- coding: utf-8 -*-
import numpy as np
import pylab as pl
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
from sklearn import metrics
# For reproducibility of the random choice of centroids from the data
# points when KMeans is called with init='random'.
np.random.seed(42)
zoo_data = np.loadtxt("data/nutrient.csv", delimiter=",")
# Standardize the features (zero mean, unit variance).
# NOTE(review): the slice [:, :zoo_data.shape[1]] keeps every column, i.e.
# it is a no-op -- confirm whether a label column was meant to be dropped.
data = scale(zoo_data[:,:zoo_data.shape[1]])
print data
n_samples, n_features = data.shape
print("n_samples %d, \t n_features %d"
      % (n_samples, n_features))
print(79 * '_')
# # 'k-means++' picks the initial cluster centres in a way that speeds up
# # convergence of the method
# basic_benchmark(KMeans(init='k-means++', n_clusters=n_classes, n_init=10),
#                 name="k-means++", data=data, labels=labels)
# # initial centroids are chosen at random from the data points
# basic_benchmark(KMeans(init='random', n_clusters=n_classes, n_init=10),
#                 name="random", data=data, labels=labels)
# # K-means initialized with the principal components of the data
# pca = PCA(n_components=n_classes).fit(data)
# basic_benchmark(KMeans(init=pca.components_, n_clusters=n_classes, n_init=1),
#                 name="PCA-based",
#                 data=data, labels=labels)
###############################################################################
# Visualization of two extracted components (via PCA) of the data.
reduced_data = PCA(n_components=2).fit_transform(data)
# Cluster the full-dimensional data with k=3 and k=4 and report the
# silhouette score of each clustering.
kmeans_3clust = KMeans(init='k-means++', n_clusters=3, n_init=50)
kmeans_3clust.fit(data)
print kmeans_3clust.labels_
print(metrics.silhouette_score(data, kmeans_3clust.labels_,
                               metric='euclidean'))
kmeans_4clust = KMeans(init='k-means++', n_clusters=4, n_init=30)
kmeans_4clust.fit(data)
print kmeans_4clust.labels_
print(metrics.silhouette_score(data, kmeans_4clust.labels_,
                               metric='euclidean'))
# Grid with step h over the rectangle [x_min, x_max] x [y_min, y_max],
# used to colour the background according to the predicted cluster label.
h = .01
x_min, x_max = reduced_data[:, 0].min(), reduced_data[:, 0].max()
y_min, y_max = reduced_data[:, 1].min(), reduced_data[:, 1].max()
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Z is the vector of cluster labels for the grid nodes.
# NOTE(review): kmeans_4clust was fitted on the full-dimensional data but
# predicts on 2-D PCA points here -- confirm this is intended.
Z = kmeans_4clust.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
pl.figure(1)
pl.clf()
# Cluster labels colour the background.
pl.imshow(Z, interpolation='nearest',
          extent=(xx.min(), xx.max(), yy.min(), yy.max()),
          cmap=pl.cm.Paired,
          aspect='auto', origin='lower')
# Plot the points corresponding to the two principal components of the
# original data.
pl.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Centroids are drawn as white crosses.
centroids = kmeans_4clust.cluster_centers_
pl.scatter(centroids[:, 0], centroids[:, 1],
           marker='x', s=169, linewidths=3,
           color='w', zorder=10)
pl.title('K-means clustering on the zoo dataset (PCA-reduced data)\n'
         'Centroids are marked with white crosses.')
pl.xlim(x_min, x_max)
pl.ylim(y_min, y_max)
pl.xticks(())
pl.yticks(())
pl.show()
|
import numpy as np
import math
# from sklearn.linear_model import LinearRegression
from curvature import calcurvature
from scipy.optimize import curve_fit
# prev_leftdy = 0
# prev_rightdy = 0
# prev_leftc = 0
# prev_rightc = 0
def curve(left_lane, right_lane):
    # Fit circular-arc (or straight fallback) models to the left/right lane
    # point sets and sample them into plot-ready (x, y) arrays.
    # NOTE(review): several branches below reference names that are never
    # assigned on that code path (`right_fit`, `left_fit`, `left_direction`,
    # `right_direction`, `leftdy`, `leftc`, `rightdy`, `rightc`) and would
    # raise NameError if taken -- flagged inline.
    xleft_plot = np.arange(5,40,0.01).reshape(-1,1)
    xright_plot = np.arange(5,40,0.01).reshape(-1,1)
    if len(left_lane) < 10 and len(right_lane) < 10:
        # Too few points on both sides: straight lanes at y = +-2.
        leftdy, leftc = 0,2
        rightdy, rightc = 0,-2
        yleft_plot = np.ones(3500)*2
        yleft_plot = yleft_plot.reshape(-1,1)
        yright_plot = np.ones(3500)*(-2)
        yright_plot = yright_plot.reshape(-1,1)
        flag = False
        left_fit = [flag, leftdy,leftc]
        right_fit = [flag, rightdy,rightc]
    elif len(left_lane)< 10:
        # NOTE(review): this branch (sparse LEFT lane) still fits
        # `left_lane` -- presumably `right_lane` was intended; confirm.
        left_r, left_center, direction = calcurvature(left_lane)
        # left_center = direction*left_r +1.5
        right_r = left_r+direction*3
        right_center = left_center
        if 20 <right_r:
            # NOTE(review): `right_fit` is used here before assignment.
            yright_plot = curve_equation(xright_plot, right_fit).reshape(-1,1)
            yleft_plot = yright_plot+3
            # NOTE(review): `left_direction`/`right_direction` are undefined here.
            left_fit = [left_r,left_center, left_direction]
            right_fit = [right_r,right_center, right_direction]
        else:
            yleft_plot = np.ones(3500)*2
            yleft_plot = yleft_plot.reshape(-1,1)
            yright_plot = np.ones(3500)*(-2)
            yright_plot = yright_plot.reshape(-1,1)
            flag = False
            # NOTE(review): `leftdy`/`leftc`/`rightdy`/`rightc` undefined here.
            left_fit = [flag, leftdy,leftc]
            right_fit = [flag, rightdy,rightc]
    elif len(right_lane)<10:
        # Sparse right lane: derive it from the left-lane fit, offset by 3.
        left_r, left_center, direction = calcurvature(left_lane)
        # left_center = direction*left_r +1.5
        right_r = left_r+direction*3
        # right_center = direction*right_r -1.5
        right_center = left_center
        if 20 <left_r:
            # NOTE(review): `left_fit` is used here before assignment.
            yleft_plot = curve_equation(xleft_plot, left_fit).reshape(-1,1)
            yright_plot = yleft_plot+3
            # NOTE(review): `left_direction`/`right_direction` are undefined here.
            left_fit = [left_r,left_center, left_direction]
            right_fit = [right_r,right_center, right_direction]
        else:
            yleft_plot = np.ones(3500)*2
            yleft_plot = yleft_plot.reshape(-1,1)
            yright_plot = np.ones(3500)*(-2)
            yright_plot = yright_plot.reshape(-1,1)
            flag = False
            # NOTE(review): `leftdy`/`leftc`/`rightdy`/`rightc` undefined here.
            left_fit = [flag, leftdy,leftc]
            right_fit = [flag, rightdy,rightc]
    else:
        # Enough points on both sides: fit both lanes independently.
        left_r, left_center, left_direction = calcurvature(left_lane)
        # left_center = left_direction*left_r +1.5
        right_r, right_center, right_direction = calcurvature(right_lane)
        # right_center = right_direction*right_r -1.5
        left_fit = [left_r,left_center, left_direction]
        right_fit = [right_r,right_center, right_direction]
        if 20 <left_r and 20 <right_r:
            yleft_plot = curve_equation(xleft_plot, left_fit).reshape(-1,1)
            yright_plot = curve_equation(xright_plot, right_fit).reshape(-1,1)
            left_fit = [left_r,left_center, left_direction]
            right_fit = [right_r,right_center, right_direction]
        elif 20 <left_r:
            # Only the left radius is trustworthy: mirror it to the right.
            yleft_plot = curve_equation(xleft_plot, left_fit).reshape(-1,1)
            yright_plot = yleft_plot+3
            left_fit = [left_r,left_center, left_direction]
            right_fit = [right_r,right_center, right_direction]
        elif 20 <right_r:
            yright_plot = curve_equation(xright_plot, right_fit).reshape(-1,1)
            yleft_plot = yright_plot+3
            left_fit = [left_r,left_center, left_direction]
            right_fit = [right_r,right_center, right_direction]
        else:
            yleft_plot = np.ones(3500)*2
            yleft_plot = yleft_plot.reshape(-1,1)
            yright_plot = np.ones(3500)*(-2)
            yright_plot = yright_plot.reshape(-1,1)
            flag = False
            # NOTE(review): `leftdy`/`leftc`/`rightdy`/`rightc` undefined here.
            left_fit = [flag, leftdy,leftc]
            right_fit = [flag, rightdy,rightc]
    # print(yleft_plot)
    left_lane = np.append(xleft_plot,yleft_plot,axis =1)
    right_lane = np.append(xright_plot,yright_plot,axis =1)
    # print(right_lane)
    # NOTE(review): `right_r`/`left_r` are undefined when the first branch
    # was taken -- these prints would raise NameError in that case.
    print('r: ',right_r)
    print(left_r)
    return left_lane, right_lane, left_fit, right_fit
def line_equation(x,line_fit):
    """Invert a straight-line lane fit: given x, return y with x = dy*y + c.

    ``line_fit`` is laid out as [flag, dy, c]; only indices 1 and 2 are used.
    """
    slope = line_fit[1]
    intercept = line_fit[2]
    return (x - intercept) / slope
def curve_equation(x,curve_fit):
    """Evaluate the circular-arc lane model at x.

    ``curve_fit`` is [radius, center, direction]; returns
    center - direction * sqrt(radius^2 - x^2).
    """
    x = np.array(x)
    radius, center, direction = curve_fit[0], curve_fit[1], curve_fit[2]
    return center - direction * (radius ** 2 - x ** 2) ** 0.5
def circle(y,a,b,r):
    """Circle of radius r centred at (a, b), solved for x:
    x = sqrt(r^2 - (y - b)^2) + a (positive branch)."""
    half_chord = (r ** 2 - (y - b) ** 2) ** 0.5
    return half_chord + a
def invadeROI(point, left_fit, right_fit):
    # Return True when `point` lies between the left and right lane models.
    if left_fit[0] == False:
        # Straight-line fallback fits: layout [flag, dy, c].
        y_left = line_equation(point, left_fit)
        y_right = line_equation(point, right_fit)
    else:
        # NOTE(review): curve fits are built as [r, center, direction]
        # (three elements), so the index-1..3 reads below look shifted by
        # one -- left_fit[3] would raise IndexError. Confirm the fit layout.
        left_r = left_fit[1]
        left_center = left_fit[2]
        left_direction = left_fit[3]
        right_r= right_fit[1]
        right_center = right_fit[2]
        right_direction = right_fit[3]
        # NOTE(review): the unpacked values above are unused; the whole fit
        # lists are passed directly to curve_equation.
        y_left = curve_equation(point, left_fit)
        y_right = curve_equation(point, right_fit)
    # NOTE(review): `point` is passed whole to the equations yet indexed
    # here -- presumably point = (x, y); verify against the caller.
    if point[1]>y_left and point[1]<y_right: invade = True
    else: invade = False
    return invade
# def roi_box(left_lane, right_lane, line1_fit, line2_fit):
# line1pred = line1_fit.predict(left_lane[:,0]).reshape([len1,1])
# line2pred = line2_fit.predict(right_lane[:,0]).reshape([len2,1])
# left_max = left_lane[:][np.argmax(line1pred),:2]
# left_min = left_lane[:][np.argmin(line1pred),:2]
# left_min = left_lane[:][np.argmin(line1pred),:2]
|
#!/usr/bin/env python3
"""
Analysis class to read a ROOT TTree of MC track information
and do jet-finding, and save response histograms.
Author: James Mulligan (james.mulligan@berkeley.edu)
"""
from __future__ import print_function
# General
import os
import sys
import argparse
# Data analysis and plotting
import numpy as np
import ROOT
import yaml
from array import *
# Fastjet via python (from external library heppy)
import fastjet as fj
import fjcontrib
import fjtools
# Analysis utilities
from pyjetty.alice_analysis.process.base import process_io
from pyjetty.alice_analysis.process.base import process_io_emb
from pyjetty.alice_analysis.process.base import jet_info
from pyjetty.alice_analysis.process.user.substructure import process_mc_base
from pyjetty.alice_analysis.process.base import thermal_generator
from pyjetty.mputils import CEventSubtractor
################################################################
class ProcessMC_jet_axis(process_mc_base.ProcessMCBase):
    """Jet-axis MC analysis: fills residual and response histograms for the
    angle between different jet-axis definitions (Standard/SD/WTA)."""

    #---------------------------------------------------------------
    # Constructor
    #---------------------------------------------------------------
    def __init__(self, input_file='', config_file='', output_dir='', debug_level=0, **kwargs):
        # Initialize base class
        super(ProcessMC_jet_axis, self).__init__(input_file, config_file, output_dir, debug_level, **kwargs)
        # This analysis handles a single observable: the first configured one.
        self.observable = self.observable_list[0]

    #---------------------------------------------------------------
    # Initialize histograms
    #---------------------------------------------------------------
    def initialize_user_output_objects_R(self, jetR):
        # One residual histogram and one response THn per axes
        # subconfiguration (e.g. Standard_SD / Standard_WTA / WTA_SD).
        for i, axes in enumerate(self.obs_settings[self.observable]):
            grooming_setting = self.obs_grooming_settings[self.observable][i]
            if grooming_setting:
                grooming_label = self.utils.grooming_label(grooming_setting)
            else:
                grooming_label = ''
            # Residual: relative deltaR difference (det vs truth) vs truth pT.
            name = 'hResidual_JetPt_{}_R{}_{}{}'.format(self.observable, jetR, axes, grooming_label)
            h = ROOT.TH2F(name, name, 300, 0, 300, 100, -1*jetR, jetR)
            h.GetXaxis().SetTitle('p_{T,truth}')
            h.GetYaxis().SetTitle('#frac{#DeltaR_{det}-#DeltaR_{truth}}{#DeltaR_{truth}}')
            setattr(self, name, h)

            # Create THn of response for jet axis deltaR
            dim = 4;
            title = ['p_{T,det}', 'p_{T,truth}', '#DeltaR_{det}', '#DeltaR_{truth}']
            nbins = [30, 30, 80, 40]
            # NOTE(review): these shadow the builtins `min`/`max` within this
            # method -- consider renaming (kept unchanged here).
            min = [0., 0., 0., 0.]
            max = [150., 300., jetR, jetR]
            name = 'hResponse_JetPt_{}_R{}_{}{}'.format(self.observable, jetR, axes, grooming_label)
            self.create_thn(name, title, dim, nbins, min, max)

    #---------------------------------------------------------------
    # Calculate the WTA jet given a 'standard' jet
    #---------------------------------------------------------------
    def get_wta_jet(self, jet, jetR):
        # Recluster with WTA (with larger jet R, so all constituents are kept)
        jet_def_wta = fj.JetDefinition(fj.cambridge_algorithm, 2*jetR)
        jet_def_wta.set_recombination_scheme(fj.WTA_pt_scheme)
        if self.debug_level > 3:
            print('WTA jet definition is:', jet_def_wta)
        reclusterer_wta = fjcontrib.Recluster(jet_def_wta)
        return reclusterer_wta.result(jet)

    #---------------------------------------------------------------
    # Calculate the observable given a jet
    #---------------------------------------------------------------
    def calculate_observable(self, observable, jet, jet_groomed_lund,
        jetR, obs_setting, grooming_setting, obs_label, jet_pt_ungroomed):
        # deltaR between the two jet-axis definitions selected by obs_setting.
        if obs_setting == 'Standard_SD':
            return jet.delta_R(jet_groomed_lund.pair())
        elif obs_setting == 'Standard_WTA':
            return jet.delta_R(self.get_wta_jet(jet, jetR))
        elif obs_setting == 'WTA_SD':
            return jet_groomed_lund.pair().delta_R(self.get_wta_jet(jet, jetR))
        # Should not be any other observable
        raise ValueError("Observable %s not implemented" % observable)

    #---------------------------------------------------------------
    # This function is called once for each jet subconfiguration
    # Fill 2D histogram of (pt, obs)
    #---------------------------------------------------------------
    def fill_observable_histograms(self, observable, hname, jet, jet_groomed_lund,
        jetR, obs_setting, grooming_setting, obs_label, jet_pt_ungroomed):
        deltaR = self.calculate_observable(observable, jet, jet_groomed_lund,
            jetR, obs_setting, grooming_setting, obs_label, jet_pt_ungroomed)
        getattr(self, hname.format(observable, jetR, obs_label)).Fill(jet_pt_ungroomed, deltaR)

    #---------------------------------------------------------------
    # Fill matched jet histograms
    #---------------------------------------------------------------
    def fill_matched_jet_histograms(self, observable, jet_det, jet_det_groomed_lund,
        jet_truth, jet_truth_groomed_lund, jet_pp_det, jetR, obs_setting,
        grooming_setting, obs_label, jet_pt_det_ungroomed, jet_pt_truth_ungroomed,
        R_max, suffix, **kwargs):
        # Compute deltaR at detector and truth level for the matched pair.
        deltaR_det = self.calculate_observable(observable, jet_det, jet_det_groomed_lund,
            jetR, obs_setting, grooming_setting, obs_label, jet_pt_det_ungroomed)
        deltaR_truth = self.calculate_observable(observable, jet_truth, jet_truth_groomed_lund,
            jetR, obs_setting, grooming_setting, obs_label, jet_pt_truth_ungroomed)
        # Fill response
        self.fill_response(observable, jetR, jet_pt_det_ungroomed, jet_pt_truth_ungroomed,
            deltaR_det, deltaR_truth, obs_label, R_max, prong_match = False)
##################################################################
if __name__ == '__main__':
    # Define arguments
    parser = argparse.ArgumentParser(description='Process MC')
    parser.add_argument('-f', '--inputFile', action='store',
                        type=str, metavar='inputFile',
                        default='AnalysisResults.root',
                        help='Path of ROOT file containing TTrees')
    parser.add_argument('-c', '--configFile', action='store',
                        type=str, metavar='configFile',
                        default='config/analysis_config.yaml',
                        help="Path of config file for analysis")
    parser.add_argument('-o', '--outputDir', action='store',
                        type=str, metavar='outputDir',
                        default='./TestOutput',
                        help='Output directory for output to be written to')

    # Parse the arguments
    args = parser.parse_args()

    print('Configuring...')
    print('inputFile: \'{0}\''.format(args.inputFile))
    print('configFile: \'{0}\''.format(args.configFile))
    # Fixed: message read "ouputDir" and mixed a single and a double quote
    # around the value; now spelled correctly and quoted consistently.
    print('outputDir: \'{0}\''.format(args.outputDir))

    # If invalid inputFile is given, exit
    if not os.path.exists(args.inputFile):
        print('File \"{0}\" does not exist! Exiting!'.format(args.inputFile))
        sys.exit(0)

    # If invalid configFile is given, exit
    if not os.path.exists(args.configFile):
        print('File \"{0}\" does not exist! Exiting!'.format(args.configFile))
        sys.exit(0)

    analysis = ProcessMC_jet_axis(input_file=args.inputFile, config_file=args.configFile, output_dir=args.outputDir)
    analysis.process_mc()
|
def search(budget,prices):
    """Return a comma-separated string of the prices in ``prices`` that fit
    within ``budget``, in ascending order."""
    affordable = sorted(price for price in prices if price <= budget)
    return ','.join(str(price) for price in affordable)
'''
You love coffee and want to know which coffee beans you can afford to buy.
The first argument to your 'search' function will be a number representing your budget.
The second argument will be an array of coffee bean prices.
Your 'search' function should return a string of the prices of the coffee beans
that are within your budget, sorted in ascending order.
'''
|
from typing import Iterable, List
from parseridge.corpus.sentence import Sentence
from parseridge.corpus.signature import Signature
from parseridge.utils.helpers import Relation, T
from parseridge.utils.logger import LoggerMixin
class Relations(LoggerMixin):
    """Inventory of transition labels built from the dependency relations
    observed in a corpus.

    The label list is laid out as [SHIFT, SWAP, LEFT_ARC(rel_1),
    RIGHT_ARC(rel_1), LEFT_ARC(rel_2), ...] so that ``slices`` can address
    each transition type with a fixed stride-2 slice.
    """

    def __init__(self, sentences: List[Sentence]):
        # Collect the set of relations used anywhere in the sentences.
        relations = set()
        for sentence in sentences:
            for token in sentence:
                if token.relation:
                    relations.add(token.relation)
        # Sort for a deterministic label ordering across runs.
        relations = list(sorted(relations))

        self.label_signature = Signature[str](warn_on_oov=True)
        self.signature = Signature[str](warn_on_oov=True)

        # SHIFT and SWAP carry no relation and always occupy indices 0 and 1.
        self.labels = [Relation(T.SHIFT, None), Relation(T.SWAP, None)]
        self.label_signature.add(Relation(T.SHIFT, None))
        self.label_signature.add(Relation(T.SWAP, None))

        # Each relation contributes an interleaved LEFT_ARC/RIGHT_ARC pair;
        # this alternation is what makes the stride-2 slices below valid.
        for relation in relations:
            self.signature.add(relation)

            self.labels.append(Relation(T.LEFT_ARC, relation))
            self.label_signature.add(Relation(T.LEFT_ARC, relation))

            self.labels.append(Relation(T.RIGHT_ARC, relation))
            self.label_signature.add(Relation(T.RIGHT_ARC, relation))

        # Per-transition-type views into self.labels.
        self.slices = {
            T.SHIFT: slice(0, 1),
            T.SWAP: slice(1, 2),
            T.LEFT_ARC: slice(2, len(self.labels), 2),
            T.RIGHT_ARC: slice(3, len(self.labels), 2),
        }

    @property
    def relations(self) -> Iterable[str]:
        """All distinct dependency relation names seen in the corpus."""
        return self.signature.get_items()

    def __len__(self) -> int:
        """Total number of transition labels."""
        return len(self.labels)
|
import pathlib
# Resolve this file's absolute path and print its fourth ancestor
# directory (three levels above the containing directory).
ancestor_dir = pathlib.Path(__file__).resolve().parents[3]
print(ancestor_dir)
|
from django.contrib import admin
from products.models import *
# Expose the shop models in the Django admin (same order as before).
for model in (Vendor, Category, Product, Page):
    admin.site.register(model)
|
import sys

# we need this hack to pass a static library from the command line:
# the final argv entry is either a static-library path or the literal
# "GUARD" placeholder, and it must be stripped BEFORE distutils parses
# the remaining arguments.
last_opt = sys.argv[-1]
static_libs = [last_opt] if last_opt != "GUARD" else []
sys.argv = sys.argv[:-1]

from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize

# Build configuration for the RingDecomposerLib Cython wrapper.
include_dirs = ['/Users/nathan/Code/RingDecomposerLib/src/RingDecomposerLib']
library_dirs = []
shared_libs = []
extensions = [
    Extension('py_rdl.wrapper.DataInternal',
              ['/Users/nathan/Code/RingDecomposerLib/src/python/py_rdl/wrapper/DataInternal.pyx'],
              extra_objects=static_libs,
              include_dirs=include_dirs,
              libraries=shared_libs,
              library_dirs=library_dirs)]

# we need this hack for windows, because that path has to be native
# (whether THIS path must be native, probably no one will ever know...)
# and \ must be escaped for python -- hence the raw string.
native_path = r'/Users/nathan/Code/RingDecomposerLib/src/python'

setup(name = 'py_rdl',
      ext_modules = cythonize(extensions),
      package_dir={'' : native_path},
      packages=['py_rdl', 'py_rdl.wrapper'])
|
# Header identifying the student and the exercise.
print("Muhammad Saad Hasan\n18B-117-CS\nPractice Problem 4.4")
def even(num):
    """Print, comma-separated, every integer in [2, num) that is divisible
    by 2 or by 3 (despite the name, multiples of 3 are included too)."""
    for candidate in range(2, num):
        divisible = candidate % 2 == 0 or candidate % 3 == 0
        if divisible:
            print(candidate, end=", ")
# Read the (exclusive) upper bound from the user and print the numbers.
user = int(input("Please enter a number :"))
even(user)
|
import numpy as np
from isolation_forest.node import Node
from isolation_forest.tree import Tree
class TreeGrowerBasic:
    """Grows isolation trees over random subsamples of a dataset ``X``.

    Axis selection (``get_rot_operator``) and split-value generation
    (``get_border``) are the extension points for subclasses.
    """

    def __init__(self, X, sample_size):
        # Full dataset: rows are observations, columns are features.
        self.X = X
        # Number of feature dimensions.
        self.dim_cnt = self.X.shape[1]
        # Rows drawn (without replacement) per tree.
        self.sample_size = sample_size
        self.indeces = np.arange(0, self.X.shape[0])
        # One subsample per grown tree; kept so trees can be regrown.
        self.X_samples = []

    def make_train_datasets(self, ds_cnt):
        """Draw ``ds_cnt`` fresh subsamples for later tree growing."""
        for i in range(ds_cnt):
            self.X_samples.append(self.make_sample_dataset())

    def make_empty_tree(self):
        """Factory for an empty tree; override to customize the tree type."""
        return Tree()

    def grow_forest(self, tree_cnt, limit):
        """Grow ``tree_cnt`` trees, each on its own subsample, with the
        given depth limit."""
        self.make_train_datasets(tree_cnt)
        trees = [self.make_tree(X_sample, limit) for X_sample in self.X_samples]
        return trees

    def regrow_trees(self, limit = None):
        """Rebuild a tree for every stored subsample (new random splits).

        If ``limit`` is None, use the standard isolation-forest depth limit
        ceil(log2(sample_size)).
        """
        if (limit is None):
            limit = int(np.ceil(np.log2(self.sample_size)))
        trees = []
        for i in range(len(self.X_samples)):
            self.grown_tree = Tree()
            self.grown_tree.root = self.recursively_grow(self.X_samples[i], 0, limit)
            trees.append(self.grown_tree)
        return trees

    def make_tree(self, X_sample = None, limit = None):
        """Grow one tree; draws (and stores) a new subsample if none given."""
        if (limit is None):
            limit = int(np.ceil(np.log2(self.sample_size)))
        if (X_sample is None):
            X_sample = self.make_sample_dataset()
            self.X_samples.append(X_sample)
        self.grown_tree = self.make_empty_tree()
        self.grown_tree.root = self.recursively_grow(X_sample, 0, limit)
        return self.grown_tree

    def recursively_grow(self, X, tree_depth,depth_limit):
        """Recursively split ``X`` until the depth limit is hit or the node
        cannot be split; returns the subtree's root Node."""
        self.grown_tree.tree_depth = tree_depth
        if tree_depth >= depth_limit or len(X) <= 1:
            # External (leaf) node: depth limit reached or single point left.
            self.grown_tree.exnodes += 1
            return Node(X, self.grown_tree.rot_op, self.grown_tree.border, tree_depth,
                left = None, right = None, node_type = 'exNode' )
        else:
            self.grown_tree.rot_op = self.get_rot_operator(X)
            X_rot = X[:,self.grown_tree.rot_op]
            if X_rot.min()==X_rot.max():
                # Constant along the chosen axis: no split possible, leaf.
                self.grown_tree.exnodes += 1
                return Node(X, self.grown_tree.rot_op, self.grown_tree.border,
                    tree_depth, left = None, right = None, node_type = 'exNode' )
            self.grown_tree.border = self.get_border(X_rot)
            # Boolean mask selecting the rows routed to the left child.
            w = np.where(X_rot <= self.grown_tree.border,True,False)
            return Node(X, self.grown_tree.rot_op, self.grown_tree.border, tree_depth,\
                left=self.recursively_grow(X[w,:], tree_depth+1, depth_limit),\
                right=self.recursively_grow(X[~w,:],tree_depth+1,depth_limit), node_type = 'inNode' )

    def make_sample_dataset(self):
        """Draw ``sample_size`` distinct rows of X uniformly at random."""
        selected_indeces = np.random.choice(self.indeces, self.sample_size, replace = False)
        return self.X[selected_indeces,:]

    def getDataTransformer(self):
        """No data transform in the basic grower; subclasses may override."""
        return None

    #methods to be redefined in the child classes - we can modify the axis selection and split value generation
    def get_rot_operator(self, X):
        """Pick a uniformly random feature axis index."""
        index = int(np.random.choice(np.arange(0, self.dim_cnt)))
        return index

    def get_border(self, X):
        """Pick a uniformly random split value within [min(X), max(X)]."""
        min_val = min(X)
        max_val = max(X)
        return np.random.rand()*(max_val - min_val) + min_val
|
from unittest.case import TestCase
from pythonbrasil.lista_2_estrutura_de_decisao.ex_06_mostrar_maior_de_tres_numeros import obter_maior_numero
class ObterMaiorNumeroTests(TestCase):
    """Exercises obter_maior_numero() across tie and ordering scenarios."""

    def _check(self, expected, cases):
        # Helper: every (a, b, c) triple must yield the expected maximum.
        for a, b, c in cases:
            self.assertEqual(expected, obter_maior_numero(a, b, c))

    def test_todos_numeros_iguais(self):
        self._check(1, [(1, 1, 1)])
        self._check(5, [(5, 5, 5)])
        self._check(10, [(10, 10, 10)])

    def test_dois_numeros_iguais(self):
        self._check(10, [(10, 10, 1), (1, 10, 10), (10, 1, 10)])

    def test_primeiro_numero_maior(self):
        self._check(10, [(10, 5, 2), (10, 2, 5), (10, 5, 5)])

    def test_segundo_numero_maior(self):
        self._check(10, [(5, 10, 2), (2, 10, 5), (5, 10, 5)])

    def test_terceiro_numero_maior(self):
        self._check(10, [(5, 2, 10), (2, 5, 10), (5, 5, 10)])
|
import datetime
# Demo of the datetime module: current timestamp, components, and strftime.
x = datetime.datetime.now()
print(x)  # current date and time (down to microseconds) from the system clock
print(x.year)  # print the current year
print(x.month)  # print the current month
a = datetime.datetime(2020, 6, 4)  # construct a fixed date (time defaults to midnight)
print(a)
print(a.month)
print(x.strftime("%B"))  # full month name as a string
print(x.strftime("%A"))  # full weekday name as a string
print(x.strftime("%a"))  # abbreviated weekday name
|
# Days per month and French month names, interleaved into a single list
# (name first, then its day count).
t1 = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
t2 = [ ' Janvier ' , ' Février ' , ' Mars ' , ' Avril ' , ' Mai ' , ' Juin ' ,
' Juillet ' , ' Août ' , ' Septembre ' , 'Octobre ' , ' Novembre ' , ' Décembre ' ]
t3 = []
# Walk both lists in lockstep instead of indexing with a counter.
for nom, jours in zip(t2, t1):
    t3.append(nom)
    t3.append(jours)
print(t3)
|
import flask
from flask import Flask
import sklearn.externals  # NOTE(review): appears unused here -- confirm before removing
import joblib
from flask import render_template, request
import numpy as np
# Flask application and the pre-trained Iris classifier, loaded once at startup.
app = Flask(__name__)
model = joblib.load('model.pkl')
@app.route("/")
def home():
    """Serve the Iris measurement input form."""
    return render_template('Iris.html')
@app.route('/predict', methods=['POST'])
def make_prediction():
    """Read the four Iris measurements from the form, run the model, and
    re-render the page with the predicted species name.

    Returns the rendered 'Iris.html' template with `prediction` filled in.
    """
    if request.method == 'POST':
        # Collect the four numeric form fields in model input order.
        values = [request.form[key] for key in ('f1', 'f2', 'f3', 'f4')]
        feature = np.asarray(values, dtype='float64').reshape(1, -1)
        # BUG FIX: predict() returns an array; comparing the whole array to an
        # int is fragile/deprecated.  Take the scalar class label explicitly.
        label = int(model.predict(feature)[0])
        species = {0: 'Iris-setosa', 1: 'Iris-versicolor'}.get(label, 'Iris-virginica')
        return render_template('Iris.html', prediction='{}'.format(species))
if __name__ == '__main__':
    # Development server only; use a real WSGI server in production.
    app.run(debug=True)
|
from inspect import stack # To write log
import matter_lib as matt
import structure as sct # Structure objects hierarchy
# Prefix used in every log line written from this file.
log_file_heading = 'File: '+__file__.split('/')[-1]+' '
class MRI_inspect(object):
    """Loads MRI files into Matter objects and groups their slices into
    ImageObjectSlice objects.

    BUG FIX: state was previously held in class-level list attributes,
    which are shared between every instance (the classic mutable
    class-attribute pitfall); it is now initialized per instance.
    """

    def __init__(self, log):
        # Per-instance state (previously shared class attributes).
        self.files_MRI_path = ''
        self.files_MRI_names = []
        self.matter_obj_list = []
        self.related_blob_list = []
        self.imobj_slice_obj_list = []
        self.log = log
        self.log_class_heading = log_file_heading+"Class: "\
                                 +self.__class__.__name__
        log_method_heading = self.log_class_heading+" Method: "+stack()[0][3]\
                             +" ::: "
        self.log.write_log("log", log_method_heading+"Initialization called")
        self.struct = sct.Structure(self.log)

    def set_files_path(self, files_path):
        '''
        Stores as an attribute the path Magnetic Resonance Image files are
        stored in.

        Input parameters:
            files_path: <str>
                Path to MRI directory.
                /path/to/directory/

        Updates object attributes:
            files_MRI_path: <str>
                Path to MRI directory
        '''
        self.files_MRI_path = files_path

    def set_files_names(self, files_names):
        '''
        Stores as an attribute the MRI file names.

        Input parameters:
            files_names: <list>
                List with the names of files.
                ['filename.extension',...]

        Updates object attributes:
            files_MRI_names: <list>
                List with the names of files.
        '''
        self.files_MRI_names = files_names

    def make_matter_objs(self, filespath=None, filesnames=None):
        '''
        Uses the files pointed by 'filespath' and 'filesnames' to create
        objects of class 'Matter' and stores the resulting list as an
        attribute.  By default takes self object attributes.

        Input parameters:
            filespath: <str>
                Path to files directory
                /path/to/directory/
            filesnames: <list>
                List with the names of files.
                ['filename.extension',...]

        Updates object attributes:
            matter_obj_list: <list>
                List of objects of class 'Matter'.
        '''
        if filespath is None:
            filespath = self.files_MRI_path
        if filesnames is None:
            filesnames = self.files_MRI_names
        self.matter_obj_list = self.struct.create_matter_objs(filespath, filesnames)

    # (Removed a long commented-out essay_matter_access() experiment that
    # contained only dead debug code.)

    def make_imobj_slice_objects(self, matter_objs=None):
        '''
        Uses the 'Matter' objects pointed by 'matter_objs' to create objects
        of class 'ImageObjectSlice' and stores the resulting list as an
        attribute.  Each object is conformed by the slices of all matters
        and there are as many objects as slices are extracted from each
        matter.  By default takes self object attributes.

        Parameters:
            matter_objs: <list>
                List of objects of class 'Matter'

        Updates object attributes:
            imobj_slice_obj_list: <list>
                List of objects of class 'ImageObjectSlice'
        '''
        if matter_objs is None:
            matter_objs = self.matter_obj_list
        self.imobj_slice_obj_list = self.struct.create_imobj_slice_objs(matter_objs)
|
# Read an n x m grid of 0/1 values and print the 1-based coordinates of a
# cell containing 1.
# NOTE(review): only the *last* 1 found is kept, and it is printed twice
# (the trailing `for j in range(2)` loop reuses the inner loop's j name);
# the task presumably expects two distinct coordinate lines -- confirm.
n,m = map(int,input().split())
arr = []
first = []   # NOTE(review): unused
second = []  # NOTE(review): unused
for k in range(n):
    a = list(map(int,input().split()))[:m]
    arr.append(a)
for i in range(n):
    for j in range(m):
        if arr[i][j] == 1:
            x = i+1
            y = j+1
# Print the recorded coordinates twice.
for j in range(2):
    print(x,y)
'''for i in range(n):
    for j in range(m):
        print(arr[i][j],end=' ')
    print()'''
|
import unittest
from header import Header
from message import Message
from set import Set
class TestList(unittest.TestCase):
    """Behavioral tests for the Set container."""

    def test_init(self):
        # Every primitive type code must be accepted as-is...
        for code in 'bBhHiIqQfds':
            Set(code)
        # ...while 'm' additionally requires a message class.
        Set('m', Message)
        Set('m', Header)
        # Unknown code / missing message class must be rejected.
        self.assertRaises(TypeError, lambda: Set('x'))
        self.assertRaises(TypeError, lambda: Set('m'))

    def test_add(self):
        container = Set('b')
        container.add(23)
        # 129 does not fit in a signed byte.
        self.assertRaises(ValueError, container.add, 129)
if __name__ == '__main__':
    # Run this module's tests when executed directly.
    unittest.main()
|
# Generated by Django 2.1.1 on 2018-09-20 07:59
import django.contrib.postgres.fields.jsonb
import django.core.serializers.json
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated (Django 2.1.1): switch both metadata JSONFields to the
    # DjangoJSONEncoder (handles dates/decimals) and keep blank/null allowed.
    dependencies = [
        ('manager', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='manager',
            name='metadata',
            field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, encoder=django.core.serializers.json.DjangoJSONEncoder, null=True),
        ),
        migrations.AlterField(
            model_name='resource',
            name='metadata',
            field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, encoder=django.core.serializers.json.DjangoJSONEncoder, null=True),
        ),
    ]
|
from datetime import date
from selenium.webdriver.support.ui import Select
from .base import FunctionalTest
# Maps Norwegian day names to their 1-based row index in the meal table.
DAYNUMS = {'mandag': 1, 'tirsdag': 2, 'onsdag': 3, 'torsdag': 4, 'fredag': 5, 'lørdag': 6, 'søndag': 7}
class NewVisitorTest(FunctionalTest):
    """End-to-end journey: view and fill in this week's meal plan, then
    navigate between weeks."""

    def assert_elements_in_table(self, elements):
        """Assert every string in *elements* appears in some cell of the
        meal table."""
        table = self.browser.find_element_by_id('id_meal_table')
        cols = [row.find_elements_by_tag_name('td') for row in
                table.find_elements_by_tag_name('tr')]
        entries = [e.text for c in cols for e in c]
        for element in elements:
            self.assertIn(element, entries)

    def enter_meal(self, day, meal, meal_name):
        """Type *meal_name* into the '<day>_<meal>' input of that day's row
        and submit with Enter."""
        table = self.browser.find_element_by_id('id_meal_table')
        row = table.find_elements_by_tag_name('tr')[DAYNUMS[day]]
        input_name = '{}_{}'.format(day, meal)
        row.find_element_by_css_selector('input[name={}]'.format(input_name)).send_keys('{}\n'.format(meal_name))

    def test_can_view_meals_planned(self):
        # Line opens our site
        self.browser.get(self.live_server_url)
        # She sees that the header and title mention this week's meal plan
        self.assertIn('Matplan',self.browser.title)
        header_text = self.browser.find_element_by_tag_name('h1').text
        current_week = date.today().isocalendar()[1]
        self.assertIn('Matplan for uke {}'.format(current_week), header_text)
        # She finds a table with headers for day, lunch and dinner.
        self.assert_elements_in_table(['Dag', 'Lunsj', 'Middag'])
        # No meals are filled in, but she can add a dish from a dropdown;
        # she tries adding Monday's lunch
        self.assert_elements_in_table(['Mandag'])
        self.enter_meal('mandag', 'lunch', 'Salat')
        # The page refreshes and the table now shows a link to the recipe
        self.assert_elements_in_table(['Salat'])
        # She still finds a dropdown for the dinner
        self.enter_meal('mandag', 'dinner', 'Pizza')
        # The page refreshes again, now linking both dinner and lunch
        self.assert_elements_in_table(['Salat', 'Pizza'])
        # Happy that this week is planned, she clicks through to next week
        self.browser.find_element_by_id('id_next_week_link').click()
        # BUG FIX: header_text held the *previous* page's heading; re-read
        # the <h1> after navigating so the assertion checks the new page.
        header_text = self.browser.find_element_by_tag_name('h1').text
        # NOTE(review): week+1 does not wrap at year end (week 52/53) -- confirm.
        next_week = date.today().isocalendar()[1] + 1
        self.assertIn(
            'Matplan for uke {}'.format(next_week),
            header_text)
        # She enters a lunch and sees the page update
        self.enter_meal('mandag', 'lunch', 'Omelett')
        self.assert_elements_in_table(['Omelett'])
        # Back on the current week, the meals she entered are still there
        self.browser.find_element_by_id('id_previous_week_link').click()
        self.assert_elements_in_table(['Salat', 'Pizza'])
|
# -*- coding: utf-8 -*-
from mod_base import *
def get_mimetype(url):
    """Return the Content-Type (without parameters) of *url* via a HEAD
    request, or False when the header is missing.

    Python 2 only (httplib/urlparse).
    """
    import httplib
    from urlparse import urlparse
    # BUG FIX: was `urplarse(url)` (typo), which raised NameError on every call.
    urlobj = urlparse(url)
    domain = urlobj.netloc
    # NOTE(review): the query is appended without a leading '?' -- confirm intent.
    get = urlobj.path + urlobj.query
    conn = httplib.HTTPConnection(domain, timeout=5)
    conn.request("HEAD", get)
    res = conn.getresponse()
    headers = res.getheaders()
    content_type = False
    for item in headers:
        if item[0] == 'content-type':
            # Strip parameters such as "; charset=utf-8".
            content_type = item[1].split(";")[0].strip()
            break
    return content_type
def find_title(data):
    """Extract the contents of the <title> tag from an HTML string.

    Tries HTMLParser first; on any failure (including Python 3, where the
    HTMLParser *module* does not exist) falls back to a regex.
    Returns the stripped title, or False when none was found.
    """
    title = False
    try: # Try to use HTMLParser
        from HTMLParser import HTMLParser

        class TitleParser(HTMLParser):
            def __init__(self):
                HTMLParser.__init__(self)
                self.title = ""
                self.in_title = False

            def handle_starttag(self, tag, attrs):
                if tag == "title":
                    self.in_title = True

            def handle_endtag(self, tag):
                if self.in_title:
                    self.in_title = False
                    self.title = self.title.replace("\n", "")
                    self.title = self.title.replace("\r", "")

            def handle_data(self, data):
                if self.in_title:
                    self.title += data

        # instantiate the parser and feed it some HTML
        parser = TitleParser()
        parser.feed(data)
        # BUG FIX: was `parser.title or false` (lowercase), a NameError that
        # silently pushed every call into the regex fallback.
        title = parser.title or False
    except: # Fall back on regex
        # NOTE(review): the pattern requires a whitespace character right
        # after <title> and does not span newlines -- confirm intent.
        titleRE = re.compile("<title>(\\s.*?)</title>", re.IGNORECASE)
        the_title = titleRE.findall(data)
        if the_title:
            title = the_title[0]
    if title:
        return title.strip()
    return False
# Get <title> of web page
def get_url_title(url, logf):
    """Fetch *url* and return its page title, or False on any failure.

    logf is a logging callable taking a single message string.
    Python 2 oriented (urllib2 fallback, HTMLParser unescape).
    """
    ignore_ext = ["jpg", "png", "gif", "tiff", "psd", "zip", "rar", "sh"]
    if url[-3:] in ignore_ext:
        logf("Invalid extension.")
        return False
    # Check that the resource content type is something relevant.
    try:
        # BUG FIX: was get_content_type(url) -- an undefined name whose
        # NameError was silently swallowed below, disabling this check.
        content_type = get_mimetype(url)
        if content_type and not content_type in ["text/html", "text/xhtml", "text/plain"]:
            logf("Invalid content-type.")
            return False
    except:
        pass
    try:
        if urllib2 != False:
            u = urllib2.urlopen(url, timeout=5)
        else:
            u = urllib.urlopen(url)
    except:
        return False
    if u.getcode() != 200: # Only proceed if request is ok
        logf("Invalid response code.")
        return False
    # Read max 50 000 bytes to avoid Out of Memory
    data = u.read(50000)
    try:
        data = data.decode("utf-8")
    except:
        pass
    logf("Got url data.")
    title = find_title(data)
    if not title:
        return False
    try:
        import HTMLParser
        hp = HTMLParser.HTMLParser()
        title = hp.unescape(title)
    except Exception as e:  # `except Exception, e` is Python-2-only syntax
        # BUG FIX: the manual fallback replaced characters with themselves
        # (mojibake of the original entity names); unescape common entities.
        title = title.replace("&auml;", u"ä")
        title = title.replace("&ouml;", u"ö")
        title = title.replace("&amp;", u"&")
    return title
class Url(Listener):
    """Find links in messages and post link titles when found."""

    def init(self):
        # Listen to both private and channel messages.
        self.events = [IRC_EVT_MSG, IRC_EVT_CHAN_MSG]

    def event(self, event):
        # CLEANUP: removed commented-out merge-conflict residue
        # (<<<<<<< / ======= / >>>>>>> markers) that was left behind here.
        urls = find_urls(event.msg)
        titles = []
        for url in urls:
            title = get_url_title(url, self.Log)
            if title:
                titles.append('"'+title+'"')
        if titles:
            event.win.Send(", ".join(titles))
# Module registration consumed by the bot's plugin loader.
module = {
    "class": Url,
    "type": MOD_LISTENER,
    "zone": IRC_ZONE_BOTH,
}
|
from .Fraction import Fraction
class _Real:
def __init__(self, value):
if type(value) != int:
if type(value) != float:
if type(value) != Fraction:
try:
value = int(value)
except ValueError:
try:
value = float(value)
except ValueError:
raise ValueError("invalid literal for real: {0}".format(str(value)))
else:
float(Fraction)
self.value = value
def __abs__(self):
return abs(self.value)
def __add__(self, other):
return self.value + other
def __sub__(self, other):
return self.value - other
def __mul__(self, other):
return self.value * other
def __div__(self, other):
return self.value / other
def __mod__(self, other):
return self.value % other
def __pow__(self, power, modulo=None):
return pow(self.value, power, modulo)
def __cmp__(self, other):
if type(other) == Real:
if self.value > other.value:
return 1
elif self.value == other.value:
return 0
else:
return -1
else:
try:
other = Real(other)
except ValueError:
raise ValueError(other + " is not of the correct type")
if self.value > other.value:
return 1
elif self.value == other.value:
return 0
else:
return -1
# BUG FIX: type()'s bases argument must be a tuple -- passing a list raised
# "TypeError: type() argument 2 must be tuple, not list" at import time.
Real = type("Real", (_Real, object), {})
|
from __future__ import division
import sys
import csv
import pandas as pd
import numpy as np
import re
import math
"""
this script analyze cabin and print some result in %
"""
"""
iter on dataframe :
for index,row in new_csv.iterrows():
write in csv :
new_csv.to_csv('data/train_extract.csv',mode = 'w', index=False)
"""
if __name__ == '__main__':
    # Survival rate per cabin deck: keep rows with a known cabin, reduce the
    # cabin code to its deck letter, and print survived/total ratios.
    from_csv = pd.read_csv('data/train.csv')
    new_csv = from_csv
    new_csv = new_csv.dropna(subset= ['Cabin'])
    for index, row in new_csv.iterrows():
        # BUG FIX: .ix is deprecated/removed in modern pandas; .loc is the
        # label-based equivalent.
        new_csv.loc[index, 'Cabin'] = row['Cabin'][0:1]
    all_per_cabin = new_csv.Cabin.value_counts()
    new_csv = new_csv[new_csv.Survived == 1]
    s_per_cabin = new_csv.Cabin.value_counts()
    # print as a function call: valid in both Python 2 (single expression)
    # and Python 3 (the original `print expr` statement is a py3 SyntaxError).
    print(s_per_cabin / all_per_cabin)
|
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import vggish_params as params
from network import ClassifyNet, Vggish, generator, discriminator
from vggish_datasets import MNIST
import pdb
# Directory where model checkpoints are written.
mode_save_path = './save_models'
# Prefer the first GPU when one is available.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def load_weight(model, model_path):
    """Load the vggish sub-network weights from a checkpoint into *model*.

    The checkpoint's 'shared_layers' dict holds keys prefixed with
    'vggish.' (7 characters); the prefix is stripped so names match the
    bare model.  Loading is non-strict: extra/missing keys are tolerated.

    BUG FIX: the original iterated save_state.keys() while popping entries
    from save_state, which raises RuntimeError ("dictionary changed size
    during iteration") on Python 3.  It also built an unused `own_state`.
    """
    save_state = torch.load(model_path)['shared_layers']
    load_state = {name[7:]: tensor
                  for name, tensor in save_state.items()
                  if 'vggish' in name}
    model.load_state_dict(load_state, strict=False)
def train(vggish_model, class_model, G_model, D_model, train_loader, optimizer, epoch):
    """Run one training epoch: vggish embedding -> generator -> classifier,
    optimized with cross-entropy on the classifier output.

    D_model is accepted for interface compatibility but is not used here.
    Uses the module-level `device`.
    """
    vggish_model.train()
    class_model.train()
    G_model.train()
    # BUG FIX: removed `criterion = nn.BCELoss().cuda()` -- it was never
    # used, and the unconditional .cuda() crashed on CPU-only machines.
    # (Also removed the unused batch_size / label_real locals.)
    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.to(device)
        target = torch.squeeze(target).to(device)
        output = vggish_model(data)
        # Treat the embedding as a 100-dim latent code for the generator.
        G_result = G_model(output.view(-1, 100, 1, 1))
        output = class_model(G_result)
        loss = F.cross_entropy(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{}]\tLoss: {:.6f}'.format(
                epoch, batch_idx, len(train_loader), loss.item()))
def test(vggish_model, class_model, G_model, test_loader, epoch):
    """Evaluate the vggish -> generator -> classifier pipeline.

    Prints average loss and accuracy, saves an 8x8 grid of the last batch's
    generated images under ./results, and returns accuracy in percent.
    """
    vggish_model.eval()
    class_model.eval()
    G_model.eval()
    with torch.no_grad():
        test_loss = 0
        correct = 0
        for data, target in test_loader:
            data = data.to(device)
            target = torch.squeeze(target).to(device)
            output = vggish_model(data)
            G_result = G_model(output.view(-1, 100, 1, 1))
            img_result = G_result
            output = class_model(img_result)
            test_loss += F.cross_entropy(output, target).item()
            pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
        test_loss /= len(test_loader)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)))
        n_rows = n_cols = 8
        is_gray = True
        fig, axes = plt.subplots(n_rows, n_cols, figsize=(8, 8))
        for ax, img in zip(axes.flatten(), G_result):
            ax.axis('off')
            # NOTE(review): 'box-forced' was removed in matplotlib >= 3.0
            # (newer versions use 'box') -- confirm the pinned version.
            ax.set_adjustable('box-forced')
            if is_gray:
                img = img.cpu().data.view(64, 64).numpy()
                ax.imshow(img, cmap='gray', aspect='equal')
            else:
                img = (((img - img.min()) * 255) / (img.max() - img.min())).cpu().data.numpy().transpose(1, 2, 0).astype(np.uint8)
                ax.imshow(img, cmap='None', aspect='equal')
        plt.subplots_adjust(wspace=0, hspace=0)
        title = 'Epoch {0}'.format(epoch)
        fig.text(0.5, 0.04, title, ha='center')
        # ROBUSTNESS: create the output directory instead of crashing when
        # ./results does not exist yet.
        os.makedirs('./results', exist_ok=True)
        plt.savefig(os.path.join('./results', 'train_all_emnist_mnist_epoch_{}.png'.format(epoch)))
        plt.close()
    return 100. * correct / len(test_loader.dataset)
import torchvision
import os, time
if __name__ == '__main__':
    torchvision.datasets.MNIST(root='./data',download=True)# add for download mnist
    train_loader = torch.utils.data.DataLoader(
        MNIST(root='./data/mnist', train=True),
        batch_size=128, shuffle=True, num_workers=4, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(
        MNIST(root='./data/mnist', train=False),
        batch_size=128, shuffle=False, num_workers=4, pin_memory=True)
    # Pre-trained components: vggish gets partial weights, the others full.
    vggish_model = Vggish().to(device)
    load_weight(vggish_model, './save_models/vggish_mnist_best.pth')
    class_model = ClassifyNet().to(device)
    class_model.load_state_dict(torch.load('./save_models/classify_mnist_best.pth')['shared_layers'])
    G_model = generator(out_size=3).to(device)
    G_model.load_state_dict(torch.load('./save_models/emnist_G_best.pth.tar'))
    D_model = discriminator(in_size=3, ndf=128).to(device)
    D_model.load_state_dict(torch.load('./save_models/emnist_D_best.pth.tar'))
    optimizer = optim.Adam(vggish_model.parameters(), lr=1e-3)
    # Baseline evaluation before any fine-tuning.
    prec = test(vggish_model, class_model, G_model, test_loader, 0)
    best_prec = prec
    for epoch in range(1, 5):
        # After epoch 3, also fine-tune the generator at a lower LR.
        if epoch > 3:
            optimizer = optim.Adam(list(vggish_model.parameters())+list(G_model.parameters()), lr=1e-4)
        train(vggish_model, class_model, G_model, D_model, train_loader, optimizer, epoch)
        prec = test(vggish_model, class_model, G_model, test_loader, epoch)
        if best_prec < prec:
            best_prec = prec
            # NOTE(review): .cpu() moves the live models off the GPU *in
            # place*, so later epochs would run with models on CPU while
            # data is still sent to `device` -- likely a bug; consider
            # saving state_dict() without mutating the models. Confirm.
            save_state = {'vgg_layers': vggish_model.cpu().state_dict(),
                          'class_layers': class_model.cpu().state_dict(),
                          'gen_layers': G_model.cpu().state_dict(),
                          'best_prec': best_prec}
            # NOTE(review): purpose of this sleep is unclear -- confirm.
            time.sleep(10)
            torch.save(save_state, './save_models/train_all_emnist_mnist_best.pth')
|
import torch.nn as nn
from pathlib import Path
import torch
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data_utils
def calculate_f1(log_proba: torch.tensor,
                 target: torch.tensor,
                 pos: int = 0,
                 thresh: float = 2./3):
    """Calculate F1 score. By default the "positive" class is 0 - ILS."""
    # (N, C) log-probabilities -> argmax class; a 1-D vector is treated as
    # logits for a binary sigmoid threshold.
    if len(log_proba.shape) > 1:
        preds = log_proba.argmax(1)
    else:
        preds = torch.sigmoid(log_proba) > thresh
    pred_pos = preds == pos
    true_pos = target == pos
    tp = torch.logical_and(pred_pos, true_pos).sum()
    fp = torch.logical_and(pred_pos, ~true_pos).sum()
    fn = torch.logical_and(~pred_pos, true_pos).sum()
    # F1 written as tp / (tp + (fp + fn)/2).
    return tp / (tp + (fp + fn).float() / 2)
def calculate_mcc(log_proba, target, pos=0):
    """Matthews correlation coefficient, with class *pos* as positive.

    BUG FIX: the original returned
    (tp*tn - fp*fn) / ((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn).float()/2):
    no square root, and the stray /2 applied to the last factor only.
    MCC's denominator is sqrt((tp+fp)(tp+fn)(tn+fp)(tn+fn)).
    Returns 0 when any denominator factor is zero (the usual convention).
    """
    preds = log_proba.argmax(1)
    tp = torch.logical_and(preds == pos, target == pos).sum()
    fp = torch.logical_and(preds == pos, target != pos).sum()
    fn = torch.logical_and(preds != pos, target == pos).sum()
    # there may be multiple non-target values
    tn = torch.logical_and(preds != pos, target == preds).sum()
    denom = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)).float()
    if denom == 0:
        return torch.tensor(0.)
    return (tp * tn - fp * fn).float() / torch.sqrt(denom)
def calculate_accuracy(log_proba, target):
    """Fraction of rows whose argmax class equals *target*."""
    hits = (log_proba.argmax(1) == target).float()
    return hits.mean()
class Net(nn.Sequential):
    """Simple MLP: Linear+ReLU stack with dropout between hidden layers.

    layer_sizes lists the hidden widths plus the output size last; a final
    LogSoftmax (multi-class) or Sigmoid (single logit) head is appended.
    The chosen device (via get_device) is stored on self.device.
    """

    def __init__(self, input_size: int,
                 layer_sizes=[120, 849, 1],  # read-only default, never mutated
                 dropout: float = 0.1):
        # BUG FIX: the original zipped layer_sizes[:-2] with [1:-1] and then
        # used the loop variable *after* the loop, raising NameError
        # (unbound output_size) whenever len(layer_sizes) < 3.  Track the
        # previous width explicitly instead; layer order is unchanged.
        prev_size = layer_sizes[0]
        layers = [nn.Linear(input_size, prev_size), nn.ReLU()]
        for hidden_size in layer_sizes[1:-1]:
            layers.extend([nn.Dropout(p=dropout),
                           nn.Linear(prev_size, hidden_size),
                           nn.ReLU()])
            prev_size = hidden_size
        layers.append(nn.Linear(prev_size, layer_sizes[-1]))
        # only add sigmoid if output is a probability
        # TODO: need to fix everything to handle logits output. pytorch BCELossWithLogits is unstable.
        if layer_sizes[-1] > 1:
            layers.append(nn.LogSoftmax(dim=1))
        elif layer_sizes[-1] == 1:
            layers.append(nn.Sigmoid())
        super().__init__(*layers)
        device = get_device()
        self.to(device)
        self.device = device
def instantiate_model(dim, layers, clip_value: float = 10) -> Net:
    """Build a Net, optionally wrap it for multi-GPU, and register a
    gradient-clipping hook on every parameter."""
    net = Net(dim, layers, dropout=.05)
    # BUG FIX: capture the device before a possible DataParallel wrap --
    # the wrapper does not expose the inner module's custom `device`
    # attribute, so `net.device` raised AttributeError on multi-GPU hosts.
    device = net.device
    if torch.cuda.device_count() > 1:
        net = nn.DataParallel(net)
    print(f'loaded net {net} to {device}')
    for p in net.parameters():
        # Clamp gradients elementwise to [-clip_value, clip_value].
        p.register_hook(lambda grad: torch.clamp(
            grad, -clip_value, clip_value))
    return net
def get_device():
    """Return 'cuda:0' when a GPU is visible, else 'cpu' (plain string,
    accepted anywhere torch expects a device)."""
    if torch.cuda.is_available():
        return 'cuda:0'
    return 'cpu'
def load_model(config: dict,
               dataset,
               checkpoint_dir: Path = None,
               with_optimizer: bool = False,
               optimizer=optim.Adam):
    """load a network, and optionally an optimizer state

    dataset may be the feature count itself (int) or an indexable dataset
    whose first sample provides the feature dimension.
    """
    if isinstance(dataset, int):
        n_features = dataset
    else:
        n_features = dataset[0][0].shape[0]
    net = instantiate_model(n_features, config["layers"])
    if with_optimizer:
        opt = optimizer(net.parameters(), lr=config["lr"])
    if checkpoint_dir:
        checkpoint_path = checkpoint_dir / "checkpoint"
        if checkpoint_path.exists():
            model_state, optimizer_state = torch.load(
                checkpoint_path,
                map_location=net.device)
            net.load_state_dict(model_state)
            print('loaded model state')
            # BUG FIX: the optimizer state was read from the checkpoint but
            # never applied; restore it when an optimizer was requested.
            if with_optimizer:
                opt.load_state_dict(optimizer_state)
    if with_optimizer:
        return net, opt
    return net
def test_error(net: Net,
testset: data_utils.TensorDataset,
loss=F.mse_loss,
predict: bool = False):
'''must call net.eval() first.
Loss function must be additive.'''
device = net.device
testloader = data_utils.DataLoader(
testset,
batch_size=256,
shuffle=False,
num_workers=1)
mse = 0.
total = 0
preds = []
with torch.no_grad():
for data in testloader:
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
outputs = net(inputs)
total += labels.size(0)
if predict:
preds.append(outputs)
mse += loss(outputs.squeeze(), labels, reduce='sum')
mse /= total
if predict:
return mse, torch.cat(preds)
return mse
def df_to_tensor(df):
    """convert a df to tensor to be used in pytorch"""
    # Lift the underlying numpy values into a float32 tensor on the active device.
    target_device = get_device()
    values = torch.from_numpy(df.values)
    return values.float().to(target_device)
def corrcoef(x: torch.Tensor, y: torch.Tensor):
    """Pearson correlation coefficient between 1-D tensors x and y.

    BUG FIX: the original divided by x.std()*y.std()*N**2 -- an extra
    factor of N, and torch's default *unbiased* stds; the population
    formula is dot(x-mx, y-my) / (N * sigma_x * sigma_y).

    Raises ValueError when the (squeezed) lengths differ.
    """
    x = x.squeeze()
    y = y.squeeze().to(x.device)
    N = x.shape[0]
    if N != y.shape[0]:
        raise ValueError(f'{x.shape}, {y.shape}')
    mx, my = x.mean(), y.mean()
    sx = x.std(unbiased=False)
    sy = y.std(unbiased=False)
    return torch.dot(x - mx, y - my) / (N * sx * sy)
|
def ground_shipping_cost(weight):
    """Ground shipping: $20 flat fee plus a per-pound rate that rises with
    the weight bracket (<=2, <=6, <=10, above 10 lb)."""
    flat_fee = 20.00
    if weight <= 2:
        rate = 1.50
    elif weight <= 6:
        rate = 3.00
    elif weight <= 10:
        rate = 4.00
    else:
        rate = 4.75
    return flat_fee + rate * weight
print("$%.2f" % ground_shipping_cost(8.4))
# Flat-rate premium option, independent of weight.
premium_ground_shipping_cost = 125.00
def drone_shipping_cost(weight):
    """Drone shipping: no flat fee, per-pound rate by weight bracket
    (<=2, <=6, <=10, above 10 lb)."""
    if weight <= 2:
        rate = 4.50
    elif weight <= 6:
        rate = 9.00
    elif weight <= 10:
        rate = 12.00
    else:
        rate = 14.25
    return rate * weight
print("$%.2f" % drone_shipping_cost(1.5))
def cheapest_shipping(weight):
    """Print which of ground / drone / premium ground is cheapest for
    *weight* (ties fall through to the premium message, as before)."""
    ground = ground_shipping_cost(weight)
    drone = drone_shipping_cost(weight)
    if ground < drone and ground < premium_ground_shipping_cost:
        print("The cheapest shipping method is by ground shipping at a cost of " + str(
            "$%.2f" % ground))
    elif drone < ground and drone < premium_ground_shipping_cost:
        print("The cheapest shipping method is by drone shipping at a cost of " + str(
            "$%.2f" % drone))
    else:
        print("The cheapest shipping method is by premium ground shipping at a cost of " + str(
            "$%.2f" % premium_ground_shipping_cost))
cheapest_shipping(4.8)
cheapest_shipping(41.5)
|
#!/usr/bin/env python3
import argparse
import subprocess
def parse_args():
    """Parse the CLI flags selecting which Terraform steps to execute.

    --reset is shorthand for taint + plan + apply (without destroy).
    """
    parser = argparse.ArgumentParser(description='Execute Terraform')
    parser.add_argument('-d', '--dry-run', action='store_true')
    parser.add_argument('--destroy', action='store_true')
    parser.add_argument('--plan', action='store_true', help='Create a plan')
    parser.add_argument('--plan-file', default='terraform.tfplan')
    parser.add_argument('--apply', action='store_true')
    parser.add_argument('--taint', action='store_true')
    parser.add_argument('--target', choices=['master', 'minion', 'wiki'])
    parser.add_argument('--reset', action='store_true', help='reset and recreate everything')
    args = parser.parse_args()
    if not args.reset:
        return args
    # Expand the --reset shorthand into the individual step flags.
    args.destroy = False
    args.taint = True
    args.plan = True
    args.apply = True
    return args
def terraform(args):
    """Build and (unless dry-run) execute the terraform command sequence
    implied by *args*: optional destroy plan+apply, taint, plan, apply."""
    commands = []
    target_str = None
    if args.target == 'master':
        target_str = '-target=digitalocean_droplet.master'
    elif args.target == 'minion':
        target_str = '-target=digitalocean_droplet.minion'
    elif args.target == 'wiki':
        target_str = '-target=digitalocean_droplet.wiki'
    if not (args.destroy or args.plan or args.apply):
        print('Nothing to do')
        # BUG FIX: was `return()`, which returned an accidental empty tuple.
        return
    if args.destroy:
        command = ['terraform', 'plan', '-destroy', '-out=delete.tfplan']
        if target_str:
            command.append(target_str)
        commands.append(command)
        commands.append(['terraform', 'apply', 'delete.tfplan'])
    # BUG FIX: was print('Taint: %s', args.taint) -- print() does not
    # interpolate %-style placeholders; format explicitly.
    print('Taint: %s' % args.taint)
    if args.taint:
        command = ['terraform', 'taint']
        taint_prefix = 'digitalocean_droplet'
        taint_str = '%s.%s' % (taint_prefix, args.target)
        command.append(taint_str)
        commands.append(command)
    if args.plan:
        command = ['terraform', 'plan', '-out=%s' % args.plan_file]
        if target_str:
            command.append(target_str)
        commands.append(command)
    if args.apply:
        commands.append(['terraform', 'apply', args.plan_file])
    print('Executing')
    for command in commands:
        print(command)
        if args.dry_run is False:
            # argv list, shell=False: no shell-injection surface.
            subprocess.run(command)
if __name__ == '__main__':
    # CLI entry point: parse the flags, then run the selected steps.
    args = parse_args()
    terraform(args)
|
import device
from time import sleep
from peak_detect2 import peakdetect
def peak_found(maxtab,mintab):
    # Callback hook for detected peaks: currently just dumps the maxima and
    # minima tables (Python 2 print statements).
    print maxtab
    print mintab
    pass
def test_live_peaks():
    '''run peak detection on device stream or archive data

    Python 2 script: consumes the device generator forever, accumulates
    samples, and runs peakdetect() on each accelerometer axis.
    NOTE(review): the inner `for sample in data` re-appends the *entire*
    history to t/x/y/z on every new sample (quadratic growth) and the lists
    are never trimmed -- confirm whether only the newest sample was meant.
    '''
    from device import sensor_stream
    gen=sensor_stream()
    data=[]
    t=[]
    x=[]
    y=[]
    z=[]
    lookahead=10
    first=True
    #iterate through the device stream (or archive data)
    while True:
        #get rid of the first value sent from the device, it's usually bogus
        if first==True:
            gen.next()
            first=False
        sample=gen.next()
        print sample
        #go until the device data runs out
        if sample!=None:
            data.append(sample)
            for sample in data:
                #time - (considered the 'x' axis in peak detection function)
                t.append(sample['time'])
                x.append(sample['accel'][0])
                y.append(sample['accel'][1])
                z.append(sample['accel'][2])
            #for each axis x,y, and z, look for peaks
            peaks={}
            peaks['x']=peakdetect(x,t,lookahead=lookahead)
            peaks['y']=peakdetect(y,t,lookahead=lookahead)
            peaks['z']=peakdetect(z,t,lookahead=lookahead)
            # dom_axis=''
            # if dom_axis=='':
            #wait until min an max both have values for all three axes
            test=sum([1 for i in peaks.values() if i[0]!=[] and i[1]!=[]])
            for i in peaks:
                if peaks[i]!=([],[]): print i+": "+ str(peaks[i])
            print peaks
            #find longest range
            big_range=0
            # NOTE(review): test counts axes (max 3) but is compared to 6 --
            # confirm whether min+max pairs were meant to be counted.
            if test==6: #completed peak
                print peaks
                for axis in peaks:
                    r=abs(peaks[axis][0][1]-peaks[axis][1][1])
                    if r > big_range:
                        big_range=r
                        dom_axis=axis
            # else:
                #peaks found
                # for axis in peaks:
                    # if peaks[axis]!=([], []):
                        # print axis + str(peaks[axis])
    #psuedocode
    #wait until min an max both have values for all three axes
    #take the axis with the longest range to be the indicative one
        # if peaks[0] != [] and peaks[1]!=[]: #Peaks found
            # print str(peaks)
            # peaks=[[],[]]
            # del data[:-1*lookahead/2]
        # break
if __name__ == '__main__':
    # Run the live peak-detection loop when executed as a script.
    test_live_peaks()
|
import random
class Board(object):
''' Board for the tic tac toe game '''
def __init__(self):
'''initialize game state'''
self.gameState = ['_', '_', '_',
'_', '_', '_',
'_', '_', '_']
self.symbols = ['x', 'o']
self.userSymbol = 'o'
def get_user_symbol(self):
'''ask user for his preference of symbol'''
self.userSymbol = raw_input("Enter the symbol you want to play with: ")
def __str__(self):
'''ugly print function for board current situation'''
returnString = ''
i = 1
while i!=10:
returnString = returnString + self.gameState[i-1] + " "
if(i%3 == 0):
returnString = returnString + "\n"
i+=1
return returnString
def get_user_action(self, randomize = True):
'''ask for user action'''
if randomize == True:
flag = 0
while flag == 0:
index = random.randrange(0, 9)
if(self.gameState[index] == '_'):
flag = 1
else:
x, y = map(int, raw_input("Please enter coordinate of the board you would like to mark your symbol (top left is (0,0))").split())
index = x*3 + y
try:
if(self.gameState[index] == '_'):
self.gameState[index] = self.userSymbol
except Exception as e:
print "Our of bounds player. You need to concentrate."
#print self #debug
def determine_end(self):
'''ugly function to determine board state'''
listsOfCombinations = ([0, 1, 2], [3, 4, 5], [6, 7, 8],[0, 3, 6], [1, 4, 7], [2, 5, 8], [0, 4, 8], [2, 4, 6])
for list in listsOfCombinations:
x = [self.gameState[ind] for ind in list]
if(x == ['x','x','x'] or x==['o','o','o']):
if (x ==['x','x','x']):
if(self.userSymbol == 'x'):
return True, -100
else:
return True, 100
elif(x ==['o','o','o']):
if(self.userSymbol == 'o'):
return True, -100
else:
return True, 100
try:
inde = self.gameState.index('_')
return False, 1
except Exception as e:
return True, 3 #game ended with no one winnig
def get_possible_actions(self):
'''returns list of possible indices that can be taken by the bot'''
actions = []
for i in range(len(self.gameState)):
if(self.gameState[i] == '_'):
actions.append(i)
return actions
#debug
#a = Board()
#while 1:
# print a
# a.get_user_action()
#print a.determine_end()
#print a.get_possible_actions()
|
from ns_event_codes import event_codes
from constants import headers, ns_login_dict, ns_login_url, ns_tracing_url
from datetime import datetime
class NorfolkSouthern:
    """Thin client around the Norfolk Southern container-tracing API."""

    def __init__(self, session):
        self.session = session

    def login(self):
        # The API expects JSON plus the shared default headers.
        self.session.headers.update(headers)
        self.session.headers.update({'Content-Type': 'application/json'})
        return self.session.post(ns_login_url, json=ns_login_dict)

    def get_token(self):
        # Log in and pull the CSRF token out of the response payload.
        return self.login().json()['result']['token']

    def get_tracing_results_dict(self, container, session):
        self.session.headers.update({'CSRFTOKEN': self.get_token()})
        # Posts through the *passed-in* session, as before.
        return session.post(ns_tracing_url, json={"searchList": container}).json()

    @staticmethod
    def _equipment(_dict, index):
        # Shared accessor for one equipment record in a tracing result.
        return _dict['result']['validEquipmentDataList'][index]

    def get_most_recent_event(self, _dict, index):
        record = self._equipment(_dict, index)
        description = event_codes[record['lastAAREventCode']]
        return '{} {} {}'.format(description,
                                 record['currentTerminalLocation'],
                                 record['eventTime'])

    def get_eta(self, _dict, index):
        return self._equipment(_dict, index)['etg']

    def get_last_free_day(self, _dict, index):
        return self._equipment(_dict, index)['lastFreeDateTime']

    def get_scheduled_event(self, _dict, index):
        record = self._equipment(_dict, index)
        return 'On route to {} ETA: {}'.format(record['onlineDestination'],
                                               self.get_eta(_dict, index))

    def get_formatted_tracing_results(self, recent_event, scheduled_event):
        return 'Most Recent Event: {}\nScheduled Event: {}\nLast Checked On: {}'.format(
            recent_event,
            scheduled_event,
            datetime.now(),
        )
|
# Generated by Django 2.2.4 on 2019-08-31 14:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Tighten Respuesta.codigo: store it as a small positive integer and
    enforce that each (pregunta, codigo) pair is unique."""
    dependencies = [
        ('encuestas', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='respuesta',
            name='codigo',
            field=models.PositiveSmallIntegerField(),
        ),
        migrations.AddConstraint(
            model_name='respuesta',
            constraint=models.UniqueConstraint(fields=('pregunta', 'codigo'), name='codigo_unico'),
        ),
    ]
|
import cv2

# Load the test image (BGR order, as OpenCV reads it).
img = cv2.imread("lena.jpg")

# Scale to 75% of the original size in both dimensions.
im_scaled = cv2.resize(img, None, fx=0.75, fy=0.75)
# When dsize is given explicitly, fx/fy are ignored: the output is forced to
# 640x480 (a skewed size). INTER_AREA is the recommended mode for shrinking.
im_scaled2 = cv2.resize(img, (640, 480), fx=0.75, fy=0.75, interpolation=cv2.INTER_AREA)
# Enlarge 2x; INTER_CUBIC gives smoother results when upscaling.
im_scaled_interplotation = cv2.resize(img, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)

cv2.imshow("original", img)
cv2.imshow("scaled", im_scaled)
# BUG FIX: this window previously displayed im_scaled instead of im_scaled2.
cv2.imshow("scaled2", im_scaled2)
cv2.imshow("scaled_interplotation", im_scaled_interplotation)
cv2.waitKey(0)
cv2.destroyAllWindows()

print(img.shape)
print(im_scaled.shape)
print(im_scaled2.shape)
print(im_scaled_interplotation.shape)
|
import dash_bootstrap_components as dbc
from dash import html


def _card(text, button_label, button_color):
    """Build one tab card: a paragraph of text plus a single colored button."""
    return dbc.Card(
        dbc.CardBody(
            [
                html.P(text, className="card-text"),
                dbc.Button(button_label, color=button_color),
            ]
        ),
        className="mt-3",
    )


tab1_content = _card("This is tab 1!", "Click here", "success")
tab2_content = _card("This is tab 2!", "Don't click here", "danger")

# Three tabs: two live cards plus a permanently disabled placeholder.
tabs = dbc.Tabs(
    [
        dbc.Tab(tab1_content, label="Tab 1"),
        dbc.Tab(tab2_content, label="Tab 2"),
        dbc.Tab(
            "This tab's content is never seen", label="Tab 3", disabled=True
        ),
    ]
)
|
import turtle  # standard-library turtle graphics

pen = turtle.Pen()
# Draw a square spiral: turn left 90 degrees each step, moving a
# little farther every time (30, 35, 40, ...).
for step in range(50):
    pen.left(90)
    pen.forward(30 + step * 5)
|
#python imports
import sys
import os
import json
import time
import extract_msg
import email
from email import policy
from email.header import decode_header, make_header
from email.parser import BytesParser
from termcolor import colored
#third-party imports
#No third-party imports
#programmer generated imports
from logger import logger
from fileio import fileio
'''
***BEGIN DESCRIPTION***
Type: Email - Description: Reads an eml or msg file and outputs the contents.
***END DESCRIPTION***
'''
def POE(POE):
    """Read an .eml or .msg e-mail file, print/log its headers, and extract
    any attachments into POE.logdir.

    The POE argument is the plugin-context object (note: it shadows the
    function's own name). Attributes used here: target, targetfilename,
    extension, logdir, logging, debug.
    Returns 0 on success, -1 when the file type is not .eml/.msg.
    """
    if (POE.logging == True):
        LOG = logger()
        newlogentry = ''
    reademail_dump = ''
    reademail_output_data = ''
    output = POE.logdir + 'Reademail.txt'
    FI = fileio()
    if (POE.logging == True):
        newlogentry = 'Module: reademail'
        LOG.WriteStrongLog(POE.logdir, POE.targetfilename, newlogentry)
    print (colored('\r\n[*] Running reademail against: ' + POE.target, 'white', attrs=['bold']))
    if (POE.extension == 'eml'):
        print (colored('[*] Target is .eml...', 'green', attrs=['bold']))
        texts = []
        text = ''
        count = 0
        failedcount = 0
        # Parse the headers with the modern stdlib email API.
        with open(POE.target, 'rb') as filedata:
            name = filedata.name # Get file name
            msg = BytesParser(policy=policy.default).parse(filedata)
            text += 'Date: ' + msg['date'] + '\n'
            text += 'To: ' + msg['to'] + '\n'
            text += 'From: ' + msg['from'] + '\n'
            text += 'Subject: ' + msg['subject'] + '\n\r'
            #text += str(msg.get_body(preferencelist=('plain'))#.get_content()
            filedata.close()
        print (colored('[-] Message Date: ' + msg['date'],'white'))
        print (colored('[-] Message To: ' + msg['to'],'white'))
        print (colored('[-] Message From: ' + msg['from'],'white'))
        print (colored('[-] Message Subject: ' + msg['subject'],'white'))
        if (POE.logging == True):
            newlogentry = 'Message Date: ' + msg['date']
            LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
            newlogentry = 'Message To: ' + msg['to']
            LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
            newlogentry = 'Message From: ' + msg['from']
            LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
            newlogentry = 'Message Subject: ' + msg['subject']
            LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
        if (POE.debug == True):
            print ('[DEBUG] name: ' + name)
            print ('[DEBUG] text: ' + str(text))
        print (colored('[*] Reademail e-mail body data has been generated to file here: ', 'green') + colored(output, 'blue', attrs=['bold']))
        FI.WriteLogFile(output, text)
        # Re-parse with the legacy email API to walk MIME parts for attachments.
        message = email.message_from_file(open(POE.target))
        suffix=None
        filenames = []
        content_type = ''
        if message.get_content_maintype() == 'multipart':
            for part in message.walk():
                if (POE.debug == True):
                    print ('[DEBUG] Content Type: ' + part.get_content_maintype())
                if part.get_content_maintype() == 'multipart': continue
                if ((part.get('Content-Type').find('application/octet-stream') != -1) or (part.get('Content-Type').find('application/x-zip-compressed') != -1) or (part.get('Content-Type').find('application') != -1) or (part.get('Content-Type').find('image') != -1) or (part.get('Content-Type').find('text/html') != -1) or (part.get('Content-Type').find('application/rtf') != -1)):
                    filename = part.get_filename()
                    try:
                        # suffix is always None here; presumably a hook for
                        # de-duplicating filenames — TODO confirm.
                        if suffix:
                            filename = ''.join( [filename.split('.')[0], '_', suffix, '.', filename.split('.')[1]])
                        filepath = os.path.join(POE.logdir, filename)
                        fb = open(filepath,'wb')
                        fb.write(part.get_payload(decode=True))
                        fb.close()
                        filenames.append(filename)
                    except Exception as e:
                        print (colored('[x] Unable to extract attachment! ' + str(e), 'red', attrs=['bold']))
                        if (POE.logging == True):
                            newlogentry = 'Unable to extract attachment! ' + str(e)
                            LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
                        failedcount += 1
                        continue
            for fnames in filenames:
                count += 1
                if (POE.debug == True):
                    print ('[DEBUG] fnames: ' + fnames)
                print (colored('[*] Attachment extracted: ' + fnames, 'green'))# + colored(POE.logdir + fnames, 'blue', attrs=['bold']))
                if (POE.logging == True):
                    newlogentry = 'Attachment extracted: ' + fnames + '\n'
                    LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
            print (colored('[*] ' + str(count) + ' total attachments extracted to: ', 'green') + colored(POE.logdir, 'blue', attrs=['bold']))
            if (POE.logging == True):
                newlogentry = str(count) + ' total attachments extracted to: <a href=\"' + POE.logdir + '\">' + POE.logdir + '</a>'
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
            print (colored('[*] ' + str(failedcount) + ' total attachments failed to be extracted...', 'yellow'))
            if (POE.logging == True):
                newlogentry = str(failedcount) + ' total attachments failed to be extracted...'
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
        else:
            print (colored('[-] No attachments found to extract...', 'yellow', attrs=['bold']))
            if (POE.logging == True):
                newlogentry = 'No attachments found to extract...'
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
    elif(POE.extension == 'msg'):
        print (colored('[*] Target is .msg...', 'green', attrs=['bold']))
        # Outlook .msg files go through the third-party extract_msg package.
        email_msg = extract_msg.openMsg(POE.target)
        #attachments = email_msg.attachments
        attachment = email_msg.attachments
        count = 0
        if (POE.debug == True):
            print ('email_msg.sender: ' + email_msg.sender)
            print ('email_msg.to: ' + email_msg.to)
            print ('email_msg.subject: ' + email_msg.subject)
            print ('email_msg.date: ' + email_msg.date)
            print ('email_msg.body: ' + email_msg.body)
        if attachment:
            # Decode RFC 2047 encoded-words in the To/From headers.
            messageto = str(make_header(decode_header(email_msg.to)))
            messagefrom = str(make_header(decode_header(email_msg.sender)))
            print (colored('[-] Message Date: ' + email_msg.date,'white'))
            print (colored('[-] Message To: ' + messageto,'white'))
            print (colored('[-] Message From: ' + messagefrom,'white'))
            print (colored('[-] Message Subject: ' + email_msg.subject,'white'))
            if (POE.logging == True):
                newlogentry = 'Message Date: ' + email_msg.date
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
                newlogentry = 'Message To: ' + messageto
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
                newlogentry = 'Message From: ' + messagefrom
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
                newlogentry = 'Message Subject: ' + email_msg.subject
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
            for attachment in email_msg.attachments:
                count += 1
                attachment.save(customPath=POE.logdir)
                print (colored('[*] Attachment extracted: ' + attachment.shortFilename, 'green'))
                if (POE.logging == True):
                    newlogentry = 'Attachment extracted: ' + attachment.shortFilename
                    LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
            print (colored('[*] ' + str(count) + ' total attachments extracted to: ', 'green') + colored(POE.logdir, 'blue', attrs=['bold']))
            if (POE.logging == True):
                newlogentry = str(count) + ' total attachments extracted to: <a href=\"' + POE.logdir + '\">' + POE.logdir + '</a>'
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
        else:
            messageto = str(make_header(decode_header(email_msg.to)))
            messagefrom = str(make_header(decode_header(email_msg.sender)))
            print (colored('[-] Message Date: ' + email_msg.date,'white'))
            print (colored('[-] Message To: ' + messageto,'white'))
            print (colored('[-] Message From: ' + messagefrom,'white'))
            print (colored('[-] Message Subject: ' + email_msg.subject,'white'))
            if (POE.logging == True):
                newlogentry = 'Message Date: ' + email_msg.date
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
                newlogentry = 'Message To: ' + messageto
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
                newlogentry = 'Message From: ' + messagefrom
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
                newlogentry = 'Message Subject: ' + email_msg.subject
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
            print (colored('[-] No attachments found to extract...', 'yellow', attrs=['bold']))
            if (POE.logging == True):
                newlogentry = 'No attachments found to extract...'
                LOG.WriteSubLog(POE.logdir, POE.targetfilename, newlogentry)
    else:
        print (colored('[x] Target is not a supported e-mail type. Must be .eml or .msg!', 'red', attrs=['bold']))
        return -1
    return 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from os.path import abspath, dirname, join
sys.path.append(abspath(join(dirname(__file__), '..')))
from uuid import uuid4
from mcfly import Connection
def test_connection():
    """A freshly built Connection should expose a live underlying connection."""
    conn = Connection(username='test', password='test')
    assert conn.connection
def test_create_catalogue():
    """Accessing an attribute on the connection yields a catalogue of that name."""
    conn = Connection(username='test', password='test')
    assert conn.test_catalogue_1.name == "test_catalogue_1"
def test_create_catalogue_twice_returns_same_catalogue():
    """Accessing the same catalogue attribute twice must stay consistent."""
    conn = Connection(username='test', password='test')
    _ = conn.test_catalogue_2
    catalogue = conn.test_catalogue_2
    assert catalogue.name == "test_catalogue_2"
def test_store_document():
    """Storing a document yields a catalogue-scoped uri, an 8-char id, and the same keys."""
    conn = Connection(username='test', password='test')
    body = {
        'name': 'Bernardo',
        'family_name': 'Heynemann',
        'male': True
    }
    doc = conn.test_catalogue_3.store(body)
    assert doc.uri.startswith('/test_catalogue_3/'), doc.uri
    assert len(doc.id) == 8, len(doc.id)
    assert sorted(doc.body.keys()) == sorted(body.keys())
def test_get_document_by_uri():
    """A stored document can be fetched back by id with identical fields."""
    conn = Connection(username='test', password='test')
    catalogue = conn.test_catalogue_4
    stored = catalogue.store({
        'name': 'Bernardo',
        'family_name': 'Heynemann',
        'male': True
    })
    fetched = catalogue.get(stored.id)
    assert fetched.uri == stored.uri
    assert fetched.id == stored.id
    assert fetched.body == stored.body
def test_get_catalogue_count():
    """After storing two documents and refreshing, the count must be 2."""
    conn = Connection(username='test', password='test')
    catalogue = conn.test_catalogue_5
    body = {
        'name': 'Bernardo',
        'family_name': 'Heynemann',
        'male': True
    }
    for _ in range(2):
        catalogue.store(body)
    catalogue.refresh()
    assert catalogue.count == 2
def test_get_catalogue_documents():
    """get_documents returns stored documents in insertion order."""
    conn = Connection(username='test', password='test')
    catalogue = conn.test_catalogue_6
    first = catalogue.store({
        'name': 'Bernardo',
        'family_name': 'Heynemann',
        'male': True
    })
    second = catalogue.store({
        'name': 'Aline',
        'family_name': 'Lucena',
        'male': False
    })
    documents = catalogue.get_documents()
    assert documents[0].uri == first.uri
    assert documents[1].uri == second.uri
|
# Generated by Django 2.2.4 on 2019-12-04 01:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the Announcement.send_out boolean flag (defaults to False)."""
    dependencies = [
        ('announcements', '0006_auto_20191122_1418'),
    ]
    operations = [
        migrations.AddField(
            model_name='announcement',
            name='send_out',
            field=models.BooleanField(default=False),
        ),
    ]
|
import pandas as pd
# from sklearn import linear_model
# import csv
# from sklearn.preprocessing import Binarizer
# import ast

# Load the tab-separated sample file into a DataFrame and show it.
df = pd.read_csv('txt_to_understand.txt', delimiter="\t")
print(df)

# (translated from Hebrew) Plan: split the data into words/letters/numbers,
# assign each word a number, and weight by frequency, dictionary position,
# and how many times it has been used.
# Sketch of the intended one-hot encoding over word positions:
# [ 0 1 2 3 4 5 6 7 ]
# greetings_words_list [1 0 0 0 0 0 0 0 ]
# body_name_list       [0 1 0 0 0 1 0 0 ]
# (one row per feature, one column per word position)
#
# BUG FIX: the file originally ended with free-standing pseudocode
#   if index 1 in bodyname_binary and bodyname_reg:
#       return True
# which was a SyntaxError and a top-level `return`, making the whole module
# unimportable. Kept here as a TODO until the check is actually implemented:
# TODO: implement the membership test, e.g.
#   def name_matches(bodyname_binary, bodyname_reg, index=1):
#       return index in bodyname_binary and bool(bodyname_reg)
|
from sys import argv
import sqlite3


def roster():
    """Print the roster for the house named in argv[1], ordered by last/first.

    Reads from students.db; prints usage and exits with status 1 when the
    house argument is missing.
    """
    if len(argv) != 2:
        print("Usage: python roster.py House")
        exit(1)
    connection = sqlite3.connect("students.db")
    try:
        cursor = connection.cursor()
        cursor.execute(
            "SELECT first, middle, last, birth FROM students "
            "WHERE house = ? ORDER BY last, first",
            (argv[1],),
        )
        for first, middle, last, birth in cursor.fetchall():
            # Middle name is optional (NULL in the database).
            if middle is not None:
                print(first, middle, last, end="")
            else:
                print(first, last, end="")
            print(f", born {birth}")
        cursor.close()
    finally:
        # Close the connection even if the query fails.
        connection.close()


roster()
|
from rest_framework.response import Response
from rest_framework import response, status
# Библиотеки для сариализации
from rest_framework import serializers, viewsets
# Модели данных
from .models import OverProfCompetencesInGroupOfGeneralCharacteristic, GroupOfOverProfCompetencesInGeneralCharacteristic, IndicatorInOverProfCompetenceInGeneralCharacteristic
from workprogramsapp.models import Indicator
# Другие сериализаторы
from workprogramsapp.serializers import CompetenceSerializer, ImplementationAcademicPlanSerializer, CompetenceForEPSerializer, IndicatorListSerializer, IndicatorListWithoutCompetenceSerializer
"""
Ключевые компетенции
"""
class IndicatorInOverProfCompetenceInGeneralCharacteristicSerializer(serializers.ModelSerializer):
    """
    Competence indicator inside the general characteristic (read side).
    """
    # Nested representation of the linked Indicator (without its competence).
    indicator = IndicatorListWithoutCompetenceSerializer()
    class Meta:
        model = IndicatorInOverProfCompetenceInGeneralCharacteristic
        fields = ['id', 'indicator']
class CreateIndicatorInOverProfCompetenceInGeneralCharacteristicSerializer(serializers.Serializer):
    """
    Write-only serializer that attaches indicators to a competence inside a
    general characteristic: takes a competence-in-group pk plus a list of
    indicator pks and creates one link row per indicator.
    """
    competence_in_group_of_pk = serializers.IntegerField(min_value=1, write_only=True)
    indicator = serializers.ListField(
        child=serializers.IntegerField(min_value=1, write_only=True), write_only=True
    )
    def create(self, validated_data):
        """Create a link row for each indicator pk; raise a validation error on failure."""
        competence = OverProfCompetencesInGroupOfGeneralCharacteristic.objects.get(pk = validated_data.pop('competence_in_group_of_pk'))
        indicators = validated_data.pop('indicator')
        for ind in indicators:
            try:
                IndicatorInOverProfCompetenceInGeneralCharacteristic. \
                    objects.create(competence_in_group_of_pk =
                                   OverProfCompetencesInGroupOfGeneralCharacteristic.objects.get
                                   (pk = competence.id), indicator = Indicator.objects.get(pk = ind))
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            except Exception:
                # NOTE(review): the message assumes the failure is a missing
                # indicator pk, but the create itself could also fail — confirm.
                raise serializers.ValidationError({"error":"indicator not found"})
        return Response(status=status.HTTP_201_CREATED)
class OverProfCompetencesInGroupOfGeneralCharacteristicSerializer(serializers.ModelSerializer):
    """Read serializer for over-professional competences (with their indicators)."""
    # All indicator links attached to this competence-in-group row.
    indicator_of_competence_in_group_of_over_prof_competences = IndicatorInOverProfCompetenceInGeneralCharacteristicSerializer(many=True)
    competence = CompetenceSerializer()
    class Meta:
        model = OverProfCompetencesInGroupOfGeneralCharacteristic
        fields = ['id', 'indicator_of_competence_in_group_of_over_prof_competences', 'competence']
class CreateOverProfCompetencesInGroupOfGeneralCharacteristicSerializer(serializers.ModelSerializer):
    """Create/update serializer for over-professional competences."""
    class Meta:
        model = OverProfCompetencesInGroupOfGeneralCharacteristic
        fields = "__all__"
class GroupOfOverProfCompetencesInGeneralCharacteristicSerializer(serializers.ModelSerializer):
    """Read serializer for a group of over-professional competences in the
    general characteristic of an educational program."""
    competence_in_group_of_over_prof_competences = OverProfCompetencesInGroupOfGeneralCharacteristicSerializer(many=True)
    class Meta:
        model = GroupOfOverProfCompetencesInGeneralCharacteristic
        fields = ['id','name', 'competence_in_group_of_over_prof_competences']
class CreateGroupOfOverProfCompetencesInGeneralCharacteristicSerializer(serializers.ModelSerializer):
    """Create/update serializer for a group of over-professional competences
    in the general characteristic of an educational program."""
    class Meta:
        model = GroupOfOverProfCompetencesInGeneralCharacteristic
        fields = ['id','name', 'general_characteristic']
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from django import template
register = template.Library()
@register.simple_tag
def get_nota_multipla(questaoAvaliacao, opcao):
    """Return the score a multiple-choice option has for a given
    evaluation question.
    """
    # FIX: the original docstring opened with four quotes (`""""`), leaving a
    # stray quote character at the start of the documented text.
    return opcao.get_nota(questaoAvaliacao.nota)
|
import csv
# Read DB1.csv, skipping the header row; collect every row and the set of
# ids found in column 1.
with open ('DB1.csv') as csv_file:
    csv_reader = csv.reader(csv_file,delimiter=",")
    line = 0
    list1 = list()
    idlst = set()
    for row in csv_reader:
        if line == 0:
            line+=1;
            continue
        idlst.add(row[1])
        list1.append(row)
# Same for DB2.csv; ids from both files accumulate in the one idlst set.
with open ('DB2.csv') as csv_file:
    csv_reader = csv.reader(csv_file,delimiter=",")
    line = 0
    list2 = list()
    for row in csv_reader:
        if line == 0:
            line+=1;
            continue
        idlst.add(row[1])
        list2.append(row)
print(list1)
print(list2)
print(idlst)
# Build id -> [col0, col2] from whichever rows contain the id.
# NOTE(review): `i in l` tests membership anywhere in the row, not just
# column 1, and a DB2 match silently overwrites a DB1 match — confirm both
# behaviors are intended.
db = dict()
for i in idlst:
    for l in list1:
        lt = list()
        if i in l:
            lt.append(l[0])
            lt.append(l[2])
            db[i] = lt
    for l in list2:
        lt = list()
        if i in l:
            lt.append(l[0])
            lt.append(l[2])
            db[i] = lt
# Normalize blood-group strings: "X positive" -> "X+", "X negative" -> "X-".
# Values without a second word are left unchanged (IndexError is skipped).
for k,v in db.items():
    bg = v[1].split()
    try:
        if bg[1] == "positive":
            bg[1] = "+"
        elif bg[1] == "negative":
            bg[1] = "-"
    except Exception as ex:
        continue
    v[1] = "".join(bg)
    db[k] = v
print(db)
# Write the merged records out as id, col0, col2.
with open('combined.csv',mode='w') as file:
    writer = csv.writer(file,delimiter=',')
    for k,v in db.items():
        lst = list();
        lst.append(k)
        lst.append(v[0])
        lst.append(v[1])
        writer.writerow(lst)
print("file written");
|
import sys
from PySide import QtGui
from aside.facade import AsideFacade
from standalone import main as sa_main, components
from . import controller
class PluginFacade(AsideFacade):
    """Facade for user plugins; presumably a PureMVC-style multiton given the
    multitonKey/registerCommand API — TODO confirm against AsideFacade."""
    # Notification name broadcast when the plugin starts up.
    STARTUP = 'startup'
    # Directory user plugins are loaded from.
    PLUGIN_DIR = "plugins/user"
    def __init__(self, multitonKey):
        super(PluginFacade, self).__init__(multitonKey)
    def initializeFacade(self):
        super(PluginFacade, self).initializeFacade()
        self.initializeController()
    def initializeController(self):
        # Wire the STARTUP notification to its command handler.
        super(PluginFacade, self).initializeController()
        super(PluginFacade, self).registerCommand(PluginFacade.STARTUP, controller.StartupCommand)
# Multiton key for the main application's facade instance.
MAIN_APP_KEY = 'mainAppKey'
if __name__ == '__main__':
    qtapp = QtGui.QApplication(sys.argv)
    # Boot the standalone application facade and show its main window.
    standalone_app = sa_main.StandaloneAppFacade.getInstance(key=MAIN_APP_KEY)
    # NOTE: remember to disable webview QURL open
    main_window = components.QtMainWindow()
    main_window.show()
    standalone_app.sendNotification(sa_main.StandaloneAppFacade.STARTUP, main_window)
    #user_facade = PluginFacade.getInstance("user")
    #if user_facade:
    #    user_facade.PLUGIN_NAME = "user"
    #    user_facade.sendNotification(PluginFacade.STARTUP, main_window)
    sys.exit(qtapp.exec_())
|
from yabadaba import databasemanager
from .IprPyDatabase import IprPyDatabase
# Extend the yabadaba LocalDatabase to include IprPyDatabase operations
class LocalDatabase(databasemanager.get_class('local'), IprPyDatabase):
    """yabadaba's 'local' database class extended with IprPyDatabase operations;
    all behavior comes from the two bases."""
    pass
|
# Cal n!
# SECURITY FIX: int() replaces eval(); eval executed arbitrary user-typed
# expressions, which is unsafe (and unnecessary) just to read an integer.
n = int(input('请输入一个不小于0的整数:'))
def jc(n):
    """Recursively compute n! for a non-negative integer n."""
    # 0! is defined as 1; otherwise peel one factor off and recurse.
    return 1 if n == 0 else n * jc(n - 1)
print(jc(n))  # display n! for the value read above
|
from .copy import copy
from .file import file
from .synchronize import synchronize
from .command import command
from .template import template
# Dispatch table mapping task-module names to their handler callables.
cmd_map = {"copy": copy, "file": file, "synchronize": synchronize, "command": command, "template": template}
|
n1 = int(input('Digite um valor: '))
n2 = int(input('Digite outro valor: '))
action = 0
# Menu loop: keep offering operations until option 5 (quit) is chosen.
while action != (5):
    print('[1]Somar '
          '\n[2]Multiplicar'
          '\n[3]Maior'
          '\n[4]Novos números'
          '\n[5]Sair do programa')
    action = int(input('O que você deseja fazer? '))
    if action == 1:
        # Sum of the two current values.
        s = n1 + n2
        print('A soma entre os valores {} e {} é igual a {}'.format(n1, n2, s))
        print('=' * 20)
    elif action == 2:
        # Product of the two current values.
        m = n1 * n2
        print('O produto de {} e {} é {}'.format(n1, n2, m))
        print('=' * 20)
    elif action == 3:
        # Largest of the two (with a note when they are equal).
        lista = [n1, n2]
        print('O maior valor entre {} e {} é {}'.format(n1, n2, max(lista)))
        if n1 == n2:
            print('Os dois valores são iguais')
        print('=' * 20)
    elif action == 4:
        # Read a fresh pair of values.
        n1 = int(input('Digite um valor: '))
        n2 = int(input('Digite outro valor: '))
        print('=' * 20)
    elif action == 5:
        print('Ok')
        print('Programa desligado')
        print('=' * 20)
|
import ipaddress as ip
def get_peer_info(pd, hostvars):
    """Return {'name': ..., 'ip': ...} for the far end of a point-to-point link.

    Scans the peer host's ports for an IPv4 address on the same network as
    pd['ip']; the 'ip' key is left unset when no port matches.
    """
    peer = { 'name': pd['peer'] }
    our_net = ip.IPv4Interface(pd['ip']).network
    pvars = hostvars[peer['name']]
    # FIX: .items() instead of the Python-2-only .iteritems(), so the filter
    # also runs under Python 3 (identical iteration behavior on Python 2).
    for k,v in pvars['ports'].items():
        if not v['ip']:
            # Port has no address configured; skip it.
            continue
        peer_nobj = ip.IPv4Interface(v['ip'])
        if our_net == peer_nobj.network:
            peer['ip'] = peer_nobj.ip
            break
    return peer
class FilterModule(object):
    ''' URI filter '''
    # Ansible filter-plugin entry point: exposes get_peer_info to playbooks.
    def filters(self):
        return {
            'get_peer_info': get_peer_info
        }
|
from client import Bot
from load import configs, extensions
import asyncio
# One Bot per config entry that declares extensions; all bots share one loop.
bots = []
loop = asyncio.get_event_loop()
for c in configs:
    if('extensions' in c):
        bot = Bot(loop=loop)
        ext = c['extensions']
        newext = []
        # Instantiate each named extension with its own config section.
        for e in ext:
            newext.append(extensions[e](bot, ext[e]))
        c['extensions'] = newext
        bots.append(bot)
        loop.create_task(bot.initialize(c))
try:
    loop.run_forever()
except KeyboardInterrupt:
    print('Detected interrupt, exiting')
    # Log every bot out cleanly before closing the loop.
    for bot in bots:
        print('Logging out %s' % bot.user.name)
        loop.run_until_complete(bot.logout())
finally:
    loop.close()
|
import argparse
from da_manager import DaData
from data_manager import dataman_factory
from plotter import Plotter
class Runner(object):
    """Build rasters for dissemination areas near active stops and plot them."""

    def __init__(self, args):
        # args is accepted for CLI symmetry but not currently used.
        self._daman = DaData()
        self._dataman = dataman_factory("june", link_route_shapes=False, link_stops=True)

    def run(self):
        """Render Saskatoon's bounding box plus one polygon per raster."""
        plotter = Plotter()
        plotter.add_polygon(self._daman.get_saskatoon_bounding_box())

        # To speed things up, make a list of all stops and throw away any raster
        # points that are farther than 1 km from the nearest stop.
        stops = self._dataman.get_active_stops()
        self._daman.make_rasters(stops)

        for raster in self._daman.get_all_rasters(stops):
            plotter.add_polygon(raster.get_polygon())

        plotter.plot("temp/maps/make_rasters.html")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Make Dissemination Areas Rasters')
    # parser.add_argument("-d", "--dataset", help="Dataset", type=str, required=True)
    # args currently carries no options but is passed through for future use.
    args = parser.parse_args()
    runner = Runner(args)
    runner.run()
|
from django.shortcuts import render, render_to_response
from django.http import HttpResponse
from django.template import RequestContext
from blog.models import Post
from blog.models import Tag
def load(request, id):
    """Render all posts carrying the given tag, newest update first."""
    tag = Tag.objects.get(id=id)
    posts = tag.post_set.all().order_by('-updateDatetime')
    return render(request, 'blog/content.html', {'posts': posts})
|
import os, shutil
file_paths = [os.path.join(root,f) for root, _, files in os.walk('wiki/fr/') for f in files]
for path in file_paths:
print path
try:
if not os.path.exists('/run/media/user/LBOX21/LibraryBox/Shared/'+os.path.dirname(path)):
os.makedirs('/run/media/user/LBOX21/LibraryBox/Shared/'+os.path.dirname(path))
shutil.copy(path, '/run/media/user/LBOX21/LibraryBox/Shared/'+path)
except:
print "Error with: "+path
with open("copy_fails.txt", "a") as f:
f.write(path+'/n')
|
#!/usr/bin/python3
# This is the Python 3.x client-sided message viewer script for a Chat Server.
from socket import socket, AF_INET, SOCK_STREAM, error

HOST = 'localhost'
PORT = 1337
serverAddress = (HOST, PORT)

Message_Socket = socket(family=AF_INET, type=SOCK_STREAM)
try:
    Message_Socket.connect(serverAddress)
    # Magic string identifying this client to the server as the receiver.
    message = 'This is the message receiver JKwuP89?!234swWIpd'
    Message_Socket.send(message.encode(encoding='ascii'))
    # Print every message the server pushes until the connection closes.
    while True:
        print('\n')
        recvMessage = Message_Socket.recv(1024)
        if not recvMessage:
            # BUG FIX: recv() returning b'' means the server closed the
            # connection; the original `continue` spun forever here.
            break
        else:
            print(recvMessage.decode(encoding='ascii'))
except error:
    print('Sorry cannot connect to the Chat Server :(')
finally:
    Message_Socket.close()
|
# POST data length, used for the Content-Length header.
def get_content_length(data):
    """Return the urlencoded length of *data*: all keys + all values + separators.

    Each of the n pairs contributes one '=' and the n-1 joins contribute one
    '&', hence the `2n - 1` separator characters.
    """
    length = len(data.keys()) * 2 - 1
    total = ''.join(list(data.keys()) + list(data.values()))
    length += len(total)
    # BUG FIX: the function never returned its result, and a stray fragment
    # (`int(time_now2())>2 and int(common.time_now2())<12)`) with an unmatched
    # parenthesis made the module unparseable; the fragment was removed and
    # the missing return added.
    return length
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This test covers a resharding scenario of an already sharded keyspace.
We start with shards -80 and 80-. We then split 80- into 80-c0 and c0-.
This test is the main resharding test. It not only tests the regular resharding
workflow for an horizontal split, but also a lot of error cases and side
effects, like:
- migrating the traffic one cell at a time.
- migrating rdonly traffic back and forth.
- making sure we can't migrate the master until replica and rdonly are migrated.
- has a background thread to insert data during migration.
- tests a destination shard master failover while replication is running.
- tests a filtered replication source replacement while filtered replication
is running.
- tests 'vtctl SourceShardAdd' and 'vtctl SourceShardDelete'.
- makes sure the key range rules are properly enforced on masters.
"""
import threading
import time
import logging
import unittest
import base_sharding
import environment
import tablet
import utils
from vtproto import topodata_pb2
from vtdb import keyrange_constants
# Tablet fixtures for the resharding test topology: two initial shards are
# split so that 80- becomes 80-c0 and c0-.
# initial shards
# range '' - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_ny_rdonly = tablet.Tablet(cell='ny')
# range 80 - ''
shard_1_master = tablet.Tablet()
shard_1_slave1 = tablet.Tablet()
shard_1_slave2 = tablet.Tablet()
shard_1_ny_rdonly = tablet.Tablet(cell='ny')
shard_1_rdonly1 = tablet.Tablet()
# split shards
# range 80 - c0
shard_2_master = tablet.Tablet()
shard_2_replica1 = tablet.Tablet()
shard_2_replica2 = tablet.Tablet()
shard_2_rdonly1 = tablet.Tablet()
# range c0 - ''
shard_3_master = tablet.Tablet()
shard_3_replica = tablet.Tablet()
shard_3_rdonly1 = tablet.Tablet()
# Convenience groupings used by module setup/teardown below.
shard_2_tablets = [shard_2_master, shard_2_replica1, shard_2_replica2,
                   shard_2_rdonly1]
shard_3_tablets = [shard_3_master, shard_3_replica, shard_3_rdonly1]
all_tablets = ([shard_0_master, shard_0_replica, shard_0_ny_rdonly,
                shard_1_master, shard_1_slave1, shard_1_slave2,
                shard_1_ny_rdonly, shard_1_rdonly1] +
               shard_2_tablets + shard_3_tablets)
def setUpModule():
  """Start the topo server, a mysqld per tablet, and vtctld."""
  try:
    environment.topo_server().setup()
    setup_procs = [t.init_mysql(use_rbr=base_sharding.use_rbr)
                   for t in all_tablets]
    utils.Vtctld().start()
    utils.wait_procs(setup_procs)
  except:
    # Best-effort cleanup so a failed setup doesn't leak processes.
    tearDownModule()
    raise
def tearDownModule():
  """Tear down mysqld, the topo server, and scratch files for every tablet."""
  utils.required_teardown()
  if utils.options.skip_teardown:
    return
  teardown_procs = [t.teardown_mysql() for t in all_tablets]
  utils.wait_procs(teardown_procs, raise_on_error=False)
  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  for t in all_tablets:
    t.remove_tree()
# InsertThread will insert a value into the timestamps table, and then
# every 1/5s will update its value with the current timestamp
class InsertThread(threading.Thread):
  """Background writer: inserts one row into `timestamps` and then keeps
  refreshing its time_milli column every 0.2s until `done` is set."""
  def __init__(self, tablet_obj, thread_name, thread_id, user_id,
               keyspace_id):
    threading.Thread.__init__(self)
    self.tablet = tablet_obj
    self.thread_name = thread_name
    self.thread_id = thread_id
    self.user_id = user_id
    self.keyspace_id = keyspace_id
    self.str_keyspace_id = utils.uint64_to_hex(keyspace_id)
    self.done = False
    # Seed the row synchronously so it exists before the thread starts.
    self.tablet.mquery(
        'vt_test_keyspace',
        ['begin',
         'insert into timestamps(id, time_milli, custom_ksid_col) '
         'values(%d, %d, 0x%x) '
         '/* vtgate:: keyspace_id:%s */ /* user_id:%d */' %
         (self.thread_id, long(time.time() * 1000), self.keyspace_id,
          self.str_keyspace_id, self.user_id),
         'commit'],
        write=True, user='vt_app')
    self.start()
  def run(self):
    # Update the row's timestamp until the test flips `done`; see the
    # keyspace_id comment hint so filtered replication routes the statement.
    try:
      while not self.done:
        self.tablet.mquery(
            'vt_test_keyspace',
            ['begin',
             'update timestamps set time_milli=%d '
             'where id=%d /* vtgate:: keyspace_id:%s */ /* user_id:%d */' %
             (long(time.time() * 1000), self.thread_id,
              self.str_keyspace_id, self.user_id),
             'commit'],
            write=True, user='vt_app')
        time.sleep(0.2)
    except Exception:  # pylint: disable=broad-except
      logging.exception('InsertThread got exception.')
# MonitorLagThread will get values from a database, and compare the timestamp
# to evaluate lag. Since the qps is really low, and we send binlogs as chunks,
# the latency is pretty high (a few seconds).
class MonitorLagThread(threading.Thread):
  """Background reader: polls a `timestamps` row every 5s and tracks the
  max/sum/count of the observed replication lag in milliseconds."""
  def __init__(self, tablet_obj, thread_name, thread_id):
    threading.Thread.__init__(self)
    self.tablet = tablet_obj
    self.thread_name = thread_name
    self.thread_id = thread_id
    self.done = False
    # Lag statistics accumulated by run().
    self.max_lag_ms = 0
    self.lag_sum_ms = 0
    self.sample_count = 0
    self.start()
  def run(self):
    try:
      while not self.done:
        result = self.tablet.mquery(
            'vt_test_keyspace',
            'select time_milli from timestamps where id=%d' %
            self.thread_id)
        if result:
          # Lag = now minus the last timestamp the InsertThread wrote.
          lag_ms = long(time.time() * 1000) - long(result[0][0])
          logging.debug('MonitorLagThread(%s) got %d ms',
                        self.thread_name, lag_ms)
          self.sample_count += 1
          self.lag_sum_ms += lag_ms
          if lag_ms > self.max_lag_ms:
            self.max_lag_ms = lag_ms
        time.sleep(5.0)
    except Exception:  # pylint: disable=broad-except
      logging.exception('MonitorLagThread got exception.')
class TestResharding(unittest.TestCase, base_sharding.BaseShardingTest):
# create_schema will create the same schema on the keyspace
# then insert some values
def _create_schema(self):
if base_sharding.keyspace_id_type == keyrange_constants.KIT_BYTES:
t = 'varbinary(64)'
else:
t = 'bigint(20) unsigned'
# Note that the primary key columns are not defined first on purpose to test
# that a reordered column list is correctly used everywhere in vtworker.
create_table_template = '''create table %s(
custom_ksid_col ''' + t + ''' not null,
msg varchar(64),
id bigint not null,
parent_id bigint not null,
primary key (parent_id, id),
index by_msg (msg)
) Engine=InnoDB'''
create_table_bindata_template = '''create table %s(
custom_ksid_col ''' + t + ''' not null,
id bigint not null,
parent_id bigint not null,
msg bit(8),
primary key (parent_id, id),
index by_msg (msg)
) Engine=InnoDB'''
create_view_template = (
'create view %s'
'(parent_id, id, msg, custom_ksid_col)'
'as select parent_id, id, msg, custom_ksid_col '
'from %s')
create_timestamp_table = '''create table timestamps(
id int not null,
time_milli bigint(20) unsigned not null,
custom_ksid_col ''' + t + ''' not null,
primary key (id)
) Engine=InnoDB'''
# Make sure that clone and diff work with tables which have no primary key.
# RBR only because Vitess requires the primary key for query rewrites if
# it is running with statement based replication.
create_no_pk_table = '''create table no_pk(
custom_ksid_col ''' + t + ''' not null,
msg varchar(64),
id bigint not null,
parent_id bigint not null
) Engine=InnoDB'''
create_unrelated_table = '''create table unrelated(
name varchar(64),
primary key (name)
) Engine=InnoDB'''
utils.run_vtctl(['ApplySchema',
'-sql=' + create_table_template % ('resharding1'),
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_table_template % ('resharding2'),
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_table_bindata_template % ('resharding3'),
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_view_template % ('view1', 'resharding1'),
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_timestamp_table,
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_unrelated_table,
'test_keyspace'],
auto_log=True)
if base_sharding.use_rbr:
utils.run_vtctl(['ApplySchema', '-sql=' + create_no_pk_table,
'test_keyspace'], auto_log=True)
def _insert_startup_values(self):
self._insert_value(shard_0_master, 'resharding1', 1, 'msg1',
0x1000000000000000)
self._insert_value(shard_1_master, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._insert_value(shard_1_master, 'resharding1', 3, 'msg3',
0xD000000000000000)
self._insert_value(shard_0_master, 'resharding3', 1, 'a',
0x1000000000000000)
self._insert_value(shard_1_master, 'resharding3', 2, 'b',
0x9000000000000000)
self._insert_value(shard_1_master, 'resharding3', 3, 'c',
0xD000000000000000)
if base_sharding.use_rbr:
self._insert_value(shard_1_master, 'no_pk', 1, 'msg1',
0xA000000000000000)
# TODO(github.com/vitessio/vitess/issues/2880): Add more rows here such
# clone and diff would break when the insertion order on source and
# dest shards is different.
def _check_startup_values(self):
# check first value is in the right shard
for t in shard_2_tablets:
self._check_value(t, 'resharding1', 2, 'msg2', 0x9000000000000000)
self._check_value(t, 'resharding3', 2, 'b', 0x9000000000000000)
for t in shard_3_tablets:
self._check_value(t, 'resharding1', 2, 'msg2', 0x9000000000000000,
should_be_here=False)
self._check_value(t, 'resharding3', 2, 'b', 0x9000000000000000,
should_be_here=False)
# check second value is in the right shard too
for t in shard_2_tablets:
self._check_value(t, 'resharding1', 3, 'msg3', 0xD000000000000000,
should_be_here=False)
self._check_value(t, 'resharding3', 3, 'c', 0xD000000000000000,
should_be_here=False)
for t in shard_3_tablets:
self._check_value(t, 'resharding1', 3, 'msg3', 0xD000000000000000)
self._check_value(t, 'resharding3', 3, 'c', 0xD000000000000000)
if base_sharding.use_rbr:
for t in shard_2_tablets:
self._check_value(t, 'no_pk', 1, 'msg1', 0xA000000000000000)
for t in shard_3_tablets:
self._check_value(t, 'no_pk', 1, 'msg1', 0xA000000000000000,
should_be_here=False)
def _insert_lots(self, count, base=0):
for i in xrange(count):
self._insert_value(shard_1_master, 'resharding1', 10000 + base + i,
'msg-range1-%d' % i, 0xA000000000000000 + base + i)
self._insert_value(shard_1_master, 'resharding1', 20000 + base + i,
'msg-range2-%d' % i, 0xE000000000000000 + base + i)
  def _exec_multi_shard_dmls(self):
    """Runs multi-row DMLs on the source master that span several shards.

    All statements go to shard 80- (shard_1_master). Filtered replication
    must route each affected row to the right destination shard: 0x90...
    ids to 80-c0, 0xD0.../0xE0... ids to c0-. The outcome is verified by
    _check_multi_shard_values; the statement counts also feed the
    check_running_binlog_player assertions later in the test, so the exact
    sequence below must not change.
    """
    # Multi-row insert spanning both destination shards.
    mids = [10000001, 10000002, 10000003]
    msg_ids = ['msg-id10000001', 'msg-id10000002', 'msg-id10000003']
    keyspace_ids = [0x9000000000000000, 0xD000000000000000,
                    0xE000000000000000]
    self._insert_multi_value(shard_1_master, 'resharding1', mids,
                             msg_ids, keyspace_ids)
    # Multi-row insert where every row lands on the second shard (c0-).
    mids = [10000004, 10000005]
    msg_ids = ['msg-id10000004', 'msg-id10000005']
    keyspace_ids = [0xD000000000000000, 0xE000000000000000]
    self._insert_multi_value(shard_1_master, 'resharding1', mids,
                             msg_ids, keyspace_ids)
    # Rows used as targets for the non-annotated updates below.
    mids = [10000011, 10000012, 10000013]
    msg_ids = ['msg-id10000011', 'msg-id10000012', 'msg-id10000013']
    keyspace_ids = [0x9000000000000000, 0xD000000000000000, 0xE000000000000000]
    self._insert_multi_value(shard_1_master, 'resharding1', mids,
                             msg_ids, keyspace_ids)
    # This update targets two shards.
    self._exec_non_annotated_update(shard_1_master, 'resharding1',
                                    [10000011, 10000012], 'update1')
    # This update targets one shard.
    self._exec_non_annotated_update(shard_1_master, 'resharding1',
                                    [10000013], 'update2')
    # Rows used as targets for the non-annotated deletes below.
    mids = [10000014, 10000015, 10000016]
    msg_ids = ['msg-id10000014', 'msg-id10000015', 'msg-id10000016']
    keyspace_ids = [0x9000000000000000, 0xD000000000000000, 0xE000000000000000]
    self._insert_multi_value(shard_1_master, 'resharding1', mids,
                             msg_ids, keyspace_ids)
    # This delete targets two shards.
    self._exec_non_annotated_delete(shard_1_master, 'resharding1',
                                    [10000014, 10000015])
    # This delete targets one shard.
    self._exec_non_annotated_delete(shard_1_master, 'resharding1', [10000016])
    # repeat DMLs for table with msg as bit(8)
    mids = [10000001, 10000002, 10000003]
    keyspace_ids = [0x9000000000000000, 0xD000000000000000,
                    0xE000000000000000]
    self._insert_multi_value(shard_1_master, 'resharding3', mids,
                             ['a','b','c'], keyspace_ids)
    mids = [10000004, 10000005]
    keyspace_ids = [0xD000000000000000, 0xE000000000000000]
    self._insert_multi_value(shard_1_master, 'resharding3', mids,
                             ['d', 'e'], keyspace_ids)
    mids = [10000011, 10000012, 10000013]
    keyspace_ids = [0x9000000000000000, 0xD000000000000000, 0xE000000000000000]
    self._insert_multi_value(shard_1_master, 'resharding3', mids,
                             ['k', 'l', 'm'], keyspace_ids)
    # This update targets two shards.
    self._exec_non_annotated_update(shard_1_master, 'resharding3',
                                    [10000011, 10000012], 'g')
    # This update targets one shard.
    self._exec_non_annotated_update(shard_1_master, 'resharding3',
                                    [10000013], 'h')
    mids = [10000014, 10000015, 10000016]
    keyspace_ids = [0x9000000000000000, 0xD000000000000000, 0xE000000000000000]
    self._insert_multi_value(shard_1_master, 'resharding3', mids,
                             ['n', 'o', 'p'], keyspace_ids)
    # This delete targets two shards.
    self._exec_non_annotated_delete(shard_1_master, 'resharding3',
                                    [10000014, 10000015])
    # This delete targets one shard.
    self._exec_non_annotated_delete(shard_1_master, 'resharding3', [10000016])
  def _check_multi_shard_values(self):
    """Verifies the rows written by _exec_multi_shard_dmls landed correctly.

    Shard 2 tablets serve 80-c0 (keyspace_id 0x90...), shard 3 tablets
    serve c0- (0xD0.../0xE0...). Rows deleted at the source must be absent
    from both destinations.
    """
    # Inserted rows: each one present on exactly one destination shard.
    self._check_multi_dbs(
        [shard_2_master, shard_2_replica1, shard_2_replica2],
        'resharding1', 10000001, 'msg-id10000001', 0x9000000000000000)
    self._check_multi_dbs(
        [shard_2_master, shard_2_replica1, shard_2_replica2],
        'resharding1', 10000002, 'msg-id10000002', 0xD000000000000000,
        should_be_here=False)
    self._check_multi_dbs(
        [shard_2_master, shard_2_replica1, shard_2_replica2],
        'resharding1', 10000003, 'msg-id10000003', 0xE000000000000000,
        should_be_here=False)
    self._check_multi_dbs(
        [shard_3_master, shard_3_replica],
        'resharding1', 10000001, 'msg-id10000001', 0x9000000000000000,
        should_be_here=False)
    self._check_multi_dbs(
        [shard_3_master, shard_3_replica],
        'resharding1', 10000002, 'msg-id10000002', 0xD000000000000000)
    self._check_multi_dbs(
        [shard_3_master, shard_3_replica],
        'resharding1', 10000003, 'msg-id10000003', 0xE000000000000000)
    self._check_multi_dbs(
        [shard_2_master, shard_2_replica1, shard_2_replica2],
        'resharding1', 10000004, 'msg-id10000004', 0xD000000000000000,
        should_be_here=False)
    self._check_multi_dbs(
        [shard_2_master, shard_2_replica1, shard_2_replica2],
        'resharding1', 10000005, 'msg-id10000005', 0xE000000000000000,
        should_be_here=False)
    self._check_multi_dbs(
        [shard_3_master, shard_3_replica],
        'resharding1', 10000004, 'msg-id10000004', 0xD000000000000000)
    self._check_multi_dbs(
        [shard_3_master, shard_3_replica],
        'resharding1', 10000005, 'msg-id10000005', 0xE000000000000000)
    # Updated rows: msg must reflect the non-annotated updates.
    self._check_multi_dbs(
        [shard_2_master, shard_2_replica1, shard_2_replica2],
        'resharding1', 10000011, 'update1', 0x9000000000000000)
    self._check_multi_dbs(
        [shard_3_master, shard_3_replica],
        'resharding1', 10000012, 'update1', 0xD000000000000000)
    self._check_multi_dbs(
        [shard_3_master, shard_3_replica],
        'resharding1', 10000013, 'update2', 0xE000000000000000)
    # Deleted rows: absent from every destination tablet.
    self._check_multi_dbs(
        [shard_2_master, shard_2_replica1, shard_2_replica2,
         shard_3_master, shard_3_replica],
        'resharding1', 10000014, 'msg-id10000014', 0x9000000000000000,
        should_be_here=False)
    self._check_multi_dbs(
        [shard_2_master, shard_2_replica1, shard_2_replica2,
         shard_3_master, shard_3_replica],
        'resharding1', 10000015, 'msg-id10000015', 0xD000000000000000,
        should_be_here=False)
    self._check_multi_dbs(
        [shard_2_master, shard_2_replica1, shard_2_replica2,
         shard_3_master, shard_3_replica],
        'resharding1', 10000016, 'msg-id10000016', 0xF000000000000000,
        should_be_here=False)
    # checks for bit(8) table
    self._check_multi_dbs(
        [shard_2_master, shard_2_replica1, shard_2_replica2],
        'resharding3', 10000001, 'a', 0x9000000000000000)
    self._check_multi_dbs(
        [shard_2_master, shard_2_replica1, shard_2_replica2],
        'resharding3', 10000002, 'b', 0xD000000000000000,
        should_be_here=False)
    self._check_multi_dbs(
        [shard_2_master, shard_2_replica1, shard_2_replica2],
        'resharding3', 10000003, 'c', 0xE000000000000000,
        should_be_here=False)
    self._check_multi_dbs(
        [shard_3_master, shard_3_replica],
        'resharding3', 10000001, 'a', 0x9000000000000000,
        should_be_here=False)
    self._check_multi_dbs(
        [shard_3_master, shard_3_replica],
        'resharding3', 10000002, 'b', 0xD000000000000000)
    self._check_multi_dbs(
        [shard_3_master, shard_3_replica],
        'resharding3', 10000003, 'c', 0xE000000000000000)
    self._check_multi_dbs(
        [shard_2_master, shard_2_replica1, shard_2_replica2],
        'resharding3', 10000004, 'd', 0xD000000000000000,
        should_be_here=False)
    self._check_multi_dbs(
        [shard_2_master, shard_2_replica1, shard_2_replica2],
        'resharding3', 10000005, 'e', 0xE000000000000000,
        should_be_here=False)
    self._check_multi_dbs(
        [shard_3_master, shard_3_replica],
        'resharding3', 10000004, 'd', 0xD000000000000000)
    self._check_multi_dbs(
        [shard_3_master, shard_3_replica],
        'resharding3', 10000005, 'e', 0xE000000000000000)
    self._check_multi_dbs(
        [shard_2_master, shard_2_replica1, shard_2_replica2],
        'resharding3', 10000011, 'g', 0x9000000000000000)
    self._check_multi_dbs(
        [shard_3_master, shard_3_replica],
        'resharding3', 10000012, 'g', 0xD000000000000000)
    self._check_multi_dbs(
        [shard_3_master, shard_3_replica],
        'resharding3', 10000013, 'h', 0xE000000000000000)
    self._check_multi_dbs(
        [shard_2_master, shard_2_replica1, shard_2_replica2,
         shard_3_master, shard_3_replica],
        'resharding3', 10000014, 'n', 0x9000000000000000,
        should_be_here=False)
    self._check_multi_dbs(
        [shard_2_master, shard_2_replica1, shard_2_replica2,
         shard_3_master, shard_3_replica],
        'resharding3', 10000015, 'o', 0xD000000000000000,
        should_be_here=False)
    self._check_multi_dbs(
        [shard_2_master, shard_2_replica1, shard_2_replica2,
         shard_3_master, shard_3_replica],
        'resharding3', 10000016, 'p', 0xF000000000000000,
        should_be_here=False)
# _check_multi_dbs checks the row in multiple dbs.
def _check_multi_dbs(self, dblist, table, mid, msg, keyspace_id,
should_be_here=True):
for db in dblist:
self._check_value(db, table, mid, msg, keyspace_id, should_be_here)
# _check_lots returns how many of the values we have, in percents.
def _check_lots(self, count, base=0):
found = 0
for i in xrange(count):
if self._is_value_present_and_correct(shard_2_replica2, 'resharding1',
10000 + base + i, 'msg-range1-%d' %
i, 0xA000000000000000 + base + i):
found += 1
if self._is_value_present_and_correct(shard_3_replica, 'resharding1',
20000 + base + i, 'msg-range2-%d' %
i, 0xE000000000000000 + base + i):
found += 1
percent = found * 100 / count / 2
logging.debug('I have %d%% of the data', percent)
return percent
def _check_lots_timeout(self, count, threshold, timeout, base=0):
while True:
value = self._check_lots(count, base=base)
if value >= threshold:
return value
timeout = utils.wait_step('waiting for %d%% of the data' % threshold,
timeout, sleep_time=1)
# _check_lots_not_present makes sure no data is in the wrong shard
def _check_lots_not_present(self, count, base=0):
for i in xrange(count):
self._check_value(shard_3_replica, 'resharding1', 10000 + base + i,
'msg-range1-%d' % i, 0xA000000000000000 + base + i,
should_be_here=False)
self._check_value(shard_2_replica2, 'resharding1', 20000 + base + i,
'msg-range2-%d' % i, 0xE000000000000000 + base + i,
should_be_here=False)
def test_resharding(self):
# we're going to reparent and swap these two
global shard_2_master, shard_2_replica1
utils.run_vtctl(['CreateKeyspace',
'--sharding_column_name', 'bad_column',
'--sharding_column_type', 'bytes',
'test_keyspace'])
utils.run_vtctl(['SetKeyspaceShardingInfo', 'test_keyspace',
'custom_ksid_col', 'uint64'], expect_fail=True)
utils.run_vtctl(['SetKeyspaceShardingInfo', '-force',
'test_keyspace',
'custom_ksid_col', base_sharding.keyspace_id_type])
shard_0_master.init_tablet('replica', 'test_keyspace', '-80')
shard_0_replica.init_tablet('replica', 'test_keyspace', '-80')
shard_0_ny_rdonly.init_tablet('rdonly', 'test_keyspace', '-80')
shard_1_master.init_tablet('replica', 'test_keyspace', '80-')
shard_1_slave1.init_tablet('replica', 'test_keyspace', '80-')
shard_1_slave2.init_tablet('replica', 'test_keyspace', '80-')
shard_1_ny_rdonly.init_tablet('rdonly', 'test_keyspace', '80-')
shard_1_rdonly1.init_tablet('rdonly', 'test_keyspace', '80-')
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace'])
self.assertEqual(ks['sharding_column_name'], 'custom_ksid_col')
# we set full_mycnf_args to True as a test in the KIT_BYTES case
full_mycnf_args = (base_sharding.keyspace_id_type ==
keyrange_constants.KIT_BYTES)
# create databases so vttablet can start behaving somewhat normally
for t in [shard_0_master, shard_0_replica, shard_0_ny_rdonly,
shard_1_master, shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
shard_1_rdonly1]:
t.create_db('vt_test_keyspace')
t.start_vttablet(wait_for_state=None, full_mycnf_args=full_mycnf_args,
binlog_use_v3_resharding_mode=False)
# wait for the tablets (replication is not setup, they won't be healthy)
for t in [shard_0_master, shard_0_replica, shard_0_ny_rdonly,
shard_1_master, shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
shard_1_rdonly1]:
t.wait_for_vttablet_state('NOT_SERVING')
# reparent to make the tablets work
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-80',
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-',
shard_1_master.tablet_alias], auto_log=True)
# check the shards
shards = utils.run_vtctl_json(['FindAllShardsInKeyspace', 'test_keyspace'])
self.assertIn('-80', shards, 'unexpected shards: %s' % str(shards))
self.assertIn('80-', shards, 'unexpected shards: %s' % str(shards))
self.assertEqual(len(shards), 2, 'unexpected shards: %s' % str(shards))
# create the tables
self._create_schema()
self._insert_startup_values()
# run a health check on source replicas so they respond to discovery
# (for binlog players) and on the source rdonlys (for workers)
for t in [shard_0_replica, shard_1_slave1]:
utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
for t in [shard_0_ny_rdonly, shard_1_ny_rdonly, shard_1_rdonly1]:
utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
# create the split shards
shard_2_master.init_tablet('replica', 'test_keyspace', '80-c0')
shard_2_replica1.init_tablet('replica', 'test_keyspace', '80-c0')
shard_2_replica2.init_tablet('replica', 'test_keyspace', '80-c0')
shard_2_rdonly1.init_tablet('rdonly', 'test_keyspace', '80-c0')
shard_3_master.init_tablet('replica', 'test_keyspace', 'c0-')
shard_3_replica.init_tablet('replica', 'test_keyspace', 'c0-')
shard_3_rdonly1.init_tablet('rdonly', 'test_keyspace', 'c0-')
# start vttablet on the split shards (no db created,
# so they're all not serving)
shard_2_master.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
shard_3_master.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
for t in [shard_2_replica1, shard_2_replica2, shard_2_rdonly1,
shard_3_replica, shard_3_rdonly1]:
t.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
for t in [shard_2_master, shard_2_replica1, shard_2_replica2,
shard_2_rdonly1,
shard_3_master, shard_3_replica, shard_3_rdonly1]:
t.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-c0',
shard_2_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/c0-',
shard_3_master.tablet_alias], auto_log=True)
# check the shards
shards = utils.run_vtctl_json(['FindAllShardsInKeyspace', 'test_keyspace'])
for s in ['-80', '80-', '80-c0', 'c0-']:
self.assertIn(s, shards, 'unexpected shards: %s' % str(shards))
self.assertEqual(len(shards), 4, 'unexpected shards: %s' % str(shards))
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
auto_log=True)
utils.check_srv_keyspace(
'test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# disable shard_1_slave2, so we're sure filtered replication will go
# from shard_1_slave1
utils.run_vtctl(['ChangeSlaveType', shard_1_slave2.tablet_alias, 'spare'])
shard_1_slave2.wait_for_vttablet_state('NOT_SERVING')
# we need to create the schema, and the worker will do data copying
for keyspace_shard in ('test_keyspace/80-c0', 'test_keyspace/c0-'):
utils.run_vtctl(['CopySchemaShard', '--exclude_tables', 'unrelated',
shard_1_rdonly1.tablet_alias, keyspace_shard],
auto_log=True)
# Run vtworker as daemon for the following SplitClone commands.
worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
['--cell', 'test_nj', '--command_display_interval', '10ms',
'--use_v3_resharding_mode=false'],
auto_log=True)
# Copy the data from the source to the destination shards.
# --max_tps is only specified to enable the throttler and ensure that the
# code is executed. But the intent here is not to throttle the test, hence
# the rate limit is set very high.
#
# Initial clone (online).
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--offline=false',
'--exclude_tables', 'unrelated',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'--max_tps', '9999',
'test_keyspace/80-'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
2, 0, 0, 0)
# Reset vtworker such that we can run the next command.
workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Test the correct handling of keyspace_id changes which happen after
# the first clone.
# Let row 2 go to shard 3 instead of shard 2.
shard_1_master.mquery('vt_test_keyspace',
'update resharding1 set'
' custom_ksid_col=0xD000000000000000 WHERE id=2',
write=True)
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--offline=false',
'--exclude_tables', 'unrelated',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'--max_tps', '9999',
'test_keyspace/80-'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Row 2 will be deleted from shard 2 and inserted to shard 3.
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
1, 0, 1, 1)
self._check_value(shard_2_master, 'resharding1', 2, 'msg2',
0xD000000000000000, should_be_here=False)
self._check_value(shard_3_master, 'resharding1', 2, 'msg2',
0xD000000000000000)
# Reset vtworker such that we can run the next command.
workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Move row 2 back to shard 2 from shard 3 by changing the keyspace_id again.
shard_1_master.mquery('vt_test_keyspace',
'update resharding1 set'
' custom_ksid_col=0x9000000000000000 WHERE id=2',
write=True)
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--offline=false',
'--exclude_tables', 'unrelated',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'--max_tps', '9999',
'test_keyspace/80-'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Row 2 will be deleted from shard 3 and inserted to shard 2.
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
1, 0, 1, 1)
self._check_value(shard_2_master, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._check_value(shard_3_master, 'resharding1', 2, 'msg2',
0x9000000000000000, should_be_here=False)
# Reset vtworker such that we can run the next command.
workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Modify the destination shard. SplitClone will revert the changes.
# Delete row 2 (provokes an insert).
shard_2_master.mquery('vt_test_keyspace',
'delete from resharding1 where id=2', write=True)
# Update row 3 (provokes an update).
shard_3_master.mquery('vt_test_keyspace',
"update resharding1 set msg='msg-not-3' where id=3",
write=True)
# Insert row 4 and 5 (provokes a delete).
self._insert_value(shard_3_master, 'resharding1', 4, 'msg4',
0xD000000000000000)
self._insert_value(shard_3_master, 'resharding1', 5, 'msg5',
0xD000000000000000)
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--exclude_tables', 'unrelated',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'--max_tps', '9999',
'test_keyspace/80-'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Change tablet, which was taken offline, back to rdonly.
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias,
'rdonly'], auto_log=True)
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
1, 1, 2, 0)
self.verify_reconciliation_counters(worker_port, 'Offline', 'resharding1',
0, 0, 0, 2)
# Terminate worker daemon because it is no longer needed.
utils.kill_sub_process(worker_proc, soft=True)
# check the startup values are in the right place
self._check_startup_values()
# check the schema too
utils.run_vtctl(['ValidateSchemaKeyspace', '--exclude_tables=unrelated',
'test_keyspace'], auto_log=True)
# Verify vreplication table entries
result = shard_2_master.mquery('_vt', 'select * from vreplication')
self.assertEqual(len(result), 1)
self.assertEqual(result[0][1], 'SplitClone')
self.assertEqual(result[0][2],
'keyspace:"test_keyspace" shard:"80-" '
'key_range:<start:"\\200" end:"\\300" > ')
result = shard_3_master.mquery('_vt', 'select * from vreplication')
self.assertEqual(len(result), 1)
self.assertEqual(result[0][1], 'SplitClone')
self.assertEqual(result[0][2],
'keyspace:"test_keyspace" shard:"80-" key_range:<start:"\\300" > ')
# check the binlog players are running and exporting vars
self.check_destination_master(shard_2_master, ['test_keyspace/80-'])
self.check_destination_master(shard_3_master, ['test_keyspace/80-'])
# When the binlog players/filtered replication is turned on, the query
# service must be turned off on the destination masters.
# The tested behavior is a safeguard to prevent that somebody can
# accidentally modify data on the destination masters while they are not
# migrated yet and the source shards are still the source of truth.
shard_2_master.wait_for_vttablet_state('NOT_SERVING')
shard_3_master.wait_for_vttablet_state('NOT_SERVING')
# check that binlog server exported the stats vars
self.check_binlog_server_vars(shard_1_slave1, horizontal=True)
# Check that the throttler was enabled.
# The stream id is hard-coded as 1, which is the first id generated
# through auto-inc.
self.check_throttler_service(shard_2_master.rpc_endpoint(),
['BinlogPlayer/1'], 9999)
self.check_throttler_service(shard_3_master.rpc_endpoint(),
['BinlogPlayer/1'], 9999)
# testing filtered replication: insert a bunch of data on shard 1,
# check we get most of it after a few seconds, wait for binlog server
# timeout, check we get all of it.
logging.debug('Inserting lots of data on source shard')
self._insert_lots(1000)
logging.debug('Executing MultiValue Insert Queries')
self._exec_multi_shard_dmls()
logging.debug('Checking 80 percent of data is sent quickly')
v = self._check_lots_timeout(1000, 80, 5)
if v != 100:
# small optimization: only do this check if we don't have all the data
# already anyway.
logging.debug('Checking all data goes through eventually')
self._check_lots_timeout(1000, 100, 20)
logging.debug('Checking no data was sent the wrong way')
self._check_lots_not_present(1000)
logging.debug('Checking MultiValue Insert Queries')
self._check_multi_shard_values()
self.check_binlog_player_vars(shard_2_master, ['test_keyspace/80-'],
seconds_behind_master_max=30)
self.check_binlog_player_vars(shard_3_master, ['test_keyspace/80-'],
seconds_behind_master_max=30)
self.check_binlog_server_vars(shard_1_slave1, horizontal=True,
min_statements=1000, min_transactions=1000)
# use vtworker to compare the data (after health-checking the destination
# rdonly tablets so discovery works)
utils.run_vtctl(['RunHealthCheck', shard_3_rdonly1.tablet_alias])
if base_sharding.use_multi_split_diff:
logging.debug('Running vtworker MultiSplitDiff')
utils.run_vtworker(['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'MultiSplitDiff',
'--exclude_tables', 'unrelated',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/80-'],
auto_log=True)
else:
logging.debug('Running vtworker SplitDiff')
utils.run_vtworker(['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'SplitDiff',
'--exclude_tables', 'unrelated',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/c0-'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_3_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
utils.pause('Good time to test vtworker for diffs')
# get status for destination master tablets, make sure we have it all
if base_sharding.use_rbr:
# We submitted non-annotated DMLs, that are properly routed
# with RBR, but not with SBR. So the first shard counts
# are smaller. In the second shard, we submitted statements
# that affect more than one keyspace id. These will result
# in two queries with RBR. So the count there is higher.
self.check_running_binlog_player(shard_2_master, 4036, 2016)
self.check_running_binlog_player(shard_3_master, 4056, 2016)
else:
self.check_running_binlog_player(shard_2_master, 4044, 2016)
self.check_running_binlog_player(shard_3_master, 4048, 2016)
# start a thread to insert data into shard_1 in the background
# with current time, and monitor the delay
insert_thread_1 = InsertThread(shard_1_master, 'insert_low', 1, 10000,
0x9000000000000000)
insert_thread_2 = InsertThread(shard_1_master, 'insert_high', 2, 10001,
0xD000000000000000)
monitor_thread_1 = MonitorLagThread(shard_2_replica2, 'insert_low', 1)
monitor_thread_2 = MonitorLagThread(shard_3_replica, 'insert_high', 2)
# tests a failover switching serving to a different replica
utils.run_vtctl(['ChangeSlaveType', shard_1_slave2.tablet_alias, 'replica'])
utils.run_vtctl(['ChangeSlaveType', shard_1_slave1.tablet_alias, 'spare'])
shard_1_slave2.wait_for_vttablet_state('SERVING')
shard_1_slave1.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['RunHealthCheck', shard_1_slave2.tablet_alias])
# test data goes through again
logging.debug('Inserting lots of data on source shard')
self._insert_lots(1000, base=1000)
logging.debug('Checking 80 percent of data was sent quickly')
self._check_lots_timeout(1000, 80, 5, base=1000)
self.check_binlog_server_vars(shard_1_slave2, horizontal=True,
min_statements=800, min_transactions=800)
# check we can't migrate the master just yet
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'master'],
expect_fail=True)
# check query service is off on master 2 and master 3, as filtered
# replication is enabled. Even health check that is enabled on
# master 3 should not interfere (we run it to be sure).
utils.run_vtctl(['RunHealthCheck', shard_3_master.tablet_alias],
auto_log=True)
for master in [shard_2_master, shard_3_master]:
utils.check_tablet_query_service(self, master, False, False)
stream_health = utils.run_vtctl_json(['VtTabletStreamHealth',
'-count', '1',
master.tablet_alias])
logging.debug('Got health: %s', str(stream_health))
self.assertIn('realtime_stats', stream_health)
self.assertNotIn('serving', stream_health)
# check the destination master 3 is healthy, even though its query
# service is not running (if not healthy this would exception out)
shard_3_master.get_healthz()
# now serve rdonly from the split shards, in test_nj only
utils.run_vtctl(['MigrateServedTypes', '--cells=test_nj',
'test_keyspace/80-', 'rdonly'], auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_srv_keyspace('test_ny', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_tablet_query_service(self, shard_0_ny_rdonly, True, False)
utils.check_tablet_query_service(self, shard_1_ny_rdonly, True, False)
utils.check_tablet_query_service(self, shard_1_rdonly1, False, True)
# rerun migrate to ensure it doesn't fail
# skip refresh to make it go faster
utils.run_vtctl(['MigrateServedTypes', '--cells=test_nj',
'-skip-refresh-state=true',
'test_keyspace/80-', 'rdonly'], auto_log=True)
# now serve rdonly from the split shards, everywhere
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'rdonly'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_srv_keyspace('test_ny', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_tablet_query_service(self, shard_0_ny_rdonly, True, False)
utils.check_tablet_query_service(self, shard_1_ny_rdonly, False, True)
utils.check_tablet_query_service(self, shard_1_rdonly1, False, True)
# rerun migrate to ensure it doesn't fail
# skip refresh to make it go faster
utils.run_vtctl(['MigrateServedTypes', '-skip-refresh-state=true',
'test_keyspace/80-', 'rdonly'], auto_log=True)
# then serve replica from the split shards
destination_shards = ['test_keyspace/80-c0', 'test_keyspace/c0-']
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'replica'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-c0 c0-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_tablet_query_service(self, shard_1_slave2, False, True)
# move replica back and forth
utils.run_vtctl(
['MigrateServedTypes', '-reverse', 'test_keyspace/80-', 'replica'],
auto_log=True)
# After a backwards migration, queryservice should be enabled on
# source and disabled on destinations
utils.check_tablet_query_service(self, shard_1_slave2, True, False)
# Destination tablets would have query service disabled for other
# reasons than the migration, so check the shard record instead of
# the tablets directly.
utils.check_shard_query_services(self, destination_shards,
topodata_pb2.REPLICA, False)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'replica'],
auto_log=True)
# After a forwards migration, queryservice should be disabled on
# source and enabled on destinations
utils.check_tablet_query_service(self, shard_1_slave2, False, True)
# Destination tablets would have query service disabled for other
# reasons than the migration, so check the shard record instead of
# the tablets directly
utils.check_shard_query_services(self, destination_shards,
topodata_pb2.REPLICA, True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-c0 c0-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# reparent shard_2 to shard_2_replica1, then insert more data and
# see it flow through still
utils.run_vtctl(['PlannedReparentShard',
'-keyspace_shard', 'test_keyspace/80-c0',
'-new_master', shard_2_replica1.tablet_alias])
# update our test variables to point at the new master
shard_2_master, shard_2_replica1 = shard_2_replica1, shard_2_master
logging.debug('Inserting lots of data on source shard after reparenting')
self._insert_lots(3000, base=2000)
logging.debug('Checking 80 percent of data was sent fairly quickly')
self._check_lots_timeout(3000, 80, 10, base=2000)
# use vtworker to compare the data again
if base_sharding.use_multi_split_diff:
logging.debug('Running vtworker MultiSplitDiff')
utils.run_vtworker(['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'MultiSplitDiff',
'--exclude_tables', 'unrelated',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/80-'],
auto_log=True)
else:
logging.debug('Running vtworker SplitDiff')
utils.run_vtworker(['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'SplitDiff',
'--exclude_tables', 'unrelated',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/c0-'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_3_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
# going to migrate the master now, check the delays
monitor_thread_1.done = True
monitor_thread_2.done = True
insert_thread_1.done = True
insert_thread_2.done = True
logging.debug('DELAY 1: %s max_lag=%d ms avg_lag=%d ms',
monitor_thread_1.thread_name,
monitor_thread_1.max_lag_ms,
monitor_thread_1.lag_sum_ms / monitor_thread_1.sample_count)
logging.debug('DELAY 2: %s max_lag=%d ms avg_lag=%d ms',
monitor_thread_2.thread_name,
monitor_thread_2.max_lag_ms,
monitor_thread_2.lag_sum_ms / monitor_thread_2.sample_count)
# mock with the SourceShard records to test 'vtctl SourceShardDelete'
# and 'vtctl SourceShardAdd'
utils.run_vtctl(['SourceShardDelete', 'test_keyspace/c0-', '1'],
auto_log=True)
utils.run_vtctl(['SourceShardAdd', '--key_range=80-',
'test_keyspace/c0-', '1', 'test_keyspace/80-'],
auto_log=True)
# CancelResharding should fail because migration has started.
utils.run_vtctl(['CancelResharding', 'test_keyspace/80-'],
auto_log=True, expect_fail=True)
# do a Migrate that will fail waiting for replication
# which should cause the Migrate to be canceled and the source
# master to be serving again.
utils.run_vtctl(['MigrateServedTypes',
'-filtered_replication_wait_time', '0s',
'test_keyspace/80-', 'master'],
auto_log=True, expect_fail=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-c0 c0-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_tablet_query_service(self, shard_1_master, True, False)
# sabotage master migration and make it fail in an unfinished state
utils.run_vtctl(['SetShardTabletControl', '-blacklisted_tables=t',
'test_keyspace/c0-', 'master'], auto_log=True)
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'master'],
auto_log=True, expect_fail=True)
# remove sabotage, but make it fail early. This should not result
# in the source master serving, because this failure is past the
# point of no return.
utils.run_vtctl(['SetShardTabletControl', '-blacklisted_tables=t',
'-remove', 'test_keyspace/c0-', 'master'], auto_log=True)
utils.run_vtctl(['MigrateServedTypes',
'-filtered_replication_wait_time', '0s',
'test_keyspace/80-', 'master'],
auto_log=True, expect_fail=True)
utils.check_tablet_query_service(self, shard_1_master, False, True)
# do the migration that's expected to succeed
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'master'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-c0 c0-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-c0 c0-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_tablet_query_service(self, shard_1_master, False, True)
# check the binlog players are gone now
self.check_no_binlog_player(shard_2_master)
self.check_no_binlog_player(shard_3_master)
# test reverse_replication
# start with inserting a row in each destination shard
self._insert_value(shard_2_master, 'resharding2', 2, 'msg2',
0x9000000000000000)
self._insert_value(shard_3_master, 'resharding2', 3, 'msg3',
0xD000000000000000)
# ensure the rows are not present yet
self._check_value(shard_1_master, 'resharding2', 2, 'msg2',
0x9000000000000000, should_be_here=False)
self._check_value(shard_1_master, 'resharding2', 3, 'msg3',
0xD000000000000000, should_be_here=False)
# repeat the migration with reverse_replication
utils.run_vtctl(['MigrateServedTypes', '-reverse_replication=true',
'test_keyspace/80-', 'master'], auto_log=True)
# look for the rows in the original master after a short wait
time.sleep(1.0)
self._check_value(shard_1_master, 'resharding2', 2, 'msg2',
0x9000000000000000)
self._check_value(shard_1_master, 'resharding2', 3, 'msg3',
0xD000000000000000)
# retry the migration to ensure it now fails
utils.run_vtctl(['MigrateServedTypes', '-reverse_replication=true',
'test_keyspace/80-', 'master'],
auto_log=True, expect_fail=True)
# CancelResharding should now succeed
utils.run_vtctl(['CancelResharding', 'test_keyspace/80-'], auto_log=True)
self.check_no_binlog_player(shard_1_master)
# delete the original tablets in the original shard
tablet.kill_tablets([shard_1_master, shard_1_slave1, shard_1_slave2,
shard_1_ny_rdonly, shard_1_rdonly1])
for t in [shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
shard_1_rdonly1]:
utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True)
utils.run_vtctl(['DeleteTablet', '-allow_master',
shard_1_master.tablet_alias], auto_log=True)
# rebuild the serving graph, all mentions of the old shards shoud be gone
utils.run_vtctl(
['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
# test RemoveShardCell
utils.run_vtctl(
['RemoveShardCell', 'test_keyspace/-80', 'test_nj'], auto_log=True,
expect_fail=True)
utils.run_vtctl(
['RemoveShardCell', 'test_keyspace/80-', 'test_nj'], auto_log=True)
utils.run_vtctl(
['RemoveShardCell', 'test_keyspace/80-', 'test_ny'], auto_log=True)
shard = utils.run_vtctl_json(['GetShard', 'test_keyspace/80-'])
self.assertTrue('cells' not in shard or not shard['cells'])
# delete the original shard
utils.run_vtctl(['DeleteShard', 'test_keyspace/80-'], auto_log=True)
# make sure we can't delete the destination shard now that it's serving
_, stderr = utils.run_vtctl(['DeleteShard', 'test_keyspace/80-c0'],
expect_fail=True)
self.assertIn('is still serving, cannot delete it', stderr)
# kill everything
tablet.kill_tablets([shard_0_master, shard_0_replica, shard_0_ny_rdonly,
shard_2_master, shard_2_replica1, shard_2_replica2,
shard_2_rdonly1,
shard_3_master, shard_3_replica, shard_3_rdonly1])
# Entry point: delegate to the shared test harness, which parses CLI flags,
# sets up/tears down the test environment, and runs the unittest suite.
if __name__ == '__main__':
  utils.main()
|
#!/usr/bin/python3
"""Demonstrate importing selected names from the ``statistics`` module.

Importing only the functions actually needed keeps the module namespace
clean, unlike ``from statistics import *``.
"""
from statistics import mean, median, stdev

example_list = [3, 4, 5, 6, 7, 8, 9, 10]

# Descriptive statistics of the sample.
sample_mean = mean(example_list)
sample_median = median(example_list)
sample_stdev = stdev(example_list)  # sample (n-1) standard deviation

print(sample_mean)
print(sample_median)
print(sample_stdev)
|
from django.urls import path
from .views import Authorize, gen_auth_code
# URL routes for this app: the index path serves the Authorize class-based
# view, and 'auth_code/' serves the gen_auth_code function view.
# NOTE(review): the index route is named 'blog' despite serving Authorize —
# presumably legacy naming; verify against reverse()/{% url %} callers.
urlpatterns = [
    path('', Authorize.as_view(), name='blog'),
    path('auth_code/', gen_auth_code, name='auth_code'),
]
|
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
from typing import Optional
def n_things(count: int, plural_noun: str, singular_noun: Optional[str] = None):
    """Format *count* with an appropriately pluralized noun.

    For any count other than exactly 1 the plural form is used. For a count
    of 1, ``singular_noun`` is used when given; otherwise the singular is
    derived naively by dropping the last character of ``plural_noun``.
    """
    if count != 1:
        return f"{count} {plural_noun}"
    singular = plural_noun[:-1] if singular_noun is None else singular_noun
    return f"1 {singular}"
|
from flask_restplus import Namespace, fields
|
from sklearn.metrics import confusion_matrix, mean_squared_error
import numpy as np
def calc_pdf_hist(x, x_bins):
    """Estimate the PDF of *x* as a density-normalized histogram over *x_bins*."""
    densities, _edges = np.histogram(x, x_bins, density=True)
    return densities
def hellinger(x, pdf_p, pdf_q):
    """Squared Hellinger distance between two PDFs sampled at points *x*.

    Integrates (sqrt(p) - sqrt(q))**2 over *x* with the trapezoidal rule and
    halves the result, per the Hellinger distance definition.
    """
    pdf_distances = (np.sqrt(pdf_p) - np.sqrt(pdf_q)) ** 2
    # np.trapz was renamed to np.trapezoid and removed from the main
    # namespace in NumPy 2.0; look up whichever name this NumPy provides.
    trapezoid = getattr(np, 'trapezoid', None) or np.trapz
    return trapezoid(pdf_distances, x) / 2
def root_mean_squared_error(y_true, y_pred):
    """Root-mean-squared error: the square root of sklearn's MSE."""
    squared_error = mean_squared_error(y_true, y_pred)
    return np.sqrt(squared_error)
def hellinger_distance(y_true, y_pred, bins=50):
    """Hellinger distance between the empirical PDFs of *y_true* and *y_pred*.

    Both arrays are binned over their combined value range, each histogram is
    density-normalized, and the distance is integrated over the bin centers.
    """
    lower = np.minimum(y_true.min(), y_pred.min())
    upper = np.maximum(y_true.max(), y_pred.max())
    bin_points = np.linspace(lower, upper, bins)
    bin_centers = (bin_points[:-1] + bin_points[1:]) / 2
    pdf_true = calc_pdf_hist(y_true, bin_points)
    pdf_pred = calc_pdf_hist(y_pred, bin_points)
    return hellinger(bin_centers, pdf_true, pdf_pred)
def peirce_skill_score(y_true, y_pred):
    """Peirce (Hanssen-Kuipers) skill score from a multi-class confusion matrix."""
    table = confusion_matrix(y_true, y_pred)
    total = float(table.sum())
    forecast_totals = table.sum(axis=0)
    observed_totals = table.sum(axis=1)
    hits = float(table.trace())
    accuracy = hits / total
    chance_agreement = (forecast_totals * observed_totals).sum() / total ** 2
    observed_term = (observed_totals * observed_totals).sum() / total ** 2
    return (accuracy - chance_agreement) / (1 - observed_term)
def heidke_skill_score(y_true, y_pred):
    """Heidke skill score: accuracy improvement over chance agreement."""
    table = confusion_matrix(y_true, y_pred)
    total = float(table.sum())
    forecast_totals = table.sum(axis=0)
    observed_totals = table.sum(axis=1)
    hits = float(table.trace())
    # Expected fraction correct by chance; appears in numerator and denominator.
    chance_agreement = (forecast_totals * observed_totals).sum() / total ** 2
    return (hits / total - chance_agreement) / (1 - chance_agreement)
def r2_corr(y_true, y_pred):
    """Squared Pearson correlation between predictions and observations."""
    correlation = np.corrcoef(y_true, y_pred)[0, 1]
    return correlation * correlation
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.