text stringlengths 8 6.05M |
|---|
def only_positive_numbers(numbers):
    """Return a new list containing only the values of *numbers* that are > 0.

    (The original named its parameter ``list``, shadowing the builtin.)
    """
    return [value for value in numbers if value > 0]


positive_only = only_positive_numbers([1, -1, 2, -2])
print(positive_only)
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.shortcuts import render, redirect, render_to_response
# Create your views here.
from contest import settings
from first.forms import SignupForm, CourseForm
from first.models import Course
def homepage(request):
    """Render the static landing page."""
    return render(request, 'Homepage.html')
def signup(request):
    """Handle account registration.

    GET: render an empty SignupForm.
    POST: validate and persist the new user, then re-render the page with
    the bound form.
    """
    if request.method == 'POST':
        form = SignupForm(request.POST)
        if form.is_valid():
            # form.save() already persists the user; the original called
            # user.save() a second time redundantly.  (Removed the dead
            # error1/error2 flags and the commented-out manual checks —
            # SignupForm's own validation covers them.)
            form.save()
            # NOTE(review): no redirect after a successful signup — the page
            # simply re-renders with the bound form; confirm this is intended.
    else:
        form = SignupForm()
    return render(request, 'signup.html', {'form': form})
def contact(request):
    """Contact page: e-mail the site owner when the message length is valid."""
    if request.method != "POST":
        return render(request, 'contact.html')
    title = request.POST['title']
    email = request.POST['email']
    text = request.POST['text']
    # Only forward messages between 10 and 250 characters.
    if 10 <= len(text) <= 250:
        # NOTE(review): EMAIL_HOST as the from-address looks suspicious
        # (EMAIL_HOST_USER?) — confirm against settings.
        send_mail(
            title,
            text + email,
            settings.EMAIL_HOST,
            ['webe19lopers@gmail.com'],
            fail_silently=False,
        )
        return redirect('done')
    return redirect('contact')
def Login(request):
    """Authenticate posted credentials and start a session on success."""
    if request.method != "POST":
        return render(request, "login.html", {"error": False})
    user = authenticate(request,
                        username=request.POST['username'],
                        password=request.POST['password'])
    if user is None:
        # Bad credentials: show the form again with the error flag set.
        return render(request, 'login.html', {"error": True})
    login(request, user)
    return redirect('home')
def Logout(request):
    """End the current session and return to the landing page."""
    logout(request)
    return redirect('home')
def profile(request):
    """Show the logged-in user's basic details."""
    user = request.user
    context = {
        'first_name': user.first_name,
        'last_name': user.last_name,
        'username': user.username,
    }
    return render(request, 'profile.html', context)
def setting(request):
    """Settings page: update the user's first/last name when provided."""
    if request.method != "POST":
        return render(request, 'settings.html')
    first_name = request.POST['first_name']
    last_name = request.POST['last_name']
    if first_name or last_name:
        user = request.user
        print(user.first_name)
        # Only overwrite the fields that were actually submitted non-empty.
        if first_name:
            user.first_name = first_name
        if last_name:
            user.last_name = last_name
        user.save()
    return redirect('profile')
def new_course(request):
    """Create a Course from the posted form; re-prompt on invalid input."""
    if request.method == "POST":
        form = CourseForm(request.POST)
        if form.is_valid():
            # form.save() already persists the course; the original saved the
            # instance a second time redundantly.
            # NOTE(review): the original had `course.user = request.user`
            # commented out — Course may still need its owner set.
            form.save()
            return redirect('panel')
        return redirect('new_course')
    form = CourseForm()
    return render(request, "new_course.html", {"form": form})
def Courses(request):
    """List every course (template name 'cources.html' kept as-is)."""
    return render(request, "cources.html", {"courses": Course.objects.all()})
|
import requests # 导入发送请求
from bs4 import BeautifulSoup # 用于解析数据
import mysql.connector # 用于连接数据库
import time
import random
class LianJiaSpider():
    """Crawler for sh.lianjia.com sold-home listings; rows are stored in MySQL."""
    # NOTE(review): the connection and cursor are created at class-definition
    # (import) time and shared by all instances — confirm this is intended.
    mydb = mysql.connector.connect(host='localhost',user='root',passwd='root',database='lianjia',auth_plugin='mysql_native_password')
    # Shared cursor used for the batch inserts.
    mycursor = mydb.cursor()

    def __init__(self):
        self.url = 'https://sh.lianjia.com/chengjiao/pg{0}/'  # page-number URL template
        time.sleep(random.random() * 3)  # random delay so accesses look less bot-like
        # Browser User-Agent header so the site does not reject the requests outright.
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36'}

    def send_request(self,url):
        """GET *url*; return the response only when the status code is 200, else None."""
        resp = requests.get(url,headers = self.headers)
        if resp.status_code == 200:
            return resp

    def parse_content(self,resp):
        """Extract one page of deal records from the HTML and pass them to write_mysql."""
        list = []  # collected row tuples (NOTE(review): shadows the builtin `list`)
        html = resp.text  # raw HTML of the response
        bs = BeautifulSoup(html,'html.parser')  # parse with the stdlib html.parser backend
        # The deals live in the <ul class="listContent"> element.
        ul = bs.find('ul',class_='listContent')
        li_list = ul.find_all('li')  # one <li> per deal
        for item in li_list:
            title = item.find('div', class_='title').text  # listing title
            house_Info = item.find('div', class_='houseInfo').text  # house description
            deal_date = item.find('div', class_='dealDate').text  # date the deal closed
            total_price = item.find('div', class_='totalPrice').text  # total sale price
            position_info = item.find('div', class_='positionInfo').text  # floor/position info
            unit_price = item.find('div', class_='unitPrice').text  # price per unit area
            span = item.find('span', class_='dealCycleTxt')  # wrapper holding two <span>s
            span_list = span.find_all('span')  # [listing price, days on market]
            agent_name = item.find('a','agent_name')  # selling agent link (may be absent)
            agent_namet = agent_name.text if agent_name!=None else ''  # '' when no agent shown
            list.append((title, house_Info, deal_date, total_price, position_info, unit_price, span_list[0].text,span_list[1].text, agent_namet))
        # Page parsed — persist the batch.
        self.write_mysql(list)

    def write_mysql(self,list):
        """Bulk-insert one page of row tuples into tbl_lianjia and commit."""
        sql = 'insert into tbl_lianjia (title,house_Info,deal_date,total_price,position_info,unit_price,listing_price,date,agent_name) values (%s,%s,%s,%s,%s,%s,%s,%s,%s)'
        # executemany performs the batch insert; values are driver-escaped.
        self.mycursor.executemany(sql,list)
        self.mydb.commit()

    def start(self):
        """Crawl result pages 1..100, parsing each successful response."""
        for i in range(1, 101):
            full_url = self.url.format(i)
            resp = self.send_request(full_url)  # None unless HTTP 200
            if resp:
                self.parse_content(resp)
if __name__ == '__main__':
    # Instantiate the spider and kick off the full crawl.
    spider = LianJiaSpider()
    spider.start()
|
#%%
from sklearn import linear_model #다중회귀분석
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn import datasets
# data = {'x1':[13, 18, 17, 20, 22, 21],
# 'x2':[9, 7, 17, 11, 8, 10],
# 'y':[20, 22, 30, 27, 35, 32]}
# data = pd.DataFrame(data)
# X = data[['x1','x2']]
# y = data['y']
# # print(data)
# linear_regression = linear_model.LinearRegression()
# linear_regression.fit(X=pd.DataFrame(X), y=y)
# prediction = linear_regression.predict(X=pd.DataFrame(X))
# print('a value =', linear_regression.intercept_)
# print('b value =', linear_regression.coef_)
####적합도 검증
# residuals = y - prediction
# SSE = (residuals**2).sum() #잔차 제곱의 합
# SST = ((y-y.mean())**2).sum()
# R_squared = 1-(SSE/SST)
# print('R squared =', R_squared) #Price 가 예측에 영향을 주는 정도 확인
####성능평가
# print('score =', linear_regression.score(X = pd.DataFrame(X), y=y))
# print('mean squared error =', mean_squared_error(prediction, y))
# print('rmse =', mean_squared_error(prediction, y)**0.5) #오차가 작은것 확인가능
#next#
# boston_house_prices = datasets.load_boston()
# X = pd.DataFrame(boston_house_prices.data)
# y = pd.DataFrame(boston_house_prices.target)
# # print(X.tail())
# linear_regression = linear_model.LinearRegression()
# linear_regression.fit(X=pd.DataFrame(X), y=y)
# prediction = linear_regression.predict(X=pd.DataFrame(X))
# ####적합도 검증
# residuals = y - prediction
# # print(residuals.describe())
# SSE = (residuals**2).sum() #잔차 제곱의 합
# SST = ((y-y.mean())**2).sum()
# R_squared = 1-(SSE/SST)
# # print('R squared =', R_squared)
# ####성능평가
# print('score =', linear_regression.score(X = pd.DataFrame(X), y=y))
# print('mean squared error =', mean_squared_error(prediction, y))
# print('rmse =', mean_squared_error(prediction, y)**0.5) #오차가 작은것 확인가능
#next#
#%% |
#-*- coding:utf8 -*-
# Copyright (c) 2020 barriery
# Python release: 3.7.0
# Create time: 2020-03-14
import json
import config
from scheduletool.buaacps import database, schedule
if __name__ == '__main__':
    # Read the demand description from the local .hrm (JSON) file.
    with open('./demand.hrm') as f:
        demand_spec = json.load(f)
    demands = schedule.receive(demand_spec)
    # Connect to the remote database described in the project config.
    database_manager = database.DatabaseManager(
        config.DATABASE['remote_ip'], config.DATABASE['remote_usr'],
        config.DATABASE['remote_pwd'], config.DATABASE['database_usr'],
        config.DATABASE['database_pwd'], config.DATABASE['database_name'])
    resources = schedule.queryResources(database_manager)
    # Persist and display both sides of the matching problem.
    schedule.save_proto(demands, 'demands.prototxt')
    schedule.save_proto(resources, 'resources.prototxt')
    schedule.print_proto(demands)
    schedule.print_proto(resources)
    # Run the scheduler and print the JSON result.
    result = schedule.schedule(demands, resources, rtype='json')
    # schedule.writeToTable(database_manager, 'matchtable', result)
    print(result)
|
"""
Purpose: Create a program that removes all vowels in a sentence.
Author: Rafael Broseghini
Date: 04/22/2016
Filename: VowelRemoval.py
"""
def removeVowels(sentence):
    """Return *sentence* with every ASCII vowel (both cases) removed.

    The original built a list via a comprehension used purely for its
    ``append`` side effect; a generator fed to ``str.join`` is the idiom.
    """
    vowels = "aeiouAEIOU"
    return "".join(ch for ch in sentence if ch not in vowels)
def main():
    """Prompt for a sentence and echo it with all vowels removed."""
    sentence = input("Enter a sentence: ")
    stripped = removeVowels(sentence)
    print("'{}' with no vowels is: '{}'".format(sentence, stripped))


main()
|
"""
File: part1b.py
Created by Andrew Ingson (aings1@umbc.edu)
Date: 4/26/2020
CMSC 441 (Design and Analysis of Algorithms)
"""
# RSA-style modulus; size is 312 bits (supposedly).
n = 6207034496804283879630919311406969504330524655944955079581115322595987746105035112739268374117
print(f"Modulus is {n}")
|
from operator import itemgetter
import sys
# Hadoop-streaming reducer: aggregates mapper output "date<TAB>word<TAB>count"
# into per-word monthly counts.
current_word = None    # word whose counts are currently being accumulated
current_date = None    # two-character month prefix of the last date seen
current_count = 0      # running total for current_word (accumulated but never printed)
# Twelve per-month buckets: index 0 = month '01' ... index 11 = month '12'
# (any unrecognized prefix also falls into the final bucket via the `else`).
current_count_date = [0,0,0,0,0,0,0,0,0,0,0,0]
word = None

# input comes from STDIN
for line in sys.stdin:
    # remove leading and trailing whitespace
    line = line.strip()
    # parse the input we got from mapper.py
    try:
        date, other = line.split("\t",1)
        word, count = other.split("\t",1)
    except ValueError:
        # malformed line — skip it
        continue
    # convert count (currently a string) to int
    try:
        count = int(count)
    except ValueError:
        # count was not a number, so silently
        # ignore/discard this line
        continue
    # this IF-switch only works because Hadoop sorts map output
    # by key (here: word) before it is passed to the reducer
    # NOTE(review): two likely bugs to confirm against the mapper — (1) the
    # FIRST line of each word is handled in the `else` branch below and its
    # count is never added to current_count_date; (2) current_count_date is
    # never reset when the word changes, so month counts bleed across words.
    if current_word == word:
        current_count += count
        current_date = date[:2]
        if current_date == '01':
            current_count_date[0] +=count
        elif current_date =='02':
            current_count_date[1] += count
        elif current_date =='03':
            current_count_date[2] += count
        elif current_date =='04':
            current_count_date[3] += count
        elif current_date =='05':
            current_count_date[4] += count
        elif current_date =='06':
            current_count_date[5] += count
        elif current_date =='07':
            current_count_date[6] += count
        elif current_date =='08':
            current_count_date[7] += count
        elif current_date =='09':
            current_count_date[8] += count
        elif current_date =='10':
            current_count_date[9] += count
        elif current_date =='11':
            current_count_date[10] += count
        else:
            current_count_date[11] += count
    else:
        if current_word:
            # write result to STDOUT
            print("%s\t%s" % (current_word, current_count_date))
        current_count = count
        current_word = word
        current_date = date[:2]

# do not forget to output the last word if needed!
if current_word == word:
    print("%s\t%s " % (current_word, current_count_date))
|
class Bayer():
    """Class for generating a Bayer matrix used as threshold map.

    Attributes
    ----------
    map : list[list[float]]
        The Bayer matrix with values normalized to the [0, 1] range.
    """

    def __init__(self, depth):
        # A depth-d matrix is 2**(d+1) elements on a side.
        self.width = 2 ** (depth + 1)
        self.height = self.width
        self.type = 'ordered'
        self.depth = depth
        self.map = self.genBayerMatrix(self.depth)
        self.normalizeBayerMatrix()

    def genBayerMatrix(self, depth):
        """Recursively build the un-normalized Bayer matrix of the given depth.

        Each recursion quadruples the previous matrix into quadrants
        4v (top-left), 4v+2 (top-right), 4v+3 (bottom-left), 4v+1 (bottom-right).
        """
        if depth == 0:
            # Base un-normalized 2x2 Bayer matrix.
            return [[0, 2], [3, 1]]
        prev = self.genBayerMatrix(depth - 1)
        matrix = []
        # Top half: each previous value v contributes 4v (left) and 4v+2 (right).
        for line in prev:
            left, right = [], []
            for num in line:
                scaled = num * 4
                left.append(scaled)
                right.append(scaled + 2)
            matrix.append(left + right)
        # Bottom half mirrors the top: +3 on the left quadrant, -1 on the right.
        top_rows = len(matrix)
        half = len(matrix[0]) // 2
        for r in range(top_rows):
            bottom = [num + 3 for num in matrix[r][:half]]
            bottom += [num - 1 for num in matrix[r][half:]]
            matrix.append(bottom)
        # (Removed the original's stray `linePos += 1`: reassigning the `for`
        # loop variable had no effect.)
        return matrix

    def normalizeBayerMatrix(self):
        """Normalize the values to the [0,1] to match the image brightness range."""
        coeff = 4 ** (self.depth + 1)
        for i in range(self.height):
            for j in range(self.width):
                self.map[i][j] /= coeff
from sklearn.neural_network import MLPClassifier
from sklearn.utils import shuffle
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
# Train an MLP classifier on final11.csv (last column = label) and report
# test/train accuracy plus one sample prediction.
dataset = pd.read_csv("final11.csv")  # NOTE(review): expects final11.csv in the CWD
dataset=dataset.values  # DataFrame -> plain numpy array
np.random.shuffle(dataset)  # in-place row shuffle
x=dataset[:,:-1]  # features: every column but the last
y=dataset[:,-1]   # label: the last column
# NOTE(review): train_test_split shuffles again (random_state=42), so the
# manual shuffle above is redundant but harmless.
xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.33, random_state=42)
model=MLPClassifier(activation='relu', alpha=0.001,epsilon=1e-08,hidden_layer_sizes=(200,),learning_rate_init=0.001, max_iter=500000,solver='sgd', tol=0.0001, validation_fraction=0.1)
model.fit(xtrain,ytrain)
#MLPClassifier(activation='relu', alpha=0.0001, batch_size='auto', beta_1=0.9,beta_2=0.999, early_stopping=False, epsilon=1e-08,hidden_layer_sizes=(15,), learning_rate='constant',learning_rate_init=0.001, max_iter=2000, momentum=0.9,nesterovs_momentum=True, power_t=0.5, random_state=None,shuffle=True, solver='adam', tol=0.0001, validation_fraction=0.1,verbose=False, warm_start=False)
test_accuracy = model.score(xtest,ytest)
print(test_accuracy)
train_accuracy = model.score(xtrain,ytrain)
print(train_accuracy)
#print(model.predict([[4.333333,1.039,3.333333,2.888888,2.533333,0.5873,3.1111,4.416666]]))
#print(model.predict([[8.66666,1.4411,10.0,8.0,5.6,0.66666,5.5,5.3333]])) #
# NOTE(review): assumes the feature matrix has exactly 8 columns — confirm.
print(model.predict([[5.5,1.9354,3.0,7.0,2.6,0.3571,3.0,3.25]]))
|
import json
# Scan data.txt for airport records delimited by "name," ... ",1,description"
# ... ",1,,,," markers and dump them as JSON.
data = open('data.txt','r')  # NOTE(review): handle is never closed — consider `with`
text = data.read()
beg=0  # scan cursor into `text`
total_len = len(text)
data_obj = {"airports":[]}  # output structure
while beg<total_len:
    beg_name = text.find('name,',beg,total_len)
    if beg_name == -1:
        break  # no more records
    else:
        beg = beg_name + 5  # skip past the "name," marker
    end_name = text.find(',1,description',beg_name,total_len)
    if end_name == -1:
        break
    else:
        name = text[beg:end_name]
    end_description = text.find(',1,,,,',end_name,total_len)
    # NOTE(review): ',1,description' is 14 characters, so +15 skips one extra
    # character after the marker — confirm against the data format.
    description = text[end_name+15:end_description]
    data_obj["airports"].append({
        "name":name,
        "description":description,
        "city":"",      # not present in the source data
        "country":""    # not present in the source data
    })
with open('data.json', 'w') as outfile:
    json.dump(data_obj, outfile, indent=4, sort_keys=True, separators=(',', ':'))
|
from django.db import models
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.models import TimeStampedModel
from nba_py.constants import CURRENT_SEASON
from seasons.models import PlayerSeason
class Player(TimeStampedModel, models.Model):
    """An NBA player: local profile fields plus identifiers from the NBA API."""
    first_name = models.CharField(
        verbose_name=_("first name"),
        max_length=64,
    )
    last_name = models.CharField(
        verbose_name=_("last name"),
        max_length=64,
    )
    # Optional headshot; uploads are stored under MEDIA_ROOT/players_photos.
    photo = models.ImageField(
        verbose_name=_("photo"),
        upload_to='players_photos',
        null=True,
    )
    # NBA API identifiers — field names intentionally mirror the API's keys.
    PERSON_ID = models.PositiveIntegerField(
        verbose_name="PERSON_ID",
        unique=True,
    )
    PLAYERCODE = models.CharField(
        verbose_name="PLAYERCODE",
        max_length=128,
        unique=True,
    )

    class Meta:
        verbose_name = _("player")
        verbose_name_plural = _("players")
        ordering = ['last_name', 'first_name']

    @cached_property
    def get_full_name(self):
        """Return player name."""
        return ' '.join([self.first_name, self.last_name])

    def get_current_season(self):
        """Return player's current season.

        Raises PlayerSeason.DoesNotExist when no row matches CURRENT_SEASON.
        """
        return PlayerSeason.objects.get(player=self, season=CURRENT_SEASON)

    def __str__(self):
        return '{0.get_full_name} ({0.PERSON_ID})'.format(self)
|
import re
class Validation:
    """Console-input helpers that loop until the value passes its validators.

    Fixes over the original: string comparisons used identity tests
    (``is ''`` / ``is ' '``), which are fragile and raise SyntaxWarning on
    modern CPython; bare ``except:`` clauses were narrowed.
    """

    def __init__(self):
        pass

    def input_str_for_create(self, data):
        """Prompt with *data* until a non-empty alphabetic string is entered."""
        while True:
            value = input(data)
            if self.validate_str(value) and self.validate_empty_string(value):
                return value

    def validate_empty_string(self, input_str):
        """Return False for the empty string, True otherwise."""
        return input_str != ''

    def validate_str(self, input_str):
        """Return True when every character is alphabetic or a space."""
        for ch in input_str:
            if not (ch.isalpha() or ch == ' '):
                return False
        return True

    def input_int_for_create(self, data):
        """Prompt with *data* until a non-empty digits-only string is entered."""
        while True:
            value = input(data)
            if self.validate_int(value) and self.validate_empty_string(value):
                return value

    def validate_int(self, input_str):
        """Return True when every character is a decimal digit.

        Vacuously True for '' — callers pair this with validate_empty_string.
        """
        try:
            for ch in input_str:
                int(ch)  # raises ValueError on any non-digit character
            return True
        except ValueError:
            return False

    def validate_email(self, input_str):
        """Prompt (with *input_str* as the prompt text) until a valid email is entered."""
        while True:
            value = input(input_str)
            if self.validate_empty_string(value) and self.validate_email_req(value):
                return value

    def validate_email_req(self, input_str):
        """Return True when *input_str* matches a simple name[@]domain.tld pattern."""
        regex = r'^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$'
        return re.search(regex, input_str) is not None

    def input_not_null_pass(self, data):
        """Prompt with *data* until a non-empty password passing validate_pass is entered."""
        while True:
            value = input(data)
            if self.validate_empty_string(value) and self.validate_pass(value):
                return value

    def validate_pass(self, data):
        """Return True for a 6-20 char password containing a lowercase letter,
        an uppercase letter, a digit and one of @$!%*#?&."""
        reg = r"^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?=.*[@$!%*#?&])[A-Za-z\d@$!#%*?&]{6,20}$"
        return re.search(reg, data) is not None
|
#!/usr/bin/env python
import sys
def read_conllx(input):
    """Split CoNLL-X formatted lines into sentences.

    Each sentence is a list of token rows; each row is the tab-split fields
    of one non-blank line. Blank lines separate sentences.
    """
    sentences = []
    current = []
    for raw in input:
        raw = raw.strip()
        if raw:
            current.append(raw.split('\t'))
        elif current:
            # Blank line: close the sentence in progress.
            sentences.append(current)
            current = []
    # Input may end without a trailing blank line.
    if current:
        sentences.append(current)
    return sentences
if __name__ == '__main__':
    # Compare the POS column (field 4, 0-based) of two CoNLL files line by line
    # and print tagging accuracy.
    # NOTE(review): Python 2 print statements — this script will not run on Python 3.
    if len(sys.argv) < 3:
        print "Usage: %s <gold.conll> <guess.conll>" % (sys.argv[0])
        exit(1)
    gold = [line for line in open(sys.argv[1])]
    guess = [line for line in open(sys.argv[2])]
    # Files must be line-aligned for the pairwise comparison below.
    assert len(gold) == len(guess)
    correct = 0
    total = 0
    for t1, t2 in zip(gold, guess):
        if t1.strip():
            # Field index 4 holds the POS tag in CoNLL-X.
            if t1.split('\t')[4] == t2.split('\t')[4]:
                correct += 1
            total += 1
    print "accuracy: %s" % (float(correct) / total)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from mpl_toolkits.mplot3d import Axes3D
# Cluster the oil dataset with DBSCAN and plot three of the five standardized
# feature axes in 3D.
dirty = pd.read_csv(r"C:\Users\chias\source\repos\FFM-MA\oil.csv")
#data = dirty.loc[:, ['growth', 'inflation', 'return', 'type']]
# Standardize features first: DBSCAN's eps is scale-sensitive.
x = StandardScaler().fit_transform(dirty)
dbscan = DBSCAN(eps=0.3, min_samples=2)
model = dbscan.fit(x)
labels = model.labels_
print(labels)
# Boolean mask marking core samples.
core_samples = np.zeros_like(labels, dtype=bool)
core_samples[model.core_sample_indices_] = True
# Label -1 means noise, so exclude it from the cluster count.
num_clusters = len(set(labels)) - (1 if -1 in labels else 0)
print(num_clusters)
# NOTE(review): assumes the CSV has exactly five columns — confirm.
xx, yy, zz, aa, bb = zip(*x)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xx, yy, zz, c=bb, cmap=plt.hot())
plt.show()
# Removed the stray bare `show()` call that followed plt.show() — the name
# `show` was never defined and raised NameError.
#noNoise = list(labels).count(-1)
#print('Est No. of Clusters: %d'% noClusters)
#print('Est No. of Noise: %d'% noNoise)
#print('Homogeneity: %0.3f'% metrics.homogenity_score(labels_)
|
import threading
import time
import uuid
import hdbfs
class Cacheable:
    """Base class for cache entries tracked by last-access time."""

    def __init__(self, item_id):
        self.item_id = item_id
        self.access_time = time.time()

    def get_id(self):
        return self.item_id

    def touch(self):
        """Refresh the last-access timestamp."""
        self.access_time = time.time()

    def flush(self):
        """Hook for subclasses; base entries have nothing to persist."""
        pass

    def is_expired(self):
        # Entries expire one hour after their last access.
        return time.time() - self.access_time > 3600
class CacheSet:
    """Dict-backed collection of Cacheable-like items keyed by their id."""

    def __init__(self):
        self.items = {}

    def register(self, item):
        self.items[item.get_id()] = item

    def get(self, item_id):
        """Return the item and mark it recently used; raises KeyError if absent."""
        item = self.items[item_id]
        item.touch()
        return item

    def remove(self, item_id):
        del self.items[item_id]

    def flush(self):
        """Flush every item, then evict the ones that have expired."""
        for item in self.items.values():
            item.flush()
        expired = [item for item in self.items.values() if item.is_expired()]
        for item in expired:
            del self.items[item.get_id()]
class Selection(Cacheable):
    """A cached, randomly-addressable snapshot of a query's result ids."""

    def __init__(self, results):
        # Each selection gets a fresh opaque id.
        Cacheable.__init__(self, uuid.uuid4().hex)
        if isinstance(results, list):
            self.loaded = results
        else:
            self.loaded = []
            self.preload(results.__iter__())

    def preload(self, results):
        """Materialize up to 10000 result ids from the iterator *results*.

        NOTE: the original called ``results.next()``, which is the Python-2-only
        iterator protocol and raises AttributeError on Python 3; the builtin
        ``next()`` works on both.
        """
        i = 0
        try:
            self.loaded = [0] * 10000
            for i in range(10000):
                self.loaded[i] = next(results).get_id()
        except StopIteration:
            # Iterator exhausted at position i — trim the unused slots.
            self.loaded = self.loaded[:i]

    def __len__(self):
        return len(self.loaded)

    def __getitem__(self, idx):
        assert isinstance(idx, int)
        # No negative indexing; out-of-range access raises like a sequence.
        if idx < 0 or idx >= len(self.loaded):
            raise IndexError
        return self.loaded[idx]
class Session(Cacheable):
    """Cache entry holding the per-session set of Selections."""

    def __init__(self, session_id):
        Cacheable.__init__(self, session_id)
        self.selections = CacheSet()

    def flush(self):
        self.selections.flush()

    def register_selection(self, selection):
        """Wrap *selection* in a Selection, cache it, and return the wrapper."""
        wrapper = Selection(selection)
        self.selections.register(wrapper)
        return wrapper

    def fetch_selection(self, sel_id):
        return self.selections.get(sel_id)

    def close_selection(self, sel_id):
        self.selections.remove(sel_id)
class SessionCache:
    """Thread-safe registry of Session objects keyed by session id."""
    def __init__( self ):
        self.lock = threading.Lock()  # guards every access to self.sessions
        self.sessions = CacheSet()
    def flush( self ):
        """Flush all sessions and evict expired ones."""
        with self.lock:
            self.sessions.flush()
    def drop( self, session_id ):
        """Remove one session; unknown ids are silently ignored."""
        with self.lock:
            try:
                self.sessions.remove( session_id )
            except KeyError:
                pass
    def register_selection( self, session_id, selection ):
        """Cache *selection* under the session, creating the Session on demand.

        Returns the Selection wrapper produced by the session.
        """
        with self.lock:
            try:
                session = self.sessions.get( session_id )
            except KeyError:
                # First selection for this session — create and register it.
                session = Session( session_id )
                self.sessions.register( session )
            return session.register_selection( selection )
    def fetch_selection( self, session_id, selection_id ):
        """Return the cached Selection, or None when either id is unknown."""
        with self.lock:
            try:
                session = self.sessions.get( session_id )
            except KeyError:
                return None
            try:
                return session.fetch_selection( selection_id )
            except KeyError:
                return None
    def close_selection( self, session_id, selection_id ):
        """Drop one selection; silently ignore unknown session/selection ids."""
        with self.lock:
            try:
                session = self.sessions.get( session_id )
            except KeyError:
                return
            try:
                session.close_selection( selection_id )
            except KeyError:
                return
# Lazily-created process-wide cache instance.
default_cache = None


def get_default_cache():
    """Return the shared SessionCache singleton, creating it on first use."""
    global default_cache
    if default_cache is None:
        default_cache = SessionCache()
    return default_cache
|
"""Utility functions for translation of Go packages into Github repositories."""
import re
from requests_html import HTMLSession
GITHUB_REPO_RE = re.compile(r"github.com/(?P<user>[a-zA-Z0-9][ A-Za-z0-9_-]*)"
r"/(?P<repo>[a-zA-Z0-9][ A-Za-z0-9_-]*)")
from typing import Union
assert Union
def get_repo_from_random_urn(urn):
    # type: (str) -> Union[str, None]
    """
    Translate Go package urn into Github repo.

    In case the repository is not directly from GitHub, it is possible that there is a way to
    translate the urn into Github repository using some magic from "go get". So just try it.
    Returns 'user/repo' on success, None otherwise.
    """
    session = HTMLSession()
    # `?go-get=1` asks a vanity-import server for its <meta name="go-import"> mapping.
    resp = session.get('https://' + urn + '?go-get=1')
    # Check that the request was successful
    if resp.status_code == 200:
        # Find all 'meta' elements, in case there is not any, it will just return empty list
        meta_elements = resp.html.find('meta')

        def _is_go_import_element(e):
            # Elements without a 'name' attribute raise KeyError — treat as no match.
            try:
                return e.attrs['name'] == 'go-import'
            except KeyError:
                return False

        # Filter the list, if it was empty from the beginning, no harm done
        go_import_elements = list(filter(_is_go_import_element, meta_elements))
        if len(go_import_elements) > 0:
            # Finally search for github repository
            try:
                m = GITHUB_REPO_RE.search(go_import_elements[0].attrs['content'])
                if m is not None:
                    return '{}/{}'.format(m.group('user'), m.group('repo'))
            except KeyError:
                pass
    return None
def translate(pkg):
    # type: (str) -> Union[str, None]
    """
    Take whatever `go get` takes and return string with organization/user and repository name.

    It returns None for anything that is not available on Github.
    """
    if pkg == "":
        return None
    # Direct github.com/user/repo form?
    direct = GITHUB_REPO_RE.match(pkg)
    if direct is not None:
        return '{}/{}'.format(direct.group('user'), direct.group('repo'))
    # Otherwise try the vanity-import resolver; it already returns None on failure.
    return get_repo_from_random_urn(pkg)
|
import pymysql
from Django_shop.settings import *
from django.http import HttpResponse
class SalesDetail:
    """One sales receipt: header totals plus its purchased line items.

    Both queries key off self.serialnum, which the caller must set first.
    """

    def __init__(self):
        self.serialnum = ""     # receipt serial number (set by the caller)
        self.totalnum = 0       # total number of items on the receipt
        self.totalmoney = 0.0   # total amount of the receipt
        self.username = ""      # buyer's login name
        self.buytime = ""       # purchase timestamp
        self.detail_list = []   # line-item rows fetched from SalesListDetail

    def get_serial_info(self):
        """Load the header row (totals, user, time) for self.serialnum.

        Fills the instance attributes on success; on a database error returns
        an HttpResponse describing it (mirroring the original contract).
        """
        # NOTE(review): positional connect() arguments were removed in
        # pymysql >= 1.0 — confirm the pinned version or use keyword args.
        mysql_db = pymysql.connect(DB_HOST, DB_USER, DB_PASSWORD, DB_NAME)
        cursor = mysql_db.cursor()
        # Parameterized query: the original interpolated self.serialnum into
        # the SQL string with %, which is SQL-injection prone.
        sql = ("Select SerialNumber, TotalNumber,TotalPrice,UserName,BuyTime "
               "from SalesList As T1 Left Outer Join Login As T2 on T1.LoginId = T2.LoginId "
               "where SerialNumber = %s")
        try:
            cursor.execute(sql, (self.serialnum,))
            serial_info = cursor.fetchone()
            # Unpack the single header row into the instance fields.
            self.totalnum = serial_info[1]
            self.totalmoney = serial_info[2]
            self.username = serial_info[3]
            self.buytime = serial_info[4]
        except Exception as e:
            return HttpResponse("读取数据库数据出现异常,具体原因:" + str(e))
        finally:
            mysql_db.close()

    def get_detail_info(self):
        """Load every line-item row for self.serialnum into self.detail_list."""
        mysql_db = pymysql.connect(DB_HOST, DB_USER, DB_PASSWORD, DB_NAME)
        cursor = mysql_db.cursor()
        # Parameterized for the same injection-safety reason as above.
        sql = ("Select SerialNumber,ProductId,ProductName,Unit,UnitPrice,Number,Money "
               "from SalesListDetail where SerialNumber= %s")
        try:
            cursor.execute(sql, (self.serialnum,))
            self.detail_list = cursor.fetchall()  # tuple of row tuples
        except Exception as e:
            return HttpResponse("读取数据库数据出现异常,具体原因:" + str(e))
        finally:
            mysql_db.close()
|
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from django import forms
from .models import Question, Answer
class AskForm(forms.Form):
    """Form for posting a new Question.

    NOTE(review): relies on ``self._user`` being assigned externally (by the
    view) before clean()/save() run — confirm against the callers.
    """
    title = forms.CharField(max_length=255)
    text = forms.CharField(widget=forms.Textarea)

    def __init__(self, *args, **kwargs):
        super(AskForm, self).__init__(*args, **kwargs)

    def clean(self):
        """Reject unknown users and empty title/text."""
        try:
            User.objects.get(username=self._user)
        except User.DoesNotExist:
            raise forms.ValidationError(u'Access is limited!')
        self.cleaned_data = super(AskForm, self).clean()
        title = self.cleaned_data['title']
        text = self.cleaned_data['text']
        if not title or not text:
            raise forms.ValidationError(u'Validation Error!')
        return self.cleaned_data

    def save(self):
        """Create and return the Question; the author comes from self._user."""
        self.cleaned_data['author'] = self._user
        question = Question(**self.cleaned_data)
        question.save()
        return question
class AnswerForm(forms.Form):
    """Form for posting an Answer to an existing Question.

    NOTE(review): relies on ``self._user`` being assigned externally (by the
    view) before clean()/save() run — confirm against the callers.
    """
    text = forms.CharField(widget=forms.Textarea)
    question = forms.IntegerField()  # pk of the target Question

    def __init__(self, *args, **kwargs):
        super(AnswerForm, self).__init__(*args, **kwargs)

    def clean(self):
        """Reject spam text and unknown users."""
        text = self.cleaned_data['text']
        # Naive spam filter: any occurrence of the word 'spam' fails validation.
        if 'spam' in text:
            raise forms.ValidationError(u'Validation Error!')
        try:
            User.objects.get(username=self._user)
        except User.DoesNotExist:
            raise forms.ValidationError(u'Access is limited!')
        return self.cleaned_data

    def save(self):
        """Create and return the Answer; resolves the question pk to an instance."""
        self.cleaned_data['author'] = self._user
        self.cleaned_data['question'] = get_object_or_404(
            Question,
            pk=self.cleaned_data['question']
        )
        answer = Answer(**self.cleaned_data)
        answer.save()
        return answer
class SignupForm(forms.Form):
    """Registration form: validates the credentials and creates the account."""
    username = forms.CharField(max_length=255)
    email = forms.EmailField()
    password = forms.CharField(max_length=255)

    def __init__(self, *args, **kwargs):
        super(SignupForm, self).__init__(*args, **kwargs)

    def clean(self):
        """Require all three fields to be present and non-empty.

        Uses .get() so a field that failed Django's own validation (and is
        therefore missing from cleaned_data) raises a ValidationError instead
        of a KeyError.
        """
        username = self.cleaned_data.get('username')
        email = self.cleaned_data.get('email')
        password = self.cleaned_data.get('password')
        if not username:
            raise forms.ValidationError(u'Please enter a username')
        if not email:
            raise forms.ValidationError(u'Please enter a email')
        if not password:
            raise forms.ValidationError(u'Please enter a password')
        return self.cleaned_data

    def save(self):
        """Create the account and return cleaned_data with the new user attached."""
        username = self.cleaned_data['username']
        email = self.cleaned_data['email']
        password = self.cleaned_data['password']
        try:
            # create_user hashes the password; the original used
            # User.objects.create, which stored the raw password in the
            # password column — a security bug.
            user = User.objects.create_user(username=username,
                                            email=email,
                                            password=password)
        except User.DoesNotExist:
            # NOTE(review): create_user never raises DoesNotExist — a duplicate
            # username raises IntegrityError, which propagates uncaught here;
            # confirm the intended failure mode.
            raise forms.ValidationError(u'Ooops...')
        self.cleaned_data['user'] = user
        return self.cleaned_data
class LoginForm(forms.Form):
    """Username/password form whose save() resolves the matching User."""
    username = forms.CharField(max_length=255)
    password = forms.CharField(max_length=255)

    def __init__(self, *args, **kwargs):
        super(LoginForm, self).__init__(*args, **kwargs)

    def clean(self):
        """Require both fields to be non-empty."""
        username = self.cleaned_data['username']
        password = self.cleaned_data['password']
        if not username:
            raise forms.ValidationError(u'Please enter a username')
        if not password:
            raise forms.ValidationError(u'Please enter a password')
        return self.cleaned_data

    def save(self):
        """Attach the matching User to cleaned_data and return it."""
        username = self.cleaned_data['username']
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            raise forms.ValidationError(u'User does not in database')
        self.cleaned_data['user'] = user
        return self.cleaned_data
|
import torch
# Demo: build all (x, y) coordinate pairs from two 1-D tensors.
a = torch.tensor([1, 2, 3, 4])
b = torch.tensor([4, 5, 6])
# Cartesian grids of the two axes ('ij' indexing).
grid_x, grid_y = torch.meshgrid(a, b)
print(grid_x)
print(grid_y)
# Flatten both coordinate grids to 1-D.
x = grid_x.reshape(-1)
y = grid_y.reshape(-1)
print(x)
print(y)
# Pair the coordinates column-wise: a (12, 4) tensor of (x, y, x, y) rows.
z = torch.stack((x, y, x, y), dim=1)
print(z)
print(z.view(-1, 1, 4))
import threading
from config import t_api, w_api
import schedule
import time
import models
def looper():
    """Background scheduler loop: send the daily notes at 06:00; runs forever."""
    db = models.DataBase('mydb.db')
    schedule.every().day.at('06:00').do(models.BotHandler(t_api, db).send_notes, )  # production schedule
    #schedule.every(5).seconds.do(models.BotHandler(t_api, db).send_notes, )  # testing schedule
    while True:
        schedule.run_pending()
        time.sleep(1)  # poll the scheduler once a second
def main():
    """Long-polling bot loop: dispatch commands/modes, else treat text as a query."""
    # Daily-notes scheduler runs in a background daemon thread.
    x = threading.Thread(target=looper, daemon=True, )
    x.start()
    commands = ['/help', '/add', '/change', '/remove', '/start', ]
    modes = ['/weather', '/translate']
    new_offset = None  # Telegram update offset (acknowledges processed updates)
    db = models.DataBase('mydb.db')
    weather_bot = models.BotHandler(t_api, db)
    while True:
        messages = weather_bot.get_updates(new_offset)
        if len(messages) > 0:
            for message in messages:
                message_id = message['message']['chat']['id']  # chat to reply to
                new_offset = message['update_id'] + 1          # ack this update
                message_text = message['message']['text']
                # First token is the command; the remainder (if any) its argument.
                words_in_message = message_text.split(' ', 1)
                command = words_in_message[0]
                com_attr = None
                if len(words_in_message)>1:
                    com_attr = words_in_message[1]
                if command in commands:
                    weather_bot.commands(message_id, command, com_attr)
                elif command in modes and com_attr is None:
                    # Bare mode command switches the chat's active mode.
                    weather_bot.change_mode(message_id, command)
                else:
                    # Anything else is treated as input for the active mode.
                    weather_bot.send_info(message_id, message_text.capitalize())
        else:
            continue


if __name__ == '__main__':
    main()
|
# Demo of basic list operations: append, length, indexing, slicing, remove.
nums = [3, 5, 7, 9, 10.5]
print(nums)

nums.append('Python')
print(nums)
print(len(nums))

print(" * * *")

# Indexing and slicing.
print(nums[0])
print(nums[-1])
print(nums[2:5])

# Remove the appended string again.
nums.remove('Python')
print(nums)
import pickle
import argparse
def main(args):
    """Build a frequency-filtered Vocabulary from a '#'-delimited caption file
    and pickle it to args.vocab_path."""
    with open(args.text_path, 'r') as f:
        w = f.readlines()
    # Group caption strings by the key before '#'.
    w_dict = {}
    for l in w:
        if l.split('#')[0] in list(w_dict.keys()):
            # NOTE(review): the [3:-4] slice presumably strips fixed-width
            # markup around the caption — confirm against the file format.
            w_dict[l.split('#')[0]].append(l.split('#')[1][3:-4])
        else:
            w_dict[l.split('#')[0]] = [l.split('#')[1][3:-4]]
    # Count word occurrences across all captions (whitespace tokenization).
    vocab = {}
    for li in w_dict.values():
        for sen in li:
            for word in sen.split(' '):
                if word not in vocab:
                    vocab[word] = 1
                else:
                    vocab[word] += 1
    # Keep only words that occur at least args.word_freq times.
    words = [word for word, count in vocab.items() if count >= args.word_freq]

    class Vocabulary(object):
        """Bidirectional word<->index mapping; unknown words map to '<unk>'."""
        def __init__(self):
            self.word2idx = {}
            self.id2word = {}
            self.idx = 0
        def add_word(self, word):
            if word not in self.word2idx:
                self.word2idx[word] = self.idx
                self.id2word[self.idx] = word
                self.idx += 1
        def __call__(self, word):
            if not word in self.word2idx:
                return self.word2idx['<unk>']
            return self.word2idx[word]
        def __len__(self):
            return len(self.word2idx)

    # Special tokens first so they get stable low indices.
    vocab = Vocabulary()
    vocab.add_word('<pad>')
    vocab.add_word('<start>')
    vocab.add_word('<end>')
    vocab.add_word('<unk>')
    for word in words:
        vocab.add_word(word)

    vocab_path = args.vocab_path  # path to store the vocabulary file
    # dump the vocabulary object into a pickle file
    with open(vocab_path, 'wb') as f:
        pickle.dump(vocab, f)
    print("Total vocabulary size: {}".format(len(vocab)))
    print("Saved the vocabulary wrapper to '{}'".format(vocab_path))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--text_path', type=str,
                        default='data/annotations/captions_train2014.json',
                        help='path for train annotation file')
    parser.add_argument('--vocab_path', type=str, default='./data/vocab.pkl',
                        help='path for saving vocabulary wrapper')
    parser.add_argument('--word_freq', type=int, default=4,
                        help='minimum word count threshold')
    args = parser.parse_args()
    main(args)
from elk.models.host_pool import HostPool
from elk.utils import logger
logging = logger(__name__)
class Database(object):
    """Read-side helpers over the HostPool model."""

    @classmethod
    def all_list(cls):
        """Dump every host-pool row as a plain dict."""
        rows = HostPool.get_all_list()
        return [row.dump() for row in rows]

    @classmethod
    def format_data(cls, data_dict):
        """Normalize one raw row dict.

        Splits the pipe-delimited 'mount_info' (commas become spaces) and
        'ip_info' fields; everything else is copied through unchanged.
        """
        formatted = dict()
        for key, value in data_dict.items():
            if key == "mount_info":
                formatted['mount_info'] = [part.replace(",", " ")
                                           for part in value.split("|")]
            elif key == "ip_info":
                formatted['ip_info'] = value.split("|")
            else:
                formatted[key] = value
        return formatted

    @classmethod
    def detail_data(cls, uid):
        """Return the formatted first row for *uid*, or None when uid is falsy."""
        if not uid:
            return
        rows = HostPool.get_all_by_uid(uid)
        return cls.format_data([row.dump() for row in rows][0])
|
# Filter the positives out of a fixed sample list.
numbers = [1, -5, -7, 3, 9, -4]
positives = [value for value in numbers if value > 0]
print(positives)

# Square of n numbers (read interactively).
n = int(input("enter the limit"))
entered = [int(input("enter the numbers")) for _ in range(n)]
squares = [value ** 2 for value in entered]
print(squares)

# List of vowels selected from selected word.
word = "happy new year"
vowels = "aeiou"
found_vowels = [ch for ch in word if ch in vowels]
print(found_vowels)

# Code point of every character in the word.
code_points = [ord(ch) for ch in word]
print(code_points)
|
import urllib3
import json
import threading
import time
import argparse
import logging
import sys
import random
import requests
from urllib3 import HTTPConnectionPool
# ---- Command line ------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--requests", help="the number of times search request of cluster (default 1)", default=1, required=False)
parser.add_argument("--input_file", help="input file name", required=False, type=str)
parser.add_argument("--output_file", help="output file name (default STDOUT)", required=False, type=str)
args = parser.parse_args()

# Renamed from `requests`: the old name shadowed the imported `requests`
# library.
num_requests = int(args.requests)
output_file = args.output_file
input_file_nm = args.input_file
input_file = '/app/psuer/performance_test/' + input_file_nm
print(input_file)

date_list = []
time_list = []
trcno_list = []

# Log to a file when requested, otherwise to stdout.
logger = logging.getLogger("elk")
logger.setLevel(logging.DEBUG)
if output_file is not None:
    logger.addHandler(logging.FileHandler(output_file))
else:
    logger.addHandler(logging.StreamHandler(sys.stdout))

# Each input line: "<date> <time> <trcno>".  `with` guarantees the file
# is closed (the original left it open on any exception).
with open(input_file, 'r') as input_f:
    for line in input_f:
        result = line.split()
        if not result:
            continue  # skip blank lines instead of crashing
        date_list.append(result[0])
        time_list.append(result[1])
        trcno_list.append(result[2])

# Query template; the three match_phrase values are filled per request.
query = {
    "query": {
        "bool": {
            "must": [
                {
                    "match_phrase": {
                        "send_dy": "*"
                    }
                },
                {
                    "match_phrase": {
                        "send_tm": "*"
                    }
                },
                {
                    "match_phrase": {
                        "trc_no": "*"
                    }
                }
            ]
        }
    }
}

es_connection_pool = HTTPConnectionPool("172.22.235.69", port=9200, maxsize=100)
headers = urllib3.make_headers(basic_auth='elastic:Xjaqmffj12#')
headers['Content-Type'] = 'application/json'

took_data = []
total_took = 0
for i in range(num_requests):
    # Pick a random record from the input file and query for it.
    tmp = random.randint(0, len(trcno_list) - 1)
    query["query"]["bool"]["must"][0]["match_phrase"]["send_dy"] = date_list[tmp]
    query["query"]["bool"]["must"][1]["match_phrase"]["send_tm"] = time_list[tmp]
    query["query"]["bool"]["must"][2]["match_phrase"]["trc_no"] = trcno_list[tmp]
    # BUG FIX: serialise *after* filling in the per-request values.  The
    # original encoded the template once before the loop, so every
    # request sent the unmodified "*" query.
    encoded_data = json.dumps(query).encode('utf8')
    response = es_connection_pool.request(
        'GET',
        '/_search',
        body=encoded_data,
        headers=headers
    )
    search_response_data = json.loads(response.data)
    print(search_response_data)
    took_data.append(search_response_data['took'])
    total_took += search_response_data['took']
    time.sleep(1)

logger.info("== RESULT ==")
logger.info("total took time : %d ms\n" % (total_took))
logger.info("average took time : %d ms\n" % (total_took / len(took_data)))
logger.info("max took time : %d ms\n" % (max(took_data)))
logger.info("min took time : %d ms\n\n" % (min(took_data)))
#!/usr/bin/python2.7
#-*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import math
# Global plot styling: LaTeX text rendering with a Times serif font.
plt.rcParams["legend.fontsize"] = 30
plt.rcParams["font.size"] = 27
plt.figure(figsize=(15, 12), dpi=200)
rc('font', **{'family': 'serif', 'serif': ['Times']})
rc('text', usetex=True)
# Non-dimensionalisation: time scaled by sqrt(g/H) with H = 0.4, pressure
# divided by 9.81*1000*0.8 (presumably rho*g*H -- TODO confirm which H).
data5 = np.loadtxt('numdata1800.dat')
x5 = data5[:, 0] * math.sqrt(9.81 / 0.4)
y5 = data5[:, 1] / (9.81 * 1000 * 0.8)
plot5 = plt.plot(x5, y5, '-.b', linewidth=2, label=r'$\Delta l=1.33\times 10^{-2}m$')
# data4 = np.loadtxt('numdata3200.dat')
# x4 = data4[:,0] * math.sqrt(9.81/0.4)
# y4 = data4[:,1] / (9.81*1000*0.8)
# plot4=plt.plot(x4,y4,'--g',label='Case2')
# data3 = np.loadtxt('numdata4050.dat')
# x3 = data3[:,0] * math.sqrt(9.81/0.4)
# y3 = data3[:,1] / (9.81*1000*0.8)
# plot3=plt.plot(x3,y3,'--r',linewidth=2,label=r'$\Delta l=8.89\times 10^{-3}$')
# Finer-resolution runs are subsampled ([::15], [::30]) to keep the
# curves readable.
data1 = np.loadtxt('numdata7200.dat')
x1 = data1[::15, 0] * math.sqrt(9.81 / 0.4)
y1 = data1[::15, 1] / (9.81 * 1000 * 0.8)
plot1 = plt.plot(x1, y1, '--r', linewidth=2, label=r'$\Delta l=6.67\times 10^{-3}m$')
data1 = np.loadtxt('numdata28800.dat')
x1 = data1[::30, 0] * math.sqrt(9.81 / 0.4)
y1 = data1[::30, 1] / (9.81 * 1000 * 0.8)
plot1 = plt.plot(x1, y1, '-k', linewidth=2, label=r'$\Delta l=3.33\times 10^{-3}m$')
# data2 = np.loadtxt('dambreak-PNU-MPS.txt')
# x2 = data2[:,0] * math.sqrt(9.81/0.4)
# y2 = data2[:,1] / (9.81*1000*0.8)
# plot2=plt.plot(x2,y2,'^r',markersize=8,label="Lee et al.[14]")
plt.xlim(0, 15)
#plt.xlim(5.5,7.3)
plt.ylim(-0.1, 3.6)
#plt.ylim(0.2,1.8)
# BUG FIX: raw string -- 't$\sqrt{g/H}$' contained the invalid escape
# sequence '\s' (DeprecationWarning today, SyntaxError in the future).
plt.xlabel(r't$\sqrt{g/H}$', fontsize=30)
#plt.xlabel ('t/s',fontsize=15)
plt.ylabel(r'P/($\rho gH)$', fontsize=30)
#plt.legend([plot1,plot2],'best', numpoints=1)
l = plt.legend(numpoints=1, loc=1)
l.get_frame().set_edgecolor('k')
#plt.show()
#plt.savefig("dambreak-pressure-result.tif")
#plt.savefig("dambreak-pressure-convergence.tif")
#plt.savefig("pressure-db2.eps")
plt.savefig("p-convergence-spacing.eps")
|
'''
Clone a database experiment and its corresponding files.
Filter the results during the process:
- the original db might be "raw"/"unfiltered",
but later processing (tracking) is much more stable/
efficient/accurate with preprocessed data.
'''
import config
import numpy as np
import os
from os.path import isdir, isfile, join
import traceback
from uuid import uuid4, UUID
from lib.Database import Database
async def main(args):
    """CLI entry point: clone experiment args[0] with processing method args[1]."""
    experiment_uuid, method = args[0], args[1]
    await clone_experiment(experiment_uuid, method)
def particleFilter(p):
    """Return True when particle *p* should be discarded.

    Particles whose "area" lies in [100, 2000] are kept.  The original
    returned True/None; an explicit bool is clearer and remains
    truthiness-compatible for existing callers.
    """
    return not (100 <= p["area"] <= 2000)
async def clone_experiment(experiment_uuid, method, testing=False):
    """Clone an experiment row plus its frames, segments, particles and tracks.

    Particles rejected by particleFilter (and any tracks / crop files that
    reference them) are dropped during the copy, producing a filtered clone.
    File-system artefacts are hard-linked / copied into a fresh experiment
    directory unless *testing* is true.  Commits on success; on any
    exception, logs and rolls back the transaction.
    """
    try:
        db = Database()
        tx1, transaction = await db.transaction()
        # NOTE(review): if Database()/db.transaction() itself raises,
        # `transaction` and `new_experiment_uuid` are unbound and the
        # except-block below raises NameError -- confirm this path.
        txMessage = "Transaction 1"
        dbFrames = []
        osFrames = []
        frame_uuid_map = {}
        dbTracks = []
        osTracks = []
        trackInserts = []
        track_uuid_map = {}
        segment_uuid_map = {}
        segment_insert = []
        frame_segment_map = {}
        particleInserts = []
        particle_uuid_map = {}
        new_experiment_uuid = uuid4()
        experiment_path = join(config.experiment_dir, str(experiment_uuid))
        # Top-level files of the old experiment directory (hard-linked later).
        base_files = [file for file in os.listdir(experiment_path) if isfile(join(experiment_path, file))]
        s = """
            SELECT frame
            FROM frame
            WHERE experiment = '{experiment}'
            """
        q = s.format(experiment=experiment_uuid)
        async for row in tx1.cursor(q):
            dbFrames.append(str(row['frame']))
        # Frame directories found on disk (may differ from the db rows).
        osFrames = [frame for frame in os.listdir(experiment_path) if isdir(join(experiment_path, frame))]
        frame_uuid_map = {UUID(f): uuid4() for f in dbFrames}
        s = """
            SELECT t.frame as frame, t.particle as particle, track
            FROM track t, frame f
            WHERE t.frame = f.frame
            AND f.experiment = '{experiment}'
            """
        q = s.format(experiment=experiment_uuid)
        dbTracks = []
        async for row in tx1.cursor(q):
            dbTracks.append(str(row['track']))
        # osTracks: (oldFrame, (oldTrack, oldTrackExtension))
        # len(track) == 40 presumably selects "<uuid>.<ext>" names -- verify.
        osTracks = [(frame, os.path.splitext(track)) for frame in osFrames for track in os.listdir(join(experiment_path, frame)) if len(track) == 40]
        track_uuid_map = {UUID(track): uuid4() for track in dbTracks}
        new_experiment_path = join(config.experiment_dir, str(new_experiment_uuid))
        if not testing:
            os.mkdir(new_experiment_path)
        # Copy the experiment row itself, swapping in the new method.
        await tx1.execute("""
            INSERT INTO Experiment (experiment, day, name, method, notes)
            SELECT $1, day, name, $3, notes FROM Experiment
            WHERE experiment = $2
            """, new_experiment_uuid, experiment_uuid, method)
        if not testing:
            # Hard links: base files are shared, not duplicated, on disk.
            for file in base_files:
                os.link(join(experiment_path, file), join(new_experiment_path, file))
            for old_frame_uuid, new_frame_uuid in frame_uuid_map.items():
                os.mkdir(join(new_experiment_path, str(new_frame_uuid)))
        # Clone db segments
        segment_uuid_map = {}
        segment_insert = []
        async for s in tx1.cursor("SELECT segment, number FROM Segment WHERE experiment = $1", experiment_uuid):
            segment_uuid = uuid4()
            segment_uuid_map[s["segment"]] = {"segment": segment_uuid, "number": s["number"]}
            segment_insert.append((segment_uuid, new_experiment_uuid, s["number"]))
        await tx1.executemany("INSERT INTO Segment (segment, experiment, number) VALUES ($1, $2, $3)", segment_insert)
        frame_segment_map = {}
        async for f in tx1.cursor("select frame, segment From Frame WHERE experiment = $1", experiment_uuid):
            frame_segment_map[f["frame"]] = segment_uuid_map[f["segment"]]["segment"]
        # Clone db frames
        await tx1.executemany("""
            INSERT INTO Frame (frame, experiment, segment, number)
            SELECT $1, $2, $3, number FROM Frame
            WHERE frame = $4
            """, [(frame_uuid_map[UUID(frame)], new_experiment_uuid, frame_segment_map[UUID(frame)], UUID(frame)) for frame in dbFrames])
        # Clone db particles
        print("Inserting particles")
        s = """
            SELECT *
            FROM Particle
            WHERE experiment = '{exp}'
            """
        q = s.format(exp=experiment_uuid)
        particles_inserted = set()
        async for p in tx1.cursor(q):
            newParticleUUID = str(uuid4())
            # Filter by particle properties
            if particleFilter(p):
                continue
            particles_inserted.add(p["particle"])
            particle_uuid_map[p["particle"]] = newParticleUUID
            particleInserts.append((newParticleUUID, new_experiment_uuid, p["area"], p["intensity"], p["perimeter"], p["radius"], p["category"], p["valid"]))
        s = """
            INSERT INTO Particle (particle, experiment, area, intensity, perimeter, radius, category, valid)
            VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
            """
        await tx1.executemany(s, particleInserts)
        # await transaction.commit()
        # Workaround until the table is changed to FK constraint deferrable
        # tx2, transaction = await db.transaction()
        txMessage = "Transaction 2"
        # Clone db tracks
        s = """
            SELECT *
            FROM Track t, Frame f
            WHERE t.frame = f.frame
            AND f.experiment = '{exp}'
            """
        q = s.format(exp=experiment_uuid)
        # tracks_inserted = set()
        old_tracks_inserted = set()
        async for t in tx1.cursor(q):
            # Check if it was filtered during particle insertion
            if t["particle"] not in particles_inserted:
                continue
            # tracks_inserted.add(track_uuid_map[t["track"]])
            old_tracks_inserted.add(str(t["track"]))
            trackInserts.append((track_uuid_map[t["track"]], frame_uuid_map[t["frame"]], particle_uuid_map[t["particle"]], t["location"], t["bbox"], t["latent"]))
        print("Inserting tracks")
        s = """
            INSERT INTO Track (track, frame, particle, location, bbox, latent)
            VALUES ($1, $2, $3, $4, $5, $6)
            """
        await tx1.executemany(s, trackInserts)
        count = 0
        if not testing:
            for track in osTracks:
                # Make sure the track wasn't filtered...
                if track[1][0] not in old_tracks_inserted:
                    continue
                count += 1
                from shutil import copyfile
                copyfile(join(experiment_path, track[0], "".join(track[1])),
                         join(new_experiment_path,
                              str(frame_uuid_map[UUID(track[0])]),
                              str(track_uuid_map[UUID(track[1][0])]) + track[1][1]))
        print("Copied "+str(count)+" crop files")
        await transaction.commit()
        print("Success! "+ str(new_experiment_uuid))
    except Exception as e:
        print(e)
        print("Error during " + txMessage)
        print("Check db for experiment "+ str(new_experiment_uuid))
        traceback.print_exc()
        await transaction.rollback()
#!/usr/bin/env python3
import sys, os
import argparse
import math
import numpy as np
import logging;
# Add a progress bar
from progress.bar import IncrementalBar
import matplotlib.pyplot as plt
from skimage.io import imread, imsave
from EvolutionaryAlgorithm import *
# Selection operators
from TournamentSelection import *
from RouletteWheelSelection import *
from RankSelection import *
from ThresholdSelection import *
# Genetic operators
from ElitismOperator import *
from BlendCrossoverOperator import *
from GaussianMutationOperator import *
from NewBloodOperator import *
from TomographyGlobalFitness import TomographyGlobalFitness
import ImageMetrics as IM;
import matplotlib
#matplotlib.use('PS')
matplotlib.use('QT5Agg')
NoneType = type(None);
# Check the command line arguments
def checkCommandLineArguments():
    """Parse CLI arguments into the module-global `args` and return them.

    Also configures file logging when --logging is given, and rejects an
    --objective that is in neither IM.MINIMISATION nor IM.MAXIMISATION.
    """
    global logging;
    global args;
    parser = argparse.ArgumentParser(description='Evolutionary reconstruction.')
    parser.add_argument('--input', help='Input image (groundtruth)', nargs=1, type=str, required=True);
    parser.add_argument('--output', help='Reconstructed image', nargs=1, type=str, required=False);
    parser.add_argument('--save_input_images', help='Where to save the input images (groundtruth with and without noise, and the sinogram)', nargs=1, type=str, required=False);
    parser.add_argument('--angles', help='Number of angles', nargs=1, type=int, required=True);
    parser.add_argument('--peak', help='Peak value for the Poisson noise', nargs=1, type=float, required=False);
    parser.add_argument('--selection', help='Selection operator (ranking, roulette, tournament or dual)', nargs=1, type=str, required=True);
    parser.add_argument('--pop_size', help='Size of the population', nargs=1, type=int, required=True);
    parser.add_argument('--number_of_emission_points', help='Number of emission points', nargs=1, type=int, required=False);
    # NOTE: default=2 here is an int, while a provided value arrives as a
    # one-element list -- callers must handle both (see the main script).
    parser.add_argument('--tournament_size', help='Number of individuals involved in the tournament', nargs=1, type=int, required=False, default=2);
    parser.add_argument('--generations', help='Number of generations', nargs=1, type=int, required=True);
    parser.add_argument('--visualisation', help='Realtime visualisation', action="store_true");
    parser.add_argument('--max_stagnation_counter', help='Max value of the stagnation counter to trigger a mitosis', nargs=1, type=int, required=True);
    parser.add_argument('--initial_lambda', help='Weight of the TV-norm regularisation at the start of the optimisation', nargs=1, type=float, required=True);
    parser.add_argument('--final_lambda', help='Weight of the TV-norm regularisation at the end of the optimisation', nargs=1, type=float, required=True);
    parser.add_argument('--initial_mutation_variance', help='Mutation variance at the start of the optimisation', nargs=1, type=float, required=True);
    parser.add_argument('--final_mutation_variance', help='Mutation variance at the end of the optimisation', nargs=1, type=float, required=True);
    parser.add_argument('--logging', help='File name of the log file', nargs=1, type=str, required=False);
    parser.add_argument('--objective', help='Objective function: Valid values are: MAE, MSE, RMSE, NRMSE_euclidean, NRMSE_mean, NRMSE_min_max, cosine_similarity, mean_relative_error, max_relative_error, SSIM, PSNR, or ZNCC', nargs=1, type=str, required=True);
    args = parser.parse_args();
    # Set the logger if needed
    if not isinstance(args.logging, NoneType):
        logging.basicConfig(filename=args.logging[0],
                            level=logging.DEBUG,
                            filemode='w',
                            format='%(asctime)s, %(name)s - %(levelname)s - %(message)s',
                            datefmt='%d-%b-%y %H:%M:%S')
        logging.debug(args)
    if not isinstance(args.objective, NoneType):
        if args.objective[0] not in IM.MINIMISATION and args.objective[0] not in IM.MAXIMISATION:
            raise ValueError('Argument --objective "%s" is not valid.' % args.objective[0])
    return args;
class MyBar(IncrementalBar):
    """Progress bar whose suffix shows live fitness statistics.

    Each property reads the most recent value appended by the global
    fitness function, so the bar reflects the current generation.
    """
    suffix = '%(index)d/%(max)d - %(percent).1f%% - %(eta)ds - Global fitness %(global_fitness)d - RMSE %(RMSE)d - TV %(TV)d - ZNCC %(zncc).1f%%'
    @property
    def global_fitness(self):
        # Latest global fitness value.
        global global_fitness_function;
        return global_fitness_function.global_fitness_set[-1]
    @property
    def RMSE(self):
        # Latest error term.
        global global_fitness_function;
        return global_fitness_function.global_error_term_set[-1]
    @property
    def TV(self):
        # Latest total-variation regularisation term.
        global global_fitness_function;
        return global_fitness_function.global_regularisation_term_set[-1]
    @property
    def zncc(self):
        # Latest zero-normalised cross-correlation, as a percentage.
        global global_fitness_function;
        return global_fitness_function.zncc_set[-1] * 100;
def linearInterpolation(start, end, i, j):
    """Linearly interpolate from *start* (at i == 0) to *end* (at i == j)."""
    remaining_fraction = (j - i) / j
    return start + (end - start) * (1 - remaining_fraction)
g_first_log = True;  # emit the CSV header on the first logStatistics call
g_log_event = "";  # label describing what triggered the current log row
g_generation = 0;  # generation counter shared with the evolutionary loop
def logStatistics(aNumberOfIndividuals):
    """Append one CSV row of image metrics to the log file, if enabled.

    Computes every metric twice: once against the reference sinogram and
    once against the reference (groundtruth) image.  No-op when --logging
    was not given.  The first call writes the CSV header.
    """
    global global_fitness_function;
    global g_first_log;
    global g_log_event;
    global g_generation;
    global optimiser;
    if not isinstance(args.logging, NoneType):
        if g_first_log:
            g_first_log = False;
            logging.info("generation,new_individual_counter,event,number_of_emission_points,MAE_sinogram,MSE_sinogram,RMSE_sinogram,NRMSE_euclidean_sinogram,NRMSE_mean_sinogram,NRMSE_min_max_sinogram,cosine_similarity_sinogram,SSIM_sinogram,PSNR_sinogram,ZNCC_sinogram,TV_sinogram,MAE_reconstruction,MSE_reconstruction,RMSE_reconstruction,NRMSE_euclidean_reconstruction,NRMSE_mean_reconstruction,NRMSE_min_max_reconstruction,cosine_similarity_reconstruction,SSIM_reconstruction,PSNR_reconstruction,ZNCC_reconstruction,TV_reconstruction");
        # Metrics on the sinogram (projection space).
        ref = global_fitness_function.projections;
        test = global_fitness_function.population_sinogram_data;
        MAE_sinogram = IM.getMAE(ref, test);
        MSE_sinogram = IM.getMSE(ref, test);
        RMSE_sinogram = IM.getRMSE(ref, test);
        NRMSE_euclidean_sinogram = IM.getNRMSE_euclidean(ref, test);
        NRMSE_mean_sinogram = IM.getNRMSE_mean(ref, test);
        NRMSE_min_max_sinogram = IM.getNRMSE_minMax(ref, test);
        cosine_similarity_sinogram = IM.getCosineSimilarity(ref, test);
        #mean_relative_error_sinogram = IM.getMeanRelativeError(ref, test);
        #max_relative_error_sinogram = IM.getMaxRelativeError(ref, test);
        SSIM_sinogram = IM.getSSIM(ref, test);
        PSNR_sinogram = IM.getPSNR(ref, test);
        ZNCC_sinogram = IM.getNCC(ref, test);
        TV_sinogram = IM.getTV(test);
        # Metrics on the reconstructed image (image space).
        ref = global_fitness_function.image;
        test = global_fitness_function.population_image_data;
        MAE_reconstruction = IM.getMAE(ref, test);
        MSE_reconstruction = IM.getMSE(ref, test);
        RMSE_reconstruction = IM.getRMSE(ref, test);
        NRMSE_euclidean_reconstruction = IM.getNRMSE_euclidean(ref, test);
        NRMSE_mean_reconstruction = IM.getNRMSE_mean(ref, test);
        NRMSE_min_max_reconstruction = IM.getNRMSE_minMax(ref, test);
        cosine_similarity_reconstruction = IM.getCosineSimilarity(ref, test);
        #mean_relative_error_reconstruction = IM.getMeanRelativeError(ref, test);
        #max_relative_error_reconstruction = IM.getMaxRelativeError(ref, test);
        SSIM_reconstruction = IM.getSSIM(ref, test);
        PSNR_reconstruction = IM.getPSNR(ref, test);
        ZNCC_reconstruction = IM.getNCC(ref, test);
        TV_reconstruction = IM.getTV(test);
        #logging.info("%i,%s,%i,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f" % (g_generation,g_log_event,MAE_sinogram,MSE_sinogram,RMSE_sinogram,NRMSE_euclidean_sinogram,NRMSE_mean_sinogram,NRMSE_min_max_sinogram,cosine_similarity_sinogram,mean_relative_error_sinogram,max_relative_error_sinogram,SSIM_sinogram,PSNR_sinogram,ZNCC_sinogram,TV_sinogram,MAE_reconstruction,MSE_reconstruction,RMSE_reconstruction,NRMSE_euclidean_reconstruction,NRMSE_mean_reconstruction,NRMSE_min_max_reconstruction,cosine_similarity_reconstruction,mean_relative_error_reconstruction,max_relative_error_reconstruction,SSIM_reconstruction,PSNR_reconstruction,ZNCC_reconstruction,TV_reconstruction));
        logging.info("%i,%i,%s,%i,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f" % (g_generation,optimiser.number_created_children,g_log_event,aNumberOfIndividuals,MAE_sinogram,MSE_sinogram,RMSE_sinogram,NRMSE_euclidean_sinogram,NRMSE_mean_sinogram,NRMSE_min_max_sinogram,cosine_similarity_sinogram,SSIM_sinogram,PSNR_sinogram,ZNCC_sinogram,TV_sinogram,MAE_reconstruction,MSE_reconstruction,RMSE_reconstruction,NRMSE_euclidean_reconstruction,NRMSE_mean_reconstruction,NRMSE_min_max_reconstruction,cosine_similarity_reconstruction,SSIM_reconstruction,PSNR_reconstruction,ZNCC_reconstruction,TV_reconstruction));
        g_log_event="";
args = None;
try:
    args = checkCommandLineArguments()
    # Create test problem
    number_of_angles = args.angles[0];
    peak_value = -1;
    if not isinstance(args.peak, NoneType):
        peak_value = args.peak[0];
    k = args.initial_lambda[0];
    global_fitness_function = TomographyGlobalFitness(args.input[0],
                                                      args.objective[0],
                                                      args.number_of_emission_points[0],
                                                      number_of_angles,
                                                      peak_value,
                                                      k);
    global_fitness_function.save_best_solution = True;
    if not isinstance(args.save_input_images, NoneType):
        global_fitness_function.saveInputImages(args.save_input_images[0]);
    # Parameters for EA
    number_of_individuals = args.pop_size[0];
    number_of_generation = args.generations[0];
    # Log messages
    if not isinstance(args.logging, NoneType):
        logging.debug("Number of angles: %i", number_of_angles)
        logging.debug("Peak value for the Poisson noise: %f", peak_value)
        logging.debug("Number of individuals: %i", number_of_individuals)
        logging.debug("Number of generations: %i", number_of_generation)
    # Create the optimiser
    optimiser = EvolutionaryAlgorithm(global_fitness_function,
                                      number_of_individuals);
    # Default tournament size
    tournament_size = 2;
    # The tournament size is always two for dual
    if args.selection[0] == "dual":
        tournament_size = 2;
    # Update the tournament size if needed
    elif not isinstance(args.tournament_size, NoneType):
        # --tournament_size defaults to a plain int but is parsed into a
        # one-element list when supplied on the command line.
        if isinstance(args.tournament_size, int):
            tournament_size = args.tournament_size;
        else:
            tournament_size = args.tournament_size[0];
    # Set the selection operator
    if args.selection[0] == "dual" or args.selection[0] == "tournament":
        optimiser.setSelectionOperator(TournamentSelection(tournament_size));
    elif args.selection[0] == "ranking":
        optimiser.setSelectionOperator(RankSelection());
    elif args.selection[0] == "roulette":
        optimiser.setSelectionOperator(RouletteWheelSelection());
    else:
        raise ValueError('Invalid selection operator "%s". Choose "threshold", "tournament" or "dual".' % (args.selection[0]))
    # Create the genetic operators
    gaussian_mutation = GaussianMutationOperator(0.3, args.initial_mutation_variance[0]);
    blend_cross_over = BlendCrossoverOperator(0.6, gaussian_mutation);
    # Add the genetic operators to the EA
    optimiser.addGeneticOperator(blend_cross_over);
    optimiser.addGeneticOperator(gaussian_mutation);
    optimiser.addGeneticOperator(ElitismOperator(0.1));
    # Show the visualisation
    if args.visualisation:
        fig, ax = plt.subplots(7,2);
        global_fitness_function.plot(fig, ax, 0, number_of_generation)
    # Create a progress bar
    bar = MyBar('Generation', max=number_of_generation)
    best_global_fitness = global_fitness_function.global_fitness_set[-1];
    # Log message
    if not isinstance(args.logging, NoneType):
        logging.debug("Initial Global fitness: %f" % best_global_fitness);
        logging.debug("Initial RMSE: %f" % global_fitness_function.global_error_term_set[-1]);
        logging.debug("Initial TV: %f" % global_fitness_function.global_regularisation_term_set[-1]);
    # Counters
    i = 0;
    stagnation = 0;
    number_of_mitosis = 0;
    g_generation = 0;
    # Run the evolutionary loop
    run_evolutionary_loop = True;
    # Log the statistics
    g_log_event="Random initial population"; logStatistics(optimiser.getNumberOfIndividuals()); g_generation += 1;
    while run_evolutionary_loop:
        # The max number of generations has not been reached
        if i < number_of_generation:
            # Stagnation has been reached
            if stagnation >= args.max_stagnation_counter[0]:
                # Exit the for loop
                # NOTE(review): the flag only stops the *next* iteration;
                # the rest of this iteration (one more runIteration) still
                # executes -- confirm this is intended.
                run_evolutionary_loop = False;
                # Log message
                if not isinstance(args.logging, NoneType):
                    logging.debug("Stopping criteria met. Population stagnation.");
            # Decrease the mutation variance
            start = args.initial_mutation_variance[0];
            end = args.final_mutation_variance[0];
            gaussian_mutation.mutation_variance = linearInterpolation(start, end, i, number_of_generation - 1);
            # Increase the regularisation weight
            start = args.initial_lambda[0];
            end = args.final_lambda[0];
            global_fitness_function.k = linearInterpolation(start, end, i, number_of_generation - 1);
            # Run the evolutionary loop
            optimiser.runIteration();
            # Log the statistics
            g_log_event="Evolutionary loop"; logStatistics(optimiser.getNumberOfIndividuals()); g_generation += 1;
            # Get the current global fitness
            new_global_fitness = global_fitness_function.global_fitness_set[-1];
            # The population has not improved since the last check
            if new_global_fitness >= best_global_fitness:
                stagnation += 1; # Increase the stagnation counter
            # The population has improved since the last check
            else:
                # Reset the stagnation counter and
                # Update the best global fitness
                stagnation = 0;
                best_global_fitness = new_global_fitness;
            # Log message
            if not isinstance(args.logging, NoneType):
                logging.debug("Global fitness after %i-th generation: %f" % (i, global_fitness_function.global_fitness_set[-1]));
                logging.debug("RMSE after %i-th generation: %f" % (i, global_fitness_function.global_error_term_set[-1]));
                logging.debug("TV after %i-th generation: %f" % (i, global_fitness_function.global_regularisation_term_set[-1]));
            # Update progress bar
            bar.next();
            # Show the visualisation
            if args.visualisation:
                # The main windows is still open
                # (does not work with Tkinker backend)
                if plt.fignum_exists(fig.number) and plt.get_fignums():
                    # Update the main window
                    global_fitness_function.plot(fig, ax, i, number_of_generation)
                    plt.pause(5.00)
                    #plt.savefig('test.eps', format='eps', bbox_inches='tight', pad_inches=1.0, dpi=600)
            # Increment the counter
            i += 1;
        # The max number of generations has been reached
        else:
            # Stop the evolutionary loop
            run_evolutionary_loop = False;
            # Log messages
            if not isinstance(args.logging, NoneType):
                logging.debug("Stopping criteria met. Number of new generations (%i) reached" % number_of_generation);
    bar.finish();
    # Show the visualisation
    if args.visualisation:
        # Create a new figure and show the reconstruction with the bad flies
        fig = plt.figure();
        fig.canvas.set_window_title("Reconstruction")
        plt.imshow(global_fitness_function.population_image_data, cmap=plt.cm.Greys_r);
        # Show all the windows
        plt.show();
    # There is an output for the image with the bad flies
    if not isinstance(args.output, NoneType):
        # Save a PNG file
        imsave(args.output[0] + '-reconstruction.png', global_fitness_function.population_image_data);
        # Save an ASCII file
        np.savetxt(args.output[0] + '-reconstruction.txt', global_fitness_function.population_image_data);
        # Save a PNG file
        imsave(args.output[0] + '-projections.png', global_fitness_function.population_sinogram_data);
        # Save an ASCII file
        np.savetxt(args.output[0] + '-projections.txt', global_fitness_function.population_sinogram_data);
    # Log message
    if not isinstance(args.logging, NoneType):
        logging.debug("Best global fitness: %f", global_fitness_function.global_fitness_set[-1]);
except Exception as e:
    # NOTE(review): if argument parsing itself failed, `args` is still
    # None and `args.logging` below raises AttributeError -- confirm.
    if not isinstance(args.logging, NoneType):
        logging.critical("Exception occurred", exc_info=True)
    else:
        print(e)
    sys.exit(os.EX_SOFTWARE)
sys.exit(os.EX_OK) # code 0, all ok
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-01-17 09:58
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the progress_analyzer app (auto-generated).

    Creates CumulativeSum (one row per user per day) plus one
    OneToOne-related *Cumulative model per health metric.  Do not edit by
    hand beyond comments; Django generated this file.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='AlcoholCumulative',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cum_average_drink_per_week', models.FloatField(blank=True, null=True)),
                ('cum_alcohol_drink_per_week_gpa', models.FloatField(blank=True, null=True)),
                ('rank', models.BigIntegerField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='CumulativeSum',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='ExerciseConsistencyCumulative',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cum_avg_exercise_day', models.FloatField(blank=True, null=True)),
                ('cum_exercise_consistency_gpa', models.FloatField(blank=True, null=True)),
                ('rank', models.BigIntegerField(blank=True, null=True)),
                ('user_cum', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='exercise_consistency_cum', to='progress_analyzer.CumulativeSum')),
            ],
        ),
        migrations.CreateModel(
            name='ExerciseStatsCumulative',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cum_workout_duration_in_hours', models.FloatField(blank=True, null=True)),
                ('cum_workout_effort_level', models.FloatField(blank=True, null=True)),
                ('cum_avg_exercise_hr', models.BigIntegerField(blank=True, null=True)),
                ('cum_overall_exercise_gpa', models.FloatField(blank=True, null=True)),
                ('rank', models.BigIntegerField(blank=True, null=True)),
                ('user_cum', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='exercise_stats_cum', to='progress_analyzer.CumulativeSum')),
            ],
        ),
        migrations.CreateModel(
            name='MovementConsistencyCumulative',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cum_movement_consistency_gpa', models.FloatField(blank=True, null=True)),
                ('cum_movement_consistency_score', models.BigIntegerField(blank=True, null=True)),
                ('rank', models.BigIntegerField(blank=True, null=True)),
                ('user_cum', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='movement_consistency_cum', to='progress_analyzer.CumulativeSum')),
            ],
        ),
        migrations.CreateModel(
            name='NonExerciseStepsCumulative',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cum_non_exercise_steps', models.BigIntegerField(blank=True, null=True)),
                ('cum_non_exercise_steps_gpa', models.FloatField(blank=True, null=True)),
                ('cum_total_steps', models.BigIntegerField(blank=True, null=True)),
                ('rank', models.BigIntegerField(blank=True, null=True)),
                ('user_cum', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='non_exercise_steps_cum', to='progress_analyzer.CumulativeSum')),
            ],
        ),
        migrations.CreateModel(
            name='NutritionCumulative',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cum_prcnt_unprocessed_food_consumed', models.BigIntegerField(blank=True, null=True)),
                ('cum_prcnt_processed_food_consumed_gpa', models.FloatField(blank=True, null=True)),
                ('rank', models.BigIntegerField(blank=True, null=True)),
                ('user_cum', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='nutrition_cum', to='progress_analyzer.CumulativeSum')),
            ],
        ),
        migrations.CreateModel(
            name='OverallHealthGradeCumulative',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cum_total_gpa_point', models.BigIntegerField(blank=True, null=True)),
                ('cum_overall_health_gpa_point', models.FloatField(blank=True, null=True)),
                ('rank', models.BigIntegerField(blank=True, null=True)),
                ('user_cum', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='overall_health_grade_cum', to='progress_analyzer.CumulativeSum')),
            ],
        ),
        migrations.CreateModel(
            name='PenaltyCumulative',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cum_sleep_aid_penalty', models.BigIntegerField(blank=True, null=True)),
                ('cum_controlled_subs_penalty', models.FloatField(blank=True, null=True)),
                ('cum_smoking_penalty', models.FloatField(blank=True, null=True)),
                ('user_cum', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='penalty_cum', to='progress_analyzer.CumulativeSum')),
            ],
        ),
        migrations.CreateModel(
            name='SleepPerNightCumulative',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cum_total_sleep_in_hours', models.FloatField(blank=True, null=True)),
                ('cum_overall_sleep_gpa', models.FloatField(blank=True, null=True)),
                ('rank', models.BigIntegerField(blank=True, null=True)),
                ('user_cum', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='sleep_per_night_cum', to='progress_analyzer.CumulativeSum')),
            ],
        ),
        migrations.AddField(
            model_name='alcoholcumulative',
            name='user_cum',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='alcohol_cum', to='progress_analyzer.CumulativeSum'),
        ),
        migrations.AddIndex(
            model_name='cumulativesum',
            index=models.Index(fields=['user', '-created_at'], name='progress_an_user_id_59c3a6_idx'),
        ),
        migrations.AddIndex(
            model_name='cumulativesum',
            index=models.Index(fields=['created_at'], name='progress_an_created_ccb04d_idx'),
        ),
        migrations.AlterUniqueTogether(
            name='cumulativesum',
            unique_together=set([('user', 'created_at')]),
        ),
    ]
|
def split_and_join(line):
    """Return *line* with every single space replaced by a hyphen."""
    return "-".join(line.split(" "))
|
from bs4 import BeautifulSoup
import sys
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
class downloader(object):
    """Scrapes Java job postings (salary >= 6000) from MyCareersFuture.

    Pages through the JS-rendered search results with Selenium, parses
    each page with BeautifulSoup, accumulates listing fields in parallel
    lists, and appends newly scraped rows to a semicolon-separated file.
    """

    def __init__(self):
        # Site root, used to turn relative job links into absolute URLs.
        self.server = 'https://www.mycareersfuture.gov.sg'
        # Search URL; the page number is appended per request.
        self.target = 'https://www.mycareersfuture.gov.sg/search?search=Java&salary=6000&sortBy=new_posting_date&page='
        self.companynames = []
        self.urls = []
        self.nums = 0  # index of the first listing not yet written to disk
        self.title = []
        self.location = []
        self.perm = []  # employment type text (e.g. permanent/contract)
        self.experience = []
        self.numApplication = []
        self.salary = []
        self.postdate = []

    def get_download_url(self):
        """Crawl result pages 0-99 and append new listings to a text file."""
        for x in range(0, 100):
            driver = webdriver.Chrome()
            url = self.target + str(x)
            driver.get(url)
            timeout = 50
            try:
                # The results are rendered client-side; wait for the first card.
                element_present = EC.presence_of_element_located((By.ID, 'job-card-0'))
                WebDriverWait(driver, timeout).until(element_present)
            except TimeoutException:
                print("Timed out waiting for page to load")
            content = BeautifulSoup(driver.page_source, "html.parser")
            driver.quit()
            a = content.find_all("div", "card relative")
            for each in a[:]:
                link = self.server + each.a.get('href')
                companyName = each.a.p.string
                jobTitle = each.a.h1.string
                self.urls.append(link)
                self.companynames.append(companyName)
                self.title.append(jobTitle)
            for job in content.find_all("div", "dn db-ns"):
                Area = job.p.string
                Type = job.p.next_sibling.string
                NumYear = job.find("p", "black-80 f6 fw4 mt0 mb1 dib pr3 icon-bw-period")
                if NumYear is None:
                    self.experience.append("null")
                else:
                    self.experience.append(NumYear.string)
                self.location.append(Area)
                self.perm.append(Type)
            for application in content.find_all("div", "w-40 ph3-ns ph0 order-3 dn db-l pt3"):
                Applicant = application.section
                if Applicant.string is None:
                    self.numApplication.append("null")
                else:
                    self.numApplication.append(Applicant.string)
                Post = application.section.next_sibling
                if Post is None:
                    self.postdate.append("null")
                else:
                    self.postdate.append(Post.string)
            for salary in content.find_all("div", "lh-solid"):
                if salary.get_text() is None:
                    self.salary.append("null")
                else:
                    self.salary.append(salary.get_text())
            print("urls = " + str(len(self.urls)) + " years" + str(len(self.experience)))
            # BUGFIX: this block previously referenced the module-level
            # instance `dl` instead of `self`, so the method only worked
            # for that one global object.
            for i in range(self.nums, len(self.urls)):
                self.writer('joblisting_6000.txt', self.urls[i], self.companynames[i], self.title[i], self.salary[i],
                            self.experience[i], self.postdate[i], self.numApplication[i])
            sys.stdout.flush()
            print("Done written index " + str(self.nums) + " to " + str(len(self.urls) - 1))
            self.nums = len(self.urls)

    def writer(self, path, url, company, title, salary, experience, date, application):
        """Append one listing as a semicolon-separated line to *path*."""
        with open(path, 'a', encoding='utf-8') as f:
            f.write(company + ";" + title + ";" + salary + ";" + experience + ";" + date + ";" + application + ";" + url)
            f.write('\n')
if __name__ == "__main__":
    # Run the scraper end to end when executed as a script.
    dl = downloader()
    dl.get_download_url()
|
import re
from sys import exit
def readability():
    """Prompt for a passage of text and print its Coleman-Liau grade level.

    Reads the text from stdin, estimates grade as
    0.0588*L - 0.296*S - 15.8 where L and S are letters and sentences
    per 100 words, and prints "Grade N", "Grade 16+" or "Before Grade 1".
    Exits with status 1 when the text contains no whitespace at all.
    """
    text = input("Text: ")
    # Number of whitespace separators; word count is separators + 1.
    # BUGFIX: regex patterns are now raw strings -- '\s' / '\w' in plain
    # strings are invalid escape sequences on modern Python.
    word = len(re.findall(r'\s', text))
    if word == 0:
        # NOTE(review): a one-word text (no whitespace) also lands here --
        # confirm that is the intended behavior.
        print("Before Grade 1")
        exit(1)
    word += 1
    # L: word characters per 100 words; S: sentence terminators per 100 words.
    L = (len(re.findall(r'\w', text)) / word) * 100
    S = (len(re.findall(r'[.!?]', text)) / word) * 100
    grade = 0.0588 * L - 0.296 * S - 15.8
    grade = round(grade)
    if grade >= 16:
        print("Grade 16+")
    elif grade < 1:
        print("Before Grade 1")
    else:
        print("Grade " + str(grade))
readability()
|
from __future__ import print_function
from scipy.io import loadmat
import cv2
import json
import sys
from pprint import pprint
import struct
def convert_annotations(vbb, prefix):
    """Get the bounding boxes from MATLAB's vbb file format.

    Arguments:
        vbb: loaded MATLAB structure containing the per-frame boxes
            (frame count at vbb['A'][0][0][0][0][0], object sequence at
            vbb['A'][0][0][1][0]).
        prefix: filename prefix; keys are '<prefix>_<frame:04d>.jpg'.

    Returns:
        dict mapping frame filename -> {'num_objects': int,
        'coords_list': [{'x1', 'y1', 'x2', 'y2'}, ...]}.
    """
    num_frames = int(vbb['A'][0][0][0][0][0])
    objects_sequence = vbb['A'][0][0][1][0]
    annotations = {}
    # BUGFIX: `xrange` does not exist on Python 3 (this file already uses
    # python3-style print); `range` is the drop-in replacement here.
    for i in range(num_frames):
        objects_current_frame = objects_sequence[i]
        if objects_current_frame.shape[1] > 0:
            num_objects = objects_current_frame['pos'][0].shape[0]
            coords_list = []
            for obj in range(num_objects):
                coords_curr = {}
                box = objects_current_frame['pos'][0][obj]
                # Boxes are stored as (x, y, width, height); convert to corners.
                x1 = int(box[0][0])
                y1 = int(box[0][1])
                w = int(box[0][2])
                h = int(box[0][3])
                coords_curr['x1'] = x1
                coords_curr['y1'] = y1
                coords_curr['x2'] = x1 + w
                coords_curr['y2'] = y1 + h
                coords_list.append(coords_curr)
            im_info = {}
            im_info['num_objects'] = num_objects
            im_info['coords_list'] = coords_list
        else:
            # Frame with no annotated objects.
            im_info = {}
            im_info['num_objects'] = 0
            im_info['coords_list'] = []
        annotations['{}_{:04d}.jpg'.format(prefix, i)] = im_info
        print('Processing annotations {}/{}'.format(i, num_frames), end='\r')
        sys.stdout.flush()
    return annotations
def read_header(ifile):
    """Parse the 1024-byte header of a Norpix .seq file.

    ifile -- binary file object positioned at the start of the file; on
    return it is positioned at offset 1024 (the first frame record).

    Returns a dict with image width/height, bit depth, file extension,
    raw format code, frame size, true frame size and frame count.
    """
    feed = ifile.read(4)     # magic bytes (unused)
    norpix = ifile.read(24)  # "Norpix seq" signature (unused)
    # NOTE(review): struct.unpack returns a 1-tuple, so version/length/fps
    # below are tuples, not scalars.
    version = struct.unpack('@i', ifile.read(4))
    length = struct.unpack('@i', ifile.read(4))
    # NOTE(review): this assert is vacuous -- a tuple never equals an int,
    # so `length != 1024` is always True. The intent was presumably to
    # validate the fixed header size (`length[0] == 1024`); confirm against
    # real .seq files before changing it.
    assert(length != 1024)
    descr = ifile.read(512)  # free-text description (unused)
    # Nine 32-bit ints; per the return dict below: [0]=width, [1]=height,
    # [2]=bit depth, [4]=frame size, [5]=format code, [6]=frame count,
    # [8]=true frame size ([3] and [7] are unused here).
    params = [struct.unpack('@i', ifile.read(4))[0] for i in range(0,9)]
    fps = struct.unpack('@d', ifile.read(8))
    # skipping the rest of the 1024-byte header
    ifile.read(432)
    # Map Norpix format codes to output file extensions.
    image_ext = {100:'raw', 102:'jpg',201:'jpg',1:'png',2:'png'}
    return {'w':params[0],'h':params[1],
            'bdepth':params[2],
            'ext':image_ext[params[5]],
            'format':params[5],
            'size':params[4],
            'true_size':params[8],
            'num_frames':params[6]}
def read_sequence(path, target_dir, prefix):
    """Split a Norpix .seq file into individual JPEG frame files.

    path -- .seq file to read; target_dir -- output directory;
    prefix -- filename prefix, frames are written as
    '<prefix>_<index:04d>.jpg'.

    Returns an (always empty) list, kept for interface compatibility.
    """
    with open(path, 'rb') as header_file:
        params = read_header(header_file)
    # Frame offsets below are absolute, so read the whole file again
    # (header included). BUGFIX: the original opened the file twice and
    # never closed either handle.
    with open(path, 'rb') as seq_file:
        data = seq_file.read()
    # Frame records are chained via size fields; the offset bookkeeping
    # below ("freaking magic" per the original author) walks that chain.
    extra = 8
    s = 1024
    seek = [0] * (params['num_frames'] + 1)
    seek[0] = 1024
    images = []
    for i in range(0, params['num_frames']):
        try:
            tmp = struct.unpack_from('@I', data[s:s + 4])[0]
            s = seek[i] + tmp + extra
            if i == 0:
                # Probe the alignment of the first record and adjust.
                val = struct.unpack_from('@B', data[s:s + 1])[0]
                if val != 0:
                    s -= 4
                else:
                    extra += 8
                    s += 8
            seek[i + 1] = s
            nbytes = struct.unpack_from('@i', data[s:s + 4])[0]
            frame_bytes = data[s + 4:s + nbytes]
            # BUGFIX: close each output file (the original leaked one
            # file descriptor per frame).
            with open('{}/{}_{:04d}.jpg'.format(target_dir, prefix, i), 'wb+') as out:
                out.write(frame_bytes)
        except Exception:
            # Best-effort: skip frames whose records cannot be parsed.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            continue
    return images
def convert_sequence(vid, target_dir):
    """Dump every frame of the video capture *vid* as a PNG in *target_dir*.

    Frames are named '<index:04d>.png'; a running count is printed while
    extracting.
    """
    frame_idx = 0
    while True:
        got_frame, frame = vid.read()
        if got_frame:
            cv2.imwrite('{}/{:04d}.png'.format(target_dir, frame_idx), frame)
            print('Number of images extracted: {} '.format(frame_idx), end='\r')
            sys.stdout.flush()
            frame_idx += 1
        else:
            # End of stream: finish the progress line and stop.
            print('\n')
            sys.stdout.flush()
            break
def test_conversion(test_path):
annotations = None
with open(test_path + '/annotations.json', 'r') as f:
annotations = json.load(f)
lis = annotations['0279.png']
print(lis['coords_list'])
print(type(lis['coords_list']))
print(lis['coords_list'], len(lis['coords_list']))
print(annotations['0279.png']['coords_list'][0]['x1'])
# for img_path in glob.glob(test_path + '/*.png'):
# img = cv2.imread(img_path)
if __name__ == '__main__':
    # Sanity-check a previously generated annotations.json from set00/V000.
    test_path = './target/set00/V000'
    test_conversion(test_path)
|
starFuncCount = int(input("출력 횟수 입력 : "))
def star(a):
    """Return a row of *a* filled-star characters."""
    return "".join("★" for _ in range(a))
def main():
    """Print a star row sized by the user's input, then rows of 5, 6 and 7."""
    print(star(starFuncCount))
    print(star(5))
    print(star(6))
    print(star(7))
    # BUGFIX: the original ended with `return exit(0)`, which raises
    # SystemExit from inside the function; the interpreter already exits
    # with status 0 after the last statement, so just fall through.
main()
|
from ephemeral.build_api.lib_builder import LibBuilder
from typing import List
# Define some functions
def filter_string_length(string_list, word_length=None, comparison=None) -> List[str]:
    """Filter *string_list* by string length.

    comparison -- 'less' keeps strings shorter than *word_length*,
    'greater' keeps strings longer; any other value (or a missing
    *word_length*) returns the input unchanged.

    BUGFIX: the original raised TypeError (len < None) when *comparison*
    was given without *word_length*; that combination now passes the
    list through unchanged, matching the other no-criterion cases.
    """
    if word_length is None or comparison not in ("less", "greater"):
        # No usable criterion: pass the input through unchanged.
        return string_list
    if comparison == "less":
        return [string for string in string_list if len(string) < word_length]
    return [string for string in string_list if len(string) > word_length]
def capitalize_words(words_map) -> List[str]:
    """Return the strings under words_map["strings"], upper-cased.

    A simple function to illustrate use of the task factory pattern.
    (Despite the name, whole words are upper-cased, not just the first
    letter.)
    """
    return [word.upper() for word in words_map["strings"]]
# Create a library of callable tasks.
lib = LibBuilder()
# Register the functions; the returned wrappers create job tasks when called.
capitalize = lib.add_function(name='capitalize-words', method=capitalize_words)
filter_word_length_task = lib.add_function(name='filter', method=filter_string_length)
# Create a job.
job = lib.create_job('my job')
# Create job tasks, starting with the input task.
job_input = job.create_input_task('this is a test')
capitalized = capitalize(job_input, task_name='capitalize')
# Fan out into two filter branches over the capitalized words.
filtered_long = filter_word_length_task(capitalized, task_name='filter-long', word_length=5, comparison='greater')
filtered_short = filter_word_length_task(capitalized, task_name='filter-short', word_length=6, comparison='less')
job.print_tasks()
|
import matplotlib.pyplot as plt
import os
import pandas as pd
def plot_learning_curve(opt):
    """Plot training/validation curves from a trace TSV and save them as PNGs.

    opt keys used: 'model_params', 'base_trace_dir', 'split', 'ftsplit'.
    Reads <base_trace_dir>/<model_params>/<split>_<ftsplit>_trace.tsv
    (skipping the leading parameter dump terminated by a '#' line) and
    writes one PNG per metric into a 'plots/' subdirectory, created if
    missing.

    Refactor: the ten near-identical figure blocks are collapsed into one
    parameterized helper; filenames, titles, labels and axis settings are
    unchanged.
    """
    ## Reading options ##
    model_params = opt['model_params']
    base_trace_dir = opt['base_trace_dir']
    trace_dir = base_trace_dir + '/' + model_params + '/'
    save_plots_to = trace_dir + 'plots/'
    split = opt['split']
    ftsplit = opt['ftsplit']
    dirName = save_plots_to
    if not os.path.exists(dirName):
        os.makedirs(dirName)
        print("Directory ", dirName, " Created ")
    else:
        print("Directory ", dirName, " already exists")
    trace_file = trace_dir + split + '_' + ftsplit + '_trace.tsv'
    ## start reading the file after '#' sign (indicates end of params info) ##
    # count number of lines preceding the '#' sign
    with open(trace_file, 'r') as f:
        num_lines_to_skip = 1  # also skip the line with '#'
        for line in f:
            if line.startswith("#"):
                break
            else:
                num_lines_to_skip += 1
    print("num lines to skip: {}\n".format(num_lines_to_skip))
    trace_df = pd.read_csv(trace_file, sep='\t', index_col=False, skiprows=num_lines_to_skip)
    n_epochs = len(trace_df['epoch'])

    def _plot(curves, ylabel, xlabel, title, fname, skip_first=False, set_xlim=False):
        """Render one comparison figure and save it as a PNG.

        curves     -- list of (column_name, legend_label) pairs.
        skip_first -- drop the epoch-0 value of each series (it can skew
                      the y axis).
        set_xlim   -- clamp the x axis to [1, n_epochs].
        """
        plt.figure()
        for column, label in curves:
            series = trace_df[column][1:] if skip_first else trace_df[column]
            plt.plot(series, label=label)
        plt.legend()
        plt.ylabel(ylabel)
        plt.xlabel(xlabel)
        if set_xlim:
            plt.xlim([1, n_epochs])
        plt.title("{} ({} {})".format(title, split, ftsplit))
        plt.savefig("{}{}_{}_{}_epochs{}.png".format(save_plots_to, fname, split, ftsplit, n_epochs))
        plt.close()

    # Total loss and overall accuracy.
    _plot([('train_losstotal', 'training loss'), ('val_losstotal', 'validation loss')],
          'Loss', 'Epochs', 'Loss over training epochs', 'losses',
          skip_first=True, set_xlim=True)
    _plot([('train_AUC', 'training AUC'), ('val_AUC', 'validation AUC')],
          'AUC', 'Epoch', 'Accuracy (AUC) over training epochs', 'acc')
    # Regression and classification heads.
    _plot([('train_regloss', 'training'), ('val_regloss', 'validation')],
          'MSE Loss', 'Epochs', 'Regression MSE Loss over training epochs', 'reg_losses',
          skip_first=True)
    _plot([('train_closs', 'training'), ('val_closs', 'validation')],
          'BCE Loss', 'Epochs', 'Classification Loss over training epochs', 'clas_losses')
    # Discriminator losses, then discriminator AUCs (DG, DR, DS).
    for disc in ('DG', 'DR', 'DS'):
        _plot([('train_{}loss'.format(disc), '{} training loss'.format(disc)),
               ('val_{}loss'.format(disc), '{} validation loss'.format(disc))],
              'Loss', 'Epochs', '{} Losses over training epochs'.format(disc),
              '{}_losses'.format(disc))
    for disc in ('DG', 'DR', 'DS'):
        _plot([('train_{}auc'.format(disc), '{} training auc'.format(disc)),
               ('val_{}auc'.format(disc), '{} validation auc'.format(disc))],
              'AUC', 'Epochs', '{} AUC over training epochs'.format(disc),
              '{}_auc'.format(disc), set_xlim=True)
#!/usr/bin/env python
"""entertainment_center.py: Contains database for the Fresh Tomatoes website.
Runs fresh_tomatoes.open_movies_page() to create fresh_tomatoes.html"""
import media
import fresh_tomatoes
__author__ = "Udacity Full Stack Web Developer Nanodegree, Jordan Lamoreaux"
__credits__ = "Jordan Lamoreaux"
__version__ = "1.0"
__maintainer__ = "Jordan Lamoreaux"
__email__ = "jnacious88@gmail.com"
__status__ = "Development"
# The following Movie instances populate the Fresh Tomatoes page.
# Each takes: title, storyline, poster image URL, trailer URL, rating.
GOG_VOL2 = media.Movie("Guardians of the Galaxy Vol. 2",
                       "Star Lord and his companions come face to face with Star Lord's "
                       "father named Ego",
                       "http://cdn.movieweb.com/img.site/PHi3FGAu4Fkdlm_1_l.jpg",
                       "https://www.youtube.com/watch?v=duGqrYw4usE",
                       media.Movie.VALID_RATINGS[2])
WONDER_WOMAN = media.Movie("Wonder Woman",
                           "Before she was Wonder Woman she was Diana,"
                           "princess of the Amazons, trained warrior.",
                           "https://images-na.ssl-images-amazon.com/images/"
                           "M/MV5BNDFmZjgyMTEtYTk5MC00NmY0LWJhZjktOWY2MzI5Yj"
                           "kzODNlXkEyXkFqcGdeQXVyMDA4NzMyOA@@._V1_SY1000_SX675_AL_.jpg",
                           "https://www.youtube.com/watch?v=INLzqh7rZ-U",
                           media.Movie.VALID_RATINGS[2])
THOR3 = media.Movie("Thor 3 Ragnarok",
                    "Imprisoned, the mighty Thor finds himself in a lethal "
                    "gladiatorial contest against the Hulk, his former ally.",
                    "https://images-na.ssl-images-amazon.com/images/M/"
                    "MV5BMjE1ODgwOTkzNF5BMl5BanBnXkFtZTgwMDcwMTg5MTI@."
                    "_V1_SY1000_CR0,0,674,1000_AL_.jpg",
                    "https://www.youtube.com/watch?v=v7MGUNV8MxU",
                    media.Movie.VALID_RATINGS[2])
SPIDERMAN_HOMECOMING = media.Movie("Spider-Man: Homecoming",
                                   "Peter Parker attempts to balance his life in high school "
                                   "with his career as the web-slinging superhero Spider-Man",
                                   "https://images-na.ssl-images-amazon.com/images/M/"
                                   "MV5BNTk4ODQ1MzgzNl5BMl5BanBnXkFtZTgwMTMyMzM4MTI@."
                                   "_V1_SY1000_CR0,0,658,1000_AL_.jpg",
                                   "https://www.youtube.com/watch?v=39udgGPyYMg",
                                   media.Movie.VALID_RATINGS[2])
JUSTICE_LEAGUE = media.Movie("Justice League",
                             "Fueled by his restored faith in humanity and inspired by Superman's selfless act, "
                             "Bruce Wayne enlists the help of his newfound ally, Diana Prince, "
                             "to face an even greater enemy.",
                             "https://images-na.ssl-images-amazon.com/images/M/"
                             "MV5BMjI2NjI2MDQ0NV5BMl5BanBnXkFtZTgwMTc1MjAwMjI@._V1_SY1000_CR0,0,674,1000_AL_.jpg",
                             "https://www.youtube.com/watch?v=3cxixDgHUYw",
                             media.Movie.VALID_RATINGS[2])
SW_THE_LAST_JEDI = media.Movie("Star Wars: The Last Jedi",
                               "Rey continues her epic journey with Finn, Poe and "
                               "Luke Skywalker in the next chapter of the saga.",
                               "https://images-na.ssl-images-amazon.com/images/M/"
                               "MV5BOTE5NzYyNjM0Ml5BMl5BanBnXkFtZTgwNjk4MDIwMjI@._V1_SY1000_CR0,0,674,1000_AL_.jpg",
                               "https://www.youtube.com/watch?v=zB4I68XVPzQ",
                               media.Movie.VALID_RATINGS[2])
# pylint recommends 'movies' be formatted differently: it views it as a
# GLOBAL_CONSTANT, so it should be in ALL CAPS, but for consistency with
# fresh_tomatoes, 'movies' was left lowercase.
movies = [SW_THE_LAST_JEDI, WONDER_WOMAN, THOR3, SPIDERMAN_HOMECOMING,
          JUSTICE_LEAGUE, GOG_VOL2]
fresh_tomatoes.open_movies_page(movies)
|
import os
from jinja2 import Environment, FileSystemLoader
class TemplateFiller(object):
    """Renders Jinja2 templates stored in the package's ``files/`` directory."""

    # Primitive ASN.1 types whose *values* all share one generic template.
    _SIMPLE_VALUE_TYPES = ('BooleanType', 'IntegerType', 'OctetStringType', 'BitStringType')

    def __init__(self):
        template_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'files/')
        self.env = Environment(loader=FileSystemLoader(template_dir))

    def fill(self, asn_type, type_or_value='type', **kwargs):
        """Render the template matching *asn_type* with *kwargs* as context.

        Simple primitive types rendered as values share the generic
        'simple_value.txt' template; everything else uses '<asn_type>.txt'.
        """
        if type_or_value == 'value' and asn_type in self._SIMPLE_VALUE_TYPES:
            template_name = 'simple_value.txt'
        else:
            template_name = '{}.txt'.format(asn_type)
        return self.env.get_template(template_name).render(**kwargs)


# Shared module-level instance used by importers.
template_filler = TemplateFiller()
|
#!/usr/bin/python -u
'''
Cue_Control_Middleware.py
Written by Andy Carluccio
University of Virginia
This file is designed to run on a properly configured YUN Linux Environment.
It relays integer command pairs from stdin to the Arduino side via the bridge.
Good things to know about communication with the Arduino environment:
1. The first int sent is always the number of ints to follow (accomplished within the sendCommand method)
2. Using sendCommand(-1) alerts for load mode enter
3. Using sendCommand(-2) alerts for load mode exit
4. Using sendCommand(-3) alerts for a manual control packet to follow
5. Using sendCommand(-4) alerts for an incoming cue
6. Using sendCommand(-5) is a short-hand soft stop trigger
'''
# import statements for required libraries and bridge
# setup ------------------------
import sys
import os
import requests
import math
# The YUN bridge client lives outside the default path (Python 2.7 runtime).
sys.path.insert(0, '/usr/lib/python2.7/bridge')
from time import sleep
from bridgeclient import BridgeClient as bridgeclient
value = bridgeclient()
print("setup completed")
sys.stdout.flush()
# Main relay loop: runs forever (in theory!). Each iteration reads two
# whitespace-trimmed integers from stdin and echoes them to stdout
# (stdout is unbuffered via the -u shebang flag).
while(True):
    inputA = sys.stdin.readline()
    inputA = inputA.rstrip('\n')
    inputA = inputA.strip()
    inputB = sys.stdin.readline()
    inputB = inputB.rstrip('\n')
    inputB = inputB.strip()
    inputA = int(inputA)
    inputB = int(inputB)
    print(inputA)
    #a = -7
    #print(a)
    sys.stdout.flush()
    #b = -32442
    #print(b)
    print(inputB)
    sys.stdout.flush()
# A message we should never see (unreachable after the infinite loop).
print("We're in the endgame now...\n")
from .distance_matrix import *
from .similarity_metrics import *
|
import time
import serial
import sys
import pickle
from printrun.printcore import printcore
from printrun import gcoder
from imageProc import objectDet
from interface import start, insert, FPickupErr, PPickupErr, AssemFail, restockF, restockP, calib, feedIns, Xadj, Yadj
printer = printcore('/dev/ttyUSB1',115200) #Connects to the printer.
arduino = serial.Serial('/dev/ttyUSB0', 9600) #Connects to the arduino.
#The following imports gcode text files and assigns them to arrays to be sent to the printer.
#Each pair strips the raw lines then wraps them in a LightGCode object.
home = [i.strip() for i in open('/home/pi/Printrun/testfiles/home.gcode')]
home = gcoder.LightGCode(home)
getComponent = [i.strip() for i in open('/home/pi/Printrun/testfiles/getComponent.gcode')]
getComponent = gcoder.LightGCode(getComponent)
feedthroughView = [i.strip() for i in open('/home/pi/Printrun/testfiles/feedthroughView.gcode')]
feedthroughView = gcoder.LightGCode(feedthroughView)
preformView = [i.strip() for i in open('/home/pi/Printrun/testfiles/preformView.gcode')]
preformView = gcoder.LightGCode(preformView)
feedthroughPickup = [i.strip() for i in open('/home/pi/Printrun/testfiles/feedthroughPickup.gcode')]
feedthroughPickup = gcoder.LightGCode(feedthroughPickup)
preformPickup = [i.strip() for i in open('/home/pi/Printrun/testfiles/preformPickup.gcode')]
preformPickup = gcoder.LightGCode(preformPickup)
feedthroughDrop = [i.strip() for i in open('/home/pi/Printrun/testfiles/feedthroughDrop.gcode')]
feedthroughDrop = gcoder.LightGCode(feedthroughDrop)
preformDrop = [i.strip() for i in open('/home/pi/Printrun/testfiles/preformDrop.gcode')]
preformDrop = gcoder.LightGCode(preformDrop)
#Initialises shared state: completed-assembly count and progress percentage.
numberAssem = 0
percent = 0
#Loads values of coordinates of the first placement location saved in a file.
filename = 'startCoords'
infile = open(filename,'rb')
startCoords = pickle.load(infile)
infile.close()
#Takes the values of the first placement location and converts them to floats
X = float(startCoords.get('X'))
Y = float(startCoords.get('Y'))
def FDrop(sec):
    """Discard the held feedthrough into the feedthrough bin.

    sec -- seconds to wait before starting the move.
    """
    time.sleep(sec)
    print(" Throwing Feedthrough Away...")
    printer.startprint(feedthroughDrop)  # Travel to the drop location.
    time.sleep(5)  # Allow the move to finish before releasing.
    arduino.write(b'4\n')  # Partially open the feedthrough gripper.
def PDrop(sec):
    """Discard the held preform into the preform bin.

    sec -- seconds to wait before starting the move.
    """
    time.sleep(sec)
    print(" Throwing Preform Away...")
    printer.startprint(preformDrop)  # Travel to the drop location.
    time.sleep(5)  # Allow the move to finish before releasing.
    arduino.write(b'7\n')  # Partially open the preform gripper.
def pickup(obj):
    """Pick up a distributed component with the gripper head.

    obj -- 'feedthrough' or 'preform', selecting the gripper, camera
    position and vision pipeline to use.

    Returns 'correct' on a confirmed pickup, 'FSizeErr' for a wrong-sized
    feedthrough, 'FNoObjErr'/'PNoObjErr' when nothing is detected, or
    'FPickupErr'/'PPickupErr' after two failed grab attempts.
    """
    arduino.write(b'6\n') #Open preform grippers.
    time.sleep(1)
    arduino.write(b'3\n') #Open feedthrough grippers.
    if obj == 'feedthrough': #If the object being picked up is specified as a feedthrough.
        print(" Moving to detect Feedthrough...")
        printer.startprint(feedthroughView) #Positions camera for feedthrough detection.
        while printer.printing: #Checks if the printer is still in operation.
            time.sleep(1)
        arduino.write(b'0\n') #Turns on LEDs for camera.
        time.sleep(7)
        print(" Detecting Feedthrough...")
        det = objectDet('feedthrough') #Calls detection function.
        print(" " + det) #Prints outcome of detection.
        if det != 'No Feedthrough Detected.': #When an object is in the distribution holder
            print(" Picking-up Feedthrough...")
            printer.startprint(feedthroughPickup) #Moves object to feedthrough gripper.
            while printer.printing:
                time.sleep(1)
            time.sleep(5)
            arduino.write(b'5\n') #Closes gripper on feedthrough.
            time.sleep(3)
            printer.send_now('G1 Z30') #Moves gripper up 30mm.
            time.sleep(3)
            print(" Confirming Pickup...")
            printer.startprint(feedthroughView) #Moves back to camera view of feedthrough.
            while printer.printing:
                time.sleep(1)
            time.sleep(3)
            compCheck = objectDet('feedthrough') #Checks if feedthrough is still in holder.
            if compCheck != 'No Feedthrough Detected.': #Feedthrough still present => grab failed.
                print(" ERROR:Pickup failure. Re-attempting...")
                arduino.write(b'3\n') #Opens feedthrough gripper.
                printer.startprint(feedthroughPickup) #Moves object to feedthrough gripper.
                while printer.printing:
                    time.sleep(1)
                time.sleep(5)
                arduino.write(b'5\n') #Closes gripper on feedthrough.
                time.sleep(3)
                printer.send_now('G1 Z30') #Moves gripper up 30mm.
                time.sleep(5)
                print(" Confirming Pickup...")
                printer.startprint(feedthroughView) #Moves back to camera view of feedthrough.
                while printer.printing:
                    time.sleep(1)
                time.sleep(5)
                compCheck = objectDet('feedthrough') #Checks if feedthrough is still in holder.
                if compCheck != 'No Feedthrough Detected.': #Second attempt also failed.
                    arduino.write(b'2\n') #Strobes LEDs to alert the user.
                    # BUGFIX: the retry loop previously polled PPickupErr()
                    # (the preform dialog) after showing the feedthrough
                    # dialog once; keep polling FPickupErr() instead.
                    res = FPickupErr() #Calls feedthrough pickup error function.
                    while res == 0: #Waits for user to input appropriate response.
                        res = FPickupErr()
                    arduino.write(b'3\n') #Opens feedthrough grippers.
                    return 'FPickupErr'
            if det == 'Incorrect Size Feedthrough Detected.': #Checks if the feedthrough dimensions are incorrect.
                return "FSizeErr" #If so, returns incorrect size.
            else: #Else, if the feedthrough dimensions are correct.
                time.sleep(1)
                arduino.write(b'1\n') #Turns off LEDs.
                return "correct" #Returns correct size.
        else: #Else, if no object is detected.
            return "FNoObjErr" #Returns no object.
    elif obj == 'preform': #If the object being picked up is specified as a preform.
        print(" Moving to detect Preform...")
        printer.startprint(preformView) #Positions camera for preform detection.
        while printer.printing:
            time.sleep(1)
        arduino.write(b'0\n') #Turns on LEDs for camera.
        time.sleep(7)
        print(" Detecting Preform...")
        det = objectDet('preform') #Calls detection function.
        print(" " + det) #Prints outcome of detection.
        if det != 'No Preform Detected.': #When an object is in the distribution holder
            print(" Picking-up Preform...")
            printer.startprint(preformPickup) #Moves object to preform gripper.
            while printer.printing:
                time.sleep(1)
            arduino.write(b'7\n') #Partially closes gripper.
            time.sleep(5)
            arduino.write(b'8\n') #Closes gripper on preform.
            time.sleep(3)
            printer.send_now('G1 Z30') #Moves gripper up 30mm.
            time.sleep(3)
            print(" Confirming Pickup...")
            printer.startprint(preformView) #Moves back to camera view of preform.
            while printer.printing:
                time.sleep(1)
            time.sleep(3)
            compCheck = objectDet('preform') #Checks if preform is still in holder.
            if compCheck != 'No Preform Detected.': #Preform still present => grab failed.
                print(" ERROR: Pickup failure. Re-attempting...")
                arduino.write(b'6\n') #Re-opens preform gripper.
                printer.startprint(preformPickup) #Moves object to preform gripper.
                while printer.printing:
                    time.sleep(1)
                arduino.write(b'7\n') #Partially closes gripper.
                time.sleep(5)
                arduino.write(b'8\n') #Closes gripper on preform.
                time.sleep(3)
                printer.send_now('G1 Z30') #Moves gripper up 30mm.
                time.sleep(5)
                print(" Confirming Pickup...")
                printer.startprint(preformView) #Moves back to camera view of preform.
                while printer.printing:
                    time.sleep(1)
                time.sleep(3)
                compCheck = objectDet('preform') #Checks if preform is still in holder.
                if compCheck != 'No Preform Detected.': #Second attempt also failed.
                    arduino.write(b'2\n') #Strobes LEDs.
                    res = PPickupErr() #Calls preform pickup error function.
                    while res == 0: #Waits until user inputs a satisfactory input.
                        res = PPickupErr()
                    arduino.write(b'3\n') #Opens grippers.
                    return 'PPickupErr' #Returns pickup error.
                else: #Otherwise, if the retry succeeded.
                    time.sleep(1)
                    arduino.write(b'1\n') #Turns LEDs off.
                    return "correct" #Returns correct pickup.
            else: #Otherwise, if no preform is detected, meaning pickup is successful.
                time.sleep(1)
                arduino.write(b'1\n') #Turns LEDs off.
                return "correct" #Returns correct pickup.
            # BUGFIX: removed an unreachable duplicate "return correct"
            # block here -- both branches above already return.
        else: #Otherwise, if no object is detected.
            return "PNoObjErr" #Returns no object error.
def place(coords, sec, fVal):
    #Places one preform and one feedthrough on the assembly bed in a single call.
    #coords: G-code coordinate string for the placement; sec: delay before opening
    #the gripper after lowering; fVal: non-zero when a previous feedthrough error
    #means the freshly collected preform must be discarded instead of placed.
    #NOTE(review): several paths fall off the end and implicitly return None
    #(e.g. pickup errors on the feedthrough stage); the caller (tManage) only
    #compares the outcome against specific strings, so None silently does
    #nothing -- confirm that is intended.
    print(" Retrieving Components...")
    printer.startprint(getComponent) #Moves the assembly bed to distribute a preform and feedthrough component.
    while printer.printing:
        time.sleep(1)
    time.sleep(2)
    det = pickup('preform') #Calls the pickup function for the preform.
    if fVal == 0: #Check that there is no feedthrough error.
        if det == 'correct': #If the preform has been picked up correctly.
            print(" Placing Preform...")
            printer.send_now('G1 ' + coords) #Moves to feedthrough placement coordinates.
            time.sleep(1)
            printer.send_now('G91') #Changes printer to incremental values.
            time.sleep(1)
            printer.send_now('G1 X-68.5 Y2') #Moves the preform to the place where the feedthrough gripper just was.
            time.sleep(3)
            printer.send_now('G90') #Changes back to absolute values.
            time.sleep(1)
            printer.send_now('G1 Z14') #Moves to 14mm in Z axis.
            time.sleep(sec)
            arduino.write(b'7\n') #Partially opens preform gripper.
            time.sleep(1)
            printer.send_now('G1 Z30') #Moves gripper back up to 30mm in Z axis.
            time.sleep(4)
            arduino.write(b'6\n') #Fully opens preform gripper.
            time.sleep(2)
            print(" Successful Placement.")
        elif det == "PNoObjErr" or det == "PPickupErr": #Otherwise, if there was a preform pickup error or there was no preform.
            det = pickup('feedthrough') #Moves to the feedthrough and picks it up.
            if det == "FSizeErr" or det == "correct": #If feedthrough pickup is successful.
                FDrop(1) #Throws the feedthrough away.
                return "repeat" #Returns try again.
    else: #If there is a feedthrough error.
        if det == 'correct': #If the preform has been correctly picked up.
            PDrop(1) #Throw the preform away.
    det = pickup('feedthrough') #Calls the pickup function for the feedthrough.
    if det == 'correct': #If the feedthrough is correctly picked up.
        print(" Placing Feedthrough...")
        printer.send_now('G1 ' + coords) #Moves feedthrough gripper to placement location.
        printer.send_now('G1 Z15.5') #Moves gripper to 15.5mm in Z axis.
        time.sleep(sec)
        arduino.write(b'4\n') #Partially opens feedthrough gripper.
        time.sleep(1)
        printer.send_now('G1 Z30') #Moves gripper back up to 30mm in Z axis.
        time.sleep(2)
        arduino.write(b'3\n') #Fully opens feedthrough gripper.
        time.sleep(1)
        print(" Successful Placement.")
        return 'success' #Returns successful feedthrough and preform placement.
    elif det == "FSizeErr": #If there is a feedthrough size error.
        print(" Throwing Incorrect Feedthrough Away...")
        FDrop(1) #Throws feedthrough away.
        return 'FRepeat' #Returns repeat but with consideration of feedthrough.
def complete():
    """Present the finished assembly to the user and shut the script down.

    Raises the gripper, brings the bed forward, opens both grippers,
    strobes the LEDs and exits the process.
    """
    printer.send_now('G1 Z30')  # Raise the gripper clear of the bed.
    time.sleep(3)
    printer.send_now('G1 X200 Y160')  # Park gripper right; bring the bed forward.
    for command in (b'3\n', b'6\n'):  # Open feedthrough, then preform grippers.
        arduino.write(command)
        time.sleep(1)
    arduino.write(b'2\n')  # Strobe the LEDs to signal completion.
    print("\nAssembly Complete!")
    sys.exit()  # Terminate the script.
def percentDisp():
    """Print cumulative assembly progress after one placement step.

    A single assembly consists of 43 placements, so each call adds
    100 / (43 * numberAssem) percent to the shared total.

    BUGFIX: the original compared numberAssem to the int 1 but to the
    strings '2'..'4', so for any given storage type most branches could
    never match; it also only supported 1-4 assemblies via hard-coded
    magic increments (all equal to 100/(43*n)). The count is now
    normalised with int() and the increment computed, which generalises
    to any positive count.
    """
    global percent  # accumulated progress shared across calls
    count = int(numberAssem)
    if count >= 1:
        percent += 100.0 / (43 * count)
        print("Assembly " + str(round(percent, 2)) + "% Complete")
def tManage(Xcoord, Ycoord, sec):
    #Organises the higher level functions of the assembler, takes the coordinates for placement, as well as the delay in distributing components as inputs.
    #Manages the error values of feedthroughs. If a feedthrough is thrown away or not distributed, then the function throws the next preform away after collection.
    coords = 'X' + str(Xcoord) + ' ' + 'Y' + str(Ycoord) #Places coordinates in correct G-code format.
    outcome = place(coords, sec, 0) #Calls placement function, sending coordinates for placement.
    if outcome == 'repeat': #If the preform collection needs to be repeated.
        outcome = place(coords, sec, 0) #Calls the placement function again.
        if outcome == 'repeat': #If the preform collection still needs to be repeated.
            arduino.write(b'2\n') #Strobes LEDs
            restock = restockP() #Calls the restock preforms function.
            while restock == 0: #Waits for user to input proper response.
                restock = restockP() #BUGFIX: was `restock == restockP()` — a no-op comparison that looped forever.
            outcome = place(coords, sec, 0) #Calls the placement function again.
            if outcome == 'repeat': #If the preform collection still needs to be repeated.
                arduino.write(b'2\n') #Strobes LEDs.
                fail = AssemFail() #Assembly fail function called.
                while fail == 0: #Waits for user to input proper response.
                    fail = AssemFail()
                complete() #Completes the assembly.
    elif outcome == 'FRepeat': #If feedthrough collection needs to be repeated.
        outcome = place(coords, sec, 1) #Calls the placement function again with the feedthrough flag.
        if outcome == 'FRepeat': #If the feedthrough collection still needs to be repeated.
            arduino.write(b'2\n') #Strobe LEDs.
            restock = restockF() #Calls the restock feedthrough function.
            while restock == 0: #Waits for user to input proper response.
                restock = restockF() #BUGFIX: was `restock == restockF()` (comparison, not assignment).
            outcome = place(coords, sec, 1) #Calls the placement function again with the feedthrough flag.
            if outcome == 'FRepeat': #If the feedthrough collection still needs to be repeated.
                arduino.write(b'2\n') #Strobes LEDs.
                fail = AssemFail() #Assembly fail function called.
                while fail == 0: #Waits for user to input proper response.
                    fail = AssemFail()
                complete() #Completes the assembly.
    percentDisp() #Calls the percentage display function.
def assem1():
    #Function organises the placement for the entire first assembly bed.
    #Each row places components left to right (X += 10 between placements); between
    #rows the gripper returns in X (X -= 55) and advances in Y by alternating
    #8.66/8.67mm offsets — exactly the call sequence of the original unrolled code.
    global X
    global Y
    row_counts = [5, 7, 6, 7, 6, 7, 5] #Placements per row, front row first.
    first_placement = True #The very first placement uses a longer distribution delay (10s).
    for row, count in enumerate(row_counts):
        for col in range(count):
            if col > 0:
                X += 10 #10mm pitch between placements within a row.
            tManage(X, Y, 10 if first_placement else 8)
            first_placement = False
        if row < len(row_counts) - 1: #Move to the start of the next row.
            X -= 55 #Next row start distance in X axis.
            Y += 8.66 if row % 2 == 0 else 8.67 #Alternating row pitch in Y axis.
def assem2():
    #Function organises the placement for the entire second assembly bed.
    #Operation is the same as for assem1(), but every placement uses an 8s delay.
    global X
    global Y
    X -= 30 #Moves to the starting location of assembly bed 2 from the end location of assembly bed 1.
    Y += 44.34
    row_counts = [5, 7, 6, 7, 6, 7, 5] #Placements per row, front row first.
    for row, count in enumerate(row_counts):
        for col in range(count):
            if col > 0:
                X += 10 #10mm pitch between placements within a row.
            tManage(X, Y, 8)
        if row < len(row_counts) - 1: #Move to the start of the next row.
            X -= 55
            Y += 8.66 if row % 2 == 0 else 8.67 #Alternating row pitch in Y axis.
def assem3():
    #Function organises the placement for the entire third assembly bed.
    #Operation is the same as for assem1(), but every placement uses an 8s delay.
    global X
    global Y
    X += 66.3 #Moves to the starting location of assembly bed 3.
    Y -= 52
    row_counts = [5, 7, 6, 7, 6, 7, 5] #Placements per row, front row first.
    for row, count in enumerate(row_counts):
        for col in range(count):
            if col > 0:
                X += 10 #10mm pitch between placements within a row.
            tManage(X, Y, 8)
        if row < len(row_counts) - 1: #Move to the start of the next row.
            X -= 55
            Y += 8.66 if row % 2 == 0 else 8.67 #Alternating row pitch in Y axis.
def assem4():
    #Function organises the placement for the entire fourth assembly bed.
    #Operation is the same as for assem1(), but every placement uses an 8s delay.
    global X
    global Y
    X -= 30 #Moves to the starting location of assembly bed 4.
    Y -= 148.34
    row_counts = [5, 7, 6, 7, 6, 7, 5] #Placements per row, front row first.
    for row, count in enumerate(row_counts):
        for col in range(count):
            if col > 0:
                X += 10 #10mm pitch between placements within a row.
            tManage(X, Y, 8)
        if row < len(row_counts) - 1: #Move to the start of the next row.
            X -= 55
            Y += 8.66 if row % 2 == 0 else 8.67 #Alternating row pitch in Y axis.
def calMove(coords):
    #Moves to the required coordinates specified during calibration mode - Takes the coordinates as an input.
    #`coords` is a G-code coordinate fragment such as 'X62 Y12'.
    printer.send_now('G1 ' + coords) #Moves to the coordinates sent.
    printer.send_now('G1 Z23') #Moves to 23mm in the Z axis (calibration inspection height).
    time.sleep(1) #Gives the printer time to complete the move.
def calibrate():
    #Calibration function. Picks up a feedthrough and places it at the starting coordinates for user to determine if starting coordinates need to be changed.
    #Adjustments are clamped: X to [60, 65]mm and Y to [10, 15]mm. Xadj()/Yadj()
    #return 'y' when done, 'i' on invalid input, or a numeric coordinate.
    global X
    global Y
    Xcoord = str(X)
    Ycoord = str(Y)
    coords = 'X' + Xcoord + ' ' + 'Y' + Ycoord #G-code formatted starting coordinates.
    inserted = feedIns() #Calls insert feedthrough function.
    while inserted == 0: #Waits for proper response.
        inserted = feedIns()
    print(" Retrieving Feedthrough...")
    printer.startprint(getComponent) #Picks up feedthrough.
    while printer.printing: #Blocks until the pickup routine finishes.
        time.sleep(1)
    time.sleep(2)
    det = pickup('feedthrough')
    if det == "FSizeErr" or det == "correct": #If a feedthrough has been properly picked up.
        calMove(coords) #Moves to starting coordinates.
        time.sleep(4)
        print(" Current X starting coord is: " + Xcoord + "mm") #Prints current X coordinate.
        insert = Xadj() #Reads adjusted amount.
        while insert == 'i': #Waits for proper response.
            insert = Xadj()
        while insert != 'y': #While there are adjustments being made.
            if insert <= 60: #Sets boundaries for the adjustment.
                insert = 60
            elif insert >= 65:
                insert = 65
            X = insert #Sets X starting value to input value.
            Xcoord = str(X)
            coords = 'X' + Xcoord + ' ' + 'Y' + Ycoord
            calMove(coords) #Moves gripper to new value.
            print(" Current X starting coord is: " + Xcoord + "mm")
            insert = Xadj() #Confirms if further adjustments are still to be made.
            while insert == 'i':
                insert = Xadj()
        print(" Current Y starting coord is: " + Ycoord + "mm") #Performs the same operation for the Y axis as for the X.
        insert = Yadj()
        while insert == 'i':
            insert = Yadj()
        while insert != 'y':
            if insert <= 10:
                insert = 10
            elif insert >= 15:
                insert = 15
            Y = insert
            Ycoord = str(Y)
            coords = 'X' + Xcoord + ' ' + 'Y' + Ycoord
            calMove(coords)
            print(" Current Y starting coord is: " + Ycoord + "mm")
            insert = Yadj()
            while insert == 'i':
                insert = Yadj()
        printer.send_now('G1 Z30') #Moves the gripper back up to 30mm in the Z axis.
        time.sleep(3)
        FDrop(1) #Throws the feedthrough away.
        arduino.write(b'3\n') #Opens the feedthrough gripper.
        time.sleep(1)
        printer.send_now('G1 X140') #Moves the gripper out of the way of the screen.
        time.sleep(3)
        print(" Calibration complete.")
        startCoords.update({'X': X, 'Y': Y}) #Sets the starting location.
        #Persists the starting location so future runs reuse the calibrated coords.
        outfile = open(filename, 'wb')
        pickle.dump(startCoords,outfile)
        outfile.close()
    else:
        print(" ERROR: Calibration failure, continuing assembly.") #If there was an issue with the feedthrough distribution or pickup, exits calibration.
        return
#---- Main assembly start-up sequence ----
time.sleep(3)
arduino.write(b'2\n') #Strobes LEDs
print("\nPlease do not load assembly components until instructed.\n")
numberAssem = start() #Prompts user for number of assemblies to be completed.
while numberAssem == 0: #Waits until a valid number is entered.
    numberAssem = start()
time.sleep(1)
printer.startprint(home) #Homes printer.
while printer.printing: #Blocks until homing has finished.
    time.sleep(1)
printer.send_now('G1 F1600 Z30') #Sets the feed rate and lifts to 30mm in Z.
time.sleep(3)
printer.send_now('G1 X140 Y20 Z30') #Moves grippers out of the way.
time.sleep(8)
arduino.write(b'6\n') #Opens preform grippers.
time.sleep(8)
arduino.write(b'2\n') #Strobes LEDs.
calVal = calib() #Checks if the user wishes to enter calibration mode.
while calVal == 0:
    calVal = calib()
if calVal == 1:
    calibrate()
arduino.write(b'2\n')
ready = insert() #Prompts the user to insert feedthroughs and preforms.
while ready == 0:
    ready = insert()
assem1() #Calls the assembly functions subject to the number of assemblies specified by the user.
if numberAssem > 1:
    assem2()
if numberAssem > 2:
    assem3()
if numberAssem > 3:
    assem4()
complete() #Once complete, finished assembly process.
|
import math

# Read the two legs of a right triangle and print its hypotenuse (pt-BR prompts).
leg_opposite = float(input('Digite o cateto oposto:'))
leg_adjacent = float(input('Digite o cateto adjacente'))
hypotenuse = math.hypot(leg_opposite, leg_adjacent)
print('A hipotenusa é: {:.2f}'.format(hypotenuse))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
sys.path.append("/SysCollect/class/config")
import osquery
class SysQuery(object):
    """Thin wrapper around a spawned osquery instance for common system queries."""

    def __init__(self):
        #Spawn a standalone osquery instance and open the Thrift connection.
        #BUGFIX: the original did `return self.instance.open()`; __init__ must
        #return None, so any non-None result from open() would raise TypeError.
        self.instance = osquery.SpawnInstance()
        self.instance.open()

    def _run(self, query):
        """Run *query* against the osquery instance and return the result rows."""
        result = self.instance.client.query(query)
        return result.response

    def getinterface(self):
        """Return all rows of the interface_addresses table."""
        return self._run("SELECT * FROM interface_addresses")

    def getifacedetail(self):
        """Return all rows of the interface_details table."""
        return self._run("SELECT * FROM interface_details")

    def kernelinfo(self):
        """Return all rows of the kernel_info table."""
        return self._run("SELECT * FROM kernel_info")
import matplotlib.pyplot as plp

# Slice sizes and their matching labels for the pie chart.
slice_sizes = [20, 71, 26]
slice_labels = ["Me", "All", "SS"]

plp.figure(figsize=(3, 3))  # Size of the plot in inches
plp.pie(slice_sizes, labels=slice_labels)
plp.title("Pie-Chart")
plp.suptitle("Title")
plp.show()
import turtle as te
from bs4 import BeautifulSoup
import argparse
import sys
import numpy as np
import cv2
import os
from win32.win32api import GetSystemMetrics
WriteStep = 15  # Number of sampling steps per Bezier curve
Speed = 1000  # Turtle drawing speed
Width = 600  # Canvas width (pixels)
Height = 600  # Canvas height (pixels)
Xh = 0  # X offset from last control point to curve end, set by Curveto/Curveto_r
Yh = 0  # Y offset from last control point to curve end, set by Curveto/Curveto_r
scale = (1, 1)  # (x, y) scale factors parsed from the SVG `transform` attribute
first = True  # True until the turtle window has been set up once (see drawSVG)
K = 32  # Default number of colors for k-means quantisation (overridden by -c)
def Bezier(p1, p2, t):  # First-order (linear) Bezier interpolation
    """Linearly interpolate between p1 and p2 at parameter t in [0, 1]."""
    return p2 * t + p1 * (1 - t)
def Bezier_2(x1, y1, x2, y2, x3, y3):  # Second-order (quadratic) Bezier curve
    """Draw a quadratic Bezier defined by the three control points, sampled WriteStep+1 times."""
    te.goto(x1, y1)
    te.pendown()
    for step in range(WriteStep + 1):
        t = step / WriteStep
        px = Bezier(Bezier(x1, x2, t), Bezier(x2, x3, t), t)
        py = Bezier(Bezier(y1, y2, t), Bezier(y2, y3, t), t)
        te.goto(px, py)
    te.penup()
def Bezier_3(x1, y1, x2, y2, x3, y3, x4, y4):  # Third-order (cubic) Bezier curve
    """Draw a cubic Bezier; inputs are SVG coordinates, converted here to turtle coordinates."""
    # Coordinate transform: SVG origin is top-left with y pointing down.
    x1, x2, x3, x4 = (-Width / 2 + v for v in (x1, x2, x3, x4))
    y1, y2, y3, y4 = (Height / 2 - v for v in (y1, y2, y3, y4))
    te.goto(x1, y1)
    te.pendown()
    for step in range(WriteStep + 1):
        t = step / WriteStep
        # De Casteljau: blend the two quadratic sub-curves.
        px = Bezier(Bezier(Bezier(x1, x2, t), Bezier(x2, x3, t), t),
                    Bezier(Bezier(x2, x3, t), Bezier(x3, x4, t), t), t)
        py = Bezier(Bezier(Bezier(y1, y2, t), Bezier(y2, y3, t), t),
                    Bezier(Bezier(y2, y3, t), Bezier(y3, y4, t), t), t)
        te.goto(px, py)
    te.penup()
def Moveto(x, y):  # Move (pen up) to SVG coordinate (x, y)
    te.penup()
    te.goto(-Width / 2 + x, Height / 2 - y)
    te.pendown()
def Moveto_r(dx, dy):  # Move (pen up) by a relative offset; dy is inverted because SVG's y-axis points down
    te.penup()
    te.goto(te.xcor() + dx, te.ycor() - dy)
    te.pendown()
def line(x1, y1, x2, y2):  # Draw a straight segment between two SVG-coordinate points
    te.penup()
    te.goto(-Width / 2 + x1, Height / 2 - y1)
    te.pendown()
    te.goto(-Width / 2 + x2, Height / 2 - y2)
    te.penup()
def Lineto_r(dx, dy):  # Draw a line from the current point to the relative offset (dx, dy)
    te.pendown()
    te.goto(te.xcor() + dx, te.ycor() - dy)  # dy inverted: SVG y-axis points down
    te.penup()
def Lineto(x, y):  # Draw a line from the current point to absolute SVG coordinate (x, y)
    te.pendown()
    te.goto(-Width / 2 + x, Height / 2 - y)
    te.penup()
def Curveto(x1, y1, x2, y2, x, y):  # Cubic Bezier from the current point to absolute SVG point (x, y)
    te.penup()
    # Recover the current position in SVG coordinates (inverse of the turtle transform).
    X_now = te.xcor() + Width / 2
    Y_now = Height / 2 - te.ycor()
    Bezier_3(X_now, Y_now, x1, y1, x2, y2, x, y)
    global Xh
    global Yh
    # Offset from the last control point to the end point; presumably kept for
    # SVG smooth-curve ('S'/'s') reflection — not used in this chunk, TODO confirm.
    Xh = x - x2
    Yh = y - y2
def Curveto_r(x1, y1, x2, y2, x, y):  # Cubic Bezier to a point given relative to the current position
    te.penup()
    # Recover the current position in SVG coordinates (inverse of the turtle transform).
    X_now = te.xcor() + Width / 2
    Y_now = Height / 2 - te.ycor()
    Bezier_3(X_now, Y_now, X_now + x1, Y_now + y1,
             X_now + x2, Y_now + y2, X_now + x, Y_now + y)
    global Xh
    global Yh
    # Relative offset from last control point to end point; presumably for
    # smooth-curve reflection — not used in this chunk, TODO confirm.
    Xh = x - x2
    Yh = y - y2
def transform(w_attr):
    """Parse an SVG `transform` attribute; only `scale(x,y)` is handled, stored in the global `scale`."""
    funcs = w_attr.split(' ')
    for func in funcs:
        func_name = func[0: func.find('(')]
        if func_name == 'scale':
            global scale
            # The y factor is negated: SVG's y-axis points down, turtle's points up.
            scale = (float(func[func.find('(') + 1: -1].split(',')[0]),
                     -float(func[func.find('(') + 1: -1].split(',')[1]))
def readPathAttrD(w_attr):
    """Tokenise an SVG path `d` attribute into a stream of command letters and floats.

    Handles the token shapes potrace emits: bare numbers ('100', '-2', '0.5'),
    a command letter fused to a number ('M100' -> 'M', 100.0), a number with a
    trailing letter ('300z' -> 300.0, letter dropped as in the original), and a
    lone command letter ('M' -> 'M').

    BUGFIX vs original: a lone alphabetic token hit `float(i)` and raised
    ValueError, and decimal tokens like '0.5' matched no branch and were
    silently dropped (str.isdigit() is False for '0.5').
    """
    for token in w_attr.split(' '):
        if not token:
            continue  # Collapse runs of spaces produced by the '\n' replacement.
        if token[0].isalpha():
            yield token[0]  # Command letter first...
            if len(token) > 1:
                yield float(token[1:])  # ...then the fused coordinate, if any.
        elif token[-1].isalpha():
            yield float(token[0: -1])  # Trailing command letter dropped (original behavior).
        else:
            yield float(token)  # Plain number, including negatives and decimals.
def drawSVG(filename, w_color):
    """Parse the SVG file produced by potrace and draw its paths with turtle in the given color.

    Handles the path commands potrace emits: M/m (move), C/c (cubic curve),
    L/l (line), plus bare coordinate runs that implicitly repeat the last command.
    """
    global first
    SVGFile = open(filename, 'r')
    SVG = BeautifulSoup(SVGFile.read(), 'lxml')
    # NOTE(review): these assign *local* Height/Width from the SVG header; the
    # module-level Width/Height (600) used by Moveto/Lineto/Curveto are NOT
    # updated — confirm whether `global` was intended here.
    Height = float(SVG.svg.attrs['height'][0: -2])
    Width = float(SVG.svg.attrs['width'][0: -2])
    transform(SVG.g.attrs['transform'])
    if first:
        # Set up the turtle window once, sized to the first SVG drawn.
        te.setup(height=Height, width=Width)
        te.setworldcoordinates(-Width / 2, 300, Width -
                               Width / 2, -Height + 300)
        first = False
    te.tracer(100)
    te.pensize(1)
    te.speed(Speed)
    te.penup()
    te.color(w_color)
    for i in SVG.find_all('path'):
        attr = i.attrs['d'].replace('\n', ' ')
        f = readPathAttrD(attr)
        lastI = ''  # Last explicit curve/line command, for implicit repeats.
        for i in f:
            if i == 'M':
                te.end_fill()
                Moveto(next(f) * scale[0], next(f) * scale[1])
                te.begin_fill()
            elif i == 'm':
                te.end_fill()
                Moveto_r(next(f) * scale[0], next(f) * scale[1])
                te.begin_fill()
            elif i == 'C':
                Curveto(next(f) * scale[0], next(f) * scale[1],
                        next(f) * scale[0], next(f) * scale[1],
                        next(f) * scale[0], next(f) * scale[1])
                lastI = i
            elif i == 'c':
                Curveto_r(next(f) * scale[0], next(f) * scale[1],
                          next(f) * scale[0], next(f) * scale[1],
                          next(f) * scale[0], next(f) * scale[1])
                lastI = i
            elif i == 'L':
                # NOTE(review): lastI is not updated for 'L' (unlike 'l') — an
                # implicit repeat after an absolute L falls through to the
                # previous command; confirm this is intended.
                Lineto(next(f) * scale[0], next(f) * scale[1])
            elif i == 'l':
                Lineto_r(next(f) * scale[0], next(f) * scale[1])
                lastI = i
            elif lastI == 'C':
                # Bare number: implicit repeat of the previous C command.
                Curveto(i * scale[0], next(f) * scale[1],
                        next(f) * scale[0], next(f) * scale[1],
                        next(f) * scale[0], next(f) * scale[1])
            elif lastI == 'c':
                Curveto_r(i * scale[0], next(f) * scale[1],
                          next(f) * scale[0], next(f) * scale[1],
                          next(f) * scale[0], next(f) * scale[1])
            elif lastI == 'L':
                Lineto(i * scale[0], next(f) * scale[1])
            elif lastI == 'l':
                Lineto_r(i * scale[0], next(f) * scale[1])
    te.penup()
    te.hideturtle()
    te.update()
    SVGFile.close()
def drawBitmap(w_image):
    """Quantise the image to K colors with k-means, trace each color layer to SVG via potrace, and draw it."""
    print('Reducing the colors...')
    Z = w_image.reshape((-1, 3))
    # convert to np.float32
    Z = np.float32(Z)
    # define criteria, number of clusters(K) and apply kmeans()
    criteria = (cv2.TERM_CRITERIA_EPS, 10, 1.0)
    global K
    ret, label, center = cv2.kmeans(
        Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    # Now convert back into uint8, and make original image
    center = np.uint8(center)
    res = center[label.flatten()]
    res = res.reshape(w_image.shape)
    no = 1
    for i in center:
        # Text progress bar over the K color layers.
        sys.stdout.write('\rDrawing: %.2f%% [' % (
            no / K * 100) + '#' * no + ' ' * (K - no) + ']')
        no += 1
        # Mask the pixels of this color and invert for potrace (black = shape).
        res2 = cv2.inRange(res, i, i)
        res2 = cv2.bitwise_not(res2)
        cv2.imwrite('.tmp.bmp', res2)
        # External dependency: potrace.exe must be on PATH (Windows-only as written).
        os.system('potrace.exe .tmp.bmp -s --flat')
        # print(i)
        # Colors come out of OpenCV as BGR, hence the i[2], i[1], i[0] order.
        drawSVG('.tmp.svg', '#%02x%02x%02x' % (i[2], i[1], i[0]))
    os.remove('.tmp.bmp')
    os.remove('.tmp.svg')
    print('\n\rFinished, close the window to exit.')
    te.done()
if __name__ == '__main__':
    #Command-line entry point: parse arguments, validate the input file, then draw it.
    parser = argparse.ArgumentParser(
        description="Convert an bitmap to SVG and use turtle libray to draw it.")
    parser.add_argument('filename', type=str,
                        help='The file(*.jpg, *.png, *.bmp) name of the file you want to convert.')
    parser.add_argument(
        "-c", "--color", help="How many colors you want to draw.(If the number is too large that the program may be very slow.)", type=int, default=32)
    args = parser.parse_args()
    K = args.color
    #Existence check: open and immediately close the file.
    #BUGFIX: the original left this handle open for the whole run (resource leak).
    try:
        with open(args.filename, mode='r'):
            pass
    except FileNotFoundError:
        print(__file__ + ': error: The file is not exists.')
        quit()
    if os.path.splitext(args.filename)[1].lower() not in ['.jpg', '.bmp', '.png']:
        print(__file__ + ': error: The file is not a bitmap file.')
        quit()
    bitmap = cv2.imread(args.filename)
    #Scale the image down if it is taller than the screen, leaving a 50px margin.
    if bitmap.shape[0] > GetSystemMetrics(1):
        bitmap = cv2.resize(bitmap, (int(bitmap.shape[1] * (
            (GetSystemMetrics(1) - 50) / bitmap.shape[0])), GetSystemMetrics(1) - 50))
    drawBitmap(bitmap)
|
from collections import defaultdict
import graph_handle
STR_PADDING_SIZE = 30  # Column width for sequence ids in the additional-data report
COUNT_STR_PADDING_SIZE = 10  # Column width for HSP/MSP counts in the report
SCORES_FILE_NAME = 'scores_genomes.txt'  # Output file for the final pairwise scores
ADDITIONAL_FILE_NAME = 'additional_data.txt'  # Output file for per-pair statistics
def parse_args(args):
    """Parse command-line arguments: args[1] is the scoring-matrix path, args[2:] are sequence files.

    Returns a (sequences, scoring_matrix) tuple where sequences maps id -> sequence.
    Raises ValueError when fewer than three arguments were supplied.
    """
    if len(args) - 1 < 3:
        raise ValueError('One or more arguments are missing')
    scoring_matrix = read_scoring_matrix(args[1])
    sequences = dict(read_seq_file(seq_path) for seq_path in args[2:])
    return sequences, scoring_matrix
def build_sequences_dict(sequences, K):
    """Build a k-mer index per sequence: {sequence_id: {kmer: [positions]}}."""
    return {seq_id: map_sequence(seq, K) for seq_id, seq in sequences.items()}
def read_scoring_matrix(path):
    """Read a whitespace-separated scoring matrix into a {(row_char, col_char): score} dict.

    The first line lists the column characters; each following line starts with
    a row character followed by one integer score per column.
    """
    matrix = {}
    with open(path) as handle:
        columns = handle.readline().strip().split()
        for row in handle:
            row_char, *row_scores = row.strip().split()
            for idx, value in enumerate(row_scores):
                matrix[(row_char, columns[idx])] = int(value)
    return matrix
def read_seq_file(seq_file):
    """Read a FASTA-style file: a line starting with '>' gives the id, other lines are concatenated into the sequence."""
    seq_id = ''
    chunks = []
    with open(seq_file) as handle:
        for raw_line in handle:
            if raw_line.startswith('>'):
                seq_id = raw_line.strip()[1:]
            else:
                chunks.append(raw_line.strip())
    return seq_id, ''.join(chunks)
def map_sequence(sequence, k):
    """Index every k-mer of *sequence*: {kmer: [start positions, in ascending order]}."""
    kmer_index = defaultdict(list)
    for start in range(len(sequence) - k + 1):
        kmer_index[sequence[start:start + k]].append(start)
    return kmer_index
def align(seq1, seq2, scoring_matrix):
    """Score an ungapped alignment of two equal-length sequences by summing per-position scores.

    Raises ValueError when the sequences differ in length.
    """
    if len(seq1) != len(seq2):
        raise ValueError("Sequences must be with the same length")
    return sum(scoring_matrix[a, b] for a, b in zip(seq1, seq2))
def find_neighbors(kmer, scoring_matrix, alphabet, T):
    """Return every word over *alphabet* (same length as kmer) whose score against kmer is at least T."""
    neighbors = []
    self_score = align(kmer, kmer, scoring_matrix)
    # Only recurse when the kmer's own score clears the threshold (original pruning rule).
    if self_score >= T:
        find_neighbors_rec(kmer, kmer, 0, self_score, alphabet, neighbors, scoring_matrix, T)
    return neighbors
def find_neighbors_rec(kmer, neighbor, pos, curr_score, alphabet, neighbors, scoring_matrix, T):
    """Recursively substitute characters position by position, pruning branches whose score drops below T.

    curr_score is the alignment score of *neighbor* against *kmer* so far; once
    pos reaches the end of the kmer, *neighbor* is collected into *neighbors*.
    """
    if len(kmer) == pos:
        neighbors.append(neighbor)
    else:
        for char in alphabet:
            # Score after replacing position `pos` with `char`: remove the
            # identity contribution at `pos` and add the substitution score.
            score = curr_score - scoring_matrix[kmer[pos], kmer[pos]] + scoring_matrix[kmer[pos], char]
            if score >= T:
                neighbor = list(neighbor)
                neighbor[pos] = char
                neighbor = "".join(neighbor)
                find_neighbors_rec(kmer, neighbor, pos + 1, score, alphabet, neighbors, scoring_matrix, T)
def create_file_for_final_scores(msps_dict, scores_list):
    """Write one tab-separated line per sequence pair: seq1_id, seq2_id, final score.

    msps_dict keys are (seq1_id, seq2_id) tuples; scores_list holds the scores
    in the same iteration order as the dict.
    """
    #BUGFIX/robustness: the original used a bare open()/close() pair, leaking the
    #handle if a write raised; `with` guarantees close. zip replaces the manual
    #parallel counter.
    with open(SCORES_FILE_NAME, "w") as f:
        for (seq1, seq2), score in zip(msps_dict.keys(), scores_list):
            f.write(seq1 + "\t" + seq2 + "\t" + str(score) + "\n")
def write_additional_data(seq1, seq2, hsps_count, msps_count):
    """Append one padded summary line (sequence ids, HSP and MSP counts) to the additional-data report."""
    #BUGFIX: the original opened the file and never closed it — leaked handle and
    #potentially unflushed output; `with` guarantees close/flush.
    with open(ADDITIONAL_FILE_NAME, "a") as fp:
        fp.write(('Sequence1 id: {} Sequence2 id: {} HSPs found: {} MSPs found: {}\n'
                  .format(seq1.ljust(STR_PADDING_SIZE), seq2.ljust(STR_PADDING_SIZE),
                          str(hsps_count).ljust(COUNT_STR_PADDING_SIZE), str(msps_count).ljust(COUNT_STR_PADDING_SIZE))))
def calculate_scores(msps):
    """For each pair's MSP set, build its graph, find the best path, and collect that path's score."""
    scores = []
    for msp_pair in msps.values():
        pair_graph = graph_handle.create_graph(msp_pair)
        best_path = graph_handle.find_path(pair_graph)
        scores.append(graph_handle.compute_pairwise_score(best_path, pair_graph))
    return scores
|
# coding=utf8
from lib.corp import Corp
import re, sys
class JobcfwCorp(Corp):
    """Scraper for corporate contact data on cfw.cn, built on the shared Corp base class."""

    def __init__(self):
        # Site-specific configuration: listing/detail URL templates, extraction
        # regexes for contact person / phone number / address, and fetch options.
        config = {
            'info_from': '中国服装人才网',
            'corplist_url':
                'http://www.cfw.cn/search/zhiwei.html?request_edu=0&request_experience=0&keywords=0&zhaopin_bigname=0&edittime=0&hidJobArea=0&hidFuntype=0&province2=0&keywordtype=0&invite_salary=0&mpage=50&page={0}',
            'corp_url': 'http://www.cfw.cn/{corp_code}/',
            'corplist_reg': re.compile(r'class="cname"><A title=\'(?P<name>[^\']+)\' href="http://www\.cfw\.cn/(?P<corp_code>[^/]+)', re.S),
            'corp_regs': [
                re.compile(r'<th width="80">联 系 人:</th>[^>]+>(?P<contact_person>[^<]+)', re.S),
                re.compile(r'<th width="80">联系电话:</th>[^>]+>(?P<contact_tel_no>[^<]+)', re.S),
                re.compile(r'<th width="80">通讯地址:</th>[^>]+>(?P<addr>[^<]+)', re.S),
            ],
            'commit_each_times': 30,
            'has_cookie': True,
            'charset': 'gbk',
        }
        super().__init__(**config)
        self.pages = 100  # Number of listing pages to crawl.
        # Log in before scraping; abort the whole run if authentication fails.
        # NOTE(review): credentials are hard-coded here — move them to config/env.
        if not self.opener.login(
            login_url='http://www.cfw.cn/personal/login.asp',
            login_data='url=&method=loginin&username=gogo88er&password=123456',
            check_url='http://www.cfw.cn/personal/',
            check_data='叶问',
            encoding='gbk'
        ):
            sys.exit()
        print('登录成功!')

    def get_next_page_url(self):
        """Yield the listing-page URL for each page from 1 to self.pages."""
        return (self.corplist_url.format(page) for page in range(1, self.pages + 1))
|
import unittest
import numpy as np
from pathlib import Path
from pdb2sql import pdb2sql
from pdb2sql import transform
from . import pdb_folder
class TestTools(unittest.TestCase):
    """Unit tests for pdb2sql.transform using a dummy PDB of six unit-axis atoms (±x, ±y, ±z)."""

    def setUp(self):
        # Fresh database and raw xyz array for every test.
        self.db = pdb2sql(Path(pdb_folder, 'dummy_transform.pdb'))
        self.xyz = self.db.get('x,y,z')

    def test_get_xyz(self):
        """Verify getting xyz from sql."""
        result = transform._get_xyz(self.db)
        target = np.array([[1., 0., 0.], [-1., 0., 0.],
                           [0., 1., 0.], [0., -1., 0.],
                           [0., 0., 1.], [0., 0., -1.]])
        np.testing.assert_equal(result, target)

    def test_translation(self):
        """Verify sql translation."""
        trans_vec = np.array([1, 1, 1])
        target = np.array([[2., 1., 1.], [0., 1., 1.],
                           [1., 2., 1.], [1., 0., 1.],
                           [1., 1., 2.], [1., 1., 0.]])
        transform.translation(self.db, trans_vec)
        result = self.db.get('x,y,z')
        np.testing.assert_almost_equal(result, target)

    def test_rot_axis(self):
        """Verify sql rotation using axis and angle."""
        # rotate pi around x axis
        angle = np.pi
        axis = (1., 0., 0.)
        target = np.array([[1., 0., 0.], [-1., 0., 0.],
                           [0., -1., 0.], [0., 1., 0.],
                           [0., 0., -1.], [0., 0., 1.]])
        transform.rot_axis(self.db, axis, angle)
        result = self.db.get('x,y,z')
        np.testing.assert_almost_equal(result, target)

    def test_rot_xyz_around_axis(self):
        """Verify xyz values rotation using axis and angle."""
        # rotate pi around x, y and z axis
        angle = np.pi
        axes_xyz = [(1., 0., 0.),
                    (0., 1., 0.),
                    (0., 0., 1.)]
        targets = [np.array([[1., 0., 0.], [-1., 0., 0.],
                             [0., -1., 0.], [0., 1., 0.],
                             [0., 0., -1.], [0., 0., 1.]]),
                   np.array([[-1., 0., 0.], [1., 0., 0.],
                             [0., 1., 0.], [0., -1., 0.],
                             [0., 0., -1.], [0., 0., 1.]]),
                   np.array([[-1., 0., 0.], [1., 0., 0.],
                             [0., -1., 0.], [0., 1., 0.],
                             [0., 0., 1.], [0., 0., -1.]])]
        for axis, target in zip(axes_xyz, targets):
            with self.subTest(axis=axis, target=target):
                xyz_rot = transform.rot_xyz_around_axis(self.xyz, axis, angle)
                np.testing.assert_almost_equal(xyz_rot, target)

    def test_get_rot_axis_angle(self):
        """Verify generation of random axis and angle."""
        # number of repeats
        n = 1000
        for i in range(n):
            with self.subTest(i=i):
                axis, angle = transform.get_rot_axis_angle()
                # axis vector must be unit vector
                result = axis[0]**2 + axis[1]**2 + axis[2]**2
                target = 1.
                np.testing.assert_almost_equal(result, target)
                # angle in the range [0, 2π)
                self.assertTrue(0. <= angle < 2 * np.pi)

    def test_get_rot_axis_angle_seed(self):
        """Verify specific random seed."""
        seed = 2019
        axis1, angle1 = transform.get_rot_axis_angle(seed)
        axis2, angle2 = transform.get_rot_axis_angle(seed)
        self.assertEqual(axis1, axis2)
        self.assertEqual(angle1, angle2)

    def test_rot_euler(self):
        """Verify sql rotation using Euler angles."""
        # rotate pi around z axis
        alpha, beta, gamma = 0, 0, np.pi
        target = np.array([[-1., 0., 0.], [1., 0., 0.],
                           [0., -1., 0.], [0., 1., 0.],
                           [0., 0., 1.], [0., 0., -1.]])
        transform.rot_euler(self.db, alpha, beta, gamma)
        result = self.db.get('x,y,z')
        np.testing.assert_almost_equal(result, target)

    def test_rotation_euler(self):
        """Verify xyz values rotation using Euler angles."""
        # rotate pi around x, y and z axis
        angles = [(np.pi, 0., 0.),
                  (0., np.pi, 0.),
                  (0., 0., np.pi)]
        targets = [np.array([[1., 0., 0.], [-1., 0., 0.],
                             [0., -1., 0.], [0., 1., 0.],
                             [0., 0., -1.], [0., 0., 1.]]),
                   np.array([[-1., 0., 0.], [1., 0., 0.],
                             [0., 1., 0.], [0., -1., 0.],
                             [0., 0., -1.], [0., 0., 1.]]),
                   np.array([[-1., 0., 0.], [1., 0., 0.],
                             [0., -1., 0.], [0., 1., 0.],
                             [0., 0., 1.], [0., 0., -1.]])]
        for angle, target in zip(angles, targets):
            with self.subTest(angle=angle, target=target):
                result = transform.rotation_euler(
                    self.xyz, angle[0], angle[1], angle[2])
                np.testing.assert_almost_equal(result, target)

    def test_rot_mat(self):
        """Verify sql rotation using rotation matrix."""
        # rotate pi around z-axis
        theta = np.pi
        cosa = np.cos(theta)
        sina = np.sin(theta)
        rot_mat = np.array([[cosa, -sina, 0],
                            [sina, cosa, 0],
                            [0, 0, 1]])
        target = np.array([[-1., 0., 0.], [1., 0., 0.],
                           [0., -1., 0.], [0., 1., 0.],
                           [0., 0., 1.], [0., 0., -1.]])
        transform.rot_mat(self.db, rot_mat)
        result = self.db.get('x,y,z')
        np.testing.assert_almost_equal(result, target)

    def test_rotation_matrix(self):
        """Verify xyz values rotation using rotation matrix."""
        theta = np.pi
        cosa = np.cos(theta)
        sina = np.sin(theta)
        # rotate pi around x, y and z axis
        rot_mats = [np.array([[1, 0, 0], [0, cosa, -sina], [0, sina, cosa]]),
                    np.array([[cosa, 0, sina], [0, 1, 0], [-sina, 0, cosa]]),
                    np.array([[cosa, -sina, 0], [sina, cosa, 0], [0, 0, 1]])]
        targets = [np.array([[1., 0., 0.], [-1., 0., 0.],
                             [0., -1., 0.], [0., 1., 0.],
                             [0., 0., -1.], [0., 0., 1.]]),
                   np.array([[-1., 0., 0.], [1., 0., 0.],
                             [0., 1., 0.], [0., -1., 0.],
                             [0., 0., -1.], [0., 0., 1.]]),
                   np.array([[-1., 0., 0.], [1., 0., 0.],
                             [0., -1., 0.], [0., 1., 0.],
                             [0., 0., 1.], [0., 0., - 1.]])]
        for mat, target in zip(rot_mats, targets):
            with self.subTest(mat=mat, target=target):
                result = transform.rotate(self.xyz, mat)
                np.testing.assert_almost_equal(result, target)

    def test_rotation_matrix_center(self):
        """Verify specific rotation center."""
        # rotate pi around z-axis with rotation center [1,1,1,]
        theta = np.pi
        cosa = np.cos(theta)
        sina = np.sin(theta)
        xyz = np.array([0., 0., 0.])
        rot_mat = np.array([[cosa, -sina, 0], [sina, cosa, 0], [0, 0, 1]])
        # Both ndarray and plain-list centers must be accepted.
        centers = [np.array([1., 1., 1.]), [1., 1., 1.]]
        for center in centers:
            with self.subTest(center=center):
                result = transform.rotate(xyz, rot_mat, center=center)
                target = np.array([2., 2., 0.])
                np.testing.assert_almost_equal(result, target)
if __name__ == "__main__":
    unittest.main()  # Run the test suite when executed directly.
|
from django.conf.urls import url
from . import views

# URL routes for the travel-plan app. Patterns are anchored regexes mapped to
# view functions; details/join/unjoin capture a numeric trip_id keyword argument.
urlpatterns = [
    url(r'^$', views.index),
    url(r'^registration$', views.registration),
    url(r'^processreg$',views.processreg),
    url(r'^processlog$', views.processlog),
    url(r'^success$',views.success),
    url(r'^logout$', views.logout),
    url(r'^home$', views.home),
    url(r'^add$',views.add),
    url(r'^add_travel_plan$', views.add_travel_plan),
    url(r'^details/(?P<trip_id>\d+)$', views.details),
    url(r'^join/(?P<trip_id>\d+)$', views.join),
    url(r'^unjoin/(?P<trip_id>\d+)$', views.unjoin)
]
#!/bin/python
# coding: utf-8
from Database import Database
import yaml
class SurveyProperties:
    """Loads MySQL connection settings from config.yml and exposes survey lookups."""

    def __init__(self):
        #Read the database credentials from the YAML config file.
        with open("config.yml", 'r') as ymlfile:
            #SECURITY/BUGFIX: yaml.load without an explicit Loader is deprecated
            #and can construct arbitrary Python objects from the file;
            #safe_load builds only plain data types, which is all a config needs.
            cfg = yaml.safe_load(ymlfile)
        sql = cfg["mysql"]
        #These locals mirror the (currently disabled) Database wiring below.
        db = sql['database']
        hostname = sql["hostname"]
        username = sql["username"]
        password = sql["password"]
        #self._database = Database(db, hostname, username, password)

    def get_survey_name(self, surveyId):
        """Return the survey name for *surveyId* (database lookup currently stubbed out)."""
        #surveyName = self._database.getSurveyName(surveyId)
        #return surveyName
        return("weather survey")
|
# -*- python -*-
from flask import Flask, render_template, request, redirect
# NOTE: Python 2 code (bare `print` statements) — do not run under Python 3 as-is.
app = Flask( __name__ )

@app.route( '/' )
def index():
    """Render the landing page."""
    return( render_template( "index.html" ) )

@app.route( '/process', methods=['POST'] )
def process():
    """Handle the form POST: log the submitted name, then redirect back to the index."""
    print "Got /process POST data"
    name = request.form['name']
    print "name:", name
    return( redirect( '/' ) )

app.run( debug=True )  # Dev server only; debug=True must not be used in production.
|
def test(a):
if a:
return 0
return a + 5
def main():
    """Exercise test() once; returns 1 when b is truthy (it never is here), else 0."""
    test(5)
    b = 0
    return 1 if b else 0

main()
# import os
# from selenium import webdriver
# from selenium.webdriver.common.keys import Keys
#
# # get the path of ChromeDriverServer
# dir = os.path.dirname(__file__)
# chrome_driver_path = dir + "\chromedriver.exe"
#
# # create a new Chrome session
# driver = webdriver.Chrome(chrome_driver_path)
# driver.implicitly_wait(30)
# driver.maximize_window()
#
# # navigate to the application home page
# driver.get("http://www.google.com")
#
# # get the search textbox
# search_field = driver.find_element_by_name("q")
#
# # enter search keyword and submit
# search_field.send_keys("Selenium WebDriver Interview questions")
# search_field.submit()
#
# # get the list of elements which are displayed after the search
# # currently on result page using find_elements_by_class_name method
# lists = driver.find_elements_by_class_name("r")
#
# # get the number of elements found
# print("Found " + str(len(lists)) + " searches:")
#
# # iterate through each element and print the text that is
# # name of the search
#
# i=0
# for listitem in lists:
# print(listitem)
# i=i+1
# if(i>10):
# break
#
# # close the browser window
# driver.quit()
from selenium import webdriver
from selenium.webdriver.common.keys import Keys

# Start WebDriver
driver = webdriver.Chrome()
# driver.implicitly_wait(30)
driver.maximize_window()
print("Opening Browser...")
# Get Link
driver.get("http://128.199.150.205/#")
# Detect Login and Password textbox
field_name = driver.find_element_by_id("id_username")
field_pass = driver.find_element_by_id("id_password")
# Sign In
# SECURITY NOTE(review): credentials are hard-coded in source — move to env vars/secrets.
field_name.send_keys("admin")
field_pass.send_keys("4321dcba")
field_name.submit()
# Detect String
# Read the first <h1>'s inner HTML to confirm the post-login page rendered.
string = driver.find_element_by_css_selector("h1").get_attribute("innerHTML")
print(string)
# driver.get("http://www.python.org")
# assert "Python" in driver.title
# elem = driver.find_element_by_name("q")
# elem.clear()
# elem.send_keys("pycon")
# elem.send_keys(Keys.RETURN)
# assert "No results found." not in driver.page_source
# driver.close()
# import time
# from selenium import webdriver
#
# driver = webdriver.Chrome()
# time.sleep(5)
# driver.quit()
# import unittest
# from selenium import webdriver
# from selenium.webdriver.common.keys import Keys
#
# class PythonOrgSearch(unittest.TestCase):
#
# def setUp(self):
# self.driver = webdriver.Chrome()
#
# def test_search_in_python_org(self):
# driver = self.driver
# driver.get("http://www.python.org")
# self.assertIn("Python", driver.title)
# elem = driver.find_element_by_name("q")
# elem.send_keys("pycon")
# elem.send_keys(Keys.RETURN)
# assert "No results found." not in driver.page_source
#
#
# def tearDown(self):
# self.driver.close()
#
# if __name__ == "__main__":
# unittest.main()
|
import sys
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
sys.path.append("../")
from nadl.core.tensor import Tensor
import unittest
import numpy as np
import tensorflow as tf
tensor_fr1 = np.random.randn(5,5).astype('float32')
tensor_fr2 = np.random.randn(5,3).astype('float32')
def test_nadl():
    """Run matmul forward + backward through nadl; return (out, grad1, grad2)."""
    left = Tensor(data=tensor_fr1)
    right = Tensor(data=tensor_fr2)
    product = left.matmul(right)
    product.backward()
    return product.data, left.grad, right.grad
def test_tf():
    """Compute the same matmul and gradients with TensorFlow for comparison."""
    a = tf.Variable(tensor_fr1)
    b = tf.Variable(tensor_fr2)
    with tf.GradientTape(persistent=True) as tape:
        tape.watch(a)
        tape.watch(b)
        product = tf.matmul(a, b)
    grad_a = tape.gradient(product, a)
    grad_b = tape.gradient(product, b)
    return product.numpy(), grad_a.numpy(), grad_b.numpy()
class TestNADL(unittest.TestCase):
    """Checks that nadl's matmul forward/backward agree with TensorFlow."""

    def test_tensor_ops(self):
        # BUG FIX: the original method was `def tensor_tests():` — no `self`
        # and no `test_` prefix, so unittest discovery silently skipped it
        # and the comparison never ran.
        te_out, te_g1, te_g2 = test_nadl()
        tf_out, tf_g1, tf_g2 = test_tf()
        np.testing.assert_allclose(te_out, tf_out, atol=1e-5, err_msg="Outputs not in the range")
        np.testing.assert_allclose(te_g1, tf_g1, atol=1e-5, err_msg="Gradients of T1 not in the range")
        np.testing.assert_allclose(te_g2, tf_g2, atol=1e-5, err_msg="Gradients of T2 not in the range")
if __name__ == "__main__":
unittest.main() |
#! /usr/bin/python3
class Host():
    """A monitored host: identifier, display name, log handle, and its items."""

    def __init__(self, ID, name, log):
        # Store constructor arguments; a host starts with no items attached.
        self.ID = ID
        self.name = name
        self.log = log
        self.item_list = []
|
from django.contrib import admin
from .models import Result
class ResultAdmin(admin.ModelAdmin):
    """Admin list view for Result: one column per ball, sorted by draw period."""
    list_display = (
        'period',
        'red1', 'red2', 'red3', 'red4', 'red5', 'red6',
        'blue',
    )
    ordering = ['period']
# Register your models here.
# BUG FIX: the model was registered without its admin class, so the
# list_display/ordering customizations on ResultAdmin were never used.
admin.site.register(Result, ResultAdmin)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import configparser
import os
import apsw
from multiprocess import Pool, Manager
from py2neo import Graph, Node, authenticate
from device.olt import Zte
from funcy import lmap, merge, partial, walk, lmapcat
from funcy import re_test
# Load neo4j credentials from the local ini file.
config = configparser.ConfigParser()
config.read('config.ini')
neo4j_username = config.get('neo4j', 'username')
neo4j_password = config.get('neo4j', 'password')
# Input OLT list plus per-run log/result output paths.
olts_file, log_file, result_file = (
    'olts.txt', 'result/olt_log.txt', 'result/olt_info.txt')
# NOTE(review): credentials go to a hard-coded IP over plain HTTP — confirm
# this endpoint is still correct/intended before running.
authenticate('61.155.48.36:7474', neo4j_username, neo4j_password)
graph = Graph("http://61.155.48.36:7474/db/data")
def clear_log():
    """Recreate the log and result files as empty so a fresh run starts clean."""
    for path in (log_file, result_file):
        if os.path.exists(path):
            os.remove(path)
        os.mknod(path)
# Pull the management IP of every ZTE OLT node from the graph database.
nodes = graph.cypher.execute(
    'match (n:Olt) where n.company="zte" return n.ip')
olts = [x[0] for x in nodes]
def saveOnus_f(ip):
    """Collect ONU records from one OLT and append them to the result file.

    Appends "ip,port,onuid,loid" lines to result_file on success, and an
    "ip:mark" status line to log_file in every case.
    """
    # NOTE(review): get_onus(ip) presumably returns (mark, rslt, <extra>);
    # the [:-1] slice drops the final element — confirm against Zte.get_onus.
    mark, rslt = Zte.get_onus(ip)[:-1]
    if mark == 'success' and rslt:
        # For each (port, onu_list) entry, prepend (ip, port) onto every
        # onu tuple, yielding flat (ip, port, onuid, loid) records.
        _ff = lambda x: walk(partial(merge, (ip, x[0])), x[1])
        rslt1 = lmapcat(_ff, rslt)
        with open(result_file, 'a') as frslt:
            for record in rslt1:
                ip, port, onuid, loid = record
                frslt.write("{ip},{port},{onuid},{loid}\n"
                    .format(ip=ip, port=port, onuid=onuid, loid=loid))
    with open(log_file, 'a') as flog:
        flog.write("{ip}:{mark}\n".format(ip=ip, mark=mark))
def in_to_DB():
    """Bulk-insert the parsed ONU records from result_file into the onu table.

    Reads comma-separated "ip,port,onuid,loid" lines and loads them into
    db/onu.db via apsw.

    Fixes: the original leaked both the result-file handle and the database
    connection; both are now closed deterministically.
    """
    conn = apsw.Connection('db/onu.db')
    try:
        with open(result_file) as f:
            records = [line.strip().split(',') for line in f]
        cmd = "insert into onu values(?,?,?,?)"
        conn.cursor().executemany(cmd, records)
    finally:
        conn.close()
def del_onu():
    """Delete ONUs listed in e8c_diff.csv from their OLTs over telnet.

    For each (ip, port, onuid, loid) row, the ONU is removed only if the
    running config still shows it with an E8C[PG]24 type and matching loid.
    """
    records = (x.strip().split(',') for x in open('e8c_diff.csv'))
    for ip, port, onuid, loid in records:
        child = Zte.telnet(ip)
        # Verify the ONU is still present before issuing the delete.
        rslt = Zte.do_some(child, 'show run {port}'.format(port=port))
        if re_test(r'onu\s{0}\stype\sE8C[PG]24\sloid\s{1}'.format(onuid, loid),
                   rslt):
            # Enter config mode, select the port, and remove the ONU.
            child.sendline('conf t')
            child.expect('#')
            child.sendline(port)
            child.expect('#')
            child.sendline('no onu {onuid}'.format(onuid=onuid))
            child.expect('#')
        Zte.close(child)
def main():
    """Entry point; the parallel collection pipeline below is currently disabled."""
    # clear_log()
    # pool = Pool(256)
    # list(pool.map(saveOnus_f, olts))
    # pool.close()
    # pool.join()
    pass
if __name__ == "__main__":
main()
|
# For each test case: read two words and print, per position of the first
# word, the forward alphabet distance (a..z with wrap-around) to the second.
for _ in range(int(input())):
    words = input().split()
    first, second = words[0], words[1]
    gaps = [(ord(second[i]) - ord(first[i])) % 26 for i in range(len(first))]
    print('Distances: ' + ' '.join(str(g) for g in gaps))
print("ok")
# NOTE(review): this lambda is created and immediately discarded (never
# bound to a name), ignores its `fct` argument, and closes over a global
# `L` that is not defined here — presumably leftover scratch code mapping
# each value in L to the list of indices where it occurs. Verify intent.
lambda fct : dict( (x,[k for k in range(len(L)) if L[k] == x]) for x in set(L) )
|
# Demonstrate that '\n' inside a literal is a single newline character.
text = 'Hello\nWorld'
print(text)
text = 'X\nY'
print(text)
# len counts the newline as one char: 'X' + '\n' + 'Y' -> 3.
print(len(text))
|
# Determine whether an integer entered by the user is negative, positive, or 0.
num=int(input("Please enter a no. :"))
if num<0:
    print("Number is negative.")
elif num>0:
    # BUG FIX: message read "Number is position" (typo for "positive").
    print("Number is positive")
else:
    print("Number is 0")
|
#!/usr/bin/env python
# Diff Pads for pcbnew using bezier curves as an exit
from pcbnew import ActionPlugin, GetBoard
from .diffpads_dialog import init_diffpads_dialog
class DiffPadsPlugin(ActionPlugin):
    """pcbnew action plugin that draws differential-pair pad exits."""

    def defaults(self):
        # Metadata shown in pcbnew's external-plugins menu.
        self.name = "DiffPads"
        self.category = "Modify PCB"
        self.description = "Creates good-looking differential pads exits"

    def Run(self):
        # Invoked from the menu: open the dialog against the active board.
        init_diffpads_dialog(GetBoard())
|
from ..models import CoffeeShop, Newsletter,Book
from rest_framework import serializers
class CoffeeShopSerializer(serializers.ModelSerializer):
    """Serializes every field of the CoffeeShop model."""
    class Meta:
        model = CoffeeShop
        fields = '__all__'
class NewsletterSerializer(serializers.ModelSerializer):
    """Serializes every field of the Newsletter model."""
    class Meta:
        model = Newsletter
        fields = '__all__'
class BookSerializer(serializers.ModelSerializer):
    """Serializes Book; nests the owning shop on read, takes shop_id on write."""
    # Read path: embed the full CoffeeShop representation.
    shop = CoffeeShopSerializer(many=False,read_only=True)
    # Write path: accept the shop's primary key instead of a nested object.
    shop_id = serializers.IntegerField(write_only=True)
    class Meta:
        model = Book
        fields = '__all__'
|
#!/usr/bin/python
import re
def isPhoneno(num):
    """Find Indian-style phone numbers ("NN-NNNNNNNNNN") in a message and print them.

    Parameters
    ----------
    num : str
        Free-form message text that may contain phone numbers.
    """
    # BUG FIX: the old try/except AttributeError was dead code — findall()
    # returns a (possibly empty) list and never raises AttributeError; that
    # handler was a leftover from an earlier .search().group() version.
    indphnexpp = re.compile(r'\d\d-\d{10}')
    print(indphnexpp.findall(num))
#print('Indian no: +'+tuple(indrslt.groups()))
phno=input('Enter the messsage with a phone in it')
isPhoneno(phno)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 19 10:42:29 2018
功能:实现对每个好友聊天,输出该好友的聊天内容
@author: mynumber
"""
import wxpy
#
bot=wxpy.Bot(cache_path=True)
global my_friend
def set_friend(name='老关'):
    """Search the bot's friends by name/nickname; return the first hit or None."""
    matches = bot.friends().search(name)
    if not matches:
        # Friend not found: report it and fall through (returns None).
        print('未找到好友,请重新输入姓名或昵称...')
        return None
    return matches[0]
def send(friend, message=None):
    """Send `message` to `friend` when the friend object is valid; warn otherwise."""
    if friend is None:
        print('无此好友,请选择真正的好友...')
        return
    friend.send(message)
    print('成功发送...')
#调用如下
my_friend=set_friend('老关')
send(my_friend,'hello')
#可以返回好友的聊天记录
# Register a handler so chat messages from my_friend are echoed to the console.
@bot.register(my_friend)
def print_others(msg):
    # Print every incoming message from the registered friend.
    print(msg)
|
# lets start with reading the excel file
# Cp = a0 + a1*T + a2*T^2 + a3*T^3 + a4*T^4 (J/mol/K)
# Hf0 (kJ/mol)
# Gf0 (kJ/mol)
# dHv (kJ/mol)
import xlrd
from utility.data_utility import readcol
# Locate the thermo-data workbook: try the standard data/ location first,
# then the current directory, then the parent's data directory.
try:
    workbook = xlrd.open_workbook("data/thermo_data.xlsx")
except Exception:
    # BUG FIX: narrowed from a bare `except:`, which also swallowed
    # SystemExit/KeyboardInterrupt; Exception still covers missing-file
    # and xlrd format errors, preserving the fallback behavior.
    try:
        workbook = xlrd.open_workbook("thermo_data.xlsx")
    except Exception:
        workbook = xlrd.open_workbook("../data/thermo_data.xlsx")
sheet = workbook.sheet_by_index(0)
data_name = [sheet.cell_value(0,i) for i in range(sheet.ncols)]
def readonecol(name):
    """Read the named column from the thermo-data sheet via the shared helper."""
    return readcol(sheet, name)
Tb = readonecol('Tb')
Tc = readonecol('Tc')
Hf0 = readonecol('Hf0')
HV = readonecol('HV')
a0 = readonecol('a0')
a1 = readonecol('a1')
a2 = readonecol('a2')
a3 = readonecol('a3')
a4 = readonecol('a4')
T0_f = 298.15 # K
|
import csv;
import os;
import codecs;
import ast;
from contextlib import contextmanager;
import sys, os;
import time;
import numpy as np;
@contextmanager
def suppress_stdout():
    """Silence sys.stdout for the duration of the with-block, then restore it."""
    devnull = open(os.devnull, "w")
    saved_stdout = sys.stdout
    sys.stdout = devnull
    try:
        yield
    finally:
        # Always restore the real stdout, even if the body raised.
        sys.stdout = saved_stdout
        devnull.close()
#This is the directory for the simulation data you want to parse
directory="";
# file(s) to read from
read_file_1 = "";
#simulation parameters
# Number of nodes
NumNodes = 40;
# Number of timesteps recorded:
# this code assumes two timesteps are recorded: pre- and post-shock
end_time = 2;
# Do you want to be verbose?
verbose = 0;
# PEP 8 (E731): named lambdas replaced with def statements — same names,
# same behavior, but with real docstrings and useful tracebacks.
def flatten(list_of_lists):
    """Flatten one level of nesting into a single list."""
    return [item for sublist in list_of_lists for item in sublist]

def roundlist(list_to_round):
    """Round each entry to 5 decimal places; pass empty strings through."""
    return [round(float(item), 5) if item != '' else '' for item in list_to_round]

def heaviside(x):
    """Heaviside step function: 0 below zero, 0.5 at zero, 1 above."""
    return 0.5 if x == 0 else 0 if x < 0 else 1

def heaviside_list(l):
    """Apply heaviside element-wise over a sequence."""
    return [heaviside(x) for x in l]

def pos1_list(l):
    """1 where the value exceeds the 1e-4 tolerance, else 0 (as an ndarray)."""
    return np.array([heaviside(x - 0.0001) for x in l])
# Parse each simulation dump: rows are grouped into per-simulation matrices
# (adjacency for two network layers, pre- and post-shock) and one row of
# summary network statistics per node is written to *_NetworkStats.csv.
for read_file in [read_file_1]:
    NumBatches=0;
    if os.path.isfile(directory+read_file):
        print(read_file);
    else:
        print("ERROR: File not found: %s" %read_file);
        continue;
    r = open(directory+read_file,'r');
    # find number of lines in file
    end_line = len([line for line in r])-1;
    r.close();
    r = open(directory+read_file,'r');
    w = open(directory+read_file[:-4]+'_NetworkStats.csv','w');
    read_csv_file=csv.reader(r, delimiter=',');
    # BUG FIX: csv reader objects lost the .next() method in Python 3;
    # use the builtin next() to skip the header row.
    header=next(read_csv_file);
    write_csv_file=csv.writer(w);
    # header:
    # noise
    # precost
    # [d]*NumNodes
    # [e]*NumNodes
    # preUtility
    # preDegree
    # preFracSpillover
    # postUtility
    # postDegree
    # postFracSpillover
    # shocked
    # fractneighborsshocked
    write_csv_file.writerow(["Noise","Pre-Cost","triangle_benefit","spillover-benefit","pre-shock_utility","pre-shock_degree","pre-shock_frac_spillover","pre-shock_clustering","post-shock_utility","post-shock_degree","post-shock_frac_spillover","post-shock_clustering","Shocked?","Fraction_neighbors_shocked","num_nodes_shocked"]);
    w.flush();
    os.fsync(w);
    first_line = True;
    sim = [];
    stats = [];
    counter = 0;
    for row in read_csv_file:
        counter = counter + 1;
        # A non-empty first column (or the final line) marks the start of a
        # new simulation: process the accumulated rows, then reset `sim`.
        if row[0]!= "" or counter == end_line+1:
            if not first_line:
                #noise,precost,d,e
                # BUG FIX: np.float was removed in NumPy 1.24; the builtin
                # float is the documented equivalent.
                sim = np.array(sim).astype(float);
                # 1 if shocked, else 0
                shocked = sim[:NumNodes**2:NumNodes,0];
                #num nodes shocked
                num_shocked = np.sum(shocked);
                preUtility = sim[:NumNodes**2:NumNodes,2];
                premat1 = np.reshape(sim[:NumNodes**2,1],(NumNodes,NumNodes));
                premat2 = np.reshape(sim[NumNodes**2:2*NumNodes**2,1],(NumNodes,NumNodes));
                predeg1 = np.sum(premat1,axis=0);
                predeg2 = np.sum(premat2,axis=0);
                preDegree = np.add(predeg1,predeg2);
                preSpilloverMat = np.multiply(premat1,premat2);# 0 if no edge, 1 if edge
                preSpillover = np.sum(preSpilloverMat,axis=0); # total number of spillover edge pairs
                # number of spillover pairs divided by (number of ties/2)
                # value is 0 if no spillover/spillover undefined, 1 if all spillover
                preFracSpillover = np.divide(2*preSpillover,preDegree);
                preFracSpillover = np.array([s if k > 0 else 0 for s,k in zip(preFracSpillover,preDegree)]);
                # number of 3-cycles
                pre_num_3cycles_1 = (premat1.dot(premat1.dot(premat1))).diagonal()/2;#divide by 2 so we do not overcount
                pre_num_3cycles_2 = (premat2.dot(premat2.dot(premat2))).diagonal()/2;#divide by 2 so we do not overcount
                # number of triangles possible: k_i (k_i-1)/2
                pre_num_tri_possible_1 = np.multiply(predeg1,predeg1-1)/2;
                # this avoids nans for clustering coefficient
                pre_num_tri_possible_1 = np.array([p if p>0 else 1 for p in pre_num_tri_possible_1]);
                pre_num_tri_possible_2 = np.multiply(predeg2,predeg2-1)/2;
                # this avoids nans for clustering coefficient
                pre_num_tri_possible_2 = np.array([p if p>0 else 1 for p in pre_num_tri_possible_2]);
                # pre-shock local clustering coefficient:
                # by default, is 0 if degree is 1 or 0
                pre_local_cluster1 = np.divide(pre_num_3cycles_1,pre_num_tri_possible_1);
                pre_local_cluster2 = np.divide(pre_num_3cycles_2,pre_num_tri_possible_2);
                pre_local_cluster = (pre_local_cluster1+pre_local_cluster2)/2;
                postUtility = sim[:NumNodes**2:NumNodes,5];
                postmat1 = np.reshape(sim[:NumNodes**2,4],(NumNodes,NumNodes));
                postmat2 = np.reshape(sim[NumNodes**2:2*NumNodes**2,4],(NumNodes,NumNodes));
                postdeg1 = np.sum(postmat1,axis=0);
                postdeg2 = np.sum(postmat2,axis=0);
                postDegree = np.add(postdeg1,postdeg2);
                postSpilloverMat = np.multiply(postmat1,postmat2);
                postSpillover = np.sum(postSpilloverMat,axis=0);
                postFracSpillover = np.divide(2*postSpillover,postDegree);
                postFracSpillover = np.array([s if k > 0 else 0 for s,k in zip(postFracSpillover,postDegree)]);
                # number of 3-cycles
                post_num_3cycles_1 = (postmat1.dot(postmat1.dot(postmat1))).diagonal()/2;#divide by 2 so we do not overcount
                post_num_3cycles_2 = (postmat2.dot(postmat2.dot(postmat2))).diagonal()/2;#divide by 2 so we do not overcount
                # number of triangles possible: k_i (k_i-1)/2
                post_num_tri_possible_1 = np.multiply(postdeg1,postdeg1-1)/2;
                # this avoids nans for clustering coefficient
                post_num_tri_possible_1 = np.array([p if p>0 else 1 for p in post_num_tri_possible_1]);
                post_num_tri_possible_2 = np.multiply(postdeg2,postdeg2-1)/2;
                # this avoids nans for clustering coefficient
                post_num_tri_possible_2 = np.array([p if p>0 else 1 for p in post_num_tri_possible_2]);
                # post-shock local clustering coefficient:
                # by default, is 0 if degree is 1 or 0
                post_local_cluster1 = np.divide(post_num_3cycles_1,post_num_tri_possible_1);
                post_local_cluster2 = np.divide(post_num_3cycles_2,post_num_tri_possible_2);
                post_local_cluster = (post_local_cluster1+post_local_cluster2)/2;
                shockedMat = np.array([shocked]*NumNodes);
                fullpremat = np.add(premat1,premat2); # values = 0 if no edges, 1 is edge in 1 layer, 2 if edges in both layers
                # this says whether neighbor AND shocked
                neighborsshocked = [np.multiply(pos1_list(l),shocked) for l in fullpremat];
                # number of shocked neighbors/number of neighbors
                num_neighbors = np.array([np.sum(pos1_list(pre)) for pre in fullpremat])
                #print(num_neighbors)
                fractneighborsshocked = [np.divide(np.sum(nshock),n) if n > 0 else 0 for nshock,n in zip(neighborsshocked,num_neighbors)]
                # record:
                # [noise]*NumNodes
                # [precost]*NumNodes
                # [d]*NumNodes
                # [e]*NumNodes
                # preUtility
                # preDegree
                # preFracSpillover
                # preClustering
                # postUtility
                # postDegree
                # postFracSpillover
                # postClustering
                # shocked
                # fractneighborsshocked
                write_mat = np.transpose(np.array([[noise]*NumNodes,[precost]*NumNodes,[d]*NumNodes,[e]*NumNodes,preUtility,preDegree,preFracSpillover,pre_local_cluster,postUtility,postDegree,postFracSpillover,post_local_cluster,shocked,fractneighborsshocked,[num_shocked]*NumNodes]));
                for write_line in write_mat:
                    write_csv_file.writerow([str(l) for l in write_line]);
                w.flush();
                os.fsync(w);
            else:
                first_line = False;
            sim = [];
            noise,precost,d,e = row[3:7];
            sim.append([row[10]]+[row[14]]+row[17:20]+row[22:])
        else:
            sim.append([row[10]]+[row[14]]+row[17:20]+row[22:])
    # BUG FIX: the reader/writer handles for this file were never closed.
    r.close();
    w.close();
|
# encoding=utf8
"""
Author: 'jdwang'
Date: 'create date: 2017-01-13'; 'last updated date: 2017-01-13'
Email: '383287471@qq.com'
Describe:
"""
from __future__ import print_function
from regex_extracting.extracting.common.regex_base import RegexBase
__version__ = '1.3'
class PhoneType(RegexBase):
    """Regex extractor for the "phone type" (手机类型) slot of an input sentence."""

    name = '手机类型'

    def __init__(self, sentence):
        # The input sentence to process.
        self.sentence = sentence
        # region 1: initialize the regular expressions
        # Statement (field-name) regexes.
        self.statement_regexs = [
            '手机类型'
        ]
        # Value regexes (2G/3G/4G, smart/feature, themed phones, "anything").
        self.value_regexs = [
            '[234][gG](手机)?',
            '(非?)智能(手?机)?',
            '拍照手机|平板手机|商务手机|三防手机|音乐手机|时尚手机|电视手机|老人手机|儿童手机|女性手机',
            '随意|随便|都可以|其他|别的'
        ]
        # endregion
        # BUG FIX: was `super(PhoneType, self).__int__()` — a typo for
        # __init__ that skipped RegexBase's initialization (and would raise
        # AttributeError unless the base happened to define __int__).
        super(PhoneType, self).__init__()
        self.regex_process()
if __name__ == '__main__':
price = PhoneType('价不高')
for info_meta_data in price.info_meta_data_list:
print('-'*80)
print(str(info_meta_data)) |
# -*- coding: utf-8 -*-
class Solution:
    """LeetCode 848 "Shifting Letters": shifts[i] is applied to S[0..i]."""

    def shiftingLetters(self, S, shifts):
        """Return S with letter i rotated by the suffix sum shifts[i..]."""
        # The first letter receives the full sum; each step drops shifts[i].
        pending = sum(shifts) % 26
        out = []
        for i, ch in enumerate(S):
            out.append(self.shiftLetter(ch, pending))
            pending = (pending - shifts[i]) % 26
        return "".join(out)

    def shiftLetter(self, c, shift):
        """Rotate a lowercase letter forward by `shift` within 'a'..'z'."""
        return chr(ord("a") + (ord(c) - ord("a") + shift) % 26)
if __name__ == "__main__":
solution = Solution()
assert "rpl" == solution.shiftingLetters("abc", [3, 5, 9])
|
from flask import Flask, request, abort
from coin.block import BlockChain
app = Flask(__name__)
app.config["DEBUG"] = True
block_chain: BlockChain = BlockChain()
@app.route("/", methods=["GET"])
def home():
    """Landing page: a minimal HTML banner for the API."""
    return """ <h1>Crypto API</h1>"""
@app.route("/blocks", methods=["GET"])
def blocks():
    """Return the current block chain serialized as JSON."""
    return block_chain.get_json()
@app.route("/make_blocks", methods=["POST"])
def make_blocks():
    """Mine a new block carrying the JSON body's 'data' field (400 if absent)."""
    payload = request.json
    if not payload or "data" not in payload:
        abort(400)
    block_chain.generate_new_block(payload['data'])
    return "success", 201
# Example client call:
#   r = requests.post("http://127.0.0.1:5000/make_blocks", json={"data": "llama"})
@app.route("/replace_chain", methods=["POST"])
def replace_blocks():
    """Replace the whole chain with the one posted as JSON 'chain' (400 if absent)."""
    global block_chain
    payload = request.json
    if not payload or "chain" not in payload:
        abort(400)
    block_chain = BlockChain(json=payload)
    return "Success", 201
if __name__ == "__main__":
app.run()
|
# This problem was asked by Uber.
# Given an array of integers, return a new array such that each element at index i of the new array is the product of all the numbers in the original array except the one at i.
# For example, if our input was [1, 2, 3, 4, 5], the expected output would be [120, 60, 40, 30, 24]. If our input was [3, 2, 1], the expected output would be [2, 3, 6].
# Follow-up: what if you can't use division?
def productExceptSelf(self, nums):
    """Return out where out[i] = product of all nums except nums[i], no division.

    Note: the stray `self` parameter is kept because the demo call below
    passes a placeholder first argument (the function was lifted from a class).
    """
    n = len(nums)
    out = [1] * n
    # Left pass: out[i] accumulates the product of everything before index i.
    for i in range(1, n):
        out[i] = out[i - 1] * nums[i - 1]
    # Right pass: fold in the product of everything after index i.
    suffix = 1
    for i in reversed(range(n)):
        out[i] *= suffix
        suffix *= nums[i]
    return out
# Demo call. FIX: the original bound the list to `input`, shadowing the
# builtin; the first argument is a placeholder for the unused `self`.
values = [1, 2, 3, 4, 5]
print(productExceptSelf(None, values))
#!/usr/bin/python
from pyfaidx import Fasta
import sys
from Bio.Seq import Seq
from Bio import SeqIO
from Bio.Blast.Applications import NcbiblastnCommandline
import subprocess
def return_bioseq_obj(sequence):
    """ Returns biopython sequence object
    """
    # NOTE(review): modern Bio.Seq takes the alphabet as an object (or no
    # alphabet at all); passing the string 'generic_dna' works only on the
    # legacy API this script targets — confirm the pinned Biopython version.
    return Seq(sequence,'generic_dna')
#### LOAD THE REFERENCE FILE ####
mzebra_ref = Fasta(sys.argv[1])
inversion_file = sys.argv[2]
blast_database = sys.argv[3]
# For each inversion interval (scaffold, start, stop): pull 1 kb breakpoint
# flanks from the reference, write them plus the whole scaffold to FASTA,
# and BLAST the flanks against the supplied database.
with open(inversion_file,'r') as INV_FILE:
    for line in INV_FILE:
        line = line.rstrip('\n')
        scaffold = line.split('\t')[0]
        start = int(line.split('\t')[1])
        stop = int(line.split('\t')[2])
        print (scaffold)
        inversion = mzebra_ref[scaffold][start:stop].seq
        inversion_seq = return_bioseq_obj(inversion)
        # NOTE(review): when start == 0 only bp_1 is set (and when
        # stop == len(inversion) only bp_2 is), yet both are written below —
        # the first such record would raise NameError or reuse a stale value
        # from the previous iteration. Confirm intended handling.
        if(start==0):
            inversion_bp_1 = mzebra_ref[scaffold][start:start+1000].seq
        elif(stop == len(inversion)):
            inversion_bp_2 = mzebra_ref[scaffold][stop-1000:stop].seq
        else:
            inversion_bp_1 = mzebra_ref[scaffold][start:start+1000].seq
            inversion_bp_2 = mzebra_ref[scaffold][stop-1000:stop].seq
        with open('temp.fasta','w') as TEMP, open('scaffold.fasta','w') as SCAFFOLD:
            #TEMP.write(">%s_%s_%s\n"%(scaffold,start,stop))
            #TEMP.write(inversion)
            TEMP.write(">bp1\n")
            TEMP.write(inversion_bp_1+"\n")
            TEMP.write(">bp2\n")
            TEMP.write(inversion_bp_2)
            # BUG FIX: start/stop are ints; the original str+int
            # concatenation raised TypeError on every iteration.
            out_file = "%s_%s_%s.xml" % (scaffold, start, stop)
            SCAFFOLD.write(">%s\n"%(scaffold))
            SCAFFOLD.write("%s"%(mzebra_ref[scaffold][0:].seq))
        subprocess.check_output("makeblastdb -in scaffold.fasta -input_type fasta -dbtype nucl -title m_zebra -out temp_mz_v0_db",shell=True)
        subprocess.check_output("blastn -db %s -query temp.fasta -out %s -outfmt 5 -num_threads 2 -evalue 1e-20"%(sys.argv[3],out_file),shell=True)
        #subprocess.check_output("rm temp_mz*",shell=True)
        subprocess.check_output("rm scaffold.fasta",shell=True)
|
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import time
import matplotlib.pyplot as plt
def get_mnist_data():
    """Load MNIST via tf.keras; return flattened train X plus one-hot labels.

    Returns (train_X, train_y_onehot, test_X, test_y_onehot). train_X is
    flattened to (N, 784); test_X keeps its (N, 28, 28) shape and is
    reshaped by the caller.
    """
    mnist = tf.keras.datasets.mnist
    (train_X, train_y), (test_X, test_y) = mnist.load_data()
    train_X = np.array(train_X).reshape(len(train_X), 784)
    # FIX: removed the dead `all_X = np.ones(...)` bias-column code — it was
    # fully overwritten and never returned, and its comment ("prepend 1s")
    # described behavior the function never had.
    # One-hot encode the integer labels via an identity-matrix lookup.
    num_labels = len(np.unique(train_y))
    train_y_eye = np.eye(num_labels)[train_y]
    test_y_eye = np.eye(num_labels)[test_y]
    return train_X, train_y_eye, test_X, test_y_eye
train_x,train_y,test_x,test_y = get_mnist_data()
train_x = train_x.reshape(-1, 28, 28, 1)
test_x = test_x.reshape(-1, 28, 28, 1)
training_iters = 2000
learning_rate = 0.00001
batch_size = 4000
n_classes = 10
x_size = train_x.shape[1]
y_size = train_y.shape[1] # Number of outcomes (3 iris flowers)
# Symbols
X = tf.placeholder("float", shape=[None, 28, 28, 1], name="X")
y = tf.placeholder("float", shape=[None, y_size], name="Y")
def conv2d(x, W, b, strides=1):
    """SAME-padded 2-D convolution followed by bias add and ReLU."""
    conv = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    return tf.nn.relu(tf.nn.bias_add(conv, b))
def maxpool2d(x, k=2):
    """k x k max-pooling with stride k and SAME padding."""
    return tf.nn.max_pool(x, ksize=[1, k, k, 1],
                          strides=[1, k, k, 1], padding='SAME')
def conv_net(x, weights, biases):
    """Build the CNN graph: chained conv+ReLU+pool layers, then an FC head.

    Every 'wcN' entry in `weights` defines one conv layer; 'wd1' and 'out'
    define the fully-connected head — hence `len(weights) - 3` below
    (wc1 is applied explicitly; wd1/out are not conv layers).

    Returns the unnormalized class logits tensor.
    """
    # First conv layer; each 2x2 max-pool halves the spatial resolution.
    first = maxpool2d(conv2d(x, weights['wc1'], biases['bc1']), k=2)
    # Remaining conv layers wc2..wcN, each followed by 2x2 max-pooling.
    layers = [first]
    for i in range(len(weights.keys()) - 3):
        layers.append(maxpool2d(
            conv2d(layers[i], weights['wc' + str(i + 2)],
                   biases['bc' + str(i + 2)]), k=2))
    last_conv = layers.pop()
    # Flatten the last conv output to match the FC layer's input width.
    fc1 = tf.reshape(last_conv, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.nn.relu(tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1']))
    # Linear output layer producing the class logits.
    # FIX: removed the leftover debug `print(last_conv)` and the large
    # blocks of dead commented-out layer code.
    return tf.add(tf.matmul(fc1, weights['out']), biases['out'])
weights = {
'wc1': tf.get_variable('W0', shape=(3,3,1,2), initializer=tf.keras.initializers.he_normal()),
'wc2': tf.get_variable('W1', shape=(3,3,2,4), initializer=tf.keras.initializers.he_normal()),
'wc3': tf.get_variable('W2', shape=(3,3,4,16), initializer=tf.keras.initializers.he_normal()),
'wc4': tf.get_variable('W3', shape=(3,3,16,32), initializer=tf.keras.initializers.he_normal()),
'wc5': tf.get_variable('W4', shape=(3,3,32,64), initializer=tf.keras.initializers.he_normal()),
'wc6': tf.get_variable('W5', shape=(3,3,64,128), initializer=tf.keras.initializers.he_normal()),
'wc7': tf.get_variable('W6', shape=(3,3,128,256), initializer=tf.keras.initializers.he_normal()),
'wc8': tf.get_variable('W7', shape=(3,3,256,256), initializer=tf.keras.initializers.he_normal()),
'wc9': tf.get_variable('W8', shape=(3,3,256,256), initializer=tf.keras.initializers.he_normal()),
'wc10': tf.get_variable('W9', shape=(3,3,256,256), initializer=tf.keras.initializers.he_normal()),
'wc11': tf.get_variable('W10', shape=(3,3,256,256), initializer=tf.keras.initializers.he_normal()),
'wc12': tf.get_variable('W11', shape=(3,3,256,256), initializer=tf.keras.initializers.he_normal()),
'wc13': tf.get_variable('W12', shape=(3,3,256,256), initializer=tf.keras.initializers.he_normal()),
'wc14': tf.get_variable('W13', shape=(3,3,256,256), initializer=tf.keras.initializers.he_normal()),
'wc15': tf.get_variable('W14', shape=(3,3,256,256), initializer=tf.keras.initializers.he_normal()),
'wc16': tf.get_variable('W15', shape=(3,3,256,256), initializer=tf.keras.initializers.he_normal()),
'wc17': tf.get_variable('W16', shape=(3,3,256,256), initializer=tf.keras.initializers.he_normal()),
'wc18': tf.get_variable('W17', shape=(3,3,256,256), initializer=tf.keras.initializers.he_normal()),
'wc19': tf.get_variable('W18', shape=(3,3,256,256), initializer=tf.keras.initializers.he_normal()),
'wc20': tf.get_variable('W19', shape=(3,3,256,256), initializer=tf.keras.initializers.he_normal()),
'wd1': tf.get_variable('Wd1', shape=(256,16), initializer=tf.keras.initializers.he_normal()),
'out': tf.get_variable('Wout', shape=(16,10), initializer=tf.keras.initializers.he_normal()),
}
biases = {
'bc1': tf.get_variable('B0', shape=(2), initializer=tf.keras.initializers.he_normal()),
'bc2': tf.get_variable('B1', shape=(4), initializer=tf.keras.initializers.he_normal()),
'bc3': tf.get_variable('B2', shape=(16), initializer=tf.keras.initializers.he_normal()),
'bc4': tf.get_variable('B3', shape=(32), initializer=tf.keras.initializers.he_normal()),
'bc5': tf.get_variable('B4', shape=(64), initializer=tf.keras.initializers.he_normal()),
'bc6': tf.get_variable('B5', shape=(128), initializer=tf.keras.initializers.he_normal()),
'bc7': tf.get_variable('B6', shape=(256), initializer=tf.keras.initializers.he_normal()),
'bc8': tf.get_variable('B7', shape=(256), initializer=tf.keras.initializers.he_normal()),
'bc9': tf.get_variable('B8', shape=(256), initializer=tf.keras.initializers.he_normal()),
'bc10': tf.get_variable('B9', shape=(256), initializer=tf.keras.initializers.he_normal()),
'bc11': tf.get_variable('B10', shape=(256), initializer=tf.keras.initializers.he_normal()),
'bc12': tf.get_variable('B11', shape=(256), initializer=tf.keras.initializers.he_normal()),
'bc13': tf.get_variable('B12', shape=(256), initializer=tf.keras.initializers.he_normal()),
'bc14': tf.get_variable('B13', shape=(256), initializer=tf.keras.initializers.he_normal()),
'bc15': tf.get_variable('B14', shape=(256), initializer=tf.keras.initializers.he_normal()),
'bc16': tf.get_variable('B15', shape=(256), initializer=tf.keras.initializers.he_normal()),
'bc17': tf.get_variable('B16', shape=(256), initializer=tf.keras.initializers.he_normal()),
'bc18': tf.get_variable('B17', shape=(256), initializer=tf.keras.initializers.he_normal()),
'bc19': tf.get_variable('B18', shape=(256), initializer=tf.keras.initializers.he_normal()),
'bc20': tf.get_variable('B19', shape=(256), initializer=tf.keras.initializers.he_normal()),
'bd1': tf.get_variable('Bd1', shape=(16), initializer=tf.keras.initializers.he_normal()),
'out': tf.get_variable('Bout', shape=(10), initializer=tf.keras.initializers.he_normal()),
}
# Build the network graph and the training ops.
pred = conv_net(X, weights, biases)
print(pred)
# Softmax cross-entropy between logits and one-hot labels, averaged per batch.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
#Here you check whether the index of the maximum value of the predicted image is equal to the actual labelled image. and both will be a column vector.
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
#calculate accuracy across all the given images and average them out.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
train_loss = []
test_loss = []
train_accuracy = []
test_accuracy = []
tempos = []
fig, ax = plt.subplots()
#summary_writer = tf.summary.FileWriter('./Output', sess.graph)
iter_time = time.time()
for i in range(training_iters):
for batch in range(len(train_x)//batch_size):
batch_x = train_x[batch*batch_size:min((batch+1)*batch_size,len(train_x))]
batch_y = train_y[batch*batch_size:min((batch+1)*batch_size,len(train_y))]
# Run optimization op (backprop).
# Calculate batch loss and accuracy
opt = sess.run(optimizer, feed_dict={X: batch_x,
y: batch_y})
predict,loss, acc = sess.run([pred,cost, accuracy], feed_dict={X: batch_x,
y: batch_y})
#print("predict = ")
print("Iter " + str(i) + ", Loss= " + \
"{:.6f}".format(loss) + ", Training Accuracy= " + \
"{:.5f}".format(acc))
time_passed = time.time() - iter_time
print("tempo atual: " + str(time_passed) )
print("Batch Finished!")
# Calculate accuracy for all 10000 mnist test images
test_acc,valid_loss = sess.run([accuracy,cost], feed_dict={X: test_x,y : test_y})
train_loss.append(loss)
test_loss.append(valid_loss)
train_accuracy.append(acc)
tempos.append(time_passed)
test_accuracy.append(test_acc)
print("Testing Accuracy:","{:.5f}".format(test_acc))
if(time_passed >= 750):
break
# summary_writer.close()
plt.plot(tempos, test_accuracy, '-', lw=2)
plt.grid(True)
plt.show() |
import pandas as pd
import numpy as np
from datetime import datetime
from datetime import timedelta
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
import pickle
from sklearn.preprocessing import Normalizer
from sklearn.cluster import DBSCAN
def extract_meal_nomeal(filename1, filename2):
    """Extract per-meal CGM windows from insulin and CGM exports.

    filename1: insulin pump CSV (carb inputs + timestamps).
    filename2: CGM CSV (sensor glucose readings + timestamps).
    Returns a 2-D array: 30 glucose readings (30 min before to 2 h after
    each qualifying meal) plus the meal's carb amount as the last column.
    """
    InsulinData_1 = pd.read_csv(filename1, low_memory=False)
    CGM_1 = pd.read_csv(filename2, low_memory=False)
    CGM_1 = CGM_1[['Time', 'Date', 'Sensor Glucose (mg/dL)']].dropna(how='any')
    # Keep only rows with a real (non-zero) carb entry = actual meals.
    new_CGM_1 = InsulinData_1[(InsulinData_1['BWZ Carb Input (grams)'].notnull()) & (InsulinData_1['BWZ Carb Input (grams)'] != 0)]
    new_CGM = new_CGM_1.copy()
    CGM = CGM_1.copy()
    new_CGM['Date'] = new_CGM['Date'].str.replace(' 00:00:00', '')
    new_CGM['Timestamp'] = pd.to_datetime(new_CGM['Date'].apply(str)+' '+new_CGM['Time'])
    # print(new_CGM['Timestamp'])
    CGM['Date'] = CGM['Date'].str.replace(' 00:00:00', '')
    date_time = pd.to_datetime(new_CGM['Date'].apply(str)+' '+new_CGM['Time']) # insulin timestamp
    date_time_copy = date_time.copy().to_frame()
    # Drop meals that start within 2 h of the next (earlier) meal so the
    # extraction windows do not overlap.
    for i in range(int(new_CGM.shape[0])-1):
        curr_time = date_time.iloc[i]
        last_time = date_time.iloc[i+1]
        # NOTE(review): .seconds ignores the days component of the
        # timedelta — gaps of 24h+ wrap around. Confirm total_seconds()
        # was not intended here.
        diff = (curr_time - last_time).seconds/3600 # time difference--unit: hours
        if diff <= 2:
            date_time_copy.drop(date_time_copy[date_time_copy.iloc[:,0] == last_time].index, inplace=True) # after drop meal timestamp
    # Window bounds: 30 min before to 2 h after each meal timestamp.
    twohours = timedelta(hours = 2)
    halfhour = timedelta(hours = 0.5)
    time_start_meal = date_time_copy.iloc[:,0]-halfhour
    time_end_meal = date_time_copy.iloc[:,0]+twohours
    date_time_copy['meal_start'] = time_start_meal
    date_time_copy['meal_end'] = time_end_meal
    # datetime copy[timestamp, start, end]
    # print(date_time_copy.head())
    # add timestamp into CGM
    CGM['Date_Time'] = pd.to_datetime(CGM['Date'].apply(str)+' ' + CGM['Time'])
    # extract meal data (order increasing by time)
    k = 0
    date_time_copy = date_time_copy.iloc[::-1]
    for i in date_time_copy.index:
        time_stamp = date_time_copy.loc[i,:].tolist()
        temp_df = CGM[(CGM['Date_Time'] >= time_stamp[1]) & (CGM['Date_Time'] <= time_stamp[2])]
        # Only keep complete windows (30 five-minute CGM samples).
        if temp_df.shape[0] != 30:
            # print('666')
            continue
        sensor = temp_df['Sensor Glucose (mg/dL)'].to_numpy()
        meal_amount = int(new_CGM[new_CGM['Timestamp']==time_stamp[0]]['BWZ Carb Input (grams)'])
        meal_amount = np.array([meal_amount])
        # Append the carb amount as the 31st column of the row.
        sensor = np.concatenate((sensor, meal_amount))
        meal_data = sensor if k == 0 else np.vstack((meal_data, sensor))
        k += 1
    # extract no meal data
    # output = no_meal_data (order by increasing time)
    return meal_data
meal_data = extract_meal_nomeal('InsulinData.csv', 'CGMData.csv')
# print(meal_data.shape)
def ground_truth(input):
    """Bucket meal rows by carb amount (the last column) into six 20-gram
    bins and return the six per-bin row counts as a tuple.

    NOTE: reproduces the original accumulation quirk — a bin that holds
    exactly ONE row reports 31 (the row length) instead of 1, because the
    original stacked rows with np.vstack and returned .shape[0]. Callers
    (e.g. dbscan) compensate for this, so it must be preserved.
    """
    bin_counts = [0, 0, 0, 0, 0, 0]
    # bin boundaries: <=20, <=40, <=60, <=80, <=100, >100
    for row in input:
        carbs = row[-1]
        if carbs <= 20:
            slot = 0
        elif carbs <= 40:
            slot = 1
        elif carbs <= 60:
            slot = 2
        elif carbs <= 80:
            slot = 3
        elif carbs <= 100:
            slot = 4
        else:
            slot = 5
        bin_counts[slot] += 1
    # count==1 -> 31 mirrors the original vstack/.shape[0] behaviour.
    return tuple(31 if count == 1 else count for count in bin_counts)
def log(a):
    """Entropy contribution -a*log10(a), with 0*log(0) defined as 0."""
    return 0 if a == 0 else -a * np.log10(a)
# K-means
def kmeans():
    """Cluster the meal rows with K-means (k=6) and score the clustering
    against the carb-bin ground truth.

    Returns:
        (weighted entropy sum, weighted purity sum * 3, inertia * 2)
        — the *3 / *2 scaling mirrors the report format written downstream.

    Relies on module-level ``meal_data`` and sklearn's Normalizer / KMeans.
    """
    transformer = Normalizer(norm='l2').fit(meal_data)
    meal_data_norm = transformer.transform(meal_data)
    kmeans = KMeans(n_clusters=6, random_state=0).fit(meal_data_norm)
    label_kmean = kmeans.labels_
    entro = []
    purity = []
    # BUG FIX: the original iterated range(5) and silently dropped cluster 5
    # from the entropy/purity totals even though n_clusters=6.
    for i in range(6):
        kmean_dict = np.where(label_kmean == i)[0]
        j = 0
        for k in kmean_dict:
            temp_data = meal_data[k, :]
            temp_clus = temp_data if j == 0 else np.vstack((temp_clus, temp_data))
            j += 1
        # NOTE(review): ground_truth() reports 31 for a single-row bin (see
        # its quirk); unlike dbscan(), no correction is applied here — TODO
        # confirm whether that is intended.
        a, b, c, d, e, f = ground_truth(temp_clus)
        a, b, c, d, e, f = (a / temp_clus.shape[0], b / temp_clus.shape[0],
                            c / temp_clus.shape[0], d / temp_clus.shape[0],
                            e / temp_clus.shape[0], f / temp_clus.shape[0])
        en = log(a) + log(b) + log(c) + log(d) + log(e) + log(f)
        wei_en = temp_clus.shape[0] / meal_data_norm.shape[0] * en
        entro.append(wei_en)
        pur = max(np.array([a, b, c, d, e, f]))
        wei_pur = temp_clus.shape[0] / meal_data_norm.shape[0] * pur
        purity.append(wei_pur)
    return (sum(entro)), (sum(purity) * 3), (kmeans.inertia_ * 2)
#DBSCAN
def dbscan():
    """Cluster PCA-reduced meal features with DBSCAN and score the result.

    Returns (weighted entropy sum, weighted purity sum * 3, total SSE).
    Uses module-level ``meal_data`` plus sklearn's PCA / DBSCAN.
    """
    pca = PCA(n_components=3)
    # NOTE(review): PCA is fitted on the TRANSPOSED matrix and components_
    # is used as the per-sample embedding — unusual; confirm this is
    # intentional rather than pca.fit_transform(meal_data).
    mealT = meal_data.T
    meal_pca = pca.fit(mealT)
    meal_decom = meal_pca.components_.T
    m1 = DBSCAN(eps=0.018, min_samples=2)
    m1.fit(meal_decom)
    indic = (m1.core_sample_indices_)
    label = (m1.labels_)
    # NOTE(review): DBSCAN labels clusters 0..max(label); using max(label)
    # as the count skips the highest-numbered cluster in both loops below
    # (noise label -1 is ignored). Presumably should be max(label) + 1 —
    # confirm before changing, as it alters the reported metrics.
    num_cluster = max(label)
    final_sse = 0
    for i in range(num_cluster):
        dic_indi = np.where(label == i)[0]
        q = 0
        for k in dic_indi:
            # NOTE(review): this accumulates the elementwise SUM of the
            # cluster's points (one 3-vector), not a stacked matrix, so
            # 'center' below is the scalar mean of that sum vector rather
            # than a per-dimension centroid — verify the SSE definition.
            temp = meal_decom[k] if q == 0 else temp+meal_decom[k]
            q += 1
        center = temp.mean(axis=0)
        error = temp-center
        temp_sse = np.sum(error**2, axis=0)/3
        sse = np.sum(temp_sse)
        final_sse += sse
    entro = []
    purity = []
    for i in range(num_cluster):
        kmean_dict = np.where(label == i)[0]
        j = 0
        for k in kmean_dict:
            temp_data = meal_data[k,:]
            temp_clus = temp_data if j == 0 else np.vstack((temp_clus, temp_data))
            j += 1
        a,b,c,d,e,f = ground_truth(temp_clus)
        nd = [a,b,c,d,e,f]
        # ground_truth reports 31 (the row length) when a bin holds exactly
        # one row; map that sentinel back to a count of 1.
        for p in range(len(nd)):
            if nd[p] == 31:
                nd[p] =1
        a,b,c,d,e,f = nd[0],nd[1],nd[2],nd[3],nd[4],nd[5]
        # normalise counts to per-cluster fractions
        a,b,c,d,e,f = a/(temp_clus.shape[0]),b/(temp_clus.shape[0]),c/(temp_clus.shape[0]),d/(temp_clus.shape[0]),e/(temp_clus.shape[0]),f/(temp_clus.shape[0])
        en = log(a) + log(b) + log(c) + log(d) + log(e) + log(f)
        # weight each cluster's score by its share of all samples
        wei_en = temp_clus.shape[0]/(meal_decom.shape[0])*en
        entro.append(wei_en)
        pur = max(np.array([a,b,c,d,e,f]))
        wei_pur = temp_clus.shape[0]/(meal_decom.shape[0])*pur
        purity.append(wei_pur)
    return (sum(entro)), (sum(purity)*3), (final_sse)
# Run both clustering pipelines and persist the six metrics as one CSV row:
# [kmeans SSE, dbscan SSE, kmeans entropy, dbscan entropy, kmeans purity, dbscan purity]
kmeans_entropy, kmeans_purity, kmeans_sse = kmeans()
dbscan_entropy, dbscan_purity, dbscan_sse = dbscan()
metrics = np.array([kmeans_sse, dbscan_sse, kmeans_entropy,
                    dbscan_entropy, kmeans_purity, dbscan_purity])
pd.DataFrame(metrics[None]).to_csv("Result.csv", index=False, header=False)
from objects import Building
from tkinter import Tk, Label, Button, Frame, Canvas
from time import sleep
class MainMenu:
    """Tkinter main menu: a fixed-size window with a single Start/Resume
    button that shows a short loading message, then closes itself."""

    def __init__(self, name="Billionaire Business"):
        width, height = 512, 256
        self.mainMenuWindow = Tk()
        self.mainMenuWindow.title(name)
        self.mainMenuWindow.geometry(f"{width}x{height}")
        self.startCanvas = Canvas(
            master=self.mainMenuWindow,
            width=width,
            height=height,
        )
        start_button = Button(
            master=self.startCanvas,
            text="Start/Resume",
            command=self.Start,
            width=10,
            height=3,
        )
        start_button.pack()
        self.startCanvas.pack()
        # Blocks here until the window is destroyed.
        self.mainMenuWindow.mainloop()

    def Start(self):
        """Show a brief 'loading' label, then tear down the menu window."""
        loading = Label(master=self.startCanvas, text="Game is Loading!")
        loading.pack()
        self.startCanvas.update()
        sleep(1.5)
        self.mainMenuWindow.destroy()
class MainGame():
def __init__(self,name="tk das"):
|
"""
Anonymous token-based surveys
"""
|
class OxygenModificationFactor():
    """Oxygen enhancement ratio (OER) modification factor.

    ``calculate`` implements (m*p + K) / (m*(p + K)) where ``m`` is the
    maximum OER and ``K`` the half-effect constant in mmHg.
    """

    def __init__(self, max_oer, k_mmHg) -> None:
        super().__init__()
        self.max_oer = max_oer
        self.k_mmHg = k_mmHg

    def calculate(self, po2):
        """Return the modification factor for partial pressure *po2*."""
        numerator = self.max_oer * po2 + self.k_mmHg
        denominator = self.max_oer * (po2 + self.k_mmHg)
        return numerator / denominator
|
from allauth.account.adapter import DefaultAccountAdapter
from django.conf import settings
from django.shortcuts import resolve_url
from datetime import datetime, timedelta
class AccountAdapter(DefaultAccountAdapter):
    """allauth adapter that sends password-less users (e.g. social signups)
    to the password-set page right after login."""

    def get_login_redirect_url(self, request):
        """Return the post-login redirect target.

        Users without a usable password are routed to the set-password
        view; everyone else goes to settings.LOGIN_REDIRECT_URL.
        """
        # BUG FIX: `is_authenticated` is a property on modern Django;
        # calling it as a method raises TypeError on Django >= 2.0.
        assert request.user.is_authenticated
        if not request.user.password:
            url = '/accounts/password/set'
        else:
            url = settings.LOGIN_REDIRECT_URL
        return resolve_url(url)
|
#!/usr/bin/python
import json
import re
from typing import Set
def filter_log(log_file_path: str, dont_check_libs: Set, time_limits: float, types: Set):
    """Filter a spotify/XCLogParser JSON log.

    Keeps entries whose compilation time exceeds *time_limits*, whose step
    type is in *types*, and which either do not live under ``/Pods/`` or
    belong to a pod library NOT listed in *dont_check_libs*. The surviving
    entries, sorted by descending compilation time, are written to
    ``data.json`` in the current directory.

    :param log_file_path: path to the XCLogParser JSON log file
    :param dont_check_libs: pod library names to exclude from the report
    :param time_limits: minimum compilationDuration (seconds) to keep
    :param types: detailStepType values to keep
    """
    # Raw string so the backslashes reach the regex engine verbatim (the
    # original non-raw literal relied on invalid "\/" escapes, which emit
    # SyntaxWarning on recent Python). The <lib> group captures the pod name.
    pattern = re.compile(r".*\/Pods\/(?P<lib>[0-9a-zA-Z.-]+)\/.*")
    with open(log_file_path, 'r') as fr:
        data = json.load(fr)
    filtered = list()
    for item in data:
        # keep only slow-enough entries of a requested step type
        if item["compilationDuration"] > time_limits and item["detailStepType"] in types:
            match = pattern.match(item["documentURL"])
            if match:
                lib = match["lib"]
                # drop entries from explicitly ignored pod libraries
                if len(lib) > 0 and lib not in dont_check_libs:
                    filtered.append(item)
            else:
                # not under /Pods/ at all: always report
                filtered.append(item)
    # slowest first
    filtered.sort(key=lambda x: x["compilationDuration"], reverse=True)
    with open('data.json', 'w') as fw:
        json.dump(filtered, fw, indent=4)
# cCompilation
compile_type = {"swiftCompilation"}  # step types to report (see filter_log)
file_path = "sudoku.json"  # XCLogParser output to analyse
# Pod libraries excluded from the slow-compile report.
dont_check_libs = {"SwiftSoup", "RxSwift", "YYCache", "libwebp", "YYText", "SwifterSwift", "RxCocoa", "HandyJSON",
                   "SnapKit", "Starscream", "SNS", "Socket.IO-Client-Swift"}
# Report swift compilations slower than 0.2s into data.json.
filter_log(file_path, dont_check_libs, 0.2, compile_type)
|
from optparse import OptionParser
import pandas as pd
import numpy as np
from ..util import file_handling as fh, defines
def main():
    """Entry point: parse command-line options, then convert all raw CSVs."""
    parser = OptionParser(usage="%prog")
    #parser.add_option('-d', dest='dataset', default='',
    #                  help='Dataset to process; (if not specified, all files in raw data directory will be used)')
    (options, args) = parser.parse_args()
    # process all the (given) data files
    convert_data()
    #combine_responses()
def convert_data():
    """Split each raw CSV into a per-question binary-label CSV plus one
    shared response-text JSON (Python 2 module: bare print statements)."""
    input_dir = defines.data_raw_csv_dir
    files = fh.ls(input_dir, '*.csv')
    text_dir = fh.makedirs(defines.data_raw_text_dir)
    label_dir = fh.makedirs(defines.data_raw_labels_dir)
    # respondent id -> transcript text, accumulated over all input files
    responses = {}
    for f in files:
        # get basename of input file
        basename = fh.get_basename(f)
        print basename
        # read the data into a dataframe
        df = pd.read_csv(f, header=0, index_col=0)
        nRows, nCols = df.shape
        print nRows, nCols
        print df.index[0:10]
        index = df.index
        #print index
        # make the indices unique by prefixing with the question name
        index = [basename + '_' + str(i) for i in index]
        df.index = index
        # pull out the non-label columns
        [nRows, nCols] = df.shape
        print nRows, nCols
        # drop the first and last columns; the remainder are label columns
        col_sel = range(nCols)
        col_sel.pop(0)
        col_sel.pop(-1)
        # make a new dataframe of just the labels and write it to a file
        # (labels are binarised: any positive value -> 1)
        Y = df[col_sel]
        Y = pd.DataFrame(np.array(Y > 0, dtype=int), index=Y.index, columns=Y.columns)
        Y.to_csv(fh.make_filename(label_dir, basename, 'csv'))
        # pull out the text and add it to a dictionary of all responses
        X = df['Interviewer transcript']
        for x in X.iteritems():
            responses[x[0]] = x[1]
    # write the texts to a file
    output_filename = defines.data_raw_text_file
    fh.write_to_json(responses, output_filename)
# concatenate all of the text from each respondent for possible future use
def combine_responses():
    """Merge every per-question response JSON into one all_text.json,
    joining repeated respondent keys with ' ; ' (Python 2: has_key)."""
    input_dir = defines.data_raw_text_dir
    output_dir = fh.makedirs(defines.data_raw_concat_dir)
    files = fh.ls(input_dir, '*.json')
    all_text = {}
    for f in files:
        text = fh.read_json(f)
        keys = text.keys()
        for k in keys:
            if all_text.has_key(k):
                all_text[k] += ' ; ' + text[k]
            else:
                all_text[k] = text[k]
    output_filename = fh.make_filename(output_dir, 'all_text', 'json')
    fh.write_to_json(all_text, output_filename)
# Script entry point.
if __name__ == '__main__':
    main()
|
import os
import argparse
class ArgParser:
    """Command-line parser for a dataset path and a save path.

    Both paths are validated; a missing save location is replaced by a
    freshly created ``<save>/<predefined name>`` directory.

    Attributes:
        description: Human-readable purpose of this parser.
        save_path: Validated save location (set by parse/check methods).
        data_path: Validated dataset location (set by parse/check methods).
        predefined_save_path_name: Folder name used when creating a new
            save directory.
    """

    def __init__(self, description='', predefined_save_path='save'):
        self.description = description
        self.save_path = ''
        self.data_path = ''
        self.predefined_save_path_name = predefined_save_path

    def check_save_path_exists(self, save_path):
        """Validate *save_path*; when missing, create ``save_path/<name>``.

        Returns True on success; raises OSError when creation fails.
        """
        if os.path.exists(save_path):
            self.save_path = save_path
            return True
        failure_msg = ("Couldn't find and create save path specified. \n"
                       "Check your permissions to run the script or create save path manually.\n")
        try:
            candidate = save_path + '/' + self.predefined_save_path_name
            os.makedirs(candidate)
            if not os.path.exists(candidate):
                raise OSError(failure_msg)
            self.save_path = candidate
        except OSError:
            raise OSError(failure_msg)
        return True

    def check_data_path_exists(self, data_path):
        """Record *data_path*; raise NameError when it does not exist."""
        if not os.path.exists(data_path):
            raise NameError("Data path inserted does not exist.\n"
                            "Please, correct your data path. \n")
        self.data_path = data_path
        return True

    def parse(self):
        """Parse the 'data' and 'save' positional arguments and validate.

        Returns (data_path, save_path) once both checks pass.
        """
        parser = argparse.ArgumentParser()
        parser.add_argument('data', help="Path where the dataset is stored", type=str)
        parser.add_argument('save', help="Path where the save path should be", type=str)
        args = parser.parse_args()
        # Both checks either return True or raise, so the guard only fires
        # on full success.
        if self.check_data_path_exists(args.data) and self.check_save_path_exists(args.save):
            return self.data_path, self.save_path
|
class Base:
    """Holds a number that is halved (and printed) on every out() call."""

    def __init__(self, N):
        self.numb = N

    def out(self):
        """Halve the stored number in place, then print the new value."""
        halved = self.numb / 2
        self.numb = halved
        print(halved)
class Subclass(Base):
    """Base variant that frames the printed value with dashed separators."""

    def out(self):
        print("\n-----")
        super().out()
        print("\n-----")
# Demo: values 5 and 6 use the dash-framed Subclass printer, the rest Base.
for i in range(10):
    obj = Subclass(i) if 4 < i < 7 else Base(i)
    obj.out()
obj1 = Base(3)
obj2 = Subclass(5)
obj1.out()
obj2.out()
|
# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
from eelbrain import datasets, plot
def test_plot_brain():
    """Test plot.brain plots"""
    # Python 2 test module (bare print statements below). Smoke-tests each
    # brain plot type on a minimal MNE sample source estimate, closing the
    # colorbar figure after each plot.
    src = datasets.get_mne_sample(src='ico', sub=[0])['src']
    p = plot.brain.dspm(src)
    cb = p.plot_colorbar(show=False)
    cb.close()
    p = plot.brain.dspm(src, hemi='lh')
    cb = p.plot_colorbar(show=False)
    cb.close()
    p = plot.brain.cluster(src, hemi='rh', views='parietal')
    cb = p.plot_colorbar(show=False)
    cb.close()
    # bin_table returns an image object; printing exercises its repr/str.
    image = plot.brain.bin_table(src, tstart=0.1, tstop=0.3, tstep=0.1)
    print repr(image)
    print image
|
import json
import os
import textwrap
import warnings
import requests
from django.conf import settings
def post_to_slack(message):
    """Post *message* to the Slack webhook configured in Django settings.

    No-op (with a warning) when SLACK_WEBHOOK_URL is unset; messages from
    non-production environments get a 'TEST for ...' prefix.
    """
    if not getattr(settings, "SLACK_WEBHOOK_URL", None):
        warnings.warn("settings.SLACK_WEBHOOK_URL is not set")
        return
    env = os.getenv(
        "SERVER_ENVIRONMENT", getattr(settings, "SERVER_ENVIRONMENT", None)
    )
    if env in ["test", "development", "staging"]:
        message = "TEST for {} environment: ".format(env) + message
    payload = {
        "icon_emoji": ":satellite_antenna:",
        "username": "Election Radar",
        "text": textwrap.dedent(message),
    }
    requests.post(settings.SLACK_WEBHOOK_URL, json.dumps(payload), timeout=2)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
# INSERT for one user hardware action using PostgreSQL positional
# placeholders ($1..$6); rows are created with deleted=FALSE and the full
# inserted record is returned via RETURNING *.
create_user_hw_action_element_query = """
INSERT INTO public.user_hw_action AS uhwa
(user_id, hw_action_id, value, date_from, date_to, active, deleted)
VALUES
($1, $2, $3, $4, $5, $6, FALSE) RETURNING *;
"""
|
import unittest
import pysparkling
class BroadcastTest(unittest.TestCase):
    """Behavioural tests for pysparkling broadcast variables."""

    def setUp(self) -> None:
        self.context = pysparkling.Context()

    def testSimple(self):
        """A broadcast exposes its payload unchanged via .value."""
        broadcast = self.context.broadcast([1, 2, 3, 4, 5])
        self.assertEqual(broadcast.value, [1, 2, 3, 4, 5])

    def testAppendFails(self):
        """The .value attribute is read-only; augmented assignment raises."""
        broadcast = self.context.broadcast([1, 2, 3, 4, 5])
        with self.assertRaises(AttributeError):
            broadcast.value += [1]  # type: ignore
|
from typing import Iterable
from pathlib import Path
import pytest
# For typing only
from _pytest.monkeypatch import MonkeyPatch
from flask import Flask
from flask.testing import FlaskClient
from hun_law.utils import Date
from ajdb_web.app import create_app, TestConfig
from ajdb.structure import ActSet
from ajdb.database import Database
from ajdb.config import AJDBConfig
from .data import TEST_ACT1, TEST_ACT2
@pytest.fixture
def app() -> Flask:
    """Flask application built with the package's TestConfig."""
    return create_app(TestConfig())
@pytest.fixture
def client(app: Flask) -> FlaskClient:
    """Werkzeug test client bound to the app fixture."""
    # That's the point of fixtures, pylint, ugh.
    # pylint: disable=redefined-outer-name
    return app.test_client()
@pytest.fixture
def fake_db(tmp_path: Path, monkeypatch: MonkeyPatch) -> Iterable[ActSet]:
    """Point AJDB storage at a temp dir and pre-save one ActSet for today."""
    monkeypatch.setattr(AJDBConfig, "STORAGE_PATH", tmp_path)
    act_set = ActSet(acts=(TEST_ACT1, TEST_ACT2))
    Database.save_act_set(act_set, Date.today())
    yield act_set
|
from typing import Callable, Generator, Optional
from async_generator import async_generator, yield_
class InfluxDBResult:
    """Wrapper around a parsed (non-chunked) InfluxDB query response."""

    __slots__ = ('_data', 'parser', 'query')

    def __init__(self, data, parser=None, query=None):
        self._data = data
        self.parser = parser
        self.query = query

    @property
    def data(self):
        """Raw response dictionary."""
        return self._data

    @property
    def series_count(self):
        """Number of series across all statements."""
        return len(self._count())

    def __len__(self):
        """Returns number of total points in data"""
        return sum(self._count())

    def __repr__(self):
        query_preview = self.query
        if len(query_preview) > 80:
            query_preview = query_preview[:80] + '...'
        return '<{} [q="{}"]>'.format(type(self).__name__, query_preview)

    def __iter__(self):
        return iterpoints(self.data, parser=self.parser)

    def show(self):
        """Materialise every point into a list."""
        return list(self)

    def _count(self):
        """Per-series point counts, flattened across statements."""
        counts = []
        for statement in self._data['results']:
            if 'series' not in statement:
                continue
            for series in statement['series']:
                counts.append(len(series['values']))
        return counts
class InfluxDBChunkedResult:
    """Wrapper around a chunked (streamed) InfluxDB query response.

    Holds an async generator of response chunks; iterate points with
    ``async for`` / iterpoints(), or whole chunks with iterchunks().
    """
    __slots__ = ('_gen', 'parser', 'query')
    def __init__(self, gen, parser=None, query=None):
        self._gen = gen          # async generator yielding chunk dicts
        self.parser = parser     # optional per-point parser (see iterpoints)
        self.query = query       # original query text, used by __repr__
    @property
    def gen(self):
        """The underlying async chunk generator."""
        return self._gen
    def __repr__(self):
        # Truncate long queries so the repr stays one readable line.
        q = self.query[:80] + '...' if len(self.query) > 80 else self.query
        return '<{} [q="{}"]>'.format(type(self).__name__, q)
    def __aiter__(self):
        return self.iterpoints()
    # python3.5 support
    @async_generator
    async def iterpoints(self):
        """Yield every point of every chunk via module-level iterpoints()."""
        async for chunk in self._gen:
            for i in iterpoints(chunk, parser=self.parser):
                await yield_(i)
    # python3.5 support
    @async_generator
    async def iterchunks(self, wrap=False):
        """Yield raw chunk dicts, or InfluxDBResult wrappers when wrap=True."""
        async for chunk in self._gen:
            if wrap:
                await yield_(InfluxDBResult(chunk, parser=self.parser, query=self.query))
            else:
                await yield_(chunk)
def iterpoints(resp: dict, parser: Optional[Callable] = None) -> Generator:
    """Return a generator over the points of the FIRST series in *resp*.

    Works with both regular and chunked response dictionaries. By default
    each point is the plain value list; pass ``parser`` — a callable taking
    ``(values, meta)`` where ``meta`` holds the series keys other than
    ``values`` plus ``statement_id`` — to reshape each point, e.g.::

        def parser(x, meta):
            return dict(zip(meta['columns'], x))

    :param resp: parsed JSON response (output from InfluxDBClient.query)
    :param parser: optional per-point transform
    :return: generator of points (empty when no series is present)
    """
    for statement in resp['results']:
        for series in statement.get('series', ()):
            meta = {key: value for key, value in series.items() if key != 'values'}
            meta['statement_id'] = statement['statement_id']
            if parser is None:
                return (point for point in series['values'])
            return (parser(point, meta) for point in series['values'])
    return (point for point in [])
|
from .task import *
# Enable the scheduler's HTTP API (presumably Flask-APScheduler's
# /scheduler endpoints — confirm against the consuming app).
SCHEDULER_API_ENABLED = True
# Job table: run test_task (imported from .task) every hour.
JOBS = [
    {
        'id': 'test_task',
        'func': test_task,
        'args': '',
        'trigger': 'interval',
        'seconds': 3600
    },
]
|
#create the list of ages
#sort ages
#find average
ages = [15, 9, 5, 16, 20, 73, 54, 3, 34, 8]
ages.sort()
print(ages)
# Implements the 'find average' step promised above, which was missing.
average = sum(ages) / len(ages)
print(average)
|
import random
class Color:
    """ANSI escape sequences for coloured terminal output."""
    end = '\x1b[0m'  # reset all attributes back to the default
    # bold = '\x1b[1m'
    underline = '\x1b[4m'
    red = '\033[1;31m'
    green = '\033[1;32m'
    orange = '\033[1;33m'
    blue = '\033[1;34m'
    purple = '\033[1;35m'
    darkcyan = '\x1b[1;36m'
    yellow = '\x1b[1;93m'
    cyan = '\x1b[1;96m'
class Player:
    """Global single-player state, kept as class attributes."""
    attack = 1
    defence = 1
    health = 5
    gold = 0
    # NOTE(review): these dicts are shared class-level state and are never
    # read or written anywhere in this file — reserved for the gear TODOs.
    weapon = dict()
    armor = dict()
class Monster:
    """Current opponent's stats plus the static monster table.

    BUG FIX: the original declared name/attack/defence/health/gold with
    bare annotations (``name: ''``), which create NO class attributes —
    reading e.g. Monster.attack before the first fight raised
    AttributeError. Real default assignments fix that; fight_monster()
    still overwrites them per fight.
    """
    name = ''
    attack = 0
    defence = 0
    health = 0
    gold = 0
    monster_list = []
    # Monsters are added in by a list : ['name', attack, defence, health, gold drop]
    monster_list.append(['Monster 1', 0, 0, 5, 1])
    monster_list.append(['Monster 2', 1, 1, 5, 2])
    monster_list.append(['Monster 3', 2, 2, 10, 3])
    monster_list.append(['Monster 4', 4, 4, 25, 4])
    monster_list.append(['Monster 5', 10, 10, 100, 5])
class Item:
    """Static gear tables; each entry is [id, name, stat_value]."""
    weapon_list = [
        [1, 'Beginner\'s sword', 1],
        [2, 'Longsword', 2],
        [3, 'Greatsword', 3],
    ]
    armor_list = [
        [1, 'Beginner\'s armor', 1],
        [2, 'Leather armor', 2],
        [3, 'Plated armor', 3],
    ]
def roll_random(min_value, max_value):
    """Roll a random integer in [min_value, max_value] and print whether it
    matches each weapon id (prototype for the planned reusable loot RNG)."""
    random_number = int(random.randint(min_value, max_value))
    print(random_number)
    for i in range(len(Item.weapon_list)):
        if random_number == Item.weapon_list[i][0]:
            print(Item.weapon_list[i][1])
        else:
            print("No")
def welcome_text():
    """Print the one-time intro banner shown at startup."""
    intro_lines = (
        '\n-----',
        'Hello, and welcome to a short demonstration of basic Python rpg',
        "In this demonstration, the user (that's you!) chooses what happens depending on the the input given.",
        'Feel free to mess around - if you accidentally quit the program, you can run it again using Shift+F10',
        '-----\n',
    )
    for line in intro_lines:
        print(line)
def color_test():
    """Print a sample string in every palette colour, resetting at the end."""
    for colour in (Color.red, Color.green, Color.orange, Color.blue,
                   Color.purple, Color.darkcyan, Color.yellow):
        print(colour + "1234567890")
    print(Color.cyan + "1234567890" + Color.end)
def start_program():
    """Main input loop: read a command, lowercase it, dispatch it; loops
    until choose_quit() terminates the interpreter."""
    while True:
        command = input('Type in your input. (' + Color.cyan + 'help' + Color.end + ' for a list of commands)\n') + ''
        command = command.lower()
        check_command(command)
def choose_help():
    """Print the valid top-level commands, then re-enter the input loop."""
    print('The following inputs are valid commands:')
    print('| ' + Color.red + 'stats' + Color.end + ' | ' + Color.blue + 'fight' + Color.end + ' ', end='')
    print('| ' + Color.purple + 'buy' + Color.end + ' | ' + Color.cyan + 'quit' + Color.end + ' |' + Color.end)
    # NOTE(review): the menu is re-entered recursively rather than by
    # returning, so the call stack grows with every action until quit().
    start_program()
def back_quit():
    """Print the shared 'back/quit' hint used by the sub-menus."""
    print('Type ' + Color.cyan + 'back' + Color.end + ' to go back or ' + Color.cyan + 'quit' + Color.end + ' to quit.')
def choose_stats():
    """Print the player's current stats on one line, then re-enter the loop."""
    print('| ' + Color.red + "Attack: " + str(Player.attack) + Color.end + ' ', end='')
    print('| ' + Color.blue + "Defence: " + str(Player.defence) + Color.end + ' ', end='')
    print('| ' + Color.purple + "Health: " + str(Player.health) + Color.end + ' ', end='')
    print('| ' + Color.yellow + "Gold: " + str(Player.gold) + Color.end + ' |')
    start_program()
def choose_fight():
    """Prompt for a monster level (1-5) and start that fight.

    'back'/'quit' leave the sub-menu; anything non-numeric re-prompts.
    """
    back_quit()
    print('To fight a monster, type which level you would like to face (', end='')
    monster_level = input(Color.cyan + '1-5' + Color.end + ')\n') + ''
    if monster_level == 'back':
        start_program()
    if monster_level == 'quit':
        choose_quit()
    try:
        int(monster_level)
    except ValueError:
        print(Color.red + "I'm sorry, I don't recognize that input - please try again!" + Color.end)
        # NOTE(review): when this recursive retry ever returned, control
        # would fall through to int(monster_level) below with the old bad
        # value; in practice the menu only exits via quit(), so it doesn't.
        choose_fight()
    if 0 < int(monster_level) < len(Monster.monster_list)+1:
        fight_monster(monster_level)
    elif int(monster_level) > len(Monster.monster_list) or int(monster_level) < 1:
        print(Color.red + "You typed a wrong value, try again" + Color.end)
        choose_fight()
    else:
        print(Color.red + "Don't know how you got here, taking you back to the main menu.." + Color.end)
        start_program()
def choose_buy():
    """Show the upgrade shop and forward the user's choice to check_buy()."""
    back_quit()
    print("You can buy stat upgrades here! Your buy options are: ")
    print('+1 ' + Color.red + 'attack' + Color.end + ' for 5 ' + Color.yellow + 'gold' + Color.end + '.')
    print('+1 ' + Color.blue + 'defence' + Color.end + ' for 5 ' + Color.yellow + 'gold' + Color.end + '.')
    print('+5 ' + Color.purple + 'health' + Color.end + ' for 10 ' + Color.yellow + 'gold' + Color.end + '.')
    buy_input = input("Type in what you would like to buy\n")
    check_buy(buy_input)
def check_buy(buy_option):
    """Dispatch a shop command: back/quit leave, stat names buy upgrades,
    anything else re-prompts via choose_buy()."""
    if buy_option is None or len(buy_option) < 1:
        print(Color.red + "I'm sorry, I didn't recieve any input - please try again!\n" + Color.end)
    elif buy_option == 'back':
        start_program()
    elif buy_option == 'quit':
        choose_quit()
    elif buy_option == 'attack':
        buy_upgrade('attack')
    elif buy_option == 'defence':
        buy_upgrade('defence')
    elif buy_option == 'health':
        buy_upgrade('health')
    else:
        print("I'm sorry, I don't know that command - try again!")
        choose_buy()
def buy_upgrade(option):
    """Apply the chosen upgrade when affordable (attack/defence: 5 gold for
    +1; health: 10 gold for +5), then return to the shop menu."""
    if option == 'attack' and Player.gold > 4:
        Player.gold -= 5
        Player.attack += 1
        print("You've bought an " + Color.red + "attack " + Color.end + "upgrade!")
        choose_buy()
    elif option == 'defence' and Player.gold > 4:
        Player.gold -= 5
        Player.defence += 1
        print("You've bought a " + Color.blue + "defence " + Color.end + "upgrade!")
        choose_buy()
    elif option == 'health' and Player.gold > 9:
        Player.gold -= 10
        Player.health += 5
        print("You've bought a " + Color.purple + "health " + Color.end + "upgrade!")
        choose_buy()
    else:
        # Any known option that reaches here failed the gold check.
        print(Color.red + "You don't have enough gold for this upgrade!" + Color.end)
        choose_buy()
def choose_quit():
    """Print the farewell message and exit the interpreter via quit()."""
    print(Color.cyan + Color.underline + 'Thank you for taking your time to view this demonstration.' + Color.end)
    quit()
def wrong_command():
    """Error message printed for unrecognized top-level commands."""
    print(Color.red + "I'm sorry, I don't recognize that command - please try again!" + Color.end)
def check_command(user_input):
    """Dispatch a top-level menu command to its handler."""
    if user_input is None or len(user_input) < 1:
        print(Color.red + "I'm sorry, I didn't recieve any input - please try again!\n" + Color.end)
    elif user_input == 'help':
        choose_help()
    elif user_input == 'stats':
        choose_stats()
    elif user_input == 'fight':
        choose_fight()
    elif user_input == 'buy':
        choose_buy()
    elif user_input == 'quit':
        choose_quit()
    else:
        wrong_command()
    # NOTE(review): re-enters the input loop after dispatch; handlers also
    # re-enter it themselves, so recursion depth grows per command.
    start_program()
def fight_monster(level):
    """Resolve a fight against monster_list[level-1].

    Outcome compares turns-to-kill each way using (health / net damage);
    a win awards the monster's gold drop, then the fight menu reopens.
    """
    choice = int(level)
    # Load the chosen monster's stats into the shared Monster state.
    Monster.name = Monster.monster_list[choice-1][0]
    Monster.attack = Monster.monster_list[choice-1][1]
    Monster.defence = Monster.monster_list[choice-1][2]
    Monster.health = Monster.monster_list[choice-1][3]
    Monster.gold = Monster.monster_list[choice-1][4]
    # Player can't hurt the monster at all: automatic loss.
    if Player.attack - Monster.defence <= 0:
        print("You lose to " + Color.red + Monster.name + Color.end + ", try an easier monster!")
        choose_fight()
    # Monster can't hurt the player: automatic win.
    elif Monster.attack - Player.defence <= 0:
        print("You win over " + Color.red + Monster.name + Color.end + "! ", end='')
        print("Here, have " + Color.yellow + str(Monster.gold) + " gold" + Color.end + "!")
        Player.gold += Monster.gold
        choose_fight()
    # Monster dies in strictly fewer turns than the player: win.
    elif (Monster.health / (Player.attack - Monster.defence)) < (Player.health / (Monster.attack - Player.defence)):
        print("You win over " + Color.red + Monster.name + Color.end + " ! ", end='')
        print("Here, have " + Color.yellow + str(Monster.gold) + " gold" + Color.end + "!")
        Player.gold += Monster.gold
        choose_fight()
    # Player dies first: loss.
    elif (Player.health / (Monster.attack - Player.defence)) < (Monster.health / (Player.attack - Monster.defence)):
        print("You lose to " + Monster.name + ", try an easier monster!")
        choose_fight()
    # NOTE(review): an exact tie in turns-to-kill matches no branch, so the
    # function returns silently with no outcome message.
def main():
    """Enter the interactive command loop (only returns via quit())."""
    start_program()
if __name__ == '__main__':
    # Show the banner, then enter the command loop. Guarding with __name__
    # keeps importing this module side-effect free (the original ran both
    # calls unconditionally at import time).
    welcome_text()
    main()
# create variables for attack, defence, health, gold - DONE
# create fight option - DONE
# create error handling for string conversion to int - DONE
# create buy option - DONE
# expand fight option to multiple monsters - DONE
# expand fight option to a list of monsters with static stats not based on multiplication - DONE
# Work with the RNG, make it reusable for multiple purposes
# create drop table for monster
# create inventory variable (list)
# expand drop table for monster to include weapon and armor (static gear)
# expand drop table to randomize values on gear (dynamic gear)
|
# *_* coding=utf8 *_*
#!/usr/bin/env python
from unreal import enum
from unreal import config
from unreal import exception
from unreal.utils import ipv4
from unreal.utils import shortuuid
from unreal.handler import base
from unreal.handler.base import require_login, require_admin
CONF = config.CONF
class AdList(base.BaseHandler):
    """List the signed-in user's active ads, joined with their URL rows."""
    @require_login
    def get(self):
        user_id = self.user['id']
        # sf_ad.id is aliased as ad_id so it is not shadowed by url.id.
        ad_list = self.db.query("SELECT *, sf_ad.id AS ad_id "
                                "FROM sf_ad JOIN url ON sf_ad.url_id=url.id "
                                "WHERE sf_ad.owner_id=%s AND sf_ad.status=%s", user_id, enum.AdStatus.Active)
        self.render("manage/ad/list.html", ad_list=ad_list)
class SingleAdHandler(base.BaseHandler):
    """Shared base for handlers operating on a single ad record."""
    def get_own_ad(self, ad_id):
        """Fetch an active ad by id, enforcing ownership (admins exempt).

        Raises exception.PromptRedirect when the record is missing or is
        owned by a different, non-admin user.
        """
        ad_record = self.db.get("SELECT *, sf_ad.id AS ad_id, url.id AS url_id "
                                "FROM sf_ad JOIN url ON sf_ad.url_id=url.id "
                                "WHERE sf_ad.id=%s AND sf_ad.status=%s", ad_id, enum.AdStatus.Active)
        if ad_record is None:
            # message: "record does not exist"
            raise exception.PromptRedirect("不存在的记录")
        if self.user['id'] != ad_record['owner_id'] and not self.is_admin:
            # message: "you are not allowed to view this record"
            raise exception.PromptRedirect("您无权查看此记录")
        return ad_record
class AdDetail(SingleAdHandler):
    """Render the detail page for one of the user's ads."""
    @require_login
    def get(self, ad_id):
        ad_record = self.get_own_ad(int(ad_id))
        return self.render("manage/ad/detail.html", ad_record=ad_record)
class AdAdd(SingleAdHandler):
    """Render the 'create ad' form (submission handled by AdAction.add)."""
    @require_login
    def get(self):
        return self.render("manage/ad/add.html")
class AdModify(SingleAdHandler):
    """Render the 'edit ad' form (submission handled by AdAction.modify)."""
    @require_login
    def get(self, ad_id):
        ad_record = self.get_own_ad(int(ad_id))
        return self.render("manage/ad/modify.html", ad_record=ad_record)
class AdPVLog(SingleAdHandler):
    """Show the latest 10 page-view log rows for one of the user's ads."""
    @require_login
    def get(self, ad_id):
        # Cast to int for consistency with AdDetail / AdModify.
        ad_record = self.get_own_ad(int(ad_id))
        logs = self.db.query(
            "SELECT * FROM pv_log WHERE url_id=%s "
            "ORDER BY 1 DESC LIMIT 10", ad_record['id'])
        # BUG FIX: the original wrapped this in map(), which is lazy on
        # Python 3 — the discarded iterator meant setdefault never ran and
        # no ip_address was ever attached to the rows.
        for log in logs:
            log.setdefault('ip_address', ipv4.to_address(log['remote_ip_v4']))
        self.render("manage/ad/pv_log.html", ad_record=ad_record, logs=logs)
class AdAction(SingleAdHandler):
    """Mutating ad endpoints, multiplexed on the ``method`` argument:
    GET ?method=delete, POST method=add|modify."""
    @require_login
    def get(self):
        method = self.get_argument("method")
        if method == "delete":
            self.delete()
        else:
            # message: "bad parameter"
            raise exception.PromptRedirect("错误的参数")
    @require_login
    def post(self):
        method = self.get_argument("method")
        if method == "add":
            self.add()
        elif method == "modify":
            self.modify()
        else:
            # message: "bad parameter"
            raise exception.PromptRedirect("错误的参数")
    def modify(self):
        """Update an owned ad's fields and its linked URL, then redirect."""
        ad_id = int(self.get_argument('ad_id'))
        name = self.get_argument('name')
        server_ip = self.get_argument('server_ip')
        link = self.get_argument('link')
        comment = self.get_argument('comment')
        kf_qq = self.get_argument("kf_qq")
        url = self.get_argument("url")
        # Ownership check; raises PromptRedirect when not the caller's ad.
        ad_record = self.get_own_ad(ad_id)
        self.db.execute(
            "UPDATE sf_ad SET name=%s, server_ip=%s, link=%s, comment=%s, kf_qq=%s WHERE id=%s",
            name, server_ip, link, comment, kf_qq, ad_id)
        self.db.execute(
            "UPDATE url SET url=%s WHERE id=%s", url, ad_record['url_id'])
        # message: "modified successfully"
        return self.prompt_and_redirect("修改成功", "/manage/advertisement/detail/%s" % ad_id)
    def delete(self):
        """Soft-delete an owned ad by flipping its status, then redirect."""
        ad_id = int(self.get_argument('ad_id'))
        ad_record = self.get_own_ad(ad_id)
        self.db.execute("UPDATE sf_ad SET status=%s WHERE id=%s",
                        enum.AdStatus.Deleted, ad_record['ad_id'])
        # message: "deleted successfully"
        return self.prompt_and_redirect("删除成功")
    def add(self):
        """Create a URL row plus an ad row expiring after avalid_days."""
        name = self.get_argument("name")
        server_ip = self.get_argument("server_ip")
        kf_qq = self.get_argument("kf_qq")
        link = self.get_argument("link")
        comment = self.get_argument("comment")
        avalid_days = int(self.get_argument("avalid_days", 1))
        url = self.get_argument("url")
        # Short public identifier for the tracked URL.
        uuid = shortuuid.uuid()
        url_id = self.db.execute_lastrowid(
            "INSERT INTO url(url, uuid, pv, create_time) VALUES(%s, %s, %s, NOW())",
            url, uuid, 0)
        self.db.execute("INSERT INTO sf_ad(name, url_id, comment, kf_qq, link, "
                        "server_ip, owner_id, status, expire_time, create_time) "
                        "VALUES(%s, %s, %s, %s, %s, %s, %s, %s, "
                        "ADDDATE(NOW(), INTERVAL %s DAY), NOW())",
                        name, url_id, comment, kf_qq, link, server_ip, self.user['id'],
                        enum.AdStatus.Active, avalid_days)
        # message: "added successfully"
        self.prompt_and_redirect("添加成功", "/manage/advertisement")
|
from selenium import webdriver
import time
from selenium.webdriver.common.action_chains import ActionChains
# Demo: hover over each of the three user cards on the-internet's /hovers
# page and print the caption revealed by each hover.
# NOTE(review): find_element_by_xpath is removed in Selenium 4 — migrate to
# driver.find_element(By.XPATH, ...) when upgrading; the chromedriver path
# is machine-specific.
driver = webdriver.Chrome(executable_path="C:/Users/ABHAY/Selenium/chromedriver.exe")
driver.implicitly_wait(10)
driver.maximize_window()
driver.get("http://the-internet.herokuapp.com/hovers")
time.sleep(2)
action = ActionChains(driver)
# First card.
element = driver.find_element_by_xpath("//div[@class='example']//div[1]//img[1]")
action.move_to_element(element).perform()
time.sleep(1)
text1 = driver.find_element_by_xpath("//h5[contains(text(),'name: user1')]").text
print(text1)
# Second card.
element1 = driver.find_element_by_xpath("//div[@class='row']//div[2]//img[1]")
action.move_to_element(element1).perform()
time.sleep(1)
text2 = driver.find_element_by_xpath("//h5[contains(text(),'name: user2')]").text
print(text2)
# Third card.
element2 = driver.find_element_by_xpath("//*[@id='content']/div/div[3]/img")
action.move_to_element(element2).perform()
text3 = driver.find_element_by_xpath("//h5[contains(text(),'name: user3')]").text
print(text3)
time.sleep(2)
driver.quit()
"""A dispatcher for directory watcher events
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import fnmatch
import os
from . import dirwatch_base
class DirWatcherDispatcher(object):
    """Dispatches directory watcher events to multiple handlers."""

    __slots__ = (
        '_dirwatcher',
        '_configs',
    )

    def __init__(self, dirwatcher):
        self._dirwatcher = dirwatcher
        self._configs = []
        # Route the watcher's raw callbacks through this dispatcher.
        dirwatcher.on_created = self._on_created
        dirwatcher.on_deleted = self._on_deleted
        dirwatcher.on_modified = self._on_modified

    @property
    def dirwatcher(self):
        """The dirwatcher this dispatcher is tied to."""
        return self._dirwatcher

    def register(self, path, events):
        """Register an event->handler mapping for the given path pattern.

        Silently ignores a missing path or a non-dict events argument.
        """
        if path is None or not isinstance(events, dict):
            return
        self._configs.append({'path': path, 'events': events})
        # Most specific (lexicographically greatest) patterns first.
        self._configs.sort(key=lambda cfg: cfg['path'], reverse=True)

    def _trigger_handler(self, path, event):
        """Invoke the handler of the first config matching path's directory."""
        watch_dir = os.path.dirname(path)
        for config in self._configs:
            if not fnmatch.fnmatch(watch_dir, config['path']):
                continue
            registered = config['events']
            if event not in registered:
                continue
            handler = registered[event]
            if callable(handler):
                handler(path)
            # First matching config wins; stop dispatching.
            return

    def _on_created(self, path):
        """Handles path created events from the directory watcher."""
        self._trigger_handler(path, dirwatch_base.DirWatcherEvent.CREATED)

    def _on_deleted(self, path):
        """Handles path deleted events from the directory watcher."""
        self._trigger_handler(path, dirwatch_base.DirWatcherEvent.DELETED)

    def _on_modified(self, path):
        """Handles path modified events from the directory watcher."""
        self._trigger_handler(path, dirwatch_base.DirWatcherEvent.MODIFIED)
|
import csv
import math
import re

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

import vecIO
def process_clue(clue):
    """Return the mean 300-d embedding of the words in *clue*.

    The clue is split on single spaces; each word's vector (looked up
    via ``vecIO.get_vector``) is summed and the sum averaged over the
    word count.
    """
    words = clue.split(' ')
    total = np.zeros(300, dtype=np.float32)
    for word in words:
        total = total + vecIO.get_vector(word)
    return total / len(words)
class Model(nn.Module):
    """One linear layer mapping a 300-d clue vector to a 300-d
    softmax distribution."""

    def __init__(self):
        super(Model, self).__init__()
        self.fc1 = nn.Linear(300, 300)

    def forward(self, x):
        # BUG FIX: softmax without an explicit `dim` is deprecated and
        # guesses the axis; dim=-1 normalises over the feature axis for
        # both single vectors and batches.
        return F.softmax(self.fc1(x), dim=-1)
def custom_loss_fn(process_phrase, answer):
    """Return 1 minus the L2 norm of the element-wise (Hadamard)
    product of the two vectors.

    BUG FIX: the original called a bare, undefined ``dot`` and raised
    NameError; ``np.dot(h, h)`` (the squared L2 norm) is the intended
    computation.
    """
    hadamard = process_phrase * answer
    return 1 - math.sqrt(np.dot(hadamard, hadamard))
loss_fn = nn.MSELoss()
clueProcessor = Model()
optimizer = optim.SGD(clueProcessor.parameters(), lr=0.01, momentum=0.9)

# Load pretrained word vectors.
vecIO.load_vectors('../wiki-news-300d-1M.vec')

# Train on (clue, answer) rows from the CSV, stopping after 500 rows.
with open('../clues2.csv', 'r') as training_file:
    reader = csv.reader(training_file)
    i = 0
    for row in reader:
        # Strip punctuation and lowercase both columns.
        clue = re.sub(r'[^\w\s]', '', row[0].lower())  # clue STRING
        ans = vecIO.get_vector(re.sub(r'[^\w\s]', '', row[1].lower()))  # answer VECTOR

        # BUG FIX: the model's Linear layer needs a 300-d tensor, not
        # the raw clue string -- embed the clue first. Likewise wrap
        # the answer so MSELoss compares tensor with tensor.
        # (assumes vecIO.get_vector returns a numpy-compatible array,
        # as process_clue's arithmetic implies -- TODO confirm)
        clue_vec = torch.from_numpy(np.asarray(process_clue(clue), dtype=np.float32))
        pred = clueProcessor(clue_vec)

        loss = loss_fn(pred, torch.from_numpy(np.asarray(ans, dtype=np.float32)))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Report the loss every 10 rows.
        if i % 10 == 0:
            print(loss.item())
        if i == 500:
            break
        i += 1
"""
Robotframework-SauceLabs
"""
from pathlib import Path
from setuptools import setup
LIBRARY_NAME = "SauceLabs"
CWD = Path(__file__).parent
readme_file = CWD / "README.md"
# Get the long description from the README file
with readme_file.open(encoding="utf-8") as f:
long_description = f.read()
CLASSIFIERS = """
Development Status :: 3 - Alpha
Topic :: Software Development :: Testing
Operating System :: OS Independent
License :: OSI Approved :: Apache Software License
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Topic :: Software Development :: Testing
Framework :: Robot Framework
Framework :: Robot Framework :: Library
""".strip().splitlines()
setup(
name="robotframework-{}".format(LIBRARY_NAME.lower()),
version="0.2.1",
description=" A Library for Working with Sauce Labs ",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/joshgrant/robotframework-saucelabs",
author="Josh Grant",
author_email="joshua.m.grant@gmail.com",
license="MIT",
classifiers=CLASSIFIERS,
keywords="robot framework testing automation selenium seleniumlibrary"
"testability async javascript softwaretesting",
platforms="any",
packages=[LIBRARY_NAME],
package_dir={"": "src"},
) |
from django.contrib import admin
from .models import Comment, Question,Comment
class QuestionAdmin(admin.ModelAdmin):
    """Admin configuration for the Question model."""

    readonly_fields = ("publish", "update")
    list_display = ("author", "score", "publish")
    list_filter = ("author", "score")
    fieldsets = (
        (None, {
            "fields": ("author", "slug", "publish", "update", "tags"),
        }),
        ("Info", {
            "classes": ("collapse",),
            "fields": ("score", "title", "body", "liked_by", "disliked_by"),
        }),
    )
class CommentAdmin(admin.ModelAdmin):
    """Admin configuration for the Comment model."""

    readonly_fields = ("update", "publish")
    list_display = ("question", "author", "publish", "update")
    list_filter = ("question", "author", "publish")
    search_fields = ("content", "pk")
    # BUG FIX: ModelAdmin does not read an inner ``class Meta``;
    # ``ordering`` must be an attribute of the admin class itself.
    ordering = ("publish",)
    fieldsets = (
        (None, {"fields": ("question", "publish", "update")}),
        # BUG FIX: "classes" must be a list/tuple of CSS classes; a
        # bare string is iterated character by character.
        ("Info", {
            "fields": ("score", "content", "liked_by", "disliked_by"),
            "classes": ("collapse",),
        }),
    )
# Attach the admin customisations defined above to the default admin site.
admin.site.register(Question,QuestionAdmin)
admin.site.register(Comment,CommentAdmin)
|
import os
from flask import Flask, render_template, request, flash, url_for, safe_join
from flask import send_from_directory
from werkzeug.utils import secure_filename, redirect
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from glob import glob
import librosa as lr
import librosa.display
app = Flask(__name__)

UPLOAD_FOLDER = 'static/uploads'
RESULT_FOLDER = 'static/results'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
ALLOWED_EXTENSIONS = {'wav', 'mp3'}


@app.route('/main')
def main():
    """Render the landing page."""
    return render_template('main.html')


def allowed_file(filename):
    """Return True if *filename* has an allowed audio extension."""
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS


@app.route('/upload')
def upload_form():
    """Render the file-upload form."""
    return render_template('upload.html')


@app.route('/uploader', methods=['GET', 'POST'])
def upload_file():
    """Accept an uploaded audio file and redirect to the transform view."""
    if request.method == 'POST':
        if 'file' not in request.files:
            flash('No file part')
            return 'wrong format'
        file = request.files['file']
        if file and allowed_file(file.filename):
            # BUG FIX: safe_join() with a single argument does not
            # sanitise the name at all; secure_filename() strips path
            # separators and other dangerous characters from the
            # user-supplied filename.
            filename = secure_filename(file.filename)
            file.save(os.path.join(UPLOAD_FOLDER, filename))
            return redirect(url_for('audio_transform', filename=filename))
    return 'wrong format, file not uploaded'


@app.route('/uploads/<filename>')
def uploaded_file(filename):
    """Serve a previously uploaded file."""
    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)


@app.route('/audio_transform/<filename>')
def audio_transform(filename):
    """Compute and display a power spectrogram of the uploaded audio."""
    audio_data = os.path.join(UPLOAD_FOLDER, secure_filename(filename))
    x, sr = lr.load(audio_data)
    D = np.abs(lr.stft(x))
    lr.display.specshow(lr.amplitude_to_db(D, ref=np.max), y_axis='log', x_axis='time')
    plt.title("Power spectogram")
    plt.colorbar(format='%+2.0f dB')
    plt.tight_layout()
    # NOTE(review): plt.show() blocks the request thread and requires a
    # GUI backend; for a web app, saving the figure (see below) and
    # serving it is the better option -- confirm intent.
    plt.show()
    # plt.savefig(os.path.join(RESULT_FOLDER, secure_filename(filename)))
    return 'Done'


# @app.route('/show_image')
# def show_image():
#     return '<img src = os.path.join(RESULT_FOLDER, filename)>'


if __name__ == '__main__':
    # BUG FIX: this guard originally appeared BEFORE the route
    # definitions, so app.run() started the dev server with no views
    # registered. It must come last.
    app.run(debug=True)
from follow import follow
def grep(pattern, lines):
    """Yield the lines from *lines* that contain *pattern* (substring match)."""
    yield from (candidate for candidate in lines if pattern in candidate)
def main():
    """Tail the system log and print lines that mention 'Event'."""
    # BUG FIX: the log file handle was never closed; a with-block
    # guarantees cleanup even if follow()/iteration raises.
    with open('/var/log/system.log') as systemlog:
        loglines = follow(systemlog)
        eventlines = grep('Event', loglines)
        for line in eventlines:
            print(line)


if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.